diff --git a/Documentation/ABI/testing/sysfs-bus-dfl b/Documentation/ABI/testing/sysfs-bus-dfl
new file mode 100644
index 0000000000000..cd00abcda4427
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dfl
@@ -0,0 +1,15 @@
+What:		/sys/bus/dfl/devices/.../type
+Date:		March 2020
+KernelVersion:	5.7
+Contact:	Xu Yilun
+Description:	Read-only. It returns the type of DFL FIU of the device. DFL
+		currently supports two FIU types: 0 for FME, 1 for PORT.
+		Format: 0x%x
+
+What:		/sys/bus/dfl/devices/.../feature_id
+Date:		March 2020
+KernelVersion:	5.7
+Contact:	Xu Yilun
+Description:	Read-only. It returns the feature identifier local to its DFL
+		FIU type.
+		Format: 0x%llx
diff --git a/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios b/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios
new file mode 100644
index 0000000000000..f9e00e9087350
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-dfl-devices-n3000-nios
@@ -0,0 +1,16 @@
+What:		/sys/bus/dfl/devices/dfl-fme.X.X/fec_mode
+Date:		March 2020
+KernelVersion:	5.8
+Contact:	Xu Yilun
+Description:	Read-only. It returns the FEC mode of the ethernet retimer
+		configured by the NIOS firmware. "rs" for RS FEC mode, "kr"
+		for KR FEC mode, "no" for NO FEC mode.
+		Format: string
+
+What:		/sys/bus/dfl/devices/dfl-fme.X.X/nios_fw_version
+Date:		March 2020
+KernelVersion:	5.8
+Contact:	Xu Yilun
+Description:	Read-only. It returns the version of the NIOS firmware in the
+		FPGA. Its format is "major.minor.patch".
+		Format: %x.%x.%x
diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme
new file mode 100644
index 0000000000000..c9278a3b3df16
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-dfl_fme
@@ -0,0 +1,104 @@
+What:		/sys/bus/event_source/devices/dfl_fmeX/format
+Date:		April 2020
+KernelVersion:	5.8
+Contact:	Wu Hao
+Description:	Read-only. Attribute group to describe the magic bits
+		that go into perf_event_attr.config for a particular pmu.
+		(See ABI/testing/sysfs-bus-event_source-devices-format).
+
+		Each attribute under this group defines a bit range of the
+		perf_event_attr.config. All supported attributes are listed
+		below.
+
+		event  = "config:0-11"  - event ID
+		evtype = "config:12-15" - event type
+		portid = "config:16-23" - event source
+
+		For example,
+
+		fab_mmio_read = "event=0x06,evtype=0x02,portid=0xff"
+
+		This shows that fab_mmio_read is a fabric type (0x02) event
+		with local event id 0x06, for overall monitoring
+		(portid=0xff).
+
+What:		/sys/bus/event_source/devices/dfl_fmeX/cpumask
+Date:		April 2020
+KernelVersion:	5.8
+Contact:	Wu Hao
+Description:	Read-only. This file always returns the CPU to which the PMU
+		is bound, which is used for access to all fme pmu performance
+		monitoring events.
+
+What:		/sys/bus/event_source/devices/dfl_fmeX/events
+Date:		April 2020
+KernelVersion:	5.8
+Contact:	Wu Hao
+Description:	Read-only. Attribute group to describe performance monitoring
+		events specific to fme. Each attribute in this group describes
+		a single performance monitoring event supported by this fme
+		pmu. The name of the file is the name of the event.
+		(See ABI/testing/sysfs-bus-event_source-devices-events).
+
+		All supported performance monitoring events are listed below.
+ + Basic events (evtype=0x00) + + clock = "event=0x00,evtype=0x00,portid=0xff" + + Cache events (evtype=0x01) + + cache_read_hit = "event=0x00,evtype=0x01,portid=0xff" + cache_read_miss = "event=0x01,evtype=0x01,portid=0xff" + cache_write_hit = "event=0x02,evtype=0x01,portid=0xff" + cache_write_miss = "event=0x03,evtype=0x01,portid=0xff" + cache_hold_request = "event=0x05,evtype=0x01,portid=0xff" + cache_data_write_port_contention = + "event=0x06,evtype=0x01,portid=0xff" + cache_tag_write_port_contention = + "event=0x07,evtype=0x01,portid=0xff" + cache_tx_req_stall = "event=0x08,evtype=0x01,portid=0xff" + cache_rx_req_stall = "event=0x09,evtype=0x01,portid=0xff" + cache_eviction = "event=0x0a,evtype=0x01,portid=0xff" + + Fabric events (evtype=0x02) + + fab_pcie0_read = "event=0x00,evtype=0x02,portid=0xff" + fab_pcie0_write = "event=0x01,evtype=0x02,portid=0xff" + fab_pcie1_read = "event=0x02,evtype=0x02,portid=0xff" + fab_pcie1_write = "event=0x03,evtype=0x02,portid=0xff" + fab_upi_read = "event=0x04,evtype=0x02,portid=0xff" + fab_upi_write = "event=0x05,evtype=0x02,portid=0xff" + fab_mmio_read = "event=0x06,evtype=0x02,portid=0xff" + fab_mmio_write = "event=0x07,evtype=0x02,portid=0xff" + fab_port_pcie0_read = "event=0x00,evtype=0x02,portid=?" + fab_port_pcie0_write = "event=0x01,evtype=0x02,portid=?" + fab_port_pcie1_read = "event=0x02,evtype=0x02,portid=?" + fab_port_pcie1_write = "event=0x03,evtype=0x02,portid=?" + fab_port_upi_read = "event=0x04,evtype=0x02,portid=?" + fab_port_upi_write = "event=0x05,evtype=0x02,portid=?" + fab_port_mmio_read = "event=0x06,evtype=0x02,portid=?" + fab_port_mmio_write = "event=0x07,evtype=0x02,portid=?" + + VTD events (evtype=0x03) + + vtd_port_read_transaction = "event=0x00,evtype=0x03,portid=?" + vtd_port_write_transaction = "event=0x01,evtype=0x03,portid=?" + vtd_port_devtlb_read_hit = "event=0x02,evtype=0x03,portid=?" + vtd_port_devtlb_write_hit = "event=0x03,evtype=0x03,portid=?" + vtd_port_devtlb_4k_fill = "event=0x04,evtype=0x03,portid=?" + vtd_port_devtlb_2m_fill = "event=0x05,evtype=0x03,portid=?" + vtd_port_devtlb_1g_fill = "event=0x06,evtype=0x03,portid=?" + + VTD SIP events (evtype=0x04) + + vtd_sip_iotlb_4k_hit = "event=0x00,evtype=0x04,portid=0xff" + vtd_sip_iotlb_2m_hit = "event=0x01,evtype=0x04,portid=0xff" + vtd_sip_iotlb_1g_hit = "event=0x02,evtype=0x04,portid=0xff" + vtd_sip_slpwc_l3_hit = "event=0x03,evtype=0x04,portid=0xff" + vtd_sip_slpwc_l4_hit = "event=0x04,evtype=0x04,portid=0xff" + vtd_sip_rcc_hit = "event=0x05,evtype=0x04,portid=0xff" + vtd_sip_iotlb_4k_miss = "event=0x06,evtype=0x04,portid=0xff" + vtd_sip_iotlb_2m_miss = "event=0x07,evtype=0x04,portid=0xff" + vtd_sip_iotlb_1g_miss = "event=0x08,evtype=0x04,portid=0xff" + vtd_sip_slpwc_l3_miss = "event=0x09,evtype=0x04,portid=0xff" + vtd_sip_slpwc_l4_miss = "event=0x0a,evtype=0x04,portid=0xff" + vtd_sip_rcc_miss = "event=0x0b,evtype=0x04,portid=0xff" diff --git a/Documentation/ABI/testing/sysfs-class-ifpga-sec-mgr b/Documentation/ABI/testing/sysfs-class-ifpga-sec-mgr new file mode 100644 index 0000000000000..893273c3a4ac6 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-ifpga-sec-mgr @@ -0,0 +1,131 @@ +What: /sys/class/ifpga_sec_mgr/ifpga_secX/security/sr_root_entry_hash +Date: June 2020 +KernelVersion: 5.8 +Contact: Russ Weight +Description: Read only. Returns the root entry hash for the static + region if one is programmed, else it returns the + string: "hash not programmed". This file is only + visible if the underlying device supports it. + Format: "0x%x". 
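The hash and count attributes above are plain sysfs reads. For reference, a minimal userspace sketch (not part of this patch; the device index ifpga_sec0 is a placeholder)::

	#include <stdio.h>

	/* print the static-region root entry hash, e.g. "0x123..."
	 * or the string "hash not programmed" */
	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/class/ifpga_sec_mgr/ifpga_sec0"
				"/security/sr_root_entry_hash", "r");

		if (!f)
			return 1;
		if (fgets(line, sizeof(line), f))
			printf("SR root entry hash: %s", line);
		fclose(f);
		return 0;
	}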
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/pr_root_entry_hash
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the root entry hash for the partial
+		reconfiguration region if one is programmed, else it
+		returns the string: "hash not programmed". This file
+		is only visible if the underlying device supports it.
+		Format: "0x%x".
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/bmc_root_entry_hash
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the root entry hash for the BMC image
+		if one is programmed, else it returns the string:
+		"hash not programmed". This file is only visible if the
+		underlying device supports it.
+		Format: "0x%x".
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/sr_canceled_csks
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns a list of indices for canceled code
+		signing keys for the static region. The standard bitmap
+		list format is used (e.g. "1,2-6,9").
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/pr_canceled_csks
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns a list of indices for canceled code
+		signing keys for the partial reconfiguration region. The
+		standard bitmap list format is used (e.g. "1,2-6,9").
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/bmc_canceled_csks
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns a list of indices for canceled code
+		signing keys for the BMC. The standard bitmap list format
+		is used (e.g. "1,2-6,9").
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/user_flash_count
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the number of times the user image for
+		the static region has been flashed.
+		Format: "%d".
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/security/bmc_flash_count
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the number of times the BMC image has
+		been flashed.
+		Format: "%d".
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/filename
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Write only. Write the filename of an Intel image
+		file to this sysfs file to initiate a secure
+		update. The file must have an appropriate header
+		which, among other things, identifies the target
+		for the update. This mechanism is used to update
+		BMC images, BMC firmware, Static Region images,
+		and Root Entry Hashes, and to cancel Code Signing
+		Keys (CSK).
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/cancel
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Write-only. Write a "1" to this file to request
+		that a current update be canceled. This request
+		will be rejected (EBUSY) if the programming phase
+		has already started.
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/status
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read-only. Returns a string describing the current
+		status of an update. The string will be one of the
+		following: idle, preparing, writing, programming.
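A typical secure update writes an image filename to the "filename" attribute and then polls "status" until the update engine returns to idle. A minimal userspace sketch (not part of this patch; the device index ifpga_sec0 and the image name are placeholders, and it assumes the driver resolves the filename through the kernel firmware loader search path)::

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	#define UPDATE "/sys/class/ifpga_sec_mgr/ifpga_sec0/update/"

	int main(void)
	{
		char status[32] = "";
		FILE *f = fopen(UPDATE "filename", "w");

		if (!f)
			return 1;
		/* hypothetical image name; kicks off the secure update */
		fprintf(f, "intel-image.bin");
		fclose(f);

		do {	/* poll until the update engine is idle again */
			sleep(1);
			f = fopen(UPDATE "status", "r");
			if (!f || !fgets(status, sizeof(status), f))
				return 1;
			fclose(f);
		} while (strncmp(status, "idle", 4) != 0);

		return 0;
	}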
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/remaining_size
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read-only. Returns the size of data that remains to
+		be written to the secure update engine. The size
+		value is initialized to the full size of the file
+		image and the value is updated periodically during
+		the "writing" phase of the update.
+		Format: "%u".
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/error
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read-only. Returns an error string describing the
+		failure of a secure update. This string will be one
+		of the following: operation-error, crc-error,
+		auth-error, timeout-error, user-abort, device-busy,
+		invalid_size.
+
+What:		/sys/class/ifpga_sec_mgr/ifpga_secX/update/hw_errinfo
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read-only. Returns a 64-bit error value providing
+		hardware-specific information that may be useful in
+		debugging errors that occur during FPGA image updates.
+		This file is only visible if the underlying device
+		supports it.
+		Format: "0x%llx".
diff --git a/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
new file mode 100644
index 0000000000000..1a34839ceb20f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-intel-m10-bmc
@@ -0,0 +1,19 @@
+What:		/sys/bus/spi/devices/.../mac_address
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the base MAC address assigned to
+		the board managed by the Intel MAX10 BMC. It is
+		stored in flash and is mirrored in the MAX10 BMC
+		register space.
+		Format: "%02x:%02x:%02x:%02x:%02x:%02x".
+
+What:		/sys/bus/spi/devices/.../mac_count
+Date:		June 2020
+KernelVersion:	5.8
+Contact:	Russ Weight
+Description:	Read only. Returns the number of MAC addresses
+		assigned to the board managed by the Intel MAX10
+		BMC. This value is stored in flash and is mirrored
+		in the MAX10 BMC register space.
+		Format: "%u".
diff --git a/Documentation/fpga/dfl.rst b/Documentation/fpga/dfl.rst
index 094fc8aacd8e5..367ffef5fd467 100644
--- a/Documentation/fpga/dfl.rst
+++ b/Documentation/fpga/dfl.rst
@@ -89,6 +89,8 @@ The following functions are exposed through ioctls:
 - Program bitstream (DFL_FPGA_FME_PORT_PR)
 - Assign port to PF (DFL_FPGA_FME_PORT_ASSIGN)
 - Release port from PF (DFL_FPGA_FME_PORT_RELEASE)
+- Get number of irqs of FME global error (DFL_FPGA_FME_ERR_GET_IRQ_NUM)
+- Set interrupt trigger for FME error (DFL_FPGA_FME_ERR_SET_IRQ)
 
 More functions are exposed through sysfs
 (/sys/class/fpga_region/regionX/dfl-fme.n/):
@@ -118,6 +120,11 @@ More functions are exposed through sysfs
      management information (current temperature, thresholds, threshold status,
      etc.).
 
+ Performance reporting
+     performance counters are exposed through perf PMU APIs. The standard perf
+     tool can be used to monitor all available perf events. Please see the
+     performance counter section below for more detailed information.
+
 
 FIU - PORT
 ==========
@@ -144,6 +151,10 @@ The following functions are exposed through ioctls:
 - Map DMA buffer (DFL_FPGA_PORT_DMA_MAP)
 - Unmap DMA buffer (DFL_FPGA_PORT_DMA_UNMAP)
 - Reset AFU (DFL_FPGA_PORT_RESET)
+- Get number of irqs of port error (DFL_FPGA_PORT_ERR_GET_IRQ_NUM)
+- Set interrupt trigger for port error (DFL_FPGA_PORT_ERR_SET_IRQ)
+- Get number of irqs of UINT (DFL_FPGA_PORT_UINT_GET_IRQ_NUM)
+- Set interrupt trigger for UINT (DFL_FPGA_PORT_UINT_SET_IRQ)
 
 DFL_FPGA_PORT_RESET: reset the FPGA Port and its AFU. 
Userspace can do Port
@@ -378,6 +389,97 @@
 The device nodes used for ioctl() or mmap() can be referenced through::
 
 	/sys/class/fpga_region///dev
 
+Performance Counters
+====================
+Performance reporting is one private feature implemented in FME. It supports
+several independent, system-wide, device counter sets in hardware to monitor
+and count performance events, including "basic", "cache", "fabric", "vtd" and
+"vtd_sip" counters. Users can use the standard perf tool to monitor FPGA cache
+hit/miss rate, transaction number, interface clock counter of AFU and other
+FPGA performance events.
+
+Different FPGA devices may have different counter sets, depending on hardware
+implementation, e.g. some discrete FPGA cards don't have any cache. Users can
+use "perf list" to check which perf events are supported by the target
+hardware.
+
+In order to allow users to use the standard perf API to access these
+performance counters, the driver creates a perf PMU and related sysfs
+interfaces in /sys/bus/event_source/devices/dfl_fme* to describe available
+perf events and configuration options.
+
+The "format" directory describes the format of the config field of struct
+perf_event_attr. There are 3 bitfields for config: "evtype" defines which type
+the perf event belongs to; "event" is the identity of the event within its
+category; "portid" decides whether the counter set monitors overall FPGA data
+or a specific port.
+
+The "events" directory describes the configuration templates for all available
+events which can be used with the perf tool directly. For example,
+fab_mmio_read has the configuration "event=0x06,evtype=0x02,portid=0xff",
+which shows that this event belongs to the fabric type (0x02), the local event
+id is 0x06 and it is for overall monitoring (portid=0xff).
+
+Example usage of perf::
+
+  $# perf list |grep dfl_fme
+
+  dfl_fme0/fab_mmio_read/                        [Kernel PMU event]
+  <...>
+  dfl_fme0/fab_port_mmio_read,portid=?/          [Kernel PMU event]
+  <...>
+
+  $# perf stat -a -e dfl_fme0/fab_mmio_read/
+  or
+  $# perf stat -a -e dfl_fme0/event=0x06,evtype=0x02,portid=0xff/
+  or
+  $# perf stat -a -e dfl_fme0/config=0xff2006/
+
+Another example: fab_port_mmio_read monitors mmio reads of a specific port, so
+its configuration template is "event=0x06,evtype=0x02,portid=?". The portid
+must be explicitly set.
+
+Its usage of perf::
+
+  $# perf stat -a -e dfl_fme0/fab_port_mmio_read,portid=0x0/
+  or
+  $# perf stat -a -e dfl_fme0/event=0x06,evtype=0x02,portid=0x0/
+  or
+  $# perf stat -a -e dfl_fme0/config=0x2006/
+
+Please note that for fabric counters, the overall perf events (fab_*) and port
+perf events (fab_port_*) actually share one set of counters in hardware, so
+they can't be monitored at the same time. If this set of counters is
+configured to monitor overall data, then per-port perf data is not supported.
+See the example below::
+
+  $# perf stat -e dfl_fme0/fab_mmio_read/,dfl_fme0/fab_port_mmio_write,\
+                                                            portid=0/ sleep 1
+
+  Performance counter stats for 'system wide':
+
+                 3      dfl_fme0/fab_mmio_read/
+   <not supported>      dfl_fme0/fab_port_mmio_write,portid=0x0/
+
+       1.001750904 seconds time elapsed
+
+The driver also provides a "cpumask" sysfs attribute, which contains only one
+CPU id used to access these perf events. Counting on multiple CPUs is not
+allowed since they are system-wide counters on the FPGA device.
+
+The current driver does not support sampling, so "perf record" is unsupported.
+
+
+Interrupt support
+=================
+Some FME and AFU private features are able to generate interrupts. As
+mentioned above, users can call ioctl (DFL_FPGA_*_GET_IRQ_NUM) to know whether
+and how many interrupts are supported for this private feature. The driver
+also implements an eventfd-based interrupt handling mechanism for users to get
+notified when an interrupt happens. Users can pass eventfds to the driver via
+ioctl (DFL_FPGA_*_SET_IRQ), and then poll/select on these eventfds to wait for
+notification.
+In the current DFL implementation, 3 sub-features (Port error, FME global
+error and AFU interrupt) support interrupts.
+
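For reference, a minimal userspace sketch of the eventfd flow for the Port error feature (not part of this patch; it assumes the UAPI added by this series in <linux/fpga-dfl.h>, and most error handling is omitted)::

	#include <poll.h>
	#include <stdlib.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <linux/fpga-dfl.h>

	/* Arm an eventfd as the trigger for port-error irq 0, then wait. */
	int wait_port_error(int port_fd)
	{
		struct dfl_fpga_irq_set *irq_set;
		struct pollfd pfd;
		__u32 num_irqs;
		int evtfd, ret;

		if (ioctl(port_fd, DFL_FPGA_PORT_ERR_GET_IRQ_NUM, &num_irqs) ||
		    !num_irqs)
			return -1;

		evtfd = eventfd(0, 0);
		irq_set = malloc(sizeof(*irq_set) + sizeof(__s32));
		irq_set->argsz = sizeof(*irq_set) + sizeof(__s32);
		irq_set->flags = 0;
		irq_set->start = 0;		/* first irq of this feature */
		irq_set->count = 1;
		irq_set->evtfds[0] = evtfd;
		ret = ioctl(port_fd, DFL_FPGA_PORT_ERR_SET_IRQ, irq_set);
		free(irq_set);
		if (ret)
			return -1;

		pfd.fd = evtfd;
		pfd.events = POLLIN;
		return poll(&pfd, 1, -1);	/* wakes when the irq fires */
	}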
 Add new FIUs support
 ====================
 It's possible that developers made some new function blocks (FIUs) under this
diff --git a/MAINTAINERS b/MAINTAINERS
index e64e5db314976..bb838d0b01926 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6679,6 +6679,14 @@ F:	Documentation/fpga/
 F:	drivers/fpga/
 F:	include/linux/fpga/
 
+INTEL FPGA SECURITY MANAGER DRIVERS
+M:	Russ Weight
+L:	linux-fpga@vger.kernel.org
+S:	Maintained
+F:	Documentation/ABI/testing/sysfs-class-ifpga-sec-mgr
+F:	drivers/fpga/ifpga-sec-mgr.c
+F:	include/linux/fpga/ifpga-sec-mgr.h
+
 FPU EMULATOR
 M:	Bill Metzenthen
 S:	Maintained
@@ -8775,6 +8783,13 @@ F:	drivers/firmware/stratix10-svc.c
 F:	include/linux/firmware/intel/stratix10-smc.h
 F:	include/linux/firmware/intel/stratix10-svc-client.h
 
+INTEL STRATIX10 PHY DRIVER
+M:	Russ Weight
+L:	linux-fpga@vger.kernel.org
+S:	Maintained
+F:	drivers/net/phy/intel-s10-phy.c
+F:	include/linux/phy/intel-s10-phy.h
+
 INTEL TELEMETRY DRIVER
 M:	Rajneesh Bhardwaj
 M:	"David E. Box"
diff --git a/configs/n3000_d5005_defconfig b/configs/n3000_d5005_defconfig
new file mode 100644
index 0000000000000..6da7ffacae58c
--- /dev/null
+++ b/configs/n3000_d5005_defconfig
@@ -0,0 +1,28 @@
+CONFIG_FPGA_DFL=m
+CONFIG_FPGA_DFL_FME=m
+CONFIG_FPGA_DFL_FME_MGR=m
+CONFIG_FPGA_DFL_FME_BRIDGE=m
+CONFIG_FPGA_DFL_FME_REGION=m
+CONFIG_FPGA_DFL_AFU=m
+CONFIG_FPGA_DFL_N3000_NIOS=m
+CONFIG_FPGA_DFL_SPI_ALTERA=m
+CONFIG_FPGA_DFL_PCI=m
+CONFIG_FPGA_DFL_HSSI=m
+
+CONFIG_FPGA=m
+CONFIG_FPGA_BRIDGE=m
+CONFIG_FPGA_REGION=m
+
+CONFIG_SENSORS_INTEL_M10_BMC_HWMON=m
+CONFIG_MFD_INTEL_M10_BMC=m
+CONFIG_MFD_INTEL_M10_BMC_SECURE=m
+
+CONFIG_SPI_ALTERA=m
+
+CONFIG_FPGA_DFL_ETH_GROUP=m
+CONFIG_INTEL_S10_PHY=m
+CONFIG_INTEL_LL_10G_MAC=m
+
+CONFIG_REGMAP_MMIO=m
+
+CONFIG_IFPGA_SEC_MGR=m
diff --git a/configs/readme.txt b/configs/readme.txt
new file mode 100644
index 0000000000000..285d09021633e
--- /dev/null
+++ b/configs/readme.txt
@@ -0,0 +1,16 @@
+This directory contains Linux kernel configuration fragments related to
+the Device Feature List (DFL) driver collection. By design the DFL driver
+collection is extendable, and it is anticipated that new drivers will be added
+to the collection.
+
+The fragments are intended to be appended to a base kernel configuration.
+For example, the following commands would configure the kernel source to
+support the Intel N3000 and D5005 PCIe cards:
+
+  # cd kernel_source_directory
+  # cp /boot/config-`uname -r` .config
+  # cat configs/n3000_d5005_defconfig >> .config
+  # make olddefconfig
+
+n3000_d5005_defconfig
+  Default configuration for the Intel N3000 and D5005 PCIe cards.
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 72380e1d31c7f..745c187eb213d 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -156,7 +156,7 @@ config FPGA_DFL
 config FPGA_DFL_FME
 	tristate "FPGA DFL FME Driver"
-	depends on FPGA_DFL && HWMON
+	depends on FPGA_DFL && HWMON && PERF_EVENTS
 	help
 	  The FPGA Management Engine (FME) is a feature device implemented
 	  under Device Feature List (DFL) framework. 
Select this option to
@@ -191,6 +191,35 @@ config FPGA_DFL_AFU
 	  to the FPGA infrastructure via a Port. There may be more than one
 	  Port/AFU per DFL based FPGA device.
 
+config FPGA_DFL_N3000_NIOS
+	tristate "FPGA DFL N3000 NIOS Driver"
+	depends on FPGA_DFL
+	select REGMAP
+	help
+	  This is the driver for the NIOS handshake private feature on the
+	  Intel N3000 FPGA Card. This private feature provides a handshake
+	  interface to the FPGA NIOS firmware, which receives retimer
+	  configuration commands from the host and executes them via an
+	  internal SPI master. When the NIOS has finished the configuration,
+	  the host takes over ownership of the SPI master to control an
+	  Intel MAX10 BMC chip on the SPI bus.
+
+config FPGA_DFL_SPI_ALTERA
+	tristate "FPGA DFL Altera SPI Master Driver"
+	depends on FPGA_DFL
+	select REGMAP
+	help
+	  This is a DFL bus driver for the Altera SPI master controller.
+	  The SPI master is connected to a SPI slave to Avalon Master
+	  bridge in an Intel MAX BMC.
+
+config FPGA_DFL_HSSI
+	tristate "FPGA DFL HSSI Driver"
+	depends on FPGA_DFL
+	help
+	  This is the HSSI Ethernet driver for the Intel Stratix 10 FPGA.
+	  This driver provides the ability to view and change some of the
+	  transceiver tuning parameters.
+
 config FPGA_DFL_PCI
 	tristate "FPGA DFL PCIe Device Driver"
 	depends on PCI && FPGA_DFL
@@ -215,4 +244,13 @@ config FPGA_MGR_ZYNQMP_FPGA
 	  to configure the programmable logic(PL) through PS
 	  on ZynqMP SoC.
 
+config IFPGA_SEC_MGR
+	tristate "Intel Security Manager for FPGA"
+	help
+	  The Intel Security Manager class driver presents a common
+	  user API for managing secure updates for Intel FPGA
+	  devices, including flash images for the FPGA static
+	  region and for the BMC. Select this option to enable
+	  updates for secure FPGA devices.
+
 endif # FPGA
diff --git a/drivers/fpga/Makefile b/drivers/fpga/Makefile
index 4865b74b00a4b..a0dc26fee6ba7 100644
--- a/drivers/fpga/Makefile
+++ b/drivers/fpga/Makefile
@@ -6,6 +6,9 @@
 # Core FPGA Manager Framework
 obj-$(CONFIG_FPGA)			+= fpga-mgr.o
 
+# Intel FPGA Security Manager Framework
+obj-$(CONFIG_IFPGA_SEC_MGR)		+= ifpga-sec-mgr.o
+
 # FPGA Manager Drivers
 obj-$(CONFIG_FPGA_MGR_ALTERA_CVP)	+= altera-cvp.o
 obj-$(CONFIG_FPGA_MGR_ALTERA_PS_SPI)	+= altera-ps-spi.o
@@ -40,8 +43,15 @@ obj-$(CONFIG_FPGA_DFL_FME_REGION)	+= dfl-fme-region.o
 obj-$(CONFIG_FPGA_DFL_AFU)		+= dfl-afu.o
 
 dfl-fme-objs := dfl-fme-main.o dfl-fme-pr.o dfl-fme-error.o
+dfl-fme-objs += dfl-fme-perf.o
 dfl-afu-objs := dfl-afu-main.o dfl-afu-region.o dfl-afu-dma-region.o
 dfl-afu-objs += dfl-afu-error.o
 
+obj-$(CONFIG_FPGA_DFL_N3000_NIOS)	+= dfl-n3000-nios.o
+
+obj-$(CONFIG_FPGA_DFL_SPI_ALTERA)	+= dfl-spi-altera.o
+
 # Drivers for FPGAs which implement DFL
 obj-$(CONFIG_FPGA_DFL_PCI)		+= dfl-pci.o
+obj-$(CONFIG_FPGA_DFL_HSSI)		+= dfl-hssi.o
diff --git a/drivers/fpga/dfl-afu-error.c b/drivers/fpga/dfl-afu-error.c
index c1467ae1a6b63..facbd7b8a3ae3 100644
--- a/drivers/fpga/dfl-afu-error.c
+++ b/drivers/fpga/dfl-afu-error.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
 
 #include "dfl-afu.h"
 
@@ -219,6 +220,26 @@ static void port_err_uinit(struct platform_device *pdev,
 	afu_port_err_mask(&pdev->dev, true);
 }
 
+static long
+port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+	       unsigned int cmd, unsigned long arg)
+{
+	long ret = -ENODEV;
+
+	switch (cmd) {
+	case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
+		ret = dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+		break;
+	case DFL_FPGA_PORT_ERR_SET_IRQ:
+		ret = dfl_feature_ioctl_set_irq(pdev, feature, arg);
+		break;
+	default:
+		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+	}
+
+	return ret;
+}
+
 const struct dfl_feature_id port_err_id_table[] = {
 	{.id = PORT_FEATURE_ID_ERROR,},
 	{0,}
@@ -227,4 +248,5 @@ const struct dfl_feature_id port_err_id_table[] = {
 const struct dfl_feature_ops port_err_ops = {
 	.init = port_err_init,
 	.uinit = port_err_uinit,
+	.ioctl = port_err_ioctl,
 };
diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c
index 65437b6a68424..d5e9d229ee9a5 100644
--- a/drivers/fpga/dfl-afu-main.c
+++ b/drivers/fpga/dfl-afu-main.c
@@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
 	 * on this port and minimum soft reset pulse width has elapsed.
 	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
 	 */
-	if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
+	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
+			       v & PORT_CTRL_SFTRST_ACK,
 			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
 		dev_err(&pdev->dev, "timeout, fail to reset device\n");
 		return -ETIMEDOUT;
@@ -529,6 +530,34 @@ static const struct dfl_feature_ops port_stp_ops = {
 	.init = port_stp_init,
 };
 
+static long
+port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret = -ENODEV;
+
+	switch (cmd) {
+	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
+		ret = dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
+		break;
+	case DFL_FPGA_PORT_UINT_SET_IRQ:
+		ret = dfl_feature_ioctl_set_irq(pdev, feature, arg);
+		break;
+	default:
+		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
+	}
+
+	return ret;
+}
+
+static const struct dfl_feature_id port_uint_id_table[] = {
+	{.id = PORT_FEATURE_ID_UINT,},
+	{0,}
+};
+
+static const struct dfl_feature_ops port_uint_ops = {
+	.ioctl = port_uint_ioctl,
+};
+
 static struct dfl_feature_driver port_feature_drvs[] = {
 	{
 		.id_table = port_hdr_id_table,
@@ -546,6 +575,10 @@ static struct dfl_feature_driver port_feature_drvs[] = {
 		.id_table = port_stp_id_table,
 		.ops = &port_stp_ops,
 	},
+	{
+		.id_table = port_uint_id_table,
+		.ops = &port_uint_ops,
+	},
 	{
 		.ops = NULL,
 	}
@@ -561,32 +594,40 @@ static int afu_open(struct inode *inode, struct file *filp)
 	if (WARN_ON(!pdata))
 		return -ENODEV;
 
-	ret = dfl_feature_dev_use_begin(pdata);
-	if (ret)
-		return ret;
-
-	dev_dbg(&fdev->dev, "Device File Open\n");
-	filp->private_data = fdev;
+	mutex_lock(&pdata->lock);
+	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
+	if (!ret) {
+		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
+			dfl_feature_dev_use_count(pdata));
+		filp->private_data = fdev;
+	}
+	mutex_unlock(&pdata->lock);
 
-	return 0;
+	return ret;
 }
 
 static int afu_release(struct inode *inode, struct file *filp)
 {
 	struct platform_device *pdev = filp->private_data;
 	struct dfl_feature_platform_data *pdata;
+	struct dfl_feature *feature;
 
 	dev_dbg(&pdev->dev, "Device File Release\n");
 
 	pdata = dev_get_platdata(&pdev->dev);
 
 	mutex_lock(&pdata->lock);
-	__port_reset(pdev);
-	afu_dma_region_destroy(pdata);
-	mutex_unlock(&pdata->lock);
+	dfl_feature_dev_use_end(pdata);
 
-	dfl_feature_dev_use_end(pdata);
+	if (!dfl_feature_dev_use_count(pdata)) {
+		dfl_fpga_dev_for_each_feature(pdata, feature)
+			dfl_fpga_set_irq_triggers(feature, 0,
+						  feature->nr_irqs, NULL);
+		__port_reset(pdev);
+		afu_dma_region_destroy(pdata);
+	}
+	mutex_unlock(&pdata->lock);
+
 	return 0;
 }
diff --git a/drivers/fpga/dfl-fme-error.c b/drivers/fpga/dfl-fme-error.c
index f897d414b923a..a4cbf8c514f32 100644
--- a/drivers/fpga/dfl-fme-error.c
+++ b/drivers/fpga/dfl-fme-error.c
@@ -16,6 +16,7 @@
  */
 
 #include <linux/uaccess.h>
+#include <linux/fpga-dfl.h>
 
 #include "dfl.h"
 #include "dfl-fme.h"
 
@@ -348,6 +349,27 
@@ static void fme_global_err_uinit(struct platform_device *pdev, fme_err_mask(&pdev->dev, true); } +static long +fme_global_error_ioctl(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned int cmd, unsigned long arg) +{ + long ret = -ENODEV; + + switch (cmd) { + case DFL_FPGA_FME_ERR_GET_IRQ_NUM: + ret = dfl_feature_ioctl_get_num_irqs(pdev, feature, arg); + break; + case DFL_FPGA_FME_ERR_SET_IRQ: + ret = dfl_feature_ioctl_set_irq(pdev, feature, arg); + break; + default: + dev_dbg(&pdev->dev, "%x cmd not handled", cmd); + } + + return ret; +} + const struct dfl_feature_id fme_global_err_id_table[] = { {.id = FME_FEATURE_ID_GLOBAL_ERR,}, {0,} @@ -356,4 +378,5 @@ const struct dfl_feature_id fme_global_err_id_table[] = { const struct dfl_feature_ops fme_global_err_ops = { .init = fme_global_err_init, .uinit = fme_global_err_uinit, + .ioctl = fme_global_error_ioctl, }; diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index 1d4690c99268c..77ea04d4edbef 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -579,6 +579,10 @@ static struct dfl_feature_driver fme_feature_drvs[] = { .id_table = fme_power_mgmt_id_table, .ops = &fme_power_mgmt_ops, }, + { + .id_table = fme_perf_id_table, + .ops = &fme_perf_ops, + }, { .ops = NULL, }, @@ -600,24 +604,35 @@ static int fme_open(struct inode *inode, struct file *filp) if (WARN_ON(!pdata)) return -ENODEV; - ret = dfl_feature_dev_use_begin(pdata); - if (ret) - return ret; - - dev_dbg(&fdev->dev, "Device File Open\n"); - filp->private_data = pdata; + mutex_lock(&pdata->lock); + ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL); + if (!ret) { + dev_dbg(&fdev->dev, "Device File Opened %d Times\n", + dfl_feature_dev_use_count(pdata)); + filp->private_data = pdata; + } + mutex_unlock(&pdata->lock); - return 0; + return ret; } static int fme_release(struct inode *inode, struct file *filp) { struct dfl_feature_platform_data *pdata = filp->private_data; struct platform_device *pdev = pdata->dev; + struct dfl_feature *feature; dev_dbg(&pdev->dev, "Device File Release\n"); + + mutex_lock(&pdata->lock); dfl_feature_dev_use_end(pdata); + if (!dfl_feature_dev_use_count(pdata)) + dfl_fpga_dev_for_each_feature(pdata, feature) + dfl_fpga_set_irq_triggers(feature, 0, + feature->nr_irqs, NULL); + mutex_unlock(&pdata->lock); + return 0; } diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c index b3f7eee3c93f6..46e17f0496660 100644 --- a/drivers/fpga/dfl-fme-mgr.c +++ b/drivers/fpga/dfl-fme-mgr.c @@ -22,6 +22,7 @@ #include #include +#include "dfl.h" #include "dfl-fme-pr.h" /* FME Partial Reconfiguration Sub Feature Register Set */ @@ -30,6 +31,7 @@ #define FME_PR_STS 0x10 #define FME_PR_DATA 0x18 #define FME_PR_ERR 0x20 +#define FME_PR_512_DATA 0x40 /* Data Register for 512bit datawidth PR */ #define FME_PR_INTFC_ID_L 0xA8 #define FME_PR_INTFC_ID_H 0xB0 @@ -67,8 +69,43 @@ #define PR_WAIT_TIMEOUT 8000000 #define PR_HOST_STATUS_IDLE 0 +#if defined(CONFIG_X86) && defined(CONFIG_AS_AVX512) + +#include +#include + +static inline int is_cpu_avx512_enabled(void) +{ + return cpu_feature_enabled(X86_FEATURE_AVX512F); +} + +static inline void copy512(const void *src, void __iomem *dst) +{ + kernel_fpu_begin(); + + asm volatile("vmovdqu64 (%0), %%zmm0;" + "vmovntdq %%zmm0, (%1);" + : + : "r"(src), "r"(dst) + : "memory"); + + kernel_fpu_end(); +} +#else +static inline int is_cpu_avx512_enabled(void) +{ + return 0; +} + +static inline void copy512(const void *src, void __iomem *dst) +{ + 
WARN_ON_ONCE(1); +} +#endif + struct fme_mgr_priv { void __iomem *ioaddr; + unsigned int pr_datawidth; u64 pr_error; }; @@ -169,7 +206,7 @@ static int fme_mgr_write(struct fpga_manager *mgr, struct fme_mgr_priv *priv = mgr->priv; void __iomem *fme_pr = priv->ioaddr; u64 pr_ctrl, pr_status, pr_data; - int delay = 0, pr_credit, i = 0; + int ret = 0, delay = 0, pr_credit; dev_dbg(dev, "start request\n"); @@ -181,9 +218,9 @@ static int fme_mgr_write(struct fpga_manager *mgr, /* * driver can push data to PR hardware using PR_DATA register once HW - * has enough pr_credit (> 1), pr_credit reduces one for every 32bit - * pr data write to PR_DATA register. If pr_credit <= 1, driver needs - * to wait for enough pr_credit from hardware by polling. + * has enough pr_credit (> 1), pr_credit reduces one for every pr data + * width write to PR_DATA register. If pr_credit <= 1, driver needs to + * wait for enough pr_credit from hardware by polling. */ pr_status = readq(fme_pr + FME_PR_STS); pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status); @@ -192,7 +229,8 @@ static int fme_mgr_write(struct fpga_manager *mgr, while (pr_credit <= 1) { if (delay++ > PR_WAIT_TIMEOUT) { dev_err(dev, "PR_CREDIT timeout\n"); - return -ETIMEDOUT; + ret = -ETIMEDOUT; + goto done; } udelay(1); @@ -200,21 +238,27 @@ static int fme_mgr_write(struct fpga_manager *mgr, pr_credit = FIELD_GET(FME_PR_STS_PR_CREDIT, pr_status); } - if (count < 4) { - dev_err(dev, "Invalid PR bitstream size\n"); - return -EINVAL; + WARN_ON(count < priv->pr_datawidth); + + switch (priv->pr_datawidth) { + case 4: + pr_data = FIELD_PREP(FME_PR_DATA_PR_DATA_RAW, + *(u32 *)buf); + writeq(pr_data, fme_pr + FME_PR_DATA); + break; + case 64: + copy512(buf, fme_pr + FME_PR_512_DATA); + break; + default: + WARN_ON_ONCE(1); } - - pr_data = 0; - pr_data |= FIELD_PREP(FME_PR_DATA_PR_DATA_RAW, - *(((u32 *)buf) + i)); - writeq(pr_data, fme_pr + FME_PR_DATA); - count -= 4; + buf += priv->pr_datawidth; + count -= priv->pr_datawidth; pr_credit--; - i++; } - return 0; +done: + return ret; } static int fme_mgr_write_complete(struct fpga_manager *mgr, @@ -279,6 +323,36 @@ static void fme_mgr_get_compat_id(void __iomem *fme_pr, id->id_h = readq(fme_pr + FME_PR_INTFC_ID_H); } +static u8 fme_mgr_get_pr_datawidth(struct device *dev, void __iomem *fme_pr) +{ + u8 revision = dfl_feature_revision(fme_pr); + + if (revision < 2) { + /* + * revision 0 and 1 only support 32bit data width partial + * reconfiguration, so pr_datawidth is 4 (Byte). + */ + return 4; + } else if (revision == 2) { + /* + * revision 2 hardware has optimization to support 512bit data + * width partial reconfiguration with AVX512 instructions. So + * pr_datawidth is 64 (Byte). As revision 2 hardware is only + * used in integrated solution, CPU supports AVX512 instructions + * for sure, but it still needs to check here as AVX512 could be + * disabled in kernel (e.g. using clearcpuid boot option). 
+ */ + if (is_cpu_avx512_enabled()) + return 64; + + dev_err(dev, "revision 2: AVX512 is disabled\n"); + return 0; + } + + dev_err(dev, "revision %d is not supported yet\n", revision); + return 0; +} + static int fme_mgr_probe(struct platform_device *pdev) { struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev); @@ -302,6 +376,10 @@ static int fme_mgr_probe(struct platform_device *pdev) return PTR_ERR(priv->ioaddr); } + priv->pr_datawidth = fme_mgr_get_pr_datawidth(dev, priv->ioaddr); + if (!priv->pr_datawidth) + return -ENODEV; + compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL); if (!compat_id) return -ENOMEM; diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c new file mode 100644 index 0000000000000..b2ce5bfcacd20 --- /dev/null +++ b/drivers/fpga/dfl-fme-perf.c @@ -0,0 +1,1019 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for FPGA Management Engine (FME) Global Performance Reporting + * + * Copyright 2019 Intel Corporation, Inc. + * + * Authors: + * Kang Luwei + * Xiao Guangrong + * Wu Hao + * Xu Yilun + * Joseph Grecco + * Enno Luebbers + * Tim Whisonant + * Ananda Ravuri + * Mitchel, Henry + */ + +#include +#include "dfl.h" +#include "dfl-fme.h" + +/* + * Performance Counter Registers for Cache. + * + * Cache Events are listed below as CACHE_EVNT_*. + */ +#define CACHE_CTRL 0x8 +#define CACHE_RESET_CNTR BIT_ULL(0) +#define CACHE_FREEZE_CNTR BIT_ULL(8) +#define CACHE_CTRL_EVNT GENMASK_ULL(19, 16) +#define CACHE_EVNT_RD_HIT 0x0 +#define CACHE_EVNT_WR_HIT 0x1 +#define CACHE_EVNT_RD_MISS 0x2 +#define CACHE_EVNT_WR_MISS 0x3 +#define CACHE_EVNT_RSVD 0x4 +#define CACHE_EVNT_HOLD_REQ 0x5 +#define CACHE_EVNT_DATA_WR_PORT_CONTEN 0x6 +#define CACHE_EVNT_TAG_WR_PORT_CONTEN 0x7 +#define CACHE_EVNT_TX_REQ_STALL 0x8 +#define CACHE_EVNT_RX_REQ_STALL 0x9 +#define CACHE_EVNT_EVICTIONS 0xa +#define CACHE_EVNT_MAX CACHE_EVNT_EVICTIONS +#define CACHE_CHANNEL_SEL BIT_ULL(20) +#define CACHE_CHANNEL_RD 0 +#define CACHE_CHANNEL_WR 1 +#define CACHE_CNTR0 0x10 +#define CACHE_CNTR1 0x18 +#define CACHE_CNTR_EVNT_CNTR GENMASK_ULL(47, 0) +#define CACHE_CNTR_EVNT GENMASK_ULL(63, 60) + +/* + * Performance Counter Registers for Fabric. + * + * Fabric Events are listed below as FAB_EVNT_* + */ +#define FAB_CTRL 0x20 +#define FAB_RESET_CNTR BIT_ULL(0) +#define FAB_FREEZE_CNTR BIT_ULL(8) +#define FAB_CTRL_EVNT GENMASK_ULL(19, 16) +#define FAB_EVNT_PCIE0_RD 0x0 +#define FAB_EVNT_PCIE0_WR 0x1 +#define FAB_EVNT_PCIE1_RD 0x2 +#define FAB_EVNT_PCIE1_WR 0x3 +#define FAB_EVNT_UPI_RD 0x4 +#define FAB_EVNT_UPI_WR 0x5 +#define FAB_EVNT_MMIO_RD 0x6 +#define FAB_EVNT_MMIO_WR 0x7 +#define FAB_EVNT_MAX FAB_EVNT_MMIO_WR +#define FAB_PORT_ID GENMASK_ULL(21, 20) +#define FAB_PORT_FILTER BIT_ULL(23) +#define FAB_PORT_FILTER_DISABLE 0 +#define FAB_PORT_FILTER_ENABLE 1 +#define FAB_CNTR 0x28 +#define FAB_CNTR_EVNT_CNTR GENMASK_ULL(59, 0) +#define FAB_CNTR_EVNT GENMASK_ULL(63, 60) + +/* + * Performance Counter Registers for Clock. + * + * Clock Counter can't be reset or frozen by SW. + */ +#define CLK_CNTR 0x30 +#define BASIC_EVNT_CLK 0x0 +#define BASIC_EVNT_MAX BASIC_EVNT_CLK + +/* + * Performance Counter Registers for IOMMU / VT-D. 
+ *
+ * VT-D Events are listed below as VTD_EVNT_* and VTD_SIP_EVNT_*
+ */
+#define VTD_CTRL			0x38
+#define VTD_RESET_CNTR			BIT_ULL(0)
+#define VTD_FREEZE_CNTR			BIT_ULL(8)
+#define VTD_CTRL_EVNT			GENMASK_ULL(19, 16)
+#define VTD_EVNT_AFU_MEM_RD_TRANS	0x0
+#define VTD_EVNT_AFU_MEM_WR_TRANS	0x1
+#define VTD_EVNT_AFU_DEVTLB_RD_HIT	0x2
+#define VTD_EVNT_AFU_DEVTLB_WR_HIT	0x3
+#define VTD_EVNT_DEVTLB_4K_FILL		0x4
+#define VTD_EVNT_DEVTLB_2M_FILL		0x5
+#define VTD_EVNT_DEVTLB_1G_FILL		0x6
+#define VTD_EVNT_MAX			VTD_EVNT_DEVTLB_1G_FILL
+#define VTD_CNTR			0x40
+#define VTD_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
+#define VTD_CNTR_EVNT			GENMASK_ULL(63, 60)
+
+#define VTD_SIP_CTRL			0x48
+#define VTD_SIP_RESET_CNTR		BIT_ULL(0)
+#define VTD_SIP_FREEZE_CNTR		BIT_ULL(8)
+#define VTD_SIP_CTRL_EVNT		GENMASK_ULL(19, 16)
+#define VTD_SIP_EVNT_IOTLB_4K_HIT	0x0
+#define VTD_SIP_EVNT_IOTLB_2M_HIT	0x1
+#define VTD_SIP_EVNT_IOTLB_1G_HIT	0x2
+#define VTD_SIP_EVNT_SLPWC_L3_HIT	0x3
+#define VTD_SIP_EVNT_SLPWC_L4_HIT	0x4
+#define VTD_SIP_EVNT_RCC_HIT		0x5
+#define VTD_SIP_EVNT_IOTLB_4K_MISS	0x6
+#define VTD_SIP_EVNT_IOTLB_2M_MISS	0x7
+#define VTD_SIP_EVNT_IOTLB_1G_MISS	0x8
+#define VTD_SIP_EVNT_SLPWC_L3_MISS	0x9
+#define VTD_SIP_EVNT_SLPWC_L4_MISS	0xa
+#define VTD_SIP_EVNT_RCC_MISS		0xb
+/* all VTD SIP events up to and including rcc_miss are valid */
+#define VTD_SIP_EVNT_MAX		VTD_SIP_EVNT_RCC_MISS
+#define VTD_SIP_CNTR			0x50
+#define VTD_SIP_CNTR_EVNT_CNTR		GENMASK_ULL(47, 0)
+#define VTD_SIP_CNTR_EVNT		GENMASK_ULL(63, 60)
+
+#define PERF_TIMEOUT			30
+
+#define PERF_MAX_PORT_NUM		1U
+
+/**
+ * struct fme_perf_priv - priv data structure for fme perf driver
+ *
+ * @dev: parent device.
+ * @ioaddr: mapped base address of mmio region.
+ * @pmu: pmu data structure for fme perf counters.
+ * @id: id of this fme performance report private feature.
+ * @fab_users: current user number on fabric counters.
+ * @fab_port_id: used to indicate current working mode of fabric counters.
+ * @fab_lock: lock to protect fabric counters working mode.
+ * @cpu: active CPU to which the PMU is bound for accesses.
+ * @node: node for CPU hotplug notifier link.
+ * @cpuhp_state: state for CPU hotplug notification.
+ */
+struct fme_perf_priv {
+	struct device *dev;
+	void __iomem *ioaddr;
+	struct pmu pmu;
+	u64 id;
+
+	u32 fab_users;
+	u32 fab_port_id;
+	spinlock_t fab_lock;
+
+	unsigned int cpu;
+	struct hlist_node node;
+	enum cpuhp_state cpuhp_state;
+};
+
+/**
+ * struct fme_perf_event_ops - callbacks for fme perf events
+ *
+ * @event_init: callback invoked during event init.
+ * @event_destroy: callback invoked during event destroy.
+ * @read_counter: callback to read hardware counters.
+ */ +struct fme_perf_event_ops { + int (*event_init)(struct fme_perf_priv *priv, u32 event, u32 portid); + void (*event_destroy)(struct fme_perf_priv *priv, u32 event, + u32 portid); + u64 (*read_counter)(struct fme_perf_priv *priv, u32 event, u32 portid); +}; + +#define to_fme_perf_priv(_pmu) container_of(_pmu, struct fme_perf_priv, pmu) + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct pmu *pmu = dev_get_drvdata(dev); + struct fme_perf_priv *priv; + + priv = to_fme_perf_priv(pmu); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(priv->cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *fme_perf_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group fme_perf_cpumask_group = { + .attrs = fme_perf_cpumask_attrs, +}; + +#define FME_EVENT_MASK GENMASK_ULL(11, 0) +#define FME_EVENT_SHIFT 0 +#define FME_EVTYPE_MASK GENMASK_ULL(15, 12) +#define FME_EVTYPE_SHIFT 12 +#define FME_EVTYPE_BASIC 0 +#define FME_EVTYPE_CACHE 1 +#define FME_EVTYPE_FABRIC 2 +#define FME_EVTYPE_VTD 3 +#define FME_EVTYPE_VTD_SIP 4 +#define FME_EVTYPE_MAX FME_EVTYPE_VTD_SIP +#define FME_PORTID_MASK GENMASK_ULL(23, 16) +#define FME_PORTID_SHIFT 16 +#define FME_PORTID_ROOT (0xffU) + +#define get_event(_config) FIELD_GET(FME_EVENT_MASK, _config) +#define get_evtype(_config) FIELD_GET(FME_EVTYPE_MASK, _config) +#define get_portid(_config) FIELD_GET(FME_PORTID_MASK, _config) + +PMU_FORMAT_ATTR(event, "config:0-11"); +PMU_FORMAT_ATTR(evtype, "config:12-15"); +PMU_FORMAT_ATTR(portid, "config:16-23"); + +static struct attribute *fme_perf_format_attrs[] = { + &format_attr_event.attr, + &format_attr_evtype.attr, + &format_attr_portid.attr, + NULL, +}; + +static struct attribute_group fme_perf_format_group = { + .name = "format", + .attrs = fme_perf_format_attrs, +}; + +/* + * There are no default events, but we need to create + * "events" group (with empty attrs) before updating + * it with detected events (using pmu->attr_update). + */ +static struct attribute *fme_perf_events_attrs_empty[] = { + NULL, +}; + +static struct attribute_group fme_perf_events_group = { + .name = "events", + .attrs = fme_perf_events_attrs_empty, +}; + +static const struct attribute_group *fme_perf_groups[] = { + &fme_perf_format_group, + &fme_perf_cpumask_group, + &fme_perf_events_group, + NULL, +}; + +static bool is_portid_root(u32 portid) +{ + return portid == FME_PORTID_ROOT; +} + +static bool is_portid_port(u32 portid) +{ + return portid < PERF_MAX_PORT_NUM; +} + +static bool is_portid_root_or_port(u32 portid) +{ + return is_portid_root(portid) || is_portid_port(portid); +} + +static u64 fme_read_perf_cntr_reg(void __iomem *addr) +{ + u32 low; + u64 v; + + /* + * For 64bit counter registers, the counter may increases and carries + * out of bit [31] between 2 32bit reads. So add extra reads to help + * to prevent this issue. This only happens in platforms which don't + * support 64bit read - readq is split into 2 readl. 
+ */ + do { + v = readq(addr); + low = readl(addr); + } while (((u32)v) > low); + + return v; +} + +static int basic_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) +{ + if (event <= BASIC_EVNT_MAX && is_portid_root(portid)) + return 0; + + return -EINVAL; +} + +static u64 basic_read_event_counter(struct fme_perf_priv *priv, + u32 event, u32 portid) +{ + void __iomem *base = priv->ioaddr; + + return fme_read_perf_cntr_reg(base + CLK_CNTR); +} + +static int cache_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) +{ + if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF && + event <= CACHE_EVNT_MAX && is_portid_root(portid)) + return 0; + + return -EINVAL; +} + +static u64 cache_read_event_counter(struct fme_perf_priv *priv, + u32 event, u32 portid) +{ + void __iomem *base = priv->ioaddr; + u64 v, count; + u8 channel; + + if (event == CACHE_EVNT_WR_HIT || event == CACHE_EVNT_WR_MISS || + event == CACHE_EVNT_DATA_WR_PORT_CONTEN || + event == CACHE_EVNT_TAG_WR_PORT_CONTEN) + channel = CACHE_CHANNEL_WR; + else + channel = CACHE_CHANNEL_RD; + + /* set channel access type and cache event code. */ + v = readq(base + CACHE_CTRL); + v &= ~(CACHE_CHANNEL_SEL | CACHE_CTRL_EVNT); + v |= FIELD_PREP(CACHE_CHANNEL_SEL, channel); + v |= FIELD_PREP(CACHE_CTRL_EVNT, event); + writeq(v, base + CACHE_CTRL); + + if (readq_poll_timeout_atomic(base + CACHE_CNTR0, v, + FIELD_GET(CACHE_CNTR_EVNT, v) == event, + 1, PERF_TIMEOUT)) { + dev_err(priv->dev, "timeout, unmatched cache event code in counter register.\n"); + return 0; + } + + v = fme_read_perf_cntr_reg(base + CACHE_CNTR0); + count = FIELD_GET(CACHE_CNTR_EVNT_CNTR, v); + v = fme_read_perf_cntr_reg(base + CACHE_CNTR1); + count += FIELD_GET(CACHE_CNTR_EVNT_CNTR, v); + + return count; +} + +static bool is_fabric_event_supported(struct fme_perf_priv *priv, u32 event, + u32 portid) +{ + if (event > FAB_EVNT_MAX || !is_portid_root_or_port(portid)) + return false; + + if (priv->id == FME_FEATURE_ID_GLOBAL_DPERF && + (event == FAB_EVNT_PCIE1_RD || event == FAB_EVNT_UPI_RD || + event == FAB_EVNT_PCIE1_WR || event == FAB_EVNT_UPI_WR)) + return false; + + return true; +} + +static int fabric_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) +{ + void __iomem *base = priv->ioaddr; + int ret = 0; + u64 v; + + if (!is_fabric_event_supported(priv, event, portid)) + return -EINVAL; + + /* + * as fabric counter set only can be in either overall or port mode. + * In overall mode, it counts overall data for FPGA, and in port mode, + * it is configured to monitor on one individual port. + * + * so every time, a new event is initialized, driver checks + * current working mode and if someone is using this counter set. + */ + spin_lock(&priv->fab_lock); + if (priv->fab_users && priv->fab_port_id != portid) { + dev_dbg(priv->dev, "conflict fabric event monitoring mode.\n"); + ret = -EOPNOTSUPP; + goto exit; + } + + priv->fab_users++; + + /* + * skip if current working mode matches, otherwise change the working + * mode per input port_id, to monitor overall data or another port. 
+ */ + if (priv->fab_port_id == portid) + goto exit; + + priv->fab_port_id = portid; + + v = readq(base + FAB_CTRL); + v &= ~(FAB_PORT_FILTER | FAB_PORT_ID); + + if (is_portid_root(portid)) { + v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_DISABLE); + } else { + v |= FIELD_PREP(FAB_PORT_FILTER, FAB_PORT_FILTER_ENABLE); + v |= FIELD_PREP(FAB_PORT_ID, portid); + } + writeq(v, base + FAB_CTRL); + +exit: + spin_unlock(&priv->fab_lock); + return ret; +} + +static void fabric_event_destroy(struct fme_perf_priv *priv, u32 event, + u32 portid) +{ + spin_lock(&priv->fab_lock); + priv->fab_users--; + spin_unlock(&priv->fab_lock); +} + +static u64 fabric_read_event_counter(struct fme_perf_priv *priv, u32 event, + u32 portid) +{ + void __iomem *base = priv->ioaddr; + u64 v; + + v = readq(base + FAB_CTRL); + v &= ~FAB_CTRL_EVNT; + v |= FIELD_PREP(FAB_CTRL_EVNT, event); + writeq(v, base + FAB_CTRL); + + if (readq_poll_timeout_atomic(base + FAB_CNTR, v, + FIELD_GET(FAB_CNTR_EVNT, v) == event, + 1, PERF_TIMEOUT)) { + dev_err(priv->dev, "timeout, unmatched fab event code in counter register.\n"); + return 0; + } + + v = fme_read_perf_cntr_reg(base + FAB_CNTR); + return FIELD_GET(FAB_CNTR_EVNT_CNTR, v); +} + +static int vtd_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) +{ + if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF && + event <= VTD_EVNT_MAX && is_portid_port(portid)) + return 0; + + return -EINVAL; +} + +static u64 vtd_read_event_counter(struct fme_perf_priv *priv, u32 event, + u32 portid) +{ + void __iomem *base = priv->ioaddr; + u64 v; + + event += (portid * (VTD_EVNT_MAX + 1)); + + v = readq(base + VTD_CTRL); + v &= ~VTD_CTRL_EVNT; + v |= FIELD_PREP(VTD_CTRL_EVNT, event); + writeq(v, base + VTD_CTRL); + + if (readq_poll_timeout_atomic(base + VTD_CNTR, v, + FIELD_GET(VTD_CNTR_EVNT, v) == event, + 1, PERF_TIMEOUT)) { + dev_err(priv->dev, "timeout, unmatched vtd event code in counter register.\n"); + return 0; + } + + v = fme_read_perf_cntr_reg(base + VTD_CNTR); + return FIELD_GET(VTD_CNTR_EVNT_CNTR, v); +} + +static int vtd_sip_event_init(struct fme_perf_priv *priv, u32 event, u32 portid) +{ + if (priv->id == FME_FEATURE_ID_GLOBAL_IPERF && + event <= VTD_SIP_EVNT_MAX && is_portid_root(portid)) + return 0; + + return -EINVAL; +} + +static u64 vtd_sip_read_event_counter(struct fme_perf_priv *priv, u32 event, + u32 portid) +{ + void __iomem *base = priv->ioaddr; + u64 v; + + v = readq(base + VTD_SIP_CTRL); + v &= ~VTD_SIP_CTRL_EVNT; + v |= FIELD_PREP(VTD_SIP_CTRL_EVNT, event); + writeq(v, base + VTD_SIP_CTRL); + + if (readq_poll_timeout_atomic(base + VTD_SIP_CNTR, v, + FIELD_GET(VTD_SIP_CNTR_EVNT, v) == event, + 1, PERF_TIMEOUT)) { + dev_err(priv->dev, "timeout, unmatched vtd sip event code in counter register\n"); + return 0; + } + + v = fme_read_perf_cntr_reg(base + VTD_SIP_CNTR); + return FIELD_GET(VTD_SIP_CNTR_EVNT_CNTR, v); +} + +static struct fme_perf_event_ops fme_perf_event_ops[] = { + [FME_EVTYPE_BASIC] = {.event_init = basic_event_init, + .read_counter = basic_read_event_counter,}, + [FME_EVTYPE_CACHE] = {.event_init = cache_event_init, + .read_counter = cache_read_event_counter,}, + [FME_EVTYPE_FABRIC] = {.event_init = fabric_event_init, + .event_destroy = fabric_event_destroy, + .read_counter = fabric_read_event_counter,}, + [FME_EVTYPE_VTD] = {.event_init = vtd_event_init, + .read_counter = vtd_read_event_counter,}, + [FME_EVTYPE_VTD_SIP] = {.event_init = vtd_sip_event_init, + .read_counter = vtd_sip_read_event_counter,}, +}; + +static ssize_t fme_perf_event_show(struct 
device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + unsigned long config; + char *ptr = buf; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + config = (unsigned long)eattr->var; + + ptr += sprintf(ptr, "event=0x%02x", (unsigned int)get_event(config)); + ptr += sprintf(ptr, ",evtype=0x%02x", (unsigned int)get_evtype(config)); + + if (is_portid_root(get_portid(config))) + ptr += sprintf(ptr, ",portid=0x%02x\n", FME_PORTID_ROOT); + else + ptr += sprintf(ptr, ",portid=?\n"); + + return (ssize_t)(ptr - buf); +} + +#define FME_EVENT_ATTR(_name) \ + __ATTR(_name, 0444, fme_perf_event_show, NULL) + +#define FME_PORT_EVENT_CONFIG(_event, _type) \ + (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \ + (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK)) + +#define FME_EVENT_CONFIG(_event, _type) \ + (void *)((((_event) << FME_EVENT_SHIFT) & FME_EVENT_MASK) | \ + (((_type) << FME_EVTYPE_SHIFT) & FME_EVTYPE_MASK) | \ + (FME_PORTID_ROOT << FME_PORTID_SHIFT)) + +/* FME Perf Basic Events */ +#define FME_EVENT_BASIC(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_##_name = { \ + .attr = FME_EVENT_ATTR(_name), \ + .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_BASIC), \ +} + +FME_EVENT_BASIC(clock, BASIC_EVNT_CLK); + +static struct attribute *fme_perf_basic_events_attrs[] = { + &fme_perf_event_clock.attr.attr, + NULL, +}; + +static const struct attribute_group fme_perf_basic_events_group = { + .name = "events", + .attrs = fme_perf_basic_events_attrs, +}; + +/* FME Perf Cache Events */ +#define FME_EVENT_CACHE(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_cache_##_name = { \ + .attr = FME_EVENT_ATTR(cache_##_name), \ + .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_CACHE), \ +} + +FME_EVENT_CACHE(read_hit, CACHE_EVNT_RD_HIT); +FME_EVENT_CACHE(read_miss, CACHE_EVNT_RD_MISS); +FME_EVENT_CACHE(write_hit, CACHE_EVNT_WR_HIT); +FME_EVENT_CACHE(write_miss, CACHE_EVNT_WR_MISS); +FME_EVENT_CACHE(hold_request, CACHE_EVNT_HOLD_REQ); +FME_EVENT_CACHE(tx_req_stall, CACHE_EVNT_TX_REQ_STALL); +FME_EVENT_CACHE(rx_req_stall, CACHE_EVNT_RX_REQ_STALL); +FME_EVENT_CACHE(eviction, CACHE_EVNT_EVICTIONS); +FME_EVENT_CACHE(data_write_port_contention, CACHE_EVNT_DATA_WR_PORT_CONTEN); +FME_EVENT_CACHE(tag_write_port_contention, CACHE_EVNT_TAG_WR_PORT_CONTEN); + +static struct attribute *fme_perf_cache_events_attrs[] = { + &fme_perf_event_cache_read_hit.attr.attr, + &fme_perf_event_cache_read_miss.attr.attr, + &fme_perf_event_cache_write_hit.attr.attr, + &fme_perf_event_cache_write_miss.attr.attr, + &fme_perf_event_cache_hold_request.attr.attr, + &fme_perf_event_cache_tx_req_stall.attr.attr, + &fme_perf_event_cache_rx_req_stall.attr.attr, + &fme_perf_event_cache_eviction.attr.attr, + &fme_perf_event_cache_data_write_port_contention.attr.attr, + &fme_perf_event_cache_tag_write_port_contention.attr.attr, + NULL, +}; + +static umode_t fme_perf_events_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj)); + struct fme_perf_priv *priv = to_fme_perf_priv(pmu); + + return (priv->id == FME_FEATURE_ID_GLOBAL_IPERF) ? 
attr->mode : 0; +} + +static const struct attribute_group fme_perf_cache_events_group = { + .name = "events", + .attrs = fme_perf_cache_events_attrs, + .is_visible = fme_perf_events_visible, +}; + +/* FME Perf Fabric Events */ +#define FME_EVENT_FABRIC(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_fab_##_name = { \ + .attr = FME_EVENT_ATTR(fab_##_name), \ + .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \ +} + +#define FME_EVENT_FABRIC_PORT(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_fab_port_##_name = { \ + .attr = FME_EVENT_ATTR(fab_port_##_name), \ + .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_FABRIC), \ +} + +FME_EVENT_FABRIC(pcie0_read, FAB_EVNT_PCIE0_RD); +FME_EVENT_FABRIC(pcie0_write, FAB_EVNT_PCIE0_WR); +FME_EVENT_FABRIC(pcie1_read, FAB_EVNT_PCIE1_RD); +FME_EVENT_FABRIC(pcie1_write, FAB_EVNT_PCIE1_WR); +FME_EVENT_FABRIC(upi_read, FAB_EVNT_UPI_RD); +FME_EVENT_FABRIC(upi_write, FAB_EVNT_UPI_WR); +FME_EVENT_FABRIC(mmio_read, FAB_EVNT_MMIO_RD); +FME_EVENT_FABRIC(mmio_write, FAB_EVNT_MMIO_WR); + +FME_EVENT_FABRIC_PORT(pcie0_read, FAB_EVNT_PCIE0_RD); +FME_EVENT_FABRIC_PORT(pcie0_write, FAB_EVNT_PCIE0_WR); +FME_EVENT_FABRIC_PORT(pcie1_read, FAB_EVNT_PCIE1_RD); +FME_EVENT_FABRIC_PORT(pcie1_write, FAB_EVNT_PCIE1_WR); +FME_EVENT_FABRIC_PORT(upi_read, FAB_EVNT_UPI_RD); +FME_EVENT_FABRIC_PORT(upi_write, FAB_EVNT_UPI_WR); +FME_EVENT_FABRIC_PORT(mmio_read, FAB_EVNT_MMIO_RD); +FME_EVENT_FABRIC_PORT(mmio_write, FAB_EVNT_MMIO_WR); + +static struct attribute *fme_perf_fabric_events_attrs[] = { + &fme_perf_event_fab_pcie0_read.attr.attr, + &fme_perf_event_fab_pcie0_write.attr.attr, + &fme_perf_event_fab_pcie1_read.attr.attr, + &fme_perf_event_fab_pcie1_write.attr.attr, + &fme_perf_event_fab_upi_read.attr.attr, + &fme_perf_event_fab_upi_write.attr.attr, + &fme_perf_event_fab_mmio_read.attr.attr, + &fme_perf_event_fab_mmio_write.attr.attr, + &fme_perf_event_fab_port_pcie0_read.attr.attr, + &fme_perf_event_fab_port_pcie0_write.attr.attr, + &fme_perf_event_fab_port_pcie1_read.attr.attr, + &fme_perf_event_fab_port_pcie1_write.attr.attr, + &fme_perf_event_fab_port_upi_read.attr.attr, + &fme_perf_event_fab_port_upi_write.attr.attr, + &fme_perf_event_fab_port_mmio_read.attr.attr, + &fme_perf_event_fab_port_mmio_write.attr.attr, + NULL, +}; + +static umode_t fme_perf_fabric_events_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct pmu *pmu = dev_get_drvdata(kobj_to_dev(kobj)); + struct fme_perf_priv *priv = to_fme_perf_priv(pmu); + struct dev_ext_attribute *eattr; + unsigned long var; + + eattr = container_of(attr, struct dev_ext_attribute, attr.attr); + var = (unsigned long)eattr->var; + + if (is_fabric_event_supported(priv, get_event(var), get_portid(var))) + return attr->mode; + + return 0; +} + +static const struct attribute_group fme_perf_fabric_events_group = { + .name = "events", + .attrs = fme_perf_fabric_events_attrs, + .is_visible = fme_perf_fabric_events_visible, +}; + +/* FME Perf VTD Events */ +#define FME_EVENT_VTD_PORT(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_vtd_port_##_name = { \ + .attr = FME_EVENT_ATTR(vtd_port_##_name), \ + .var = FME_PORT_EVENT_CONFIG(_event, FME_EVTYPE_VTD), \ +} + +FME_EVENT_VTD_PORT(read_transaction, VTD_EVNT_AFU_MEM_RD_TRANS); +FME_EVENT_VTD_PORT(write_transaction, VTD_EVNT_AFU_MEM_WR_TRANS); +FME_EVENT_VTD_PORT(devtlb_read_hit, VTD_EVNT_AFU_DEVTLB_RD_HIT); +FME_EVENT_VTD_PORT(devtlb_write_hit, VTD_EVNT_AFU_DEVTLB_WR_HIT); +FME_EVENT_VTD_PORT(devtlb_4k_fill, 
VTD_EVNT_DEVTLB_4K_FILL); +FME_EVENT_VTD_PORT(devtlb_2m_fill, VTD_EVNT_DEVTLB_2M_FILL); +FME_EVENT_VTD_PORT(devtlb_1g_fill, VTD_EVNT_DEVTLB_1G_FILL); + +static struct attribute *fme_perf_vtd_events_attrs[] = { + &fme_perf_event_vtd_port_read_transaction.attr.attr, + &fme_perf_event_vtd_port_write_transaction.attr.attr, + &fme_perf_event_vtd_port_devtlb_read_hit.attr.attr, + &fme_perf_event_vtd_port_devtlb_write_hit.attr.attr, + &fme_perf_event_vtd_port_devtlb_4k_fill.attr.attr, + &fme_perf_event_vtd_port_devtlb_2m_fill.attr.attr, + &fme_perf_event_vtd_port_devtlb_1g_fill.attr.attr, + NULL, +}; + +static const struct attribute_group fme_perf_vtd_events_group = { + .name = "events", + .attrs = fme_perf_vtd_events_attrs, + .is_visible = fme_perf_events_visible, +}; + +/* FME Perf VTD SIP Events */ +#define FME_EVENT_VTD_SIP(_name, _event) \ +static struct dev_ext_attribute fme_perf_event_vtd_sip_##_name = { \ + .attr = FME_EVENT_ATTR(vtd_sip_##_name), \ + .var = FME_EVENT_CONFIG(_event, FME_EVTYPE_VTD_SIP), \ +} + +FME_EVENT_VTD_SIP(iotlb_4k_hit, VTD_SIP_EVNT_IOTLB_4K_HIT); +FME_EVENT_VTD_SIP(iotlb_2m_hit, VTD_SIP_EVNT_IOTLB_2M_HIT); +FME_EVENT_VTD_SIP(iotlb_1g_hit, VTD_SIP_EVNT_IOTLB_1G_HIT); +FME_EVENT_VTD_SIP(slpwc_l3_hit, VTD_SIP_EVNT_SLPWC_L3_HIT); +FME_EVENT_VTD_SIP(slpwc_l4_hit, VTD_SIP_EVNT_SLPWC_L4_HIT); +FME_EVENT_VTD_SIP(rcc_hit, VTD_SIP_EVNT_RCC_HIT); +FME_EVENT_VTD_SIP(iotlb_4k_miss, VTD_SIP_EVNT_IOTLB_4K_MISS); +FME_EVENT_VTD_SIP(iotlb_2m_miss, VTD_SIP_EVNT_IOTLB_2M_MISS); +FME_EVENT_VTD_SIP(iotlb_1g_miss, VTD_SIP_EVNT_IOTLB_1G_MISS); +FME_EVENT_VTD_SIP(slpwc_l3_miss, VTD_SIP_EVNT_SLPWC_L3_MISS); +FME_EVENT_VTD_SIP(slpwc_l4_miss, VTD_SIP_EVNT_SLPWC_L4_MISS); +FME_EVENT_VTD_SIP(rcc_miss, VTD_SIP_EVNT_RCC_MISS); + +static struct attribute *fme_perf_vtd_sip_events_attrs[] = { + &fme_perf_event_vtd_sip_iotlb_4k_hit.attr.attr, + &fme_perf_event_vtd_sip_iotlb_2m_hit.attr.attr, + &fme_perf_event_vtd_sip_iotlb_1g_hit.attr.attr, + &fme_perf_event_vtd_sip_slpwc_l3_hit.attr.attr, + &fme_perf_event_vtd_sip_slpwc_l4_hit.attr.attr, + &fme_perf_event_vtd_sip_rcc_hit.attr.attr, + &fme_perf_event_vtd_sip_iotlb_4k_miss.attr.attr, + &fme_perf_event_vtd_sip_iotlb_2m_miss.attr.attr, + &fme_perf_event_vtd_sip_iotlb_1g_miss.attr.attr, + &fme_perf_event_vtd_sip_slpwc_l3_miss.attr.attr, + &fme_perf_event_vtd_sip_slpwc_l4_miss.attr.attr, + &fme_perf_event_vtd_sip_rcc_miss.attr.attr, + NULL, +}; + +static const struct attribute_group fme_perf_vtd_sip_events_group = { + .name = "events", + .attrs = fme_perf_vtd_sip_events_attrs, + .is_visible = fme_perf_events_visible, +}; + +static const struct attribute_group *fme_perf_events_groups[] = { + &fme_perf_basic_events_group, + &fme_perf_cache_events_group, + &fme_perf_fabric_events_group, + &fme_perf_vtd_events_group, + &fme_perf_vtd_sip_events_group, + NULL, +}; + +static struct fme_perf_event_ops *get_event_ops(u32 evtype) +{ + if (evtype > FME_EVTYPE_MAX) + return NULL; + + return &fme_perf_event_ops[evtype]; +} + +static void fme_perf_event_destroy(struct perf_event *event) +{ + struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base); + struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu); + + if (ops->event_destroy) + ops->event_destroy(priv, event->hw.idx, event->hw.config_base); +} + +static int fme_perf_event_init(struct perf_event *event) +{ + struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu); + struct hw_perf_event *hwc = &event->hw; + struct fme_perf_event_ops *ops; + u32 eventid, evtype, portid; + + /* test the event attr 
type check for PMU enumeration */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * fme counters are shared across all cores. + * Therefore, it does not support per-process mode. + * Also, it does not support event sampling mode. + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + if (event->cpu != priv->cpu) + return -EINVAL; + + eventid = get_event(event->attr.config); + portid = get_portid(event->attr.config); + evtype = get_evtype(event->attr.config); + if (evtype > FME_EVTYPE_MAX) + return -EINVAL; + + hwc->event_base = evtype; + hwc->idx = (int)eventid; + hwc->config_base = portid; + + event->destroy = fme_perf_event_destroy; + + dev_dbg(priv->dev, "%s event=0x%x, evtype=0x%x, portid=0x%x,\n", + __func__, eventid, evtype, portid); + + ops = get_event_ops(evtype); + if (ops->event_init) + return ops->event_init(priv, eventid, portid); + + return 0; +} + +static void fme_perf_event_update(struct perf_event *event) +{ + struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base); + struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 now, prev, delta; + + now = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base); + prev = local64_read(&hwc->prev_count); + delta = now - prev; + + local64_add(delta, &event->count); +} + +static void fme_perf_event_start(struct perf_event *event, int flags) +{ + struct fme_perf_event_ops *ops = get_event_ops(event->hw.event_base); + struct fme_perf_priv *priv = to_fme_perf_priv(event->pmu); + struct hw_perf_event *hwc = &event->hw; + u64 count; + + count = ops->read_counter(priv, (u32)hwc->idx, hwc->config_base); + local64_set(&hwc->prev_count, count); +} + +static void fme_perf_event_stop(struct perf_event *event, int flags) +{ + fme_perf_event_update(event); +} + +static int fme_perf_event_add(struct perf_event *event, int flags) +{ + if (flags & PERF_EF_START) + fme_perf_event_start(event, flags); + + return 0; +} + +static void fme_perf_event_del(struct perf_event *event, int flags) +{ + fme_perf_event_stop(event, PERF_EF_UPDATE); +} + +static void fme_perf_event_read(struct perf_event *event) +{ + fme_perf_event_update(event); +} + +static void fme_perf_setup_hardware(struct fme_perf_priv *priv) +{ + void __iomem *base = priv->ioaddr; + u64 v; + + /* read and save current working mode for fabric counters */ + v = readq(base + FAB_CTRL); + + if (FIELD_GET(FAB_PORT_FILTER, v) == FAB_PORT_FILTER_DISABLE) + priv->fab_port_id = FME_PORTID_ROOT; + else + priv->fab_port_id = FIELD_GET(FAB_PORT_ID, v); +} + +static int fme_perf_pmu_register(struct platform_device *pdev, + struct fme_perf_priv *priv) +{ + struct pmu *pmu = &priv->pmu; + char *name; + int ret; + + spin_lock_init(&priv->fab_lock); + + fme_perf_setup_hardware(priv); + + pmu->task_ctx_nr = perf_invalid_context; + pmu->attr_groups = fme_perf_groups; + pmu->attr_update = fme_perf_events_groups; + pmu->event_init = fme_perf_event_init; + pmu->add = fme_perf_event_add; + pmu->del = fme_perf_event_del; + pmu->start = fme_perf_event_start; + pmu->stop = fme_perf_event_stop; + pmu->read = fme_perf_event_read; + pmu->capabilities = PERF_PMU_CAP_NO_INTERRUPT | + PERF_PMU_CAP_NO_EXCLUDE; + + name = devm_kasprintf(priv->dev, GFP_KERNEL, "dfl_fme%d", pdev->id); + + ret = perf_pmu_register(pmu, name, -1); + if (ret) + return ret; + + return 0; +} + +static void fme_perf_pmu_unregister(struct fme_perf_priv *priv) +{ + 
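/* called from fme_perf_uinit(); the PMU is unregistered before the cpuhp instance is removed */ +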
perf_pmu_unregister(&priv->pmu); +} + +static int fme_perf_offline_cpu(unsigned int cpu, struct hlist_node *node) +{ + struct fme_perf_priv *priv = hlist_entry(node, struct fme_perf_priv, + node); + int target; + + if (cpu != priv->cpu) + return 0; + + target = cpumask_any_but(cpu_online_mask, cpu); + if (target >= nr_cpu_ids) + return 0; + + priv->cpu = target; + return 0; +} + +static int fme_perf_init(struct platform_device *pdev, + struct dfl_feature *feature) +{ + struct fme_perf_priv *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->dev = &pdev->dev; + priv->ioaddr = feature->ioaddr; + priv->id = feature->id; + priv->cpu = raw_smp_processor_id(); + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/fpga/dfl_fme:online", + NULL, fme_perf_offline_cpu); + if (ret < 0) + return ret; + + priv->cpuhp_state = ret; + + /* Register the pmu instance for cpu hotplug */ + ret = cpuhp_state_add_instance_nocalls(priv->cpuhp_state, &priv->node); + if (ret) + goto cpuhp_instance_err; + + ret = fme_perf_pmu_register(pdev, priv); + if (ret) + goto pmu_register_err; + + feature->priv = priv; + return 0; + +pmu_register_err: + cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(priv->cpuhp_state); + return ret; +} + +static void fme_perf_uinit(struct platform_device *pdev, + struct dfl_feature *feature) +{ + struct fme_perf_priv *priv = feature->priv; + + fme_perf_pmu_unregister(priv); + cpuhp_state_remove_instance_nocalls(priv->cpuhp_state, &priv->node); + cpuhp_remove_multi_state(priv->cpuhp_state); +} + +const struct dfl_feature_id fme_perf_id_table[] = { + {.id = FME_FEATURE_ID_GLOBAL_IPERF,}, + {.id = FME_FEATURE_ID_GLOBAL_DPERF,}, + {0,} +}; + +const struct dfl_feature_ops fme_perf_ops = { + .init = fme_perf_init, + .uinit = fme_perf_uinit, +}; diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c index a233a53db7081..52f1745dfb252 100644 --- a/drivers/fpga/dfl-fme-pr.c +++ b/drivers/fpga/dfl-fme-pr.c @@ -83,7 +83,7 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) if (copy_from_user(&port_pr, argp, minsz)) return -EFAULT; - if (port_pr.argsz < minsz || port_pr.flags) + if (port_pr.argsz < minsz || port_pr.flags || !port_pr.buffer_size) return -EINVAL; /* get fme header region */ @@ -101,15 +101,25 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) port_pr.buffer_size)) return -EFAULT; + mutex_lock(&pdata->lock); + fme = dfl_fpga_pdata_get_private(pdata); + /* fme device has been unregistered. */ + if (!fme) { + ret = -EINVAL; + goto unlock_exit; + } + /* * align PR buffer per PR bandwidth, as HW ignores the extra padding * data automatically. */ - length = ALIGN(port_pr.buffer_size, 4); + length = ALIGN(port_pr.buffer_size, fme->pr_datawidth); buf = vmalloc(length); - if (!buf) - return -ENOMEM; + if (!buf) { + ret = -ENOMEM; + goto unlock_exit; + } if (copy_from_user(buf, (void __user *)(unsigned long)port_pr.buffer_address, @@ -127,18 +137,10 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) info->flags |= FPGA_MGR_PARTIAL_RECONFIG; - mutex_lock(&pdata->lock); - fme = dfl_fpga_pdata_get_private(pdata); - /* fme device has been unregistered. 
*/ - if (!fme) { - ret = -EINVAL; - goto unlock_exit; - } - region = dfl_fme_region_find(fme, port_pr.port_id); if (!region) { ret = -EINVAL; - goto unlock_exit; + goto free_exit; } fpga_image_info_free(region->info); @@ -159,10 +161,10 @@ static int fme_pr(struct platform_device *pdev, unsigned long arg) fpga_bridges_put(®ion->bridge_list); put_device(®ion->dev); -unlock_exit: - mutex_unlock(&pdata->lock); free_exit: vfree(buf); +unlock_exit: + mutex_unlock(&pdata->lock); return ret; } @@ -388,6 +390,17 @@ static int pr_mgmt_init(struct platform_device *pdev, mutex_lock(&pdata->lock); priv = dfl_fpga_pdata_get_private(pdata); + /* + * Initialize PR data width. + * Only revision 2 supports 512bit datawidth for better performance, + * other revisions use default 32bit datawidth. This is used for + * buffer alignment. + */ + if (dfl_feature_revision(feature->ioaddr) == 2) + priv->pr_datawidth = 64; + else + priv->pr_datawidth = 4; + /* Initialize the region and bridge sub device list */ INIT_LIST_HEAD(&priv->region_list); INIT_LIST_HEAD(&priv->bridge_list); diff --git a/drivers/fpga/dfl-fme.h b/drivers/fpga/dfl-fme.h index 6685c8ef965bd..552a17eb89601 100644 --- a/drivers/fpga/dfl-fme.h +++ b/drivers/fpga/dfl-fme.h @@ -21,12 +21,14 @@ /** * struct dfl_fme - dfl fme private data * + * @pr_datawidth: data width for partial reconfiguration. * @mgr: FME's FPGA manager platform device. * @region_list: linked list of FME's FPGA regions. * @bridge_list: linked list of FME's FPGA bridges. * @pdata: fme platform device's pdata. */ struct dfl_fme { + int pr_datawidth; struct platform_device *mgr; struct list_head region_list; struct list_head bridge_list; @@ -38,5 +40,7 @@ extern const struct dfl_feature_id fme_pr_mgmt_id_table[]; extern const struct dfl_feature_ops fme_global_err_ops; extern const struct dfl_feature_id fme_global_err_id_table[]; extern const struct attribute_group fme_global_err_group; +extern const struct dfl_feature_ops fme_perf_ops; +extern const struct dfl_feature_id fme_perf_id_table[]; #endif /* __DFL_FME_H */ diff --git a/drivers/fpga/dfl-hssi.c b/drivers/fpga/dfl-hssi.c new file mode 100644 index 0000000000000..bd05ac49c1d53 --- /dev/null +++ b/drivers/fpga/dfl-hssi.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for DFL HSSI Configurable Ethernet private feature + * + * Copyright 2019-2020 Intel Corporation, Inc. 
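+ *
+ * One intel-s10-phy platform device is created for each QSFP
+ * interface advertised in the HSSI capability register.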
+ */ + +#include +#include +#include +#include +#include +#include +#include "dfl.h" + +/* HSSI Private Feature: Capability - Read-Only */ +#define HSSI_CAPABILITY 0x8 +#define DATA_RATE_AVAIL_1G BIT_ULL(0) +#define DATA_RATE_AVAIL_10G BIT_ULL(1) +#define DATA_RATE_AVAIL_25G BIT_ULL(2) +#define DATA_RATE_AVAIL_40G BIT_ULL(3) +#define DATA_RATE_AVAIL_50G BIT_ULL(4) +#define DATA_RATE_AVAIL_100G BIT_ULL(5) +#define DATA_RATE_AVAIL_200G BIT_ULL(6) +#define DATA_RATE_AVAIL_400G BIT_ULL(7) +#define CONTAINS_PCS_1G BIT_ULL(8) +#define CONTAINS_PCS_10G BIT_ULL(9) +#define CONTAINS_PCS_25G BIT_ULL(10) +#define CONTAINS_PCS_40G BIT_ULL(11) +#define CONTAINS_PCS_50G BIT_ULL(12) +#define CONTAINS_PCS_100G BIT_ULL(13) +#define CONTAINS_PCS_200G BIT_ULL(14) +#define CONTAINS_PCS_400G BIT_ULL(15) +#define CONTAINS_FEC_1G BIT_ULL(16) +#define CONTAINS_FEC_10G BIT_ULL(17) +#define CONTAINS_FEC_25G BIT_ULL(18) +#define CONTAINS_FEC_40G BIT_ULL(19) +#define CONTAINS_FEC_50G BIT_ULL(20) +#define CONTAINS_FEC_100G BIT_ULL(21) +#define CONTAINS_FEC_200G BIT_ULL(22) +#define CONTAINS_FEC_400G BIT_ULL(23) +#define DATA_RATE_SWITCH BIT_ULL(24) +#define LINK_TRAINING BIT_ULL(25) +#define AUTO_NEGOTIATION BIT_ULL(26) +#define CONTAINS_MAC BIT_ULL(27) +#define NUM_QSFP_INTERFACES GENMASK_ULL(39, 32) + +/* QSFP register space */ +#define HSSI_QSFP_BASE 0x10 +#define HSSI_QSFP_SIZE 0x20 + +struct dfl_hssi { + void __iomem *csr_base; + struct device *dev; + unsigned int qsfp_cnt; + struct platform_device *intel_s10_phy[]; +}; + +static int hssi_create_qsfp(struct dfl_hssi *hssi, struct dfl_device *dfl_dev, + int index) +{ + struct intel_s10_platform_data pdata = { 0 }; + struct platform_device_info pdevinfo = { 0 }; + struct platform_device *pdev; + + pdata.csr_base = hssi->csr_base; + pdata.phy_offset = HSSI_QSFP_BASE + index * HSSI_QSFP_SIZE; + + pdevinfo.name = INTEL_S10_PHY_DRV_NAME; + pdevinfo.id = PLATFORM_DEVID_AUTO; + pdevinfo.parent = hssi->dev; + pdevinfo.data = &pdata; + pdevinfo.size_data = sizeof(pdata); + + pdev = platform_device_register_full(&pdevinfo); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + hssi->qsfp_cnt++; + hssi->intel_s10_phy[index] = pdev; + + return 0; +} + +static void hssi_destroy_qsfp(struct dfl_hssi *hssi, int index) +{ + platform_device_unregister(hssi->intel_s10_phy[index]); +} + +static ssize_t capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_hssi *hssi = dev_get_drvdata(dev); + u64 v = readq(hssi->csr_base + HSSI_CAPABILITY); + + return sprintf(buf, "0x%016llx\n", v); +} +static DEVICE_ATTR_RO(capability); + +static struct attribute *hssi_attrs[] = { + &dev_attr_capability.attr, + NULL, +}; +ATTRIBUTE_GROUPS(hssi); + +static int dfl_hssi_probe(struct dfl_device *dfl_dev) +{ + struct device *dev = &dfl_dev->dev; + struct dfl_hssi *hssi; + int ret, qsfp_cnt, i; + void __iomem *csr_base; + u64 v; + + csr_base = devm_ioremap_resource(&dfl_dev->dev, &dfl_dev->mmio_res); + if (IS_ERR(csr_base)) { + dev_err(dev, "get mem resource fail!\n"); + return PTR_ERR(csr_base); + } + + if (!dfl_feature_revision(csr_base)) { + dev_info(dev, "hssi feature revision 0 not supported\n"); + return -ENOTSUPP; + } + + v = readq(csr_base + HSSI_CAPABILITY); + qsfp_cnt = FIELD_GET(NUM_QSFP_INTERFACES, v); + + hssi = devm_kzalloc(dev, sizeof(*hssi) + qsfp_cnt * sizeof(void *), + GFP_KERNEL); + if (!hssi) + return -ENOMEM; + + dev_set_drvdata(&dfl_dev->dev, hssi); + + hssi->csr_base = csr_base; + hssi->dev = dev; + + for (i = 0; i < qsfp_cnt; i++) { + ret = 
hssi_create_qsfp(hssi, dfl_dev, i); + if (ret) + goto error_exit; + } + + return 0; + +error_exit: + for (i = 0; i < hssi->qsfp_cnt; i++) + hssi_destroy_qsfp(hssi, i); + + return ret; +} + +static int dfl_hssi_remove(struct dfl_device *dfl_dev) +{ + struct dfl_hssi *hssi = dev_get_drvdata(&dfl_dev->dev); + int i; + + for (i = 0; i < hssi->qsfp_cnt; i++) + hssi_destroy_qsfp(hssi, i); + + return 0; +} + +#define FME_FEATURE_ID_HSSI_ETH 0xa + +static const struct dfl_device_id dfl_hssi_ids[] = { + { FME_ID, FME_FEATURE_ID_HSSI_ETH }, + { } +}; + +static struct dfl_driver dfl_hssi_driver = { + .drv = { + .name = "intel-s10-hssi", + .dev_groups = hssi_groups, + }, + .id_table = dfl_hssi_ids, + .probe = dfl_hssi_probe, + .remove = dfl_hssi_remove, +}; + +module_dfl_driver(dfl_hssi_driver); + +MODULE_DEVICE_TABLE(dfl, dfl_hssi_ids); +MODULE_DESCRIPTION("DFL HSSI driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/fpga/dfl-n3000-nios.c b/drivers/fpga/dfl-n3000-nios.c new file mode 100644 index 0000000000000..a2ae252e524fe --- /dev/null +++ b/drivers/fpga/dfl-n3000-nios.c @@ -0,0 +1,495 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for DFL N3000 NIOS private feature + * + * Copyright (C) 2019-2020 Intel Corporation, Inc. + * + * Authors: + * Wu Hao + * Xu Yilun + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char *fec_mode = "rs"; +module_param(fec_mode, charp, 0444); +MODULE_PARM_DESC(fec_mode, "FEC mode of the ethernet retimer: rs, kr or no"); + +/* N3000 NIOS private feature registers */ +#define NIOS_SPI_PARAM 0x8 +#define SHIFT_MODE BIT_ULL(1) +#define SHIFT_MODE_MSB 0 +#define SHIFT_MODE_LSB 1 +#define DATA_WIDTH GENMASK_ULL(7, 2) +#define NUM_CHIPSELECT GENMASK_ULL(13, 8) +#define CLK_POLARITY BIT_ULL(14) +#define CLK_PHASE BIT_ULL(15) +#define PERIPHERAL_ID GENMASK_ULL(47, 32) + +#define NIOS_SPI_CTRL 0x10 +#define CTRL_WR_DATA GENMASK_ULL(31, 0) +#define CTRL_ADDR GENMASK_ULL(44, 32) +#define CTRL_CMD GENMASK_ULL(63, 62) +#define CMD_NOP 0 +#define CMD_RD 1 +#define CMD_WR 2 + +#define NIOS_SPI_STAT 0x18 +#define STAT_RD_DATA GENMASK_ULL(31, 0) +#define STAT_RW_VAL BIT_ULL(32) + +/* + * Altera spi controller register base, indirect access. Pass it to altera-spi + * driver.
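+ * The child controller reuses this device's regmap (use_parent_regmap)
+ * with its register window based at ALTR_SPI_BASE.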
+ */ +#define ALTR_SPI_BASE 0x0 + +/* nios handshake registers, indirect access */ +#define NIOS_INIT 0x1000 +#define NIOS_INIT_DONE BIT(0) +#define NIOS_INIT_START BIT(1) +/* Mode for PKVL A, link 0, the same below */ +#define REQ_FEC_MODE_A0 GENMASK(9, 8) +#define REQ_FEC_MODE_A1 GENMASK(11, 10) +#define REQ_FEC_MODE_A2 GENMASK(13, 12) +#define REQ_FEC_MODE_A3 GENMASK(15, 14) +#define REQ_FEC_MODE_B0 GENMASK(17, 16) +#define REQ_FEC_MODE_B1 GENMASK(19, 18) +#define REQ_FEC_MODE_B2 GENMASK(21, 20) +#define REQ_FEC_MODE_B3 GENMASK(23, 22) +#define FEC_MODE_NO 0x0 +#define FEC_MODE_KR 0x1 +#define FEC_MODE_RS 0x2 + +#define NIOS_FW_VERSION 0x1004 +#define NIOS_FW_VERSION_PATCH GENMASK(23, 20) +#define NIOS_FW_VERSION_MINOR GENMASK(27, 24) +#define NIOS_FW_VERSION_MAJOR GENMASK(31, 28) + +#define PKVL_A_MODE_STS 0x1020 +#define PKVL_B_MODE_STS 0x1024 + +#define NS_REGBUS_WAIT_TIMEOUT 10000 /* loop count */ +#define NIOS_INIT_TIMEOUT 10000000 /* usec */ +#define NIOS_INIT_TIME_INTV 100000 /* usec */ + +struct dfl_n3000_nios { + void __iomem *base; + struct regmap *regmap; + struct device *dev; + struct platform_device *altr_spi; +}; + +static int n3000_nios_writel(struct dfl_n3000_nios *ns, unsigned int reg, + unsigned int val) +{ + int ret; + + ret = regmap_write(ns->regmap, reg, val); + if (ret) + dev_err(ns->dev, "fail to write reg 0x%x val 0x%x: %d\n", + reg, val, ret); + + return ret; +} + +static int n3000_nios_readl(struct dfl_n3000_nios *ns, unsigned int reg, + unsigned int *val) +{ + int ret; + + ret = regmap_read(ns->regmap, reg, val); + if (ret) + dev_err(ns->dev, "fail to read reg 0x%x: %d\n", reg, ret); + + return ret; +} + +static ssize_t nios_fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_n3000_nios *ns = dev_get_drvdata(dev); + unsigned int val; + int ret; + + ret = n3000_nios_readl(ns, NIOS_FW_VERSION, &val); + if (ret) + return ret; + + return sprintf(buf, "%x.%x.%x\n", + (u8)FIELD_GET(NIOS_FW_VERSION_MAJOR, val), + (u8)FIELD_GET(NIOS_FW_VERSION_MINOR, val), + (u8)FIELD_GET(NIOS_FW_VERSION_PATCH, val)); +} +static DEVICE_ATTR_RO(nios_fw_version); + +static ssize_t fec_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dfl_n3000_nios *ns = dev_get_drvdata(dev); + unsigned int val, mode; + int ret; + + ret = n3000_nios_readl(ns, NIOS_INIT, &val); + if (ret) + return ret; + + /* + * FEC mode should always be the same for all links, as we set them + * in this way. 
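+ * A mismatch means something else changed NIOS_INIT behind our back,
+ * hence the -EFAULT below.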
+ */ + mode = FIELD_GET(REQ_FEC_MODE_A0, val); + if (mode != FIELD_GET(REQ_FEC_MODE_A1, val) || + mode != FIELD_GET(REQ_FEC_MODE_A2, val) || + mode != FIELD_GET(REQ_FEC_MODE_A3, val) || + mode != FIELD_GET(REQ_FEC_MODE_B0, val) || + mode != FIELD_GET(REQ_FEC_MODE_B1, val) || + mode != FIELD_GET(REQ_FEC_MODE_B2, val) || + mode != FIELD_GET(REQ_FEC_MODE_B3, val)) + return -EFAULT; + + switch (mode) { + case FEC_MODE_NO: + return sprintf(buf, "no\n"); + case FEC_MODE_KR: + return sprintf(buf, "kr\n"); + case FEC_MODE_RS: + return sprintf(buf, "rs\n"); + } + + return -EFAULT; +} +static DEVICE_ATTR_RO(fec_mode); + +static struct attribute *n3000_nios_attrs[] = { + &dev_attr_nios_fw_version.attr, + &dev_attr_fec_mode.attr, + NULL, +}; +ATTRIBUTE_GROUPS(n3000_nios); + +static int init_error_detected(struct dfl_n3000_nios *ns) +{ + unsigned int val; + + if (n3000_nios_readl(ns, PKVL_A_MODE_STS, &val)) + return true; + + if (val >= 0x100) + return true; + + if (n3000_nios_readl(ns, PKVL_B_MODE_STS, &val)) + return true; + + if (val >= 0x100) + return true; + + return false; +} + +static void dump_error_stat(struct dfl_n3000_nios *ns) +{ + unsigned int val; + + if (n3000_nios_readl(ns, PKVL_A_MODE_STS, &val)) + return; + + dev_info(ns->dev, "PKVL_A_MODE_STS %x\n", val); + + if (n3000_nios_readl(ns, PKVL_B_MODE_STS, &val)) + return; + + dev_info(ns->dev, "PKVL_B_MODE_STS %x\n", val); +} + +static int n3000_nios_init_done_check(struct dfl_n3000_nios *ns) +{ + struct device *dev = ns->dev; + unsigned int val, mode; + int ret; + + /* + * this SPI is shared by NIOS core inside FPGA, NIOS will use this SPI + * master to do some one time initialization after power up, and then + * release the control to OS. driver needs to poll on INIT_DONE to + * see when driver could take the control. + * + * Please note that after 3.x.x version, INIT_START is introduced, so + * driver needs to trigger START firstly and then check INIT_DONE. + */ + + ret = n3000_nios_readl(ns, NIOS_FW_VERSION, &val); + if (ret) + return ret; + + /* + * If NIOS version register is totally uninitialized(== 0x0), then the + * NIOS firmware is missing. So host could take control of SPI master + * safely, but initialization work for NIOS is not done. This is an + * issue of FPGA image. We didn't error out because we need SPI master + * to reprogram a new image. 
+ */ + if (val == 0) { + dev_warn(dev, "NIOS version reg = 0x%x, skip INIT_DONE check, but PKVL may be uninitialized\n", + val); + return 0; + } + + if (FIELD_GET(NIOS_FW_VERSION_MAJOR, val) >= 3) { + /* read NIOS_INIT to check if PKVL INIT done or not */ + ret = n3000_nios_readl(ns, NIOS_INIT, &val); + if (ret) + return ret; + + /* check if PKVLs are initialized already */ + if (val & NIOS_INIT_DONE || val & NIOS_INIT_START) + goto nios_init_done; + + /* configure FEC mode per module param */ + val = NIOS_INIT_START; + + /* FEC mode will be ignored by hardware in 10G mode */ + if (!strcmp(fec_mode, "no")) + mode = FEC_MODE_NO; + else if (!strcmp(fec_mode, "kr")) + mode = FEC_MODE_KR; + else if (!strcmp(fec_mode, "rs")) + mode = FEC_MODE_RS; + else + return -EINVAL; + + /* set the same FEC mode for all links */ + val |= FIELD_PREP(REQ_FEC_MODE_A0, mode) | + FIELD_PREP(REQ_FEC_MODE_A1, mode) | + FIELD_PREP(REQ_FEC_MODE_A2, mode) | + FIELD_PREP(REQ_FEC_MODE_A3, mode) | + FIELD_PREP(REQ_FEC_MODE_B0, mode) | + FIELD_PREP(REQ_FEC_MODE_B1, mode) | + FIELD_PREP(REQ_FEC_MODE_B2, mode) | + FIELD_PREP(REQ_FEC_MODE_B3, mode); + + ret = n3000_nios_writel(ns, NIOS_INIT, val); + if (ret) + return ret; + } + +nios_init_done: + /* polls on NIOS_INIT_DONE */ + ret = regmap_read_poll_timeout(ns->regmap, NIOS_INIT, val, + val & NIOS_INIT_DONE, + NIOS_INIT_TIME_INTV, + NIOS_INIT_TIMEOUT); + if (ret) { + dev_err(dev, "NIOS_INIT_DONE %s\n", + (ret == -ETIMEDOUT) ? "timed out" : "check error"); + goto dump_sts; + } + + /* + * after INIT_DONE is detected, it still needs to check if any ERR + * detected. + * We won't error out here even if error detected. Nios will release + * spi controller when INIT_DONE is set, so driver could continue to + * initialize spi controller device. 
+ */ + if (init_error_detected(ns)) { + dev_warn(dev, "NIOS_INIT_DONE OK, but err found during init\n"); + goto dump_sts; + } + return 0; + +dump_sts: + dump_error_stat(ns); + + return ret; +} + +struct spi_board_info m10_n3000_info = { + .modalias = "m10-n3000", + .max_speed_hz = 12500000, + .bus_num = 0, + .chip_select = 0, +}; + +static int create_altr_spi_controller(struct dfl_n3000_nios *ns) +{ + struct altera_spi_platform_data pdata = { 0 }; + struct platform_device_info pdevinfo = { 0 }; + void __iomem *base = ns->base; + u64 v; + + v = readq(base + NIOS_SPI_PARAM); + + pdata.mode_bits = SPI_CS_HIGH; + if (FIELD_GET(CLK_POLARITY, v)) + pdata.mode_bits |= SPI_CPOL; + if (FIELD_GET(CLK_PHASE, v)) + pdata.mode_bits |= SPI_CPHA; + + pdata.num_chipselect = FIELD_GET(NUM_CHIPSELECT, v); + pdata.bits_per_word_mask = + SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v)); + + pdata.num_devices = 1; + pdata.devices = &m10_n3000_info; + pdata.use_parent_regmap = true; + pdata.regoff = ALTR_SPI_BASE; + + dev_dbg(ns->dev, "%s cs %hu bpm 0x%x mode 0x%hx\n", __func__, + pdata.num_chipselect, pdata.bits_per_word_mask, + pdata.mode_bits); + + pdevinfo.name = ALTERA_SPI_DRV_NAME; + pdevinfo.id = PLATFORM_DEVID_AUTO; + pdevinfo.parent = ns->dev; + pdevinfo.data = &pdata; + pdevinfo.size_data = sizeof(pdata); + + ns->altr_spi = platform_device_register_full(&pdevinfo); + if (IS_ERR(ns->altr_spi)) + return PTR_ERR(ns->altr_spi); + + return 0; +} + +static void destroy_altr_spi_controller(struct dfl_n3000_nios *ns) +{ + platform_device_unregister(ns->altr_spi); +} + +static int ns_bus_poll_stat_timeout(void __iomem *base, u64 *v) +{ + int loops = NS_REGBUS_WAIT_TIMEOUT; + + /* + * Usually the state changes in few loops, so we try to be simple here + * for performance. + */ + do { + *v = readq(base + NIOS_SPI_STAT); + if (*v & STAT_RW_VAL) + break; + cpu_relax(); + } while (--loops); + + return loops ? 
0 : -ETIMEDOUT; +} + +static int ns_bus_reg_write(void *context, unsigned int reg, unsigned int val) +{ + void __iomem *base = context; + u64 v = 0; + + v |= FIELD_PREP(CTRL_CMD, CMD_WR); + v |= FIELD_PREP(CTRL_ADDR, reg); + v |= FIELD_PREP(CTRL_WR_DATA, val); + writeq(v, base + NIOS_SPI_CTRL); + + return ns_bus_poll_stat_timeout(base, &v); +} + +static int ns_bus_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + void __iomem *base = context; + u64 v = 0; + int ret; + + v |= FIELD_PREP(CTRL_CMD, CMD_RD); + v |= FIELD_PREP(CTRL_ADDR, reg); + writeq(v, base + NIOS_SPI_CTRL); + + ret = ns_bus_poll_stat_timeout(base, &v); + if (!ret) + *val = FIELD_GET(STAT_RD_DATA, v); + + return ret; +} + +static const struct regmap_config ns_regbus_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + + .reg_write = ns_bus_reg_write, + .reg_read = ns_bus_reg_read, +}; + +static int dfl_n3000_nios_probe(struct dfl_device *dfl_dev) +{ + struct device *dev = &dfl_dev->dev; + struct dfl_n3000_nios *ns; + int ret; + + ns = devm_kzalloc(dev, sizeof(*ns), GFP_KERNEL); + if (!ns) + return -ENOMEM; + + dev_set_drvdata(&dfl_dev->dev, ns); + + ns->dev = dev; + + ns->base = devm_ioremap_resource(&dfl_dev->dev, &dfl_dev->mmio_res); + if (IS_ERR(ns->base)) { + dev_err(dev, "get mem resource fail!\n"); + return PTR_ERR(ns->base); + } + + ns->regmap = devm_regmap_init(dev, NULL, ns->base, &ns_regbus_cfg); + if (IS_ERR(ns->regmap)) + return PTR_ERR(ns->regmap); + + ret = n3000_nios_init_done_check(ns); + if (ret) + return ret; + + ret = create_altr_spi_controller(ns); + if (ret) + dev_err(dev, "altr spi controller create failed: %d\n", ret); + + return ret; +} + +static int dfl_n3000_nios_remove(struct dfl_device *dfl_dev) +{ + struct dfl_n3000_nios *ns = dev_get_drvdata(&dfl_dev->dev); + + destroy_altr_spi_controller(ns); + + return 0; +} + +#define FME_FEATURE_ID_N3000_NIOS 0xd + +static const struct dfl_device_id dfl_n3000_nios_ids[] = { + { FME_ID, FME_FEATURE_ID_N3000_NIOS }, + { } +}; + +static struct dfl_driver dfl_n3000_nios_driver = { + .drv = { + .name = "dfl-n3000-nios", + .dev_groups = n3000_nios_groups, + }, + .id_table = dfl_n3000_nios_ids, + .probe = dfl_n3000_nios_probe, + .remove = dfl_n3000_nios_remove, +}; + +module_dfl_driver(dfl_n3000_nios_driver); + +MODULE_DEVICE_TABLE(dfl, dfl_n3000_nios_ids); +MODULE_DESCRIPTION("DFL N3000 NIOS driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/fpga/dfl-pci.c b/drivers/fpga/dfl-pci.c index 89ca292236ad8..7d20cae167ee7 100644 --- a/drivers/fpga/dfl-pci.c +++ b/drivers/fpga/dfl-pci.c @@ -39,14 +39,43 @@ static void __iomem *cci_pci_ioremap_bar(struct pci_dev *pcidev, int bar) return pcim_iomap_table(pcidev)[bar]; } +static void cci_pci_iounmap_bars(struct pci_dev *pcidev, int mapped_bars) +{ + pcim_iounmap_regions(pcidev, mapped_bars); +} + +static int cci_pci_alloc_irq(struct pci_dev *pcidev) +{ + int ret, nvec = pci_msix_vec_count(pcidev); + + if (nvec <= 0) { + dev_dbg(&pcidev->dev, "fpga interrupt not supported\n"); + return 0; + } + + ret = pci_alloc_irq_vectors(pcidev, nvec, nvec, PCI_IRQ_MSIX); + if (ret < 0) + return ret; + + return nvec; +} + +static void cci_pci_free_irq(struct pci_dev *pcidev) +{ + pci_free_irq_vectors(pcidev); +} + /* PCI Device ID */ #define PCIE_DEVICE_ID_PF_INT_5_X 0xBCBD #define PCIE_DEVICE_ID_PF_INT_6_X 0xBCC0 #define PCIE_DEVICE_ID_PF_DSC_1_X 0x09C4 +#define PCIE_DEVICE_ID_PF_PAC_N3000 0x0B30 +#define PCIE_DEVICE_ID_PF_PAC_D5005 0x0B2B /* VF 
Device */ #define PCIE_DEVICE_ID_VF_INT_5_X 0xBCBF #define PCIE_DEVICE_ID_VF_INT_6_X 0xBCC1 #define PCIE_DEVICE_ID_VF_DSC_1_X 0x09C5 +#define PCIE_DEVICE_ID_VF_PAC_D5005 0x0B2C static struct pci_device_id cci_pcie_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_INT_5_X),}, @@ -55,6 +84,9 @@ static struct pci_device_id cci_pcie_id_tbl[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_INT_6_X),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_DSC_1_X),}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_DSC_1_X),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_PAC_N3000),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_PF_PAC_D5005),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCIE_DEVICE_ID_VF_PAC_D5005),}, {0,} }; MODULE_DEVICE_TABLE(pci, cci_pcie_id_tbl); @@ -78,17 +110,38 @@ static void cci_remove_feature_devs(struct pci_dev *pcidev) /* remove all children feature devices */ dfl_fpga_feature_devs_remove(drvdata->cdev); + cci_pci_free_irq(pcidev); +} + +static int *cci_pci_create_irq_table(struct pci_dev *pcidev, unsigned int nvec) +{ + unsigned int i; + int *table; + + table = kcalloc(nvec, sizeof(int), GFP_KERNEL); + if (table) { + for (i = 0; i < nvec; i++) + table[i] = pci_irq_vector(pcidev, i); + } + + return table; +} + +static void cci_pci_free_irq_table(int *table) +{ + kfree(table); } /* enumerate feature devices under pci device */ static int cci_enumerate_feature_devs(struct pci_dev *pcidev) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); + int port_num, bar, i, nvec, mapped_bars, ret = 0; struct dfl_fpga_enum_info *info; struct dfl_fpga_cdev *cdev; resource_size_t start, len; - int port_num, bar, i, ret = 0; void __iomem *base; + int *irq_table; u32 offset; u64 v; @@ -97,13 +150,34 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev) if (!info) return -ENOMEM; + /* add irq info for enumeration if the device support irq */ + nvec = cci_pci_alloc_irq(pcidev); + if (nvec < 0) { + dev_err(&pcidev->dev, "Fail to alloc irq %d.\n", nvec); + ret = nvec; + goto enum_info_free_exit; + } else if (nvec) { + irq_table = cci_pci_create_irq_table(pcidev, nvec); + if (!irq_table) { + ret = -ENOMEM; + goto irq_free_exit; + } + + ret = dfl_fpga_enum_info_add_irq(info, nvec, irq_table); + cci_pci_free_irq_table(irq_table); + if (ret) + goto irq_free_exit; + } + /* start to find Device Feature List from Bar 0 */ base = cci_pci_ioremap_bar(pcidev, 0); if (!base) { ret = -ENOMEM; - goto enum_info_free_exit; + goto irq_free_exit; } + mapped_bars = BIT(0); + /* * PF device has FME and Ports/AFUs, and VF device only has one * Port/AFU. Check them and add related "Device Feature List" info @@ -113,7 +187,7 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev) start = pci_resource_start(pcidev, 0); len = pci_resource_len(pcidev, 0); - dfl_fpga_enum_info_add_dfl(info, start, len, base); + dfl_fpga_enum_info_add_dfl(info, start, len); /* * find more Device Feature Lists (e.g. 
Ports) per information @@ -141,32 +215,39 @@ static int cci_enumerate_feature_devs(struct pci_dev *pcidev) if (!base) continue; + mapped_bars |= BIT(bar); + start = pci_resource_start(pcidev, bar) + offset; len = pci_resource_len(pcidev, bar) - offset; - dfl_fpga_enum_info_add_dfl(info, start, len, - base + offset); + dfl_fpga_enum_info_add_dfl(info, start, len); } } else if (dfl_feature_is_port(base)) { start = pci_resource_start(pcidev, 0); len = pci_resource_len(pcidev, 0); - dfl_fpga_enum_info_add_dfl(info, start, len, base); + dfl_fpga_enum_info_add_dfl(info, start, len); } else { ret = -ENODEV; - goto enum_info_free_exit; + goto irq_free_exit; } + /* release I/O mappings for next step enumeration */ + cci_pci_iounmap_bars(pcidev, mapped_bars); + /* start enumeration with prepared enumeration information */ cdev = dfl_fpga_feature_devs_enumerate(info); if (IS_ERR(cdev)) { dev_err(&pcidev->dev, "Enumeration failure\n"); ret = PTR_ERR(cdev); - goto enum_info_free_exit; + goto irq_free_exit; } drvdata->cdev = cdev; +irq_free_exit: + if (ret) + cci_pci_free_irq(pcidev); enum_info_free_exit: dfl_fpga_enum_info_free(info); @@ -211,12 +292,10 @@ int cci_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *pcidevid) } ret = cci_enumerate_feature_devs(pcidev); - if (ret) { - dev_err(&pcidev->dev, "enumeration failure %d.\n", ret); - goto disable_error_report_exit; - } + if (!ret) + return ret; - return ret; + dev_err(&pcidev->dev, "enumeration failure %d.\n", ret); disable_error_report_exit: pci_disable_pcie_error_reporting(pcidev); @@ -227,7 +306,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) { struct cci_drvdata *drvdata = pci_get_drvdata(pcidev); struct dfl_fpga_cdev *cdev = drvdata->cdev; - int ret = 0; if (!num_vfs) { /* @@ -239,6 +317,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) dfl_fpga_cdev_config_ports_pf(cdev); } else { + int ret; + /* * before enable SRIOV, put released ports into VF access mode * first of all. @@ -248,11 +328,13 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs) return ret; ret = pci_enable_sriov(pcidev, num_vfs); - if (ret) + if (ret) { dfl_fpga_cdev_config_ports_pf(cdev); + return ret; + } } - return ret; + return num_vfs; } static void cci_pci_remove(struct pci_dev *pcidev) diff --git a/drivers/fpga/dfl-spi-altera.c b/drivers/fpga/dfl-spi-altera.c new file mode 100644 index 0000000000000..59b948c608896 --- /dev/null +++ b/drivers/fpga/dfl-spi-altera.c @@ -0,0 +1,226 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DFL bus driver for Altera SPI Master + * + * Copyright (C) 2020 Intel Corporation, Inc. 
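+ *
+ * The SPI master registers sit behind an indirect access window
+ * (INDIRECT_ADDR, INDIRECT_WR_DATA, INDIRECT_RD_DATA) rather than
+ * being directly memory mapped.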
+ * + * Authors: + * Matthew Gerlach + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct dfl_altera_spi { + void __iomem *base; + struct regmap *regmap; + struct device *dev; + struct platform_device *altr_spi; +}; + +#define SPI_CORE_PARAMETER 0x8 +#define SHIFT_MODE BIT_ULL(1) +#define SHIFT_MODE_MSB 0 +#define SHIFT_MODE_LSB 1 +#define DATA_WIDTH GENMASK_ULL(7, 2) +#define NUM_CHIPSELECT GENMASK_ULL(13, 8) +#define CLK_POLARITY BIT_ULL(14) +#define CLK_PHASE BIT_ULL(15) +#define PERIPHERAL_ID GENMASK_ULL(47, 32) +#define SPI_CLK GENMASK_ULL(31, 22) +#define SPI_INDIRECT_ACC_OFST 0x10 + +#define INDIRECT_ADDR (SPI_INDIRECT_ACC_OFST+0x0) +#define INDIRECT_WR BIT_ULL(8) +#define INDIRECT_RD BIT_ULL(9) +#define INDIRECT_RD_DATA (SPI_INDIRECT_ACC_OFST+0x8) +#define INDIRECT_DATA_MASK GENMASK_ULL(31, 0) +#define INDIRECT_DEBUG BIT_ULL(32) +#define INDIRECT_WR_DATA (SPI_INDIRECT_ACC_OFST+0x10) +#define INDIRECT_TIMEOUT 10000 + +#define ALTR_SPI_BASE 0x0 + +static int indirect_bus_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + void __iomem *base = context; + int loops; + u64 v; + + writeq((reg >> 2) | INDIRECT_RD, base + INDIRECT_ADDR); + + loops = 0; + while ((readq(base + INDIRECT_ADDR) & INDIRECT_RD) && + (loops++ < INDIRECT_TIMEOUT)) + cpu_relax(); + + if (loops >= INDIRECT_TIMEOUT) { + pr_err("%s timed out %d\n", __func__, loops); + return -ETIME; + } + + v = readq(base + INDIRECT_RD_DATA); + + *val = v & INDIRECT_DATA_MASK; + + return 0; +} + +static int indirect_bus_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + void __iomem *base = context; + int loops; + + writeq(val, base + INDIRECT_WR_DATA); + writeq((reg >> 2) | INDIRECT_WR, base + INDIRECT_ADDR); + + loops = 0; + while ((readq(base + INDIRECT_ADDR) & INDIRECT_WR) && + (loops++ < INDIRECT_TIMEOUT)) + cpu_relax(); + + if (loops >= INDIRECT_TIMEOUT) { + pr_err("%s timed out %d\n", __func__, loops); + return -ETIME; + } + return 0; +} + +static const struct regmap_config indirect_regbus_cfg = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + + .reg_write = indirect_bus_reg_write, + .reg_read = indirect_bus_reg_read, +}; + +static struct spi_board_info m10_bmc_info = { + .modalias = "m10-d5005", + .max_speed_hz = 12500000, + .bus_num = 0, + .chip_select = 0, +}; + +static struct platform_device *create_cntrl(struct device *dev, + void __iomem *base, + struct spi_board_info *m10_info) +{ + struct altera_spi_platform_data pdata; + struct platform_device_info pdevinfo; + u64 v; + + v = readq(base + SPI_CORE_PARAMETER); + + memset(&pdata, 0, sizeof(pdata)); + pdata.mode_bits = SPI_CS_HIGH; + if (FIELD_GET(CLK_POLARITY, v)) + pdata.mode_bits |= SPI_CPOL; + if (FIELD_GET(CLK_PHASE, v)) + pdata.mode_bits |= SPI_CPHA; + + pdata.num_chipselect = FIELD_GET(NUM_CHIPSELECT, v); + pdata.bits_per_word_mask = + SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v)); + + pdata.num_devices = 1; + pdata.devices = m10_info; + pdata.use_parent_regmap = true; + pdata.regoff = ALTR_SPI_BASE; + + dev_dbg(dev, "%s cs %hu bpm 0x%x mode 0x%hx\n", __func__, + pdata.num_chipselect, pdata.bits_per_word_mask, + pdata.mode_bits); + + memset(&pdevinfo, 0, sizeof(pdevinfo)); + + pdevinfo.name = ALTERA_SPI_DRV_NAME; + pdevinfo.id = PLATFORM_DEVID_AUTO; + pdevinfo.parent = dev; + pdevinfo.data = &pdata; + pdevinfo.size_data = sizeof(pdata); + + return platform_device_register_full(&pdevinfo); +} +static int 
dfl_spi_altera_probe(struct dfl_device *dfl_dev) +{ + struct device *dev = &dfl_dev->dev; + struct dfl_altera_spi *aspi; + + aspi = devm_kzalloc(dev, sizeof(*aspi), GFP_KERNEL); + + if (!aspi) + return -ENOMEM; + + dev_set_drvdata(dev, aspi); + + aspi->dev = dev; + + aspi->base = devm_ioremap_resource(dev, &dfl_dev->mmio_res); + + if (IS_ERR(aspi->base)) { + dev_err(dev, "%s get mem resource fail!\n", __func__); + return PTR_ERR(aspi->base); + } + + aspi->regmap = devm_regmap_init(dev, NULL, aspi->base, + &indirect_regbus_cfg); + if (IS_ERR(aspi->regmap)) + return PTR_ERR(aspi->regmap); + + aspi->altr_spi = create_cntrl(dev, aspi->base, &m10_bmc_info); + + if (IS_ERR(aspi->altr_spi)) { + dev_err(dev, "%s failed to create spi platform device\n", + __func__); + return PTR_ERR(aspi->altr_spi); + } + + return 0; +} + +static int dfl_spi_altera_remove(struct dfl_device *dfl_dev) +{ + struct dfl_altera_spi *aspi = dev_get_drvdata(&dfl_dev->dev); + + platform_device_unregister(aspi->altr_spi); + + return 0; +} + +#define FME_FEATURE_ID_MAX10_SPI 0xe + +static const struct dfl_device_id dfl_spi_altera_ids[] = { + { FME_ID, FME_FEATURE_ID_MAX10_SPI }, + { } +}; + +static struct dfl_driver dfl_spi_altera_driver = { + .drv = { + .name = "dfl-spi-altera", + }, + .id_table = dfl_spi_altera_ids, + .probe = dfl_spi_altera_probe, + .remove = dfl_spi_altera_remove, +}; + +module_dfl_driver(dfl_spi_altera_driver); + +MODULE_DEVICE_TABLE(dfl, dfl_spi_altera_ids); +MODULE_DESCRIPTION("DFL spi altera driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index 96a2b8274a33a..50e75ea0dd5de 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -10,7 +10,10 @@ * Wu Hao * Xiao Guangrong */ +#include #include +#include +#include #include "dfl.h" @@ -28,12 +31,6 @@ static DEFINE_MUTEX(dfl_id_mutex); * index to dfl_chardevs table. If no chardev support just set devt_type * as one invalid index (DFL_FPGA_DEVT_MAX).
*/ -enum dfl_id_type { - FME_ID, /* fme id allocation and mapping */ - PORT_ID, /* port id allocation and mapping */ - DFL_ID_MAX, -}; - enum dfl_fpga_devt_type { DFL_FPGA_DEVT_FME, DFL_FPGA_DEVT_PORT, @@ -248,6 +245,233 @@ int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id) } EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id); +static bool is_header_feature(struct dfl_feature *feature) +{ + return feature->id == FEATURE_ID_FIU_HEADER; +} + +static const struct dfl_device_id * +dfl_match_one_device(const struct dfl_device_id *id, + struct dfl_device *dfl_dev) +{ + if (id->type == dfl_dev->type && + id->feature_id == dfl_dev->feature_id) + return id; + + return NULL; +} + +static int dfl_bus_match(struct device *dev, struct device_driver *drv) +{ + struct dfl_device *dfl_dev = to_dfl_dev(dev); + struct dfl_driver *dfl_drv = to_dfl_drv(drv); + const struct dfl_device_id *id_entry = dfl_drv->id_table; + + while (id_entry->feature_id) { + if (dfl_match_one_device(id_entry, dfl_dev)) { + dfl_dev->id_entry = id_entry; + return 1; + } + id_entry++; + } + + return 0; +} + +static int dfl_bus_probe(struct device *dev) +{ + struct dfl_device *dfl_dev = to_dfl_dev(dev); + struct dfl_driver *dfl_drv = to_dfl_drv(dev->driver); + + return dfl_drv->probe(dfl_dev); +} + +static int dfl_bus_remove(struct device *dev) +{ + struct dfl_device *dfl_dev = to_dfl_dev(dev); + struct dfl_driver *dfl_drv = to_dfl_drv(dev->driver); + + if (dfl_drv->remove) + dfl_drv->remove(dfl_dev); + + return 0; +} + +static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct dfl_device *dfl_dev = to_dfl_dev(dev); + + if (add_uevent_var(env, "MODALIAS=dfl:%08x:%016llx", + dfl_dev->type, dfl_dev->feature_id)) + return -ENOMEM; + + return 0; +} + +/* show dfl info fields */ +#define dfl_info_attr(field, format_string) \ +static ssize_t \ +field##_show(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct dfl_device *dfl_dev = to_dfl_dev(dev); \ + \ + return sprintf(buf, format_string, dfl_dev->field); \ +} \ +static DEVICE_ATTR_RO(field) + +dfl_info_attr(type, "0x%x\n"); +dfl_info_attr(feature_id, "0x%llx\n"); + +static struct attribute *dfl_dev_attrs[] = { + &dev_attr_type.attr, + &dev_attr_feature_id.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(dfl_dev); + +static struct bus_type dfl_bus_type = { + .name = "dfl", + .match = dfl_bus_match, + .probe = dfl_bus_probe, + .remove = dfl_bus_remove, + .uevent = dfl_bus_uevent, + .dev_groups = dfl_dev_groups, +}; + +static void release_dfl_dev(struct device *dev) +{ + struct dfl_device *dfl_dev = to_dfl_dev(dev); + + release_resource(&dfl_dev->mmio_res); + kfree(dfl_dev->irqs); + kfree(dfl_dev); +} + +static struct dfl_device * +dfl_dev_add(struct dfl_feature_platform_data *pdata, + struct dfl_feature *feature) +{ + struct platform_device *pdev = pdata->dev; + struct dfl_device *dfl_dev; + int i, ret; + + dfl_dev = kzalloc(sizeof(*dfl_dev), GFP_KERNEL); + if (!dfl_dev) + return ERR_PTR(-ENOMEM); + + dfl_dev->cdev = pdata->dfl_cdev; + + dfl_dev->mmio_res.parent = &pdev->resource[feature->resource_index]; + dfl_dev->mmio_res.flags = IORESOURCE_MEM; + dfl_dev->mmio_res.start = + pdev->resource[feature->resource_index].start; + dfl_dev->mmio_res.end = pdev->resource[feature->resource_index].end; + + /* then add irq resource */ + if (feature->nr_irqs) { + dfl_dev->irqs = kcalloc(feature->nr_irqs, + sizeof(*dfl_dev->irqs), GFP_KERNEL); + if (!dfl_dev->irqs) { + ret = -ENOMEM; + goto free_dfl_dev; + } + + for (i = 0; i < 
feature->nr_irqs; i++) + dfl_dev->irqs[i] = feature->irq_ctx[i].irq; + + dfl_dev->num_irqs = feature->nr_irqs; + } + + dfl_dev->type = feature_dev_id_type(pdev); + dfl_dev->feature_id = (unsigned long long)feature->id; + + dfl_dev->dev.parent = &pdev->dev; + dfl_dev->dev.bus = &dfl_bus_type; + dfl_dev->dev.release = release_dfl_dev; + dev_set_name(&dfl_dev->dev, "%s.%d", dev_name(&pdev->dev), + feature->index); + + dfl_dev->mmio_res.name = dev_name(&dfl_dev->dev); + ret = insert_resource(dfl_dev->mmio_res.parent, &dfl_dev->mmio_res); + if (ret) { + dev_err(&pdev->dev, "%s failed to claim resource: %pR\n", + dev_name(&dfl_dev->dev), &dfl_dev->mmio_res); + goto free_irqs; + } + + ret = device_register(&dfl_dev->dev); + if (ret) { + put_device(&dfl_dev->dev); + return ERR_PTR(ret); + } + + dev_info(&pdev->dev, "add dfl_dev: %s\n", + dev_name(&dfl_dev->dev)); + return dfl_dev; + +free_irqs: + kfree(dfl_dev->irqs); +free_dfl_dev: + kfree(dfl_dev); + return ERR_PTR(ret); +} + +static void dfl_devs_uinit(struct dfl_feature_platform_data *pdata) +{ + struct dfl_device *dfl_dev; + struct dfl_feature *feature; + + dfl_fpga_dev_for_each_feature(pdata, feature) { + if (!feature->ioaddr && feature->priv) { + dfl_dev = feature->priv; + device_unregister(&dfl_dev->dev); + feature->priv = NULL; + } + } +} + +static int dfl_devs_init(struct platform_device *pdev) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_feature *feature; + struct dfl_device *dfl_dev; + + dfl_fpga_dev_for_each_feature(pdata, feature) { + if (feature->ioaddr || feature->priv) + continue; + + dfl_dev = dfl_dev_add(pdata, feature); + if (IS_ERR(dfl_dev)) { + dfl_devs_uinit(pdata); + return PTR_ERR(dfl_dev); + } + + feature->priv = dfl_dev; + } + + return 0; +} + +int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner) +{ + if (!dfl_drv || !dfl_drv->probe || !dfl_drv->id_table) + return -EINVAL; + + dfl_drv->drv.owner = owner; + dfl_drv->drv.bus = &dfl_bus_type; + + return driver_register(&dfl_drv->drv); +} +EXPORT_SYMBOL(__dfl_driver_register); + +void dfl_driver_unregister(struct dfl_driver *dfl_drv) +{ + driver_unregister(&dfl_drv->drv); +} +EXPORT_SYMBOL(dfl_driver_unregister); + /** * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device * @pdev: feature device. 
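(For reference: a client driver for this new bus only needs an id_table and a probe() callback. Below is a minimal sketch against the interfaces added above; the driver name and the feature ID 0x23 are hypothetical and not part of this patch.)

static int dfl_example_probe(struct dfl_device *dfl_dev)
{
	void __iomem *base;

	/* the MMIO region was placed in the resource tree by dfl_dev_add() */
	base = devm_ioremap_resource(&dfl_dev->dev, &dfl_dev->mmio_res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_info(&dfl_dev->dev, "feature revision %d, %u irqs\n",
		 dfl_feature_revision(base), dfl_dev->num_irqs);
	return 0;
}

static const struct dfl_device_id dfl_example_ids[] = {
	{ FME_ID, 0x23 },	/* hypothetical feature id */
	{ }
};

static struct dfl_driver dfl_example_driver = {
	.drv = { .name = "dfl-example" },
	.id_table = dfl_example_ids,
	.probe = dfl_example_probe,	/* .remove is optional */
};
module_dfl_driver(dfl_example_driver);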
@@ -257,12 +481,15 @@ void dfl_fpga_dev_feature_uinit(struct platform_device *pdev) struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); struct dfl_feature *feature; - dfl_fpga_dev_for_each_feature(pdata, feature) + dfl_devs_uinit(pdata); + + dfl_fpga_dev_for_each_feature(pdata, feature) { if (feature->ops) { if (feature->ops->uinit) feature->ops->uinit(pdev, feature); feature->ops = NULL; } + } } EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit); @@ -271,8 +498,20 @@ static int dfl_feature_instance_init(struct platform_device *pdev, struct dfl_feature *feature, struct dfl_feature_driver *drv) { + void __iomem *base; int ret = 0; + if (!is_header_feature(feature)) { + base = devm_platform_ioremap_resource(pdev, + feature->resource_index); + if (IS_ERR(base)) { + dev_err(&pdev->dev, "fail to get iomem resource!\n"); + return PTR_ERR(base); + } + + feature->ioaddr = base; + } + if (drv->ops->init) { ret = drv->ops->init(pdev, feature); if (ret) @@ -329,6 +568,10 @@ int dfl_fpga_dev_feature_init(struct platform_device *pdev, drv++; } + ret = dfl_devs_init(pdev); + if (ret) + goto exit; + return 0; exit: dfl_fpga_dev_feature_uinit(pdev); @@ -421,16 +664,27 @@ EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister); * * @dev: device to enumerate. * @cdev: the container device for all feature devices. + * @nr_irqs: number of irqs for all feature devices. + * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of + * this device. * @feature_dev: current feature device. - * @ioaddr: header register region address of feature device in enumeration. + * @ioaddr: header register region address of current FIU in enumeration. + * @start: register resource start of current FIU. + * @len: max register resource length of current FIU. * @sub_features: a sub features linked list for feature device in enumeration. * @feature_num: number of sub features for feature device in enumeration. */ struct build_feature_devs_info { struct device *dev; struct dfl_fpga_cdev *cdev; + unsigned int nr_irqs; + int *irq_table; + struct platform_device *feature_dev; void __iomem *ioaddr; + resource_size_t start; + resource_size_t len; + struct list_head sub_features; int feature_num; }; @@ -442,12 +696,16 @@ struct build_feature_devs_info { * @mmio_res: mmio resource of this sub feature. * @ioaddr: mapped base address of mmio resource. * @node: node in sub_features linked list. + * @irq_base: start of irq index in this sub feature. + * @nr_irqs: number of irqs of this sub feature. 
*/ struct dfl_feature_info { u64 fid; struct resource mmio_res; void __iomem *ioaddr; struct list_head node; + unsigned int irq_base; + unsigned int nr_irqs; }; static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev, @@ -472,10 +730,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) struct dfl_feature_platform_data *pdata; struct dfl_feature_info *finfo, *p; enum dfl_id_type type; - int ret, index = 0; - - if (!fdev) - return 0; + int ret, index = 0, res_idx = 0; type = feature_dev_id_type(fdev); if (WARN_ON_ONCE(type >= DFL_ID_MAX)) @@ -519,13 +774,46 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) /* fill features and resource information for feature dev */ list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) { - struct dfl_feature *feature = &pdata->features[index]; + struct dfl_feature *feature = &pdata->features[index++]; + struct dfl_feature_irq_ctx *ctx; + unsigned int i; + + feature->index = index; /* save resource information for each feature */ + feature->dev = fdev; feature->id = finfo->fid; - feature->resource_index = index; - feature->ioaddr = finfo->ioaddr; - fdev->resource[index++] = finfo->mmio_res; + + /* + * map header resource for dfl bus device. Don't add header + * resource to feature devices, or the resource tree will be + * disordered and cause warning on resource release + */ + if (is_header_feature(feature)) { + feature->resource_index = -1; + feature->ioaddr = + devm_ioremap_resource(binfo->dev, + &finfo->mmio_res); + if (IS_ERR(feature->ioaddr)) + return PTR_ERR(feature->ioaddr); + } else { + feature->resource_index = res_idx; + fdev->resource[res_idx++] = finfo->mmio_res; + } + + if (finfo->nr_irqs) { + ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs, + sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + for (i = 0; i < finfo->nr_irqs; i++) + ctx[i].irq = + binfo->irq_table[finfo->irq_base + i]; + + feature->irq_ctx = ctx; + feature->nr_irqs = finfo->nr_irqs; + } list_del(&finfo->node); kfree(finfo); @@ -554,19 +842,13 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo) static int build_info_create_dev(struct build_feature_devs_info *binfo, - enum dfl_id_type type, void __iomem *ioaddr) + enum dfl_id_type type) { struct platform_device *fdev; - int ret; if (type >= DFL_ID_MAX) return -EINVAL; - /* we will create a new device, commit current device first */ - ret = build_info_commit_dev(binfo); - if (ret) - return ret; - /* * we use -ENODEV as the initialization indicator which indicates * whether the id need to be reclaimed @@ -577,7 +859,7 @@ build_info_create_dev(struct build_feature_devs_info *binfo, binfo->feature_dev = fdev; binfo->feature_num = 0; - binfo->ioaddr = ioaddr; + INIT_LIST_HEAD(&binfo->sub_features); fdev->id = dfl_id_alloc(type, &fdev->dev); @@ -638,6 +920,80 @@ static u64 feature_id(void __iomem *start) return 0; } +static int parse_feature_irqs(struct build_feature_devs_info *binfo, + resource_size_t ofst, u64 fid, + unsigned int *irq_base, unsigned int *nr_irqs) +{ + void __iomem *base = binfo->ioaddr + ofst; + unsigned int i, ibase, inr = 0; + int virq; + u64 v; + + /* + * Ideally DFL framework should only read info from DFL header, but + * current version DFL only provides mmio resources information for + * each feature in DFL Header, no field for interrupt resources. + * Some interrupt resources information are provided by specific + * mmio registers of each components(e.g. different private features) + * which supports interrupt. 
So in order to parse and assign irq + * resources to different components, DFL framework has to look into + * specific capability registers of these core private features. + * + * Once future DFL version supports generic interrupt resources + * information in common DFL headers, some generic interrupt parsing + * code could be added. But in order to be compatible to old version + * DFL, driver may still fall back to these quirks. + */ + switch (fid) { + case PORT_FEATURE_ID_UINT: + v = readq(base + PORT_UINT_CAP); + ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v); + inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v); + break; + case PORT_FEATURE_ID_ERROR: + v = readq(base + PORT_ERROR_CAP); + ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v); + break; + case FME_FEATURE_ID_GLOBAL_ERR: + v = readq(base + FME_ERROR_CAP); + ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v); + inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v); + break; + } + + if (!inr) { + *irq_base = 0; + *nr_irqs = 0; + return 0; + } + + dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n", + (unsigned long long)fid, ibase, inr); + + if (ibase + inr > binfo->nr_irqs) { + dev_err(binfo->dev, + "Invalid interrupt number in feature 0x%llx\n", + (unsigned long long)fid); + return -EINVAL; + } + + for (i = 0; i < inr; i++) { + virq = binfo->irq_table[ibase + i]; + if (virq < 0 || virq > NR_IRQS) { + dev_err(binfo->dev, + "Invalid irq table entry for feature 0x%llx\n", + (unsigned long long)fid); + return -EINVAL; + } + } + + *irq_base = (unsigned int)ibase; + *nr_irqs = (unsigned int)inr; + + return 0; +} + /* * when create sub feature instances, for private features, it doesn't need * to provide resource size and feature id as they could be read from DFH @@ -647,27 +1003,33 @@ static u64 feature_id(void __iomem *start) */ static int create_feature_instance(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst, - resource_size_t size, u64 fid) + resource_size_t ofst, resource_size_t size, u64 fid) { + unsigned int irq_base = 0, nr_irqs = 0; struct dfl_feature_info *finfo; + int ret; /* read feature size and id if inputs are invalid */ - size = size ? size : feature_size(dfl->ioaddr + ofst); - fid = fid ? fid : feature_id(dfl->ioaddr + ofst); + size = size ? size : feature_size(binfo->ioaddr + ofst); + fid = fid ? 
fid : feature_id(binfo->ioaddr + ofst); - if (dfl->len - ofst < size) + if (binfo->len - ofst < size) return -EINVAL; + ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs); + if (ret) + return ret; + finfo = kzalloc(sizeof(*finfo), GFP_KERNEL); if (!finfo) return -ENOMEM; finfo->fid = fid; - finfo->mmio_res.start = dfl->start + ofst; + finfo->mmio_res.start = binfo->start + ofst; finfo->mmio_res.end = finfo->mmio_res.start + size - 1; finfo->mmio_res.flags = IORESOURCE_MEM; - finfo->ioaddr = dfl->ioaddr + ofst; + finfo->irq_base = irq_base; + finfo->nr_irqs = nr_irqs; list_add_tail(&finfo->node, &binfo->sub_features); binfo->feature_num++; @@ -676,7 +1038,6 @@ create_feature_instance(struct build_feature_devs_info *binfo, } static int parse_feature_port_afu(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst) { u64 v = readq(binfo->ioaddr + PORT_HDR_CAP); @@ -684,11 +1045,10 @@ static int parse_feature_port_afu(struct build_feature_devs_info *binfo, WARN_ON(!size); - return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU); + return create_feature_instance(binfo, ofst, size, FEATURE_ID_AFU); } static int parse_feature_afu(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst) { if (!binfo->feature_dev) { @@ -698,7 +1058,7 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo, switch (feature_dev_id_type(binfo->feature_dev)) { case PORT_ID: - return parse_feature_port_afu(binfo, dfl, ofst); + return parse_feature_port_afu(binfo, ofst); default: dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n", binfo->feature_dev->name); @@ -707,35 +1067,91 @@ static int parse_feature_afu(struct build_feature_devs_info *binfo, return 0; } +static bool is_feature_dev_detected(struct build_feature_devs_info *binfo) +{ + return !!binfo->feature_dev; +} + +static void dfl_binfo_shift(struct build_feature_devs_info *binfo, + resource_size_t ofst) +{ + binfo->start = binfo->start + ofst; + binfo->len = binfo->len - ofst; +} + +static int dfl_binfo_prepare(struct build_feature_devs_info *binfo, + resource_size_t start, resource_size_t len) +{ + struct device *dev = binfo->dev; + void __iomem *ioaddr; + + if (!devm_request_mem_region(dev, start, len, dev_name(dev))) { + dev_err(dev, "request region fail, start:%pa, len:%pa\n", + &start, &len); + return -ENOMEM; + } + + ioaddr = devm_ioremap(dev, start, len); + if (!ioaddr) { + dev_err(dev, "ioremap region fail, start:%pa, len:%pa\n", + &start, &len); + devm_release_mem_region(dev, start, len); + return -EFAULT; + } + + binfo->start = start; + binfo->len = len; + binfo->ioaddr = ioaddr; + + return 0; +} + +static void dfl_binfo_finish(struct build_feature_devs_info *binfo) +{ + devm_iounmap(binfo->dev, binfo->ioaddr); + devm_release_mem_region(binfo->dev, binfo->start, binfo->len); +} + static int parse_feature_fiu(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst) { u32 id, offset; u64 v; int ret = 0; - v = readq(dfl->ioaddr + ofst + DFH); + if (is_feature_dev_detected(binfo)) { + dfl_binfo_finish(binfo); + + ret = build_info_commit_dev(binfo); + if (ret) + return ret; + + dfl_binfo_prepare(binfo, binfo->start + ofst, + binfo->len - ofst); + } else { + dfl_binfo_shift(binfo, ofst); + } + + v = readq(binfo->ioaddr + DFH); id = FIELD_GET(DFH_ID, v); /* create platform device for dfl feature dev */ - ret = build_info_create_dev(binfo, dfh_id_to_type(id), - dfl->ioaddr + ofst); + ret = 
build_info_create_dev(binfo, dfh_id_to_type(id)); if (ret) return ret; - ret = create_feature_instance(binfo, dfl, ofst, 0, 0); + ret = create_feature_instance(binfo, 0, 0, 0); if (ret) return ret; /* * find and parse FIU's child AFU via its NEXT_AFU register. * please note that only Port has valid NEXT_AFU pointer per spec. */ - v = readq(dfl->ioaddr + ofst + NEXT_AFU); + v = readq(binfo->ioaddr + NEXT_AFU); offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v); if (offset) - return parse_feature_afu(binfo, dfl, ofst + offset); + return parse_feature_afu(binfo, offset); dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id); @@ -743,16 +1159,15 @@ static int parse_feature_fiu(struct build_feature_devs_info *binfo, } static int parse_feature_private(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst) { if (!binfo->feature_dev) { dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n", - (unsigned long long)feature_id(dfl->ioaddr + ofst)); + (unsigned long long)feature_id(binfo->ioaddr + ofst)); return -EINVAL; } - return create_feature_instance(binfo, dfl, ofst, 0, 0); + return create_feature_instance(binfo, ofst, 0, 0); } /** @@ -760,24 +1175,24 @@ static int parse_feature_private(struct build_feature_devs_info *binfo, * * @binfo: build feature devices information. * @dfl: device feature list to parse - * @ofst: offset to feature header on this device feature list + * @ofst: offset to current FIU header */ static int parse_feature(struct build_feature_devs_info *binfo, - struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst) + resource_size_t ofst) { u64 v; u32 type; - v = readq(dfl->ioaddr + ofst + DFH); + v = readq(binfo->ioaddr + ofst + DFH); type = FIELD_GET(DFH_TYPE, v); switch (type) { case DFH_TYPE_AFU: - return parse_feature_afu(binfo, dfl, ofst); + return parse_feature_afu(binfo, ofst); case DFH_TYPE_PRIVATE: - return parse_feature_private(binfo, dfl, ofst); + return parse_feature_private(binfo, ofst); case DFH_TYPE_FIU: - return parse_feature_fiu(binfo, dfl, ofst); + return parse_feature_fiu(binfo, ofst); default: dev_info(binfo->dev, "Feature Type %x is not supported.\n", type); @@ -789,12 +1204,18 @@ static int parse_feature(struct build_feature_devs_info *binfo, static int parse_feature_list(struct build_feature_devs_info *binfo, struct dfl_fpga_enum_dfl *dfl) { - void __iomem *start = dfl->ioaddr; - void __iomem *end = dfl->ioaddr + dfl->len; + resource_size_t start, end; int ret = 0; u32 ofst = 0; u64 v; + ret = dfl_binfo_prepare(binfo, dfl->start, dfl->len); + if (ret) + return ret; + + start = dfl->start; + end = start + dfl->len; + /* walk through the device feature list via DFH's next DFH pointer. 
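 * All reads go through binfo->ioaddr, which dfl_binfo_prepare() mapped
 * for this FIU, so each offset is rebased against binfo->start.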
*/ for (; start < end; start += ofst) { if (end - start < DFH_SIZE) { @@ -802,11 +1223,11 @@ static int parse_feature_list(struct build_feature_devs_info *binfo, return -EINVAL; } - ret = parse_feature(binfo, dfl, start - dfl->ioaddr); + ret = parse_feature(binfo, start - binfo->start); if (ret) return ret; - v = readq(start + DFH); + v = readq(binfo->ioaddr + start - binfo->start + DFH); ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v); /* stop parsing if EOL(End of List) is set or offset is 0 */ @@ -815,6 +1236,8 @@ static int parse_feature_list(struct build_feature_devs_info *binfo, } /* commit current feature device when reach the end of list */ + dfl_binfo_finish(binfo); + return build_info_commit_dev(binfo); } @@ -853,6 +1276,10 @@ void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info) devm_kfree(dev, dfl); } + /* remove irq table */ + if (info->irq_table) + devm_kfree(dev, info->irq_table); + devm_kfree(dev, info); put_device(dev); } @@ -864,7 +1291,6 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free); * @info: ptr to dfl_fpga_enum_info * @start: mmio resource address of the device feature list. * @len: mmio resource length of the device feature list. - * @ioaddr: mapped mmio resource address of the device feature list. * * One FPGA device may have one or more Device Feature Lists (DFLs), use this * function to add information of each DFL to common data structure for next @@ -873,8 +1299,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free); * Return: 0 on success, negative error code otherwise. */ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info, - resource_size_t start, resource_size_t len, - void __iomem *ioaddr) + resource_size_t start, resource_size_t len) { struct dfl_fpga_enum_dfl *dfl; @@ -884,7 +1309,6 @@ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info, dfl->start = start; dfl->len = len; - dfl->ioaddr = ioaddr; list_add_tail(&dfl->node, &info->dfls); @@ -892,6 +1316,45 @@ int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info, } EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl); +/** + * dfl_fpga_enum_info_add_irq - add irq table to enum info + * + * @info: ptr to dfl_fpga_enum_info + * @nr_irqs: number of irqs of the DFL fpga device to be enumerated. + * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of + * this device. + * + * One FPGA device may have several interrupts. This function adds irq + * information of the DFL fpga device to enum info for next step enumeration. + * This function should be called before dfl_fpga_feature_devs_enumerate(). + * As we only support one irq domain for all DFLs in the same enum info, adding + * irq table a second time for the same enum info will return error. + * + * If we need to enumerate DFLs which belong to different irq domains, we + * should fill more enum info and enumerate them one by one. + * + * Return: 0 on success, negative error code otherwise. 
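+ *
+ * A rough probe-path sketch from a parent bus driver (local variable
+ * names are illustrative; the calls are this file's enumeration API,
+ * error handling trimmed):
+ *
+ *	info = dfl_fpga_enum_info_alloc(dev);
+ *	dfl_fpga_enum_info_add_dfl(info, dfl_start, dfl_len);
+ *	dfl_fpga_enum_info_add_irq(info, nr_irqs, irq_table);
+ *	cdev = dfl_fpga_feature_devs_enumerate(info);
+ *	dfl_fpga_enum_info_free(info);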
+ */ +int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info, + unsigned int nr_irqs, int *irq_table) +{ + if (!nr_irqs) + return -EINVAL; + + if (info->irq_table) + return -EEXIST; + + info->irq_table = devm_kmemdup(info->dev, irq_table, + sizeof(int) * nr_irqs, GFP_KERNEL); + if (!info->irq_table) + return -ENOMEM; + + info->nr_irqs = nr_irqs; + + return 0; +} +EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq); + static int remove_feature_dev(struct device *dev, void *data) { struct platform_device *pdev = to_platform_device(dev); @@ -959,6 +1422,10 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info) binfo->dev = info->dev; binfo->cdev = cdev; + binfo->nr_irqs = info->nr_irqs; + if (info->nr_irqs) + binfo->irq_table = info->irq_table; + /* * start enumeration for all feature devices based on Device Feature * Lists. @@ -1057,11 +1524,17 @@ static int __init dfl_fpga_init(void) { int ret; + ret = bus_register(&dfl_bus_type); + if (ret) + return ret; + dfl_ids_init(); ret = dfl_chardev_init(); - if (ret) + if (ret) { dfl_ids_destroy(); + bus_unregister(&dfl_bus_type); + } return ret; } @@ -1079,6 +1552,7 @@ static int __init dfl_fpga_init(void) */ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) { + struct dfl_feature_platform_data *pdata; struct platform_device *port_pdev; int ret = -ENODEV; @@ -1093,7 +1567,11 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id) goto put_dev_exit; } - ret = dfl_feature_dev_use_begin(dev_get_platdata(&port_pdev->dev)); + pdata = dev_get_platdata(&port_pdev->dev); + + mutex_lock(&pdata->lock); + ret = dfl_feature_dev_use_begin(pdata, true); + mutex_unlock(&pdata->lock); if (ret) goto put_dev_exit; @@ -1120,6 +1598,7 @@ EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port); */ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) { + struct dfl_feature_platform_data *pdata; struct platform_device *port_pdev; int ret = -ENODEV; @@ -1138,7 +1617,12 @@ int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id) if (ret) goto put_dev_exit; - dfl_feature_dev_use_end(dev_get_platdata(&port_pdev->dev)); + pdata = dev_get_platdata(&port_pdev->dev); + + mutex_lock(&pdata->lock); + dfl_feature_dev_use_end(pdata); + mutex_unlock(&pdata->lock); + cdev->released_port_num--; put_dev_exit: put_device(&port_pdev->dev); @@ -1230,10 +1714,164 @@ int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs) } EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf); +static irqreturn_t dfl_irq_handler(int irq, void *arg) +{ + struct eventfd_ctx *trigger = arg; + + eventfd_signal(trigger, 1); + return IRQ_HANDLED; +} + +static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx, + int fd) +{ + struct platform_device *pdev = feature->dev; + struct eventfd_ctx *trigger; + int irq, ret; + + if (idx >= feature->nr_irqs) + return -EINVAL; + + irq = feature->irq_ctx[idx].irq; + + if (feature->irq_ctx[idx].trigger) { + free_irq(irq, feature->irq_ctx[idx].trigger); + kfree(feature->irq_ctx[idx].name); + eventfd_ctx_put(feature->irq_ctx[idx].trigger); + feature->irq_ctx[idx].trigger = NULL; + } + + if (fd < 0) + return 0; + + feature->irq_ctx[idx].name = + kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx, + dev_name(&pdev->dev), + (unsigned long long)feature->id); + if (!feature->irq_ctx[idx].name) + return -ENOMEM; + + trigger = eventfd_ctx_fdget(fd); + if (IS_ERR(trigger)) { + ret = PTR_ERR(trigger); + goto free_name; + } + + ret = request_irq(irq, dfl_irq_handler, 0, + 
feature->irq_ctx[idx].name, trigger); + if (!ret) { + feature->irq_ctx[idx].trigger = trigger; + return ret; + } + + eventfd_ctx_put(trigger); +free_name: + kfree(feature->irq_ctx[idx].name); + + return ret; +} + +/** + * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts + * + * @feature: dfl sub feature. + * @start: start of irq index in this dfl sub feature. + * @count: number of irqs. + * @fds: eventfds to bind with irqs. unbind related irq if fds[n] is negative. + * unbind "count" specified number of irqs if fds ptr is NULL. + * + * Bind given eventfds with irqs in this dfl sub feature. Unbind related irq if + * fds[n] is negative. Unbind "count" specified number of irqs if fds ptr is + * NULL. + * + * Return: 0 on success, negative error code otherwise. + */ +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds) +{ + unsigned int i; + int ret = 0; + + if (start + count < start || start + count > feature->nr_irqs) + return -EINVAL; + + for (i = 0; i < count; i++) { + int fd = fds ? fds[i] : -1; + + ret = do_set_irq_trigger(feature, start + i, fd); + if (ret) { + while (i--) + do_set_irq_trigger(feature, start + i, -1); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers); + +/** + * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface. + * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. + */ +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + return put_user(feature->nr_irqs, (__u32 __user *)arg); +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs); + +/** + * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface. + * @pdev: the feature device which has the sub feature + * @feature: the dfl sub feature + * @arg: ioctl argument + * + * Return: 0 on success, negative error code otherwise. 
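+ *
+ * A hedged sketch of the expected userspace pairing. The ioctl name and
+ * port_fd are illustrative (the request codes come from companion uapi
+ * patches); the header-plus-fds layout matches the copy_from_user() and
+ * memdup_user() sequence below:
+ *
+ *	int evfd = eventfd(0, 0);
+ *	struct { struct dfl_fpga_irq_set hdr; __s32 fds[1]; } req = {
+ *		.hdr = { .start = 0, .count = 1 },
+ *		.fds = { evfd },
+ *	};
+ *	ioctl(port_fd, DFL_FPGA_PORT_ERR_SET_IRQ, &req);
+ *	read(evfd, &val, sizeof(val));	(blocks until the irq fires)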
+ */ +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg) +{ + struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct dfl_fpga_irq_set hdr; + s32 *fds; + long ret; + + if (!feature->nr_irqs) + return -ENOENT; + + if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr))) + return -EFAULT; + + if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) || + (hdr.start + hdr.count < hdr.start)) + return -EINVAL; + + fds = memdup_user((void __user *)(arg + sizeof(hdr)), + hdr.count * sizeof(s32)); + if (IS_ERR(fds)) + return PTR_ERR(fds); + + mutex_lock(&pdata->lock); + ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds); + mutex_unlock(&pdata->lock); + + kfree(fds); + return ret; +} +EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq); + static void __exit dfl_fpga_exit(void) { dfl_chardev_uinit(); dfl_ids_destroy(); + bus_unregister(&dfl_bus_type); } module_init(dfl_fpga_init); diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h index 9f0e656de720e..ae68033624f16 100644 --- a/drivers/fpga/dfl.h +++ b/drivers/fpga/dfl.h @@ -24,6 +24,8 @@ #include #include #include +#include <linux/eventfd.h> +#include <linux/interrupt.h> /* maximum supported number of ports */ #define MAX_DFL_FPGA_PORT_NUM 4 @@ -112,6 +114,13 @@ #define FME_PORT_OFST_ACC_VF 1 #define FME_PORT_OFST_IMP BIT_ULL(60) +/* FME Error Capability Register */ +#define FME_ERROR_CAP 0x70 + +/* FME Error Capability Register Bitfield */ +#define FME_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */ +#define FME_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */ + /* PORT Header Register Set */ #define PORT_HDR_DFH DFH #define PORT_HDR_GUID_L GUID_L @@ -145,6 +154,20 @@ #define PORT_STS_PWR_STATE_AP2 2 /* 90% throttling */ #define PORT_STS_PWR_STATE_AP6 6 /* 100% throttling */ +/* Port Error Capability Register */ +#define PORT_ERROR_CAP 0x38 + +/* Port Error Capability Register Bitfield */ +#define PORT_ERROR_CAP_SUPP_INT BIT_ULL(0) /* Interrupt Support */ +#define PORT_ERROR_CAP_INT_VECT GENMASK_ULL(12, 1) /* Interrupt vector */ + +/* Port Uint Capability Register */ +#define PORT_UINT_CAP 0x8 + +/* Port Uint Capability Register Bitfield */ +#define PORT_UINT_CAP_INT_NUM GENMASK_ULL(11, 0) /* Interrupts num */ +#define PORT_UINT_CAP_FST_VECT GENMASK_ULL(23, 12) /* First Vector */ + /** * struct dfl_fpga_port_ops - port ops * @@ -188,25 +211,49 @@ struct dfl_feature_driver { const struct dfl_feature_ops *ops; }; +/** + * struct dfl_feature_irq_ctx - dfl private feature interrupt context + * + * @irq: Linux IRQ number of this interrupt. + * @trigger: eventfd context to signal when interrupt happens. + * @name: irq name needed when requesting irq. + */ +struct dfl_feature_irq_ctx { + int irq; + struct eventfd_ctx *trigger; + char *name; +}; + /** * struct dfl_feature - sub feature of the feature devices * + * @dev: ptr to pdev of the feature device which has the sub feature. * @id: sub feature id. + * @index: unique identifier for a sub feature within the feature device. + * It is possible that multiple sub features with the same feature id + * are listed in one feature device. So an incremental index (starting + * from 0) is needed to identify each sub feature. * @resource_index: each sub feature has one mmio resource for its registers. * this index is used to find its mmio resource from the * feature dev (platform device)'s resources. * @ioaddr: mapped mmio resource address. + * @irq_ctx: interrupt context list. + * @nr_irqs: number of interrupt contexts.
* @ops: ops of this sub feature. + * @priv: priv data of this feature. */ struct dfl_feature { + struct platform_device *dev; u64 id; + int index; int resource_index; void __iomem *ioaddr; + struct dfl_feature_irq_ctx *irq_ctx; + unsigned int nr_irqs; const struct dfl_feature_ops *ops; + void *priv; }; -#define DEV_STATUS_IN_USE 0 - #define FEATURE_DEV_ID_UNUSED (-1) /** @@ -219,8 +266,9 @@ struct dfl_feature { * @dfl_cdev: ptr to container device. * @id: id used for this feature device. * @disable_count: count for port disable. + * @excl_open: set on feature device exclusive open. + * @open_count: count for feature device open. * @num: number for sub features. - * @dev_status: dev status (e.g. DEV_STATUS_IN_USE). * @private: ptr to feature dev private data. * @features: sub features of this feature dev. */ @@ -232,26 +280,46 @@ struct dfl_feature_platform_data { struct dfl_fpga_cdev *dfl_cdev; int id; unsigned int disable_count; - unsigned long dev_status; + bool excl_open; + int open_count; void *private; int num; struct dfl_feature features[0]; }; static inline -int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata) +int dfl_feature_dev_use_begin(struct dfl_feature_platform_data *pdata, + bool excl) { - /* Test and set IN_USE flags to ensure file is exclusively used */ - if (test_and_set_bit_lock(DEV_STATUS_IN_USE, &pdata->dev_status)) + if (pdata->excl_open) return -EBUSY; + if (excl) { + if (pdata->open_count) + return -EBUSY; + + pdata->excl_open = true; + } + pdata->open_count++; + return 0; } static inline void dfl_feature_dev_use_end(struct dfl_feature_platform_data *pdata) { - clear_bit_unlock(DEV_STATUS_IN_USE, &pdata->dev_status); + pdata->excl_open = false; + + if (WARN_ON(pdata->open_count <= 0)) + return; + + pdata->open_count--; +} + +static inline +int dfl_feature_dev_use_count(struct dfl_feature_platform_data *pdata) +{ + return pdata->open_count; } static inline @@ -369,10 +437,14 @@ static inline u8 dfl_feature_revision(void __iomem *base) * * @dev: parent device. * @dfls: list of device feature lists. + * @nr_irqs: number of irqs for all feature devices. + * @irq_table: Linux IRQ numbers for all irqs, indexed by hw irq numbers. */ struct dfl_fpga_enum_info { struct device *dev; struct list_head dfls; + unsigned int nr_irqs; + int *irq_table; }; /** @@ -380,22 +452,20 @@ struct dfl_fpga_enum_info { * * @start: base address of this device feature list. * @len: size of this device feature list. - * @ioaddr: mapped base address of this device feature list. * @node: node in list of device feature lists. 
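+ *
+ * Enumeration consumes these entries roughly as (a sketch mirroring the
+ * loop in dfl_fpga_feature_devs_enumerate()):
+ *
+ *	list_for_each_entry(dfl, &info->dfls, node) {
+ *		ret = parse_feature_list(binfo, dfl);
+ *		if (ret)
+ *			break;
+ *	}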
*/ struct dfl_fpga_enum_dfl { resource_size_t start; resource_size_t len; - void __iomem *ioaddr; - struct list_head node; }; struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev); int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info, - resource_size_t start, resource_size_t len, - void __iomem *ioaddr); + resource_size_t start, resource_size_t len); +int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info, + unsigned int nr_irqs, int *irq_table); void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info); /** @@ -447,4 +517,13 @@ int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id); int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id); void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev); int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vf); +int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start, + unsigned int count, int32_t *fds); +long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); +long dfl_feature_ioctl_set_irq(struct platform_device *pdev, + struct dfl_feature *feature, + unsigned long arg); + #endif /* __FPGA_DFL_H */ diff --git a/drivers/fpga/ifpga-sec-mgr.c b/drivers/fpga/ifpga-sec-mgr.c new file mode 100644 index 0000000000000..bcbb56a2d7936 --- /dev/null +++ b/drivers/fpga/ifpga-sec-mgr.c @@ -0,0 +1,581 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Security Manager for FPGA + * + * Copyright (C) 2019-2020 Intel Corporation, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_IDA(ifpga_sec_mgr_ida); +static struct class *ifpga_sec_mgr_class; + +#define WRITE_BLOCK_SIZE 0x4000 + +static ssize_t show_root_entry_hash(struct ifpga_sec_mgr *imgr, + sysfs_reh_hndlr_t get_reh, char *buf) +{ + unsigned int i, hash_size = 0; + int ret, cnt = 0; + u8 *hash = NULL; + + ret = get_reh(imgr, &hash, &hash_size); + if (ret) + return ret; + + if (hash) { + cnt += sprintf(buf, "0x"); + for (i = 0; i < hash_size; i++) + cnt += sprintf(buf + cnt, "%02x", hash[i]); + cnt += sprintf(buf + cnt, "\n"); + vfree(hash); + } else { + cnt = sprintf(buf, "hash not programmed\n"); + } + return cnt; +} + +#define to_sec_mgr(d) container_of(d, struct ifpga_sec_mgr, dev) + +#define DEVICE_ATTR_SEC_CSK(_name) \ + static ssize_t _name##_canceled_csks_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ + { \ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); \ + unsigned long *csk_map = NULL; \ + unsigned int cnt, nbits = 0; \ + int ret; \ + \ + ret = imgr->iops->_name##_canceled_csks(imgr, &csk_map, \ + &nbits); \ + if (ret) \ + return ret; \ + \ + cnt = bitmap_print_to_pagebuf(1, buf, csk_map, nbits); \ + vfree(csk_map); \ + return cnt; \ + } \ + static DEVICE_ATTR_RO(_name##_canceled_csks) + +#define DEVICE_ATTR_SEC_ROOT_ENTRY_HASH(_name) \ + static ssize_t _name##_root_entry_hash_show(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ + { \ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); \ + return show_root_entry_hash(imgr, \ + imgr->iops->_name##_root_entry_hash, buf); \ + } \ + static DEVICE_ATTR_RO(_name##_root_entry_hash) + +#define DEVICE_ATTR_SEC_FLASH_CNT(_name) \ + static ssize_t _name##_flash_count_show(struct device *dev, \ + struct device_attribute *attr, char *buf) \ + { \ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); \ + int cnt = imgr->iops->_name##_flash_count(imgr); \ + return cnt < 0 ? 
cnt : sprintf(buf, "%d\n", cnt); \ + } \ + static DEVICE_ATTR_RO(_name##_flash_count) + +DEVICE_ATTR_SEC_ROOT_ENTRY_HASH(sr); +DEVICE_ATTR_SEC_ROOT_ENTRY_HASH(pr); +DEVICE_ATTR_SEC_ROOT_ENTRY_HASH(bmc); +DEVICE_ATTR_SEC_FLASH_CNT(user); +DEVICE_ATTR_SEC_FLASH_CNT(bmc); +DEVICE_ATTR_SEC_CSK(sr); +DEVICE_ATTR_SEC_CSK(pr); +DEVICE_ATTR_SEC_CSK(bmc); + +static struct attribute *sec_mgr_security_attrs[] = { + &dev_attr_user_flash_count.attr, + &dev_attr_bmc_flash_count.attr, + &dev_attr_bmc_root_entry_hash.attr, + &dev_attr_sr_root_entry_hash.attr, + &dev_attr_pr_root_entry_hash.attr, + &dev_attr_sr_canceled_csks.attr, + &dev_attr_pr_canceled_csks.attr, + &dev_attr_bmc_canceled_csks.attr, + NULL, +}; + +static void ifpga_sec_set_error(struct ifpga_sec_mgr *imgr, int err_code) +{ + mutex_lock(&imgr->lock); + imgr->err_state = imgr->progress; + imgr->err_code = err_code; + mutex_unlock(&imgr->lock); +} + +static void ifpga_sec_set_dev_error(struct ifpga_sec_mgr *imgr, int err_code) +{ + ifpga_sec_set_error(imgr, err_code); + if (imgr->iops->get_hw_errinfo) + imgr->hw_errinfo = imgr->iops->get_hw_errinfo(imgr); +} + +static int ifpga_sec_mgr_do_cancel(struct ifpga_sec_mgr *imgr) +{ + int ret; + + if (imgr->request_cancel) { + ret = imgr->iops->cancel(imgr); + if (!ret) { + ifpga_sec_set_error(imgr, -ECANCELED); + return 1; + } + } + return 0; +} + +static void +ifpga_sec_mgr_update_progress(struct ifpga_sec_mgr *imgr, u32 progress) +{ + mutex_lock(&imgr->lock); + imgr->progress = progress; + sysfs_notify(&imgr->dev.kobj, "update", "status"); + mutex_unlock(&imgr->lock); +} + +static void ifpga_sec_mgr_update(struct work_struct *work) +{ + u32 size, blk_size, offset = 0; + struct ifpga_sec_mgr *imgr; + const struct firmware *fw; + int ret; + + imgr = container_of(work, struct ifpga_sec_mgr, work); + + get_device(&imgr->dev); + ret = request_firmware(&fw, imgr->filename, &imgr->dev); + if (ret) { + ifpga_sec_set_error(imgr, -ENOENT); + goto idle_exit; + } + + if (imgr->request_cancel) { + ifpga_sec_set_error(imgr, -ECANCELED); + goto release_fw_exit; + } + + imgr->data = fw->data; + imgr->remaining_size = fw->size; + + ifpga_sec_mgr_update_progress(imgr, IFPGA_SEC_PROG_PREPARING); + ret = imgr->iops->prepare(imgr); + if (ret) { + ifpga_sec_set_dev_error(imgr, ret); + imgr->iops->cancel(imgr); + goto release_fw_exit; + } + + if (ifpga_sec_mgr_do_cancel(imgr)) + goto done; + + ifpga_sec_mgr_update_progress(imgr, IFPGA_SEC_PROG_WRITING); + size = imgr->remaining_size; + while (size) { + blk_size = min_t(u32, size, WRITE_BLOCK_SIZE); + size -= blk_size; + ret = imgr->iops->write_blk(imgr, offset, blk_size); + if (ret) { + ifpga_sec_set_dev_error(imgr, ret); + imgr->iops->cancel(imgr); + goto done; + } + + imgr->remaining_size = size; + if (ifpga_sec_mgr_do_cancel(imgr)) + goto done; + + offset += blk_size; + } + + ifpga_sec_mgr_update_progress(imgr, IFPGA_SEC_PROG_PROGRAMMING); + ret = imgr->iops->poll_complete(imgr); + if (ret) + ifpga_sec_set_dev_error(imgr, ret); + +done: + if (imgr->iops->cleanup) + imgr->iops->cleanup(imgr); + +release_fw_exit: + imgr->data = NULL; + release_firmware(fw); + +idle_exit: + ifpga_sec_mgr_update_progress(imgr, IFPGA_SEC_PROG_IDLE); + kfree(imgr->filename); + imgr->filename = NULL; + put_device(&imgr->dev); +} + +#define check_attr(attribute, _name) \ + ((attribute) == &dev_attr_##_name.attr && imgr->iops->_name) + +static umode_t sec_mgr_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(kobj_to_dev(kobj)); + + 
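+	/*
+	 * Expose an attribute only when the underlying driver supplied the
+	 * matching iops callback; check_attr() pairs each sysfs node with
+	 * its ops hook.
+	 */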
if (check_attr(attr, user_flash_count) || + check_attr(attr, bmc_flash_count) || + check_attr(attr, bmc_root_entry_hash) || + check_attr(attr, sr_root_entry_hash) || + check_attr(attr, pr_root_entry_hash) || + check_attr(attr, sr_canceled_csks) || + check_attr(attr, pr_canceled_csks) || + check_attr(attr, bmc_canceled_csks)) + return attr->mode; + + return 0; +} + +static struct attribute_group sec_mgr_security_attr_group = { + .name = "security", + .attrs = sec_mgr_security_attrs, + .is_visible = sec_mgr_visible, +}; + +static const char * const sec_mgr_prog_str[] = { + "idle", /* IFPGA_SEC_PROG_IDLE */ + "read_file", /* IFPGA_SEC_PROG_READ_FILE */ + "preparing", /* IFPGA_SEC_PROG_PREPARING */ + "writing", /* IFPGA_SEC_PROG_WRITING */ + "programming", /* IFPGA_SEC_PROG_PROGRAMMING */ +}; + +static const struct sec_mgr_error { + const int err_code; + const char *err_str; +} sec_mgr_errors[] = { + { -EINVAL, "hw-error"}, + { -ETIMEDOUT, "timeout"}, + { -ECANCELED, "user-abort"}, + { -EBUSY, "device-busy"}, + { -EFBIG, "invalid-file-size"}, + { -EIO, "read-write-error"}, + { -EAGAIN, "flash-wearout"}, + { -ENOENT, "file-read-error"} +}; + +static const char *sec_progress(struct ifpga_sec_mgr *imgr) +{ + return (imgr->progress < IFPGA_SEC_PROG_MAX) ? + sec_mgr_prog_str[imgr->progress] : "unknown-status"; +} + +static ssize_t +status_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", sec_progress(to_sec_mgr(dev))); +} +static DEVICE_ATTR_RO(status); + +static ssize_t +error_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + const char *prog_str, *err_str = "unknown-error"; + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); + int err_code; + ssize_t i; + + if (!imgr->err_code) + return 0; + + mutex_lock(&imgr->lock); + err_code = imgr->err_code; + prog_str = sec_progress(imgr); + mutex_unlock(&imgr->lock); + + for (i = 0; i < ARRAY_SIZE(sec_mgr_errors); i++) { + if (sec_mgr_errors[i].err_code == err_code) { + err_str = sec_mgr_errors[i].err_str; + break; + } + } + return sprintf(buf, "%s:%s\n", prog_str, err_str); +} +static DEVICE_ATTR_RO(error); + +static ssize_t +hw_errinfo_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); + ssize_t cnt = 0; + + mutex_lock(&imgr->lock); + cnt = sprintf(buf, "0x%llx\n", imgr->hw_errinfo); + mutex_unlock(&imgr->lock); + + return cnt; +} +static DEVICE_ATTR_RO(hw_errinfo); + +static ssize_t remaining_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); + + return sprintf(buf, "%u\n", imgr->remaining_size); +} +static DEVICE_ATTR_RO(remaining_size); + +static ssize_t filename_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); + int ret = 0; + + if (count == 0 || count >= PATH_MAX) + return -EINVAL; + + mutex_lock(&imgr->lock); + if (imgr->driver_unload || imgr->progress != IFPGA_SEC_PROG_IDLE) { + ret = -EBUSY; + goto unlock_exit; + } + + imgr->filename = kstrndup(buf, PATH_MAX - 1, GFP_KERNEL); + if (!imgr->filename) { + ret = -ENOMEM; + goto unlock_exit; + } + + if (imgr->filename[strlen(imgr->filename) - 1] == '\n') + imgr->filename[strlen(imgr->filename) - 1] = '\0'; + + imgr->err_code = 0; + imgr->hw_errinfo = 0; + imgr->request_cancel = false; + imgr->progress = IFPGA_SEC_PROG_READ_FILE; + schedule_work(&imgr->work); + +unlock_exit: + mutex_unlock(&imgr->lock); + 
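+	/* on success, report the whole write as consumed */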
return ret ? : count; +} +static DEVICE_ATTR_WO(filename); + +static ssize_t cancel_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(dev); + bool cancel; + int ret = 0; + + if (kstrtobool(buf, &cancel) || !cancel) + return -EINVAL; + + mutex_lock(&imgr->lock); + if (imgr->progress == IFPGA_SEC_PROG_READ_FILE || + imgr->progress == IFPGA_SEC_PROG_PREPARING || + imgr->progress == IFPGA_SEC_PROG_WRITING) + imgr->request_cancel = true; + else if (imgr->progress == IFPGA_SEC_PROG_PROGRAMMING) + ret = -EBUSY; + mutex_unlock(&imgr->lock); + + return ret ? : count; +} +static DEVICE_ATTR_WO(cancel); + +static umode_t +sec_mgr_update_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + struct ifpga_sec_mgr *imgr = to_sec_mgr(kobj_to_dev(kobj)); + + if (attr == &dev_attr_hw_errinfo.attr && !imgr->iops->get_hw_errinfo) + return 0; + + return attr->mode; +} + +static struct attribute *sec_mgr_update_attrs[] = { + &dev_attr_filename.attr, + &dev_attr_cancel.attr, + &dev_attr_status.attr, + &dev_attr_error.attr, + &dev_attr_remaining_size.attr, + &dev_attr_hw_errinfo.attr, + NULL, +}; + +static struct attribute_group sec_mgr_update_attr_group = { + .name = "update", + .attrs = sec_mgr_update_attrs, + .is_visible = sec_mgr_update_visible, +}; + +static const struct attribute_group *ifpga_sec_mgr_attr_groups[] = { + &sec_mgr_security_attr_group, + &sec_mgr_update_attr_group, + NULL, +}; + +/** + * ifpga_sec_mgr_create - create and initialize an IFPGA security manager struct + * + * @dev: parent device from which the ifpga security manager device is created + * @name: ifpga security manager name + * @iops: pointer to a structure of ifpga callback functions + * @priv: ifpga security manager private data + * + * The caller of this function is responsible for freeing the struct with + * ifpga_sec_mgr_free().
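+ *
+ * A minimal probe-time sketch (names are illustrative, error handling
+ * trimmed):
+ *
+ *	imgr = ifpga_sec_mgr_create(dev, "my-sec", &my_iops, priv);
+ *	if (!imgr)
+ *		return -ENOMEM;
+ *	ret = ifpga_sec_mgr_register(imgr);
+ *	if (ret)
+ *		ifpga_sec_mgr_free(imgr);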
+ * + * Return: pointer to struct ifpga_sec_mgr or NULL + */ +struct ifpga_sec_mgr * +ifpga_sec_mgr_create(struct device *dev, const char *name, + const struct ifpga_sec_mgr_ops *iops, void *priv) +{ + struct ifpga_sec_mgr *imgr; + int id, ret; + + if (!iops || !iops->cancel || !iops->prepare || + !iops->write_blk || !iops->poll_complete) { + dev_err(dev, "Attempt to register without ifpga_sec_mgr_ops\n"); + return NULL; + } + + if (!name || !strlen(name)) { + dev_err(dev, "Attempt to register with no name!\n"); + return NULL; + } + + imgr = kzalloc(sizeof(*imgr), GFP_KERNEL); + if (!imgr) + return NULL; + + id = ida_simple_get(&ifpga_sec_mgr_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + ret = id; + goto exit_kfree; + } + + imgr->name = name; + imgr->priv = priv; + imgr->iops = iops; + INIT_WORK(&imgr->work, ifpga_sec_mgr_update); + + device_initialize(&imgr->dev); + imgr->dev.class = ifpga_sec_mgr_class; + imgr->dev.parent = dev; + imgr->dev.id = id; + + if (dev_set_name(&imgr->dev, "ifpga_sec%d", id)) { + dev_err(dev, "Failed to set device name: ifpga_sec%d\n", id); + goto exit_remove_ida; + } + + mutex_init(&imgr->lock); + return imgr; + +exit_remove_ida: + ida_simple_remove(&ifpga_sec_mgr_ida, id); +exit_kfree: + kfree(imgr); + + return NULL; +} +EXPORT_SYMBOL_GPL(ifpga_sec_mgr_create); + +/** + * ifpga_sec_mgr_free - free a security mgr created with ifpga_sec_mgr_create() + * @imgr: ifpga security manager struct + */ +void ifpga_sec_mgr_free(struct ifpga_sec_mgr *imgr) +{ + mutex_destroy(&imgr->lock); + ida_simple_remove(&ifpga_sec_mgr_ida, imgr->dev.id); + kfree(imgr); +} +EXPORT_SYMBOL_GPL(ifpga_sec_mgr_free); + +/** + * ifpga_sec_mgr_register - register an IFPGA security manager + * + * @imgr: ifpga security manager struct + * + * Return: 0 on success, negative error code otherwise. + */ +int ifpga_sec_mgr_register(struct ifpga_sec_mgr *imgr) +{ + int ret; + + ret = device_add(&imgr->dev); + if (!ret) + dev_info(&imgr->dev, "%s registered\n", imgr->name); + + return ret; +} +EXPORT_SYMBOL_GPL(ifpga_sec_mgr_register); + +/** + * ifpga_sec_mgr_unregister - unregister an IFPGA security manager + * + * @imgr: ifpga security manager struct + * + * This function is intended for use in an IFPGA security manager + * driver's remove() function.
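+ *
+ * A typical remove() path reduces to:
+ *
+ *	ifpga_sec_mgr_unregister(imgr);
+ *
+ * The struct itself is released via the class dev_release hook
+ * (ifpga_sec_mgr_dev_release() below), so no explicit
+ * ifpga_sec_mgr_free() is needed afterwards.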
+ */ +void ifpga_sec_mgr_unregister(struct ifpga_sec_mgr *imgr) +{ + dev_info(&imgr->dev, "%s %s\n", __func__, imgr->name); + + mutex_lock(&imgr->lock); + imgr->driver_unload = true; + if (imgr->progress != IFPGA_SEC_PROG_IDLE) { + imgr->request_cancel = true; + dev_info(&imgr->dev, "%s waiting on secure update\n", + __func__); + do { + mutex_unlock(&imgr->lock); + msleep(1000); + mutex_lock(&imgr->lock); + } while (imgr->progress != IFPGA_SEC_PROG_IDLE); + } + mutex_unlock(&imgr->lock); + device_unregister(&imgr->dev); +} +EXPORT_SYMBOL_GPL(ifpga_sec_mgr_unregister); + +static void ifpga_sec_mgr_dev_release(struct device *dev) +{ + ifpga_sec_mgr_free(to_sec_mgr(dev)); +} + +static int __init ifpga_sec_mgr_class_init(void) +{ + pr_info("Intel FPGA Security Manager\n"); + + ifpga_sec_mgr_class = class_create(THIS_MODULE, "ifpga_sec_mgr"); + if (IS_ERR(ifpga_sec_mgr_class)) + return PTR_ERR(ifpga_sec_mgr_class); + + ifpga_sec_mgr_class->dev_groups = ifpga_sec_mgr_attr_groups; + ifpga_sec_mgr_class->dev_release = ifpga_sec_mgr_dev_release; + + return 0; +} + +static void __exit ifpga_sec_mgr_class_exit(void) +{ + class_destroy(ifpga_sec_mgr_class); + ida_destroy(&ifpga_sec_mgr_ida); +} + +MODULE_DESCRIPTION("Intel FPGA Security Manager Driver"); +MODULE_LICENSE("GPL v2"); + +subsys_initcall(ifpga_sec_mgr_class_init); +module_exit(ifpga_sec_mgr_class_exit); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 05a30832c6ba0..1da076124d2c5 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1997,6 +1997,17 @@ config SENSORS_XGENE If you say yes here you get support for the temperature and power sensors for APM X-Gene SoC. +config SENSORS_INTEL_M10_BMC_HWMON + tristate "Intel MAX10 BMC Hardware Monitoring" + depends on MFD_INTEL_M10_BMC + help + This driver provides support for the hardware monitoring functionality + on the Intel MAX10 BMC chip. + + This BMC chip is used on Intel FPGA PCIe Acceleration Cards (PAC). These + sensors monitor various telemetry data of different components on the + card, e.g. board temperature, FPGA core temperature/voltage/current. + if ACPI comment "ACPI drivers" diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index b0b9c8e571762..ce1dd6b94b266 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -185,6 +185,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_INTEL_M10_BMC_HWMON) += intel-m10-bmc-hwmon.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_PMBUS) += pmbus/ diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c new file mode 100644 index 0000000000000..2a760ce420170 --- /dev/null +++ b/drivers/hwmon/intel-m10-bmc-hwmon.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Max10 BMC HWMON Driver + * + * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+ * + */ +#include +#include +#include +#include +#include +#include +#include + +enum m10bmc_channel_type { + M10BMC_CHT_TEMP, + M10BMC_CHT_IN, + M10BMC_CHT_CURR, + M10BMC_CHT_POWER, + M10BMC_CHT_MAX, +}; + +struct m10bmc_sdata { + unsigned int type; + + unsigned int reg_input; + unsigned int reg_max; + unsigned int reg_crit; + unsigned int reg_hyst; + unsigned int reg_min; + unsigned int multiplier; + + const char *label; +}; + +static struct m10bmc_sdata pacn3000_sensor_tbl[] = { + { M10BMC_CHT_TEMP, 0x100, 0x104, 0x108, 0x10c, 0x0, 500, + "Board Temperature" }, + { M10BMC_CHT_TEMP, 0x110, 0x114, 0x118, 0x0, 0x0, 500, + "FPGA Die Temperature" }, + { M10BMC_CHT_TEMP, 0x11c, 0x120, 0x124, 0x0, 0x0, 500, + "QSFP0 Temperature" }, + { M10BMC_CHT_IN, 0x128, 0x0, 0x0, 0x0, 0x0, 1, + "QSFP0 Supply Voltage" }, + { M10BMC_CHT_TEMP, 0x12c, 0x130, 0x134, 0x0, 0x0, 500, + "QSFP1 Temperature" }, + { M10BMC_CHT_IN, 0x138, 0x0, 0x0, 0x0, 0x0, 1, + "QSFP1 Supply Voltage" }, + { M10BMC_CHT_IN, 0x13c, 0x0, 0x0, 0x0, 0x0, 1, + "FPGA Core Voltage" }, + { M10BMC_CHT_CURR, 0x140, 0x0, 0x0, 0x0, 0x0, 1, + "FPGA Core Current" }, + { M10BMC_CHT_IN, 0x144, 0x0, 0x0, 0x0, 0x0, 1, + "12V Backplane Voltage" }, + { M10BMC_CHT_CURR, 0x148, 0x0, 0x0, 0x0, 0x0, 1, + "12V Backplane Current" }, + { M10BMC_CHT_IN, 0x14c, 0x0, 0x0, 0x0, 0x0, 1, + "1.2V Voltage" }, + {M10BMC_CHT_IN, 0x150, 0x0, 0x0, 0x0, 0x0, 1, + "12V AUX Voltage" }, + { M10BMC_CHT_CURR, 0x154, 0x0, 0x0, 0x0, 0x0, 1, + "12V AUX Current" }, + { M10BMC_CHT_IN, 0x158, 0x0, 0x0, 0x0, 0x0, 1, + "1.8V Voltage" }, + { M10BMC_CHT_IN, 0x15c, 0x0, 0x0, 0x0, 0x0, 1, + "3.3V Voltage" }, + { M10BMC_CHT_POWER, 0x160, 0x0, 0x0, 0x0, 0x0, 1000, + "Board Power" }, + { M10BMC_CHT_TEMP, 0x168, 0x0, 0x0, 0x0, 0x0, 500, + "PKVL1 Temperature" }, + { M10BMC_CHT_TEMP, 0x16c, 0x0, 0x0, 0x0, 0x0, 500, + "PKVL1 SerDes Temperature" }, + { M10BMC_CHT_TEMP, 0x170, 0x0, 0x0, 0x0, 0x0, 500, + "PKVL2 Temperature" }, + { M10BMC_CHT_TEMP, 0x174, 0x0, 0x0, 0x0, 0x0, 500, + "PKVL2 SerDes Temperature" }, + { M10BMC_CHT_MAX } /* sentinel */ +}; + +struct m10bmc_sdata pacd5005_sensor_tbl[] = { + { M10BMC_CHT_TEMP, 0x100, 0x104, 0x108, 0x10c, 0x0, 500, + "Board Inlet Air Temperature" }, + { M10BMC_CHT_TEMP, 0x110, 0x114, 0x118, 0x0, 0x0, 500, + "FPGA Core Temperature" }, + { M10BMC_CHT_TEMP, 0x11c, 0x120, 0x124, 0x128, 0x0, 500, + "Board Exhaust Air Temperature" }, + { M10BMC_CHT_TEMP, 0x12c, 0x130, 0x134, 0x0, 0x0, 500, + "FPGA Transceiver Temperature" }, + { M10BMC_CHT_TEMP, 0x138, 0x13c, 0x140, 0x144, 0x0, 500, + "RDIMM0 Temperature" }, + { M10BMC_CHT_TEMP, 0x148, 0x14c, 0x150, 0x154, 0x0, 500, + "RDIMM1 Temperature" }, + { M10BMC_CHT_TEMP, 0x158, 0x15c, 0x160, 0x164, 0x0, 500, + "RDIMM2 Temperature" }, + { M10BMC_CHT_TEMP, 0x168, 0x16c, 0x170, 0x174, 0x0, 500, + "RDIMM3 Temperature" }, + { M10BMC_CHT_TEMP, 0x178, 0x17c, 0x180, 0x0, 0x0, 500, + "QSFP0 Temperature" }, + { M10BMC_CHT_IN, 0x184, 0x0, 0x0, 0x0, 0x0, 1, + "QSFP0 Supply Voltage" }, + { M10BMC_CHT_TEMP, 0x188, 0x18c, 0x190, 0x0, 0x0, 500, + "QSFP1 Temperature" }, + { M10BMC_CHT_IN, 0x194, 0x0, 0x0, 0x0, 0x0, 1, + "QSFP1 Supply Voltage" }, + { M10BMC_CHT_IN, 0x198, 0x0, 0x0, 0x0, 0x0, 1, + "FPGA Core Voltage" }, + { M10BMC_CHT_CURR, 0x19c, 0x0, 0x0, 0x0, 0x0, 1, + "FPGA Core Current" }, + { M10BMC_CHT_TEMP, 0x1a0, 0x1a4, 0x1a8, 0x0, 0x0, 500, + "3.3v Temperature" }, + { M10BMC_CHT_IN, 0x1ac, 0x1b0, 0x1b4, 0x0, 0x0, 1, + "3.3v Voltage" }, + { M10BMC_CHT_CURR, 0x1b8, 0x0, 0x0, 0x0, 0x0, 1, + "3.3v Current" }, + { M10BMC_CHT_TEMP, 0x1bc, 
0x1c0, 0x1c4, 0x0, 0x0, 500, + "VCCERAM Temperature" }, + { M10BMC_CHT_IN, 0x1c8, 0x1cc, 0x1d0, 0x0, 0x0, 1, + "VCCERAM Voltage" }, + { M10BMC_CHT_CURR, 0x1d4, 0x0, 0x0, 0x0, 0x0, 1, + "VCCERAM Current" }, + { M10BMC_CHT_TEMP, 0x1d8, 0x1dc, 0x1e0, 0x0, 0x0, 500, + "VCCR Temperature" }, + { M10BMC_CHT_IN, 0x1e4, 0x1e8, 0x1ec, 0x0, 0x0, 1, + "VCCR Voltage" }, + { M10BMC_CHT_CURR, 0x1f0, 0x0, 0x0, 0x0, 0x0, 1, + "VCCR Current" }, + { M10BMC_CHT_TEMP, 0x1f4, 0x1f8, 0x1fc, 0x0, 0x0, 500, + "VCCT Temperature" }, + { M10BMC_CHT_IN, 0x200, 0x204, 0x208, 0x0, 0x0, 1, + "VCCT Voltage" }, + { M10BMC_CHT_CURR, 0x20c, 0x0, 0x0, 0x0, 0x0, 1, + "VCCT Current" }, + { M10BMC_CHT_TEMP, 0x210, 0x214, 0x218, 0x0, 0x0, 500, + "1.8v Temperature" }, + { M10BMC_CHT_IN, 0x21c, 0x220, 0x224, 0x0, 0x0, 1, + "1.8v Voltage" }, + { M10BMC_CHT_CURR, 0x228, 0x0, 0x0, 0x0, 0x0, 1, + "1.8v Current" }, + { M10BMC_CHT_TEMP, 0x22c, 0x230, 0x234, 0x0, 0x0, 500, + "12v Backplane Temperature" }, + { M10BMC_CHT_IN, 0x238, 0x0, 0x0, 0x0, 0x23c, 1, + "12v Backplane Voltage" }, + { M10BMC_CHT_CURR, 0x240, 0x244, 0x0, 0x0, 0x0, 1, + "12v Backplane Current" }, + { M10BMC_CHT_TEMP, 0x248, 0x24c, 0x250, 0x0, 0x0, 500, + "12v AUX Temperature" }, + { M10BMC_CHT_IN, 0x254, 0x0, 0x0, 0x0, 0x258, 1, + "12v AUX Voltage" }, + { M10BMC_CHT_CURR, 0x25c, 0x260, 0x0, 0x0, 0x0, 1, + "12v AUX Current" }, + { M10BMC_CHT_MAX } /* sentinel */ +}; + +struct m10bmc_ch_group { + struct m10bmc_sdata **data_list; + u32 *config; + struct hwmon_channel_info info; +}; + +struct m10bmc_hwmon { + struct device *dev; + struct m10bmc_ch_group chgs[M10BMC_CHT_MAX]; + /* This is a NULL terminated array required by HWMON interface */ + const struct hwmon_channel_info *info[M10BMC_CHT_MAX + 1]; + struct hwmon_chip_info chip; + char *hw_name; + struct intel_m10bmc *m10bmc; + struct m10bmc_sdata *data_tbl; +}; + +static enum m10bmc_channel_type +htype_to_ctype(enum hwmon_sensor_types htype) +{ + switch (htype) { + case hwmon_temp: + return M10BMC_CHT_TEMP; + case hwmon_in: + return M10BMC_CHT_IN; + case hwmon_curr: + return M10BMC_CHT_CURR; + case hwmon_power: + return M10BMC_CHT_POWER; + default: + return M10BMC_CHT_MAX; + } +} + +static enum hwmon_sensor_types +ctype_to_htype(enum m10bmc_channel_type ctype) +{ + switch (ctype) { + case M10BMC_CHT_TEMP: + return hwmon_temp; + case M10BMC_CHT_IN: + return hwmon_in; + case M10BMC_CHT_CURR: + return hwmon_curr; + case M10BMC_CHT_POWER: + return hwmon_power; + default: + return hwmon_max; + } +} + +static umode_t +m10bmc_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + return 0444; +} + +static struct m10bmc_sdata * +find_sensor_data(struct m10bmc_hwmon *hw, enum hwmon_sensor_types htype, + int channel) +{ + enum m10bmc_channel_type ctype = htype_to_ctype(htype); + + if (ctype >= M10BMC_CHT_MAX) + return NULL; + + return hw->chgs[ctype].data_list[channel]; +} + +static int do_sensor_read(struct m10bmc_hwmon *hw, struct m10bmc_sdata *data, + unsigned int regoff, long *val) +{ + unsigned int regval; + int ret; + + ret = m10bmc_sys_read(hw->m10bmc, regoff, ®val); + if (ret) + return ret; + + /* + * BMC Firmware will return 0xdeadbeef if sensor value is invalid at + * that time. This usually happens on sensor channels which connect to + * external pluggable modules, e.g. QSFP Temperature and Voltage. When + * QSFP is unplugged from cage, driver will get 0xdeadbeef from their + * registers. 
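+	 * Returning -EBUSY maps this transient state onto a plain read
+	 * error, so the sentinel is never exposed as a bogus sensor value.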
+ */ + if (regval == 0xdeadbeef) + return -EBUSY; + + *val = regval * data->multiplier; + + return 0; +} + +static int m10bmc_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct m10bmc_hwmon *hw = dev_get_drvdata(dev); + unsigned int reg, reg_hyst = 0; + struct m10bmc_sdata *data; + long hyst, value; + int ret; + + data = find_sensor_data(hw, type, channel); + if (!data) + return -EOPNOTSUPP; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + reg = data->reg_input; + break; + case hwmon_temp_max_hyst: + reg_hyst = data->reg_hyst; + fallthrough; + case hwmon_temp_max: + reg = data->reg_max; + break; + case hwmon_temp_crit_hyst: + reg_hyst = data->reg_hyst; + fallthrough; + case hwmon_temp_crit: + reg = data->reg_crit; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_in: + switch (attr) { + case hwmon_in_input: + reg = data->reg_input; + break; + case hwmon_in_max: + reg = data->reg_max; + break; + case hwmon_in_crit: + reg = data->reg_crit; + break; + case hwmon_in_min: + reg = data->reg_min; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + reg = data->reg_input; + break; + case hwmon_curr_max: + reg = data->reg_max; + break; + case hwmon_curr_crit: + reg = data->reg_crit; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_power: + switch (attr) { + case hwmon_power_input: + reg = data->reg_input; + break; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + + ret = do_sensor_read(hw, data, reg, &value); + if (ret) + return ret; + + if (reg_hyst) { + ret = do_sensor_read(hw, data, reg_hyst, &hyst); + if (ret) + return ret; + + value -= hyst; + } + + *val = value; + + return ret; +} + +static int m10bmc_hwmon_read_string(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct m10bmc_hwmon *hw = dev_get_drvdata(dev); + struct m10bmc_sdata *data; + + data = find_sensor_data(hw, type, channel); + if (!data) + return -EOPNOTSUPP; + + *str = data->label; + + return 0; +} + +static const struct hwmon_ops m10bmc_hwmon_ops = { + .is_visible = m10bmc_hwmon_is_visible, + .read = m10bmc_hwmon_read, + .read_string = m10bmc_hwmon_read_string, +}; + +static int m10bmc_malloc_channels(struct device *dev, + struct m10bmc_ch_group *chg, int num_ch) +{ + chg->config = devm_kcalloc(dev, num_ch + 1, + sizeof(*chg->config), GFP_KERNEL); + if (!chg->config) + return -ENOMEM; + + chg->data_list = devm_kcalloc(dev, num_ch, sizeof(*chg->data_list), + GFP_KERNEL); + if (!chg->data_list) + return -ENOMEM; + + chg->info.config = chg->config; + + return 0; +} + +static void m10bmc_fill_temp_channel(struct m10bmc_hwmon *hwmon, int ch_idx, + struct m10bmc_sdata *data) +{ + struct m10bmc_ch_group *chg = &hwmon->chgs[M10BMC_CHT_TEMP]; + + if (data->reg_input) + chg->config[ch_idx] |= HWMON_T_INPUT; + + if (data->reg_max) { + chg->config[ch_idx] |= HWMON_T_MAX; + if (data->reg_hyst) + chg->config[ch_idx] |= HWMON_T_MAX_HYST; + } + + if (data->reg_crit) { + chg->config[ch_idx] |= HWMON_T_CRIT; + if (data->reg_hyst) + chg->config[ch_idx] |= HWMON_T_CRIT_HYST; + } + + if (data->label) + chg->config[ch_idx] |= HWMON_T_LABEL; + + chg->data_list[ch_idx] = data; +} + +static void m10bmc_fill_in_channel(struct m10bmc_hwmon *hwmon, int ch_idx, + struct m10bmc_sdata *data) +{ + struct m10bmc_ch_group *chg = &hwmon->chgs[M10BMC_CHT_IN]; + + if (data->reg_input) 
+ chg->config[ch_idx] |= HWMON_I_INPUT; + + if (data->reg_max) + chg->config[ch_idx] |= HWMON_I_MAX; + + if (data->reg_crit) + chg->config[ch_idx] |= HWMON_I_CRIT; + + if (data->reg_min) + chg->config[ch_idx] |= HWMON_I_MIN; + + if (data->label) + chg->config[ch_idx] |= HWMON_I_LABEL; + + chg->data_list[ch_idx] = data; +} + +static void m10bmc_fill_curr_channel(struct m10bmc_hwmon *hwmon, int ch_idx, + struct m10bmc_sdata *data) +{ + struct m10bmc_ch_group *chg = &hwmon->chgs[M10BMC_CHT_CURR]; + + if (data->reg_input) + chg->config[ch_idx] |= HWMON_C_INPUT; + + if (data->reg_max) + chg->config[ch_idx] |= HWMON_C_MAX; + + if (data->reg_crit) + chg->config[ch_idx] |= HWMON_C_CRIT; + + if (data->label) + chg->config[ch_idx] |= HWMON_C_LABEL; + + chg->data_list[ch_idx] = data; +} + +static void m10bmc_fill_power_channel(struct m10bmc_hwmon *hwmon, int ch_idx, + struct m10bmc_sdata *data) +{ + struct m10bmc_ch_group *chg = &hwmon->chgs[M10BMC_CHT_POWER]; + + if (data->reg_input) + chg->config[ch_idx] |= HWMON_P_INPUT; + + if (data->label) + chg->config[ch_idx] |= HWMON_P_LABEL; + + chg->data_list[ch_idx] = data; +} + +static int m10bmc_hwmon_init(struct device *dev, struct intel_m10bmc *m10bmc, + const char *dev_name, + struct m10bmc_sdata *data_tbl) +{ + int num_ch[M10BMC_CHT_MAX] = { 0 }, ret, i, j; + struct m10bmc_sdata *data = data_tbl; + struct device *hwmon_dev; + struct m10bmc_hwmon *hw; + + hw = devm_kzalloc(dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + hw->dev = dev; + hw->m10bmc = m10bmc; + hw->data_tbl = data_tbl; + + while (data->type != M10BMC_CHT_MAX) { + if (data->type > M10BMC_CHT_MAX) + return -EINVAL; + + ++num_ch[data->type]; + ++data; + } + + for (i = 0; i < M10BMC_CHT_MAX; i++) { + if (!num_ch[i]) + continue; + + ret = m10bmc_malloc_channels(dev, &hw->chgs[i], + num_ch[i]); + if (ret) + return ret; + + hw->chgs[i].info.type = ctype_to_htype(i); + } + + data = data_tbl; + memset(&num_ch, 0, sizeof(num_ch)); + while (data->type != M10BMC_CHT_MAX) { + switch (data->type) { + case M10BMC_CHT_TEMP: + m10bmc_fill_temp_channel(hw, num_ch[data->type], + data); + break; + case M10BMC_CHT_IN: + m10bmc_fill_in_channel(hw, num_ch[data->type], + data); + break; + case M10BMC_CHT_CURR: + m10bmc_fill_curr_channel(hw, num_ch[data->type], + data); + break; + case M10BMC_CHT_POWER: + m10bmc_fill_power_channel(hw, num_ch[data->type], + data); + break; + } + + ++num_ch[data->type]; + ++data; + } + + for (i = 0, j = 0; i < M10BMC_CHT_MAX; i++) { + if (num_ch[i]) + hw->info[j++] = &hw->chgs[i].info; + } + + hw->chip.info = hw->info; + hw->chip.ops = &m10bmc_hwmon_ops; + + hw->hw_name = devm_kstrdup(dev, dev_name, GFP_KERNEL); + if (!hw->hw_name) + return -ENOMEM; + + for (i = 0; hw->hw_name[i]; i++) + if (hwmon_is_bad_char(hw->hw_name[i])) + hw->hw_name[i] = '_'; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, hw->hw_name, + hw, &hw->chip, NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + return 0; +} + +static int m10bmc_hwmon_probe(struct platform_device *pdev) +{ + const struct platform_device_id *id = platform_get_device_id(pdev); + struct intel_m10bmc *m10bmc = dev_get_drvdata(pdev->dev.parent); + + return m10bmc_hwmon_init(&pdev->dev, m10bmc, id->name, + (struct m10bmc_sdata *)id->driver_data); +} + +static const struct platform_device_id intel_m10bmc_hwmon_ids[] = { + { + .name = N3000BMC_HWMON_DEV_NAME, + .driver_data = (unsigned long)&pacn3000_sensor_tbl, + }, + { + .name = d5005BMC_HWMON_DEV_NAME, + .driver_data = (unsigned
long)&pacd5005_sensor_tbl, + }, + { } +}; + +static struct platform_driver intel_m10bmc_hwmon_driver = { + .probe = m10bmc_hwmon_probe, + .driver = { + .name = "intel-m10bmc-hwmon", + }, + .id_table = intel_m10bmc_hwmon_ids, +}; +module_platform_driver(intel_m10bmc_hwmon_driver); + +MODULE_DEVICE_TABLE(platform, intel_m10bmc_hwmon_ids); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel MAX10 BMC hardware monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 0a59249198d34..f75bb2c402570 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -19,6 +19,22 @@ config MFD_CS5535 This is the core driver for CS5535/CS5536 MFD functions. This is necessary for using the board's GPIO and MFGPT functionality. +config MFD_INTEL_M10_BMC + tristate "Intel MAX10 board management controller" + depends on SPI_MASTER + select REGMAP + select MFD_CORE + help + Support for the Intel MAX10 board management controller using the + SPI interface. + +config MFD_INTEL_M10_BMC_SECURE + tristate "Intel MAX10 BMC security engine" + depends on MFD_INTEL_M10_BMC && IFPGA_SEC_MGR + help + Secure update support for the Intel MAX10 board management + controller. + config MFD_ALTERA_A10SR bool "Altera Arria10 DevKit System Resource chip" depends on ARCH_SOCFPGA && SPI_MASTER=y && OF diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index f935d10cbf0fc..e9d296301c11a 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -259,3 +259,7 @@ obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o obj-$(CONFIG_MFD_STMFX) += stmfx.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o + +intel-m10-bmc-objs := intel-m10-bmc-main.o intel-spi-avmm.o +obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o +obj-$(CONFIG_MFD_INTEL_M10_BMC_SECURE) += intel-m10-bmc-secure.o diff --git a/drivers/mfd/intel-m10-bmc-main.c b/drivers/mfd/intel-m10-bmc-main.c new file mode 100644 index 0000000000000..62ddec4615f79 --- /dev/null +++ b/drivers/mfd/intel-m10-bmc-main.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Max10 Board Management Controller chip Driver + * + * Copyright (C) 2018-2020 Intel Corporation. All rights reserved. 
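+ *
+ * This core driver exposes the BMC register space through a regmap built
+ * on the SPI-AVMM transport and instantiates the hwmon and security
+ * engine sub-devices as MFD cells.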
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "intel-spi-avmm.h" + +enum m10bmc_type { + M10_N3000, + M10_D5005 +}; + +static struct mfd_cell m10bmc_bmc_subdevs[] = { + { + .name = d5005BMC_HWMON_DEV_NAME, + }, + { + .name = INTEL_M10BMC_SEC_DRV_NAME, + } +}; + +static struct mfd_cell m10bmc_pacn3000_subdevs[] = { + { + .name = N3000BMC_HWMON_DEV_NAME, + }, + { + .name = INTEL_M10BMC_SEC_DRV_NAME, + } +}; + +static struct regmap_config intel_m10bmc_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .max_register = M10BMC_MEM_END, +}; + +static ssize_t bmc_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct intel_m10bmc *m10bmc = dev_get_drvdata(dev); + unsigned int val; + int ret; + + ret = m10bmc_sys_read(m10bmc, M10BMC_BUILD_VER, &val); + if (ret) + return ret; + + return sprintf(buf, "0x%x\n", val); +} +static DEVICE_ATTR_RO(bmc_version); + +static ssize_t bmcfw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct intel_m10bmc *max10 = dev_get_drvdata(dev); + unsigned int val; + int ret; + + ret = m10bmc_sys_read(max10, NIOS2_FW_VERSION, &val); + if (ret) + return ret; + + return sprintf(buf, "0x%x\n", val); +} +static DEVICE_ATTR_RO(bmcfw_version); + +static ssize_t mac_address_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct intel_m10bmc *max10 = dev_get_drvdata(dev); + unsigned int macaddr1, macaddr2; + int ret; + + ret = m10bmc_sys_read(max10, M10BMC_MACADDR1, &macaddr1); + if (ret) + return ret; + + ret = m10bmc_sys_read(max10, M10BMC_MACADDR2, &macaddr2); + if (ret) + return ret; + + return sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n", + (u8)FIELD_GET(M10BMC_MAC_BYTE1, macaddr1), + (u8)FIELD_GET(M10BMC_MAC_BYTE2, macaddr1), + (u8)FIELD_GET(M10BMC_MAC_BYTE3, macaddr1), + (u8)FIELD_GET(M10BMC_MAC_BYTE4, macaddr1), + (u8)FIELD_GET(M10BMC_MAC_BYTE5, macaddr2), + (u8)FIELD_GET(M10BMC_MAC_BYTE6, macaddr2)); +} +static DEVICE_ATTR_RO(mac_address); + +static ssize_t mac_count_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct intel_m10bmc *max10 = dev_get_drvdata(dev); + unsigned int macaddr2; + int ret; + + ret = m10bmc_sys_read(max10, M10BMC_MACADDR2, &macaddr2); + if (ret) + return ret; + + return sprintf(buf, "%u\n", + (u8)FIELD_GET(M10BMC_MAC_COUNT, macaddr2)); +} +static DEVICE_ATTR_RO(mac_count); + +static struct attribute *m10bmc_attrs[] = { + &dev_attr_bmc_version.attr, + &dev_attr_bmcfw_version.attr, + &dev_attr_mac_address.attr, + &dev_attr_mac_count.attr, + NULL, +}; + +static struct attribute_group m10bmc_attr_group = { + .attrs = m10bmc_attrs, +}; + +static const struct attribute_group *m10bmc_dev_groups[] = { + &m10bmc_attr_group, + NULL +}; + +static int check_m10bmc_version(struct intel_m10bmc *m10bmc) +{ + unsigned int v; + + if (m10bmc_raw_read(m10bmc, M10BMC_LEGACY_SYS_BASE + M10BMC_BUILD_VER, + &v)) + return -ENODEV; + + if (v != 0xffffffff) + dev_info(m10bmc->dev, "non-secure M10BMC detected\n"); + else + m10bmc->flags |= M10BMC_FLAGS_SECURE; + + return 0; +} + +static int m10bmc_spi_setup(struct spi_device *spi) +{ + /* try 32 bits bpw first then fall back to 8 bits bpw */ + spi->mode = SPI_MODE_1; + spi->bits_per_word = 32; + if (!spi_setup(spi)) + return 0; + + spi->bits_per_word = 8; + return spi_setup(spi); +} + +static int intel_m10_bmc_spi_probe(struct spi_device *spi) +{ + const struct spi_device_id *id = spi_get_device_id(spi); + struct device 
*dev = &spi->dev; + struct mfd_cell *cells; + struct intel_m10bmc *m10bmc; + int ret, n_cell; + + /* try 32 bits bpw first then fall back to 8 bits bpw */ + ret = m10bmc_spi_setup(spi); + if (ret) + return ret; + + m10bmc = devm_kzalloc(dev, sizeof(*m10bmc), GFP_KERNEL); + if (!m10bmc) + return -ENOMEM; + + m10bmc->dev = dev; + + m10bmc->regmap = + devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config); + if (IS_ERR(m10bmc->regmap)) { + ret = PTR_ERR(m10bmc->regmap); + dev_err(dev, "Failed to allocate regmap: %d\n", ret); + return ret; + } + + spi_set_drvdata(spi, m10bmc); + + ret = check_m10bmc_version(m10bmc); + if (ret) { + dev_err(dev, "Failed to identify m10bmc hardware\n"); + return ret; + } + + switch (id->driver_data) { + case M10_N3000: + cells = m10bmc_pacn3000_subdevs; + n_cell = ARRAY_SIZE(m10bmc_pacn3000_subdevs); + break; + case M10_D5005: + cells = m10bmc_bmc_subdevs; + n_cell = ARRAY_SIZE(m10bmc_bmc_subdevs); + break; + default: + return -ENODEV; + } + + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cell, + NULL, 0, NULL); + if (ret) + dev_err(dev, "Failed to register sub-devices: %d\n", ret); + + return ret; +} + +static const struct spi_device_id m10bmc_spi_id[] = { + { "m10-n3000", M10_N3000 }, + { "m10-d5005", M10_D5005 }, + { } +}; +MODULE_DEVICE_TABLE(spi, m10bmc_spi_id); + +static struct spi_driver intel_m10bmc_spi_driver = { + .driver = { + .name = "intel-m10-bmc", + .dev_groups = m10bmc_dev_groups, + }, + .probe = intel_m10_bmc_spi_probe, + .id_table = m10bmc_spi_id, +}; + +module_spi_driver(intel_m10bmc_spi_driver); + +MODULE_DESCRIPTION("Intel Max10 BMC Device Driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:intel-m10-bmc"); diff --git a/drivers/mfd/intel-m10-bmc-secure.c b/drivers/mfd/intel-m10-bmc-secure.c new file mode 100644 index 0000000000000..8d7893ad33907 --- /dev/null +++ b/drivers/mfd/intel-m10-bmc-secure.c @@ -0,0 +1,583 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Max10 Board Management Controller Security Engine Driver + * + * Copyright (C) 2019-2020 Intel Corporation. All rights reserved. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct m10bmc_sec { + struct device *dev; + struct intel_m10bmc *m10bmc; + struct ifpga_sec_mgr *imgr; +}; + +/* + * register access helper functions. 
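+ * They wrap the corresponding regmap calls and log any failure together
+ * with the target address.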
+ * + * m10bmc_raw_bulk_read - bulk_read max10 registers per addr + * m10bmc_raw_bulk_write - bulk_write max10 registers per addr + * m10bmc_raw_update_bits - update max10 register per addr + * m10bmc_sys_update_bits - update max10 system register per offset + */ +static int m10bmc_raw_bulk_read(struct intel_m10bmc *m10bmc, unsigned int addr, + void *val, size_t cnt) +{ + int ret; + + ret = regmap_bulk_read(m10bmc->regmap, addr, val, cnt); + if (ret) + dev_err(m10bmc->dev, "fail to read raw reg %x cnt %zx: %d\n", + addr, cnt, ret); + + return ret; +} + +static int m10bmc_raw_bulk_write(struct intel_m10bmc *m10bmc, unsigned int addr, + void *val, size_t cnt) +{ + int ret; + + ret = regmap_bulk_write(m10bmc->regmap, addr, val, cnt); + if (ret) + dev_err(m10bmc->dev, "fail to write raw reg %x cnt %zx: %d\n", + addr, cnt, ret); + + return ret; +} + +static int m10bmc_raw_update_bits(struct intel_m10bmc *m10bmc, + unsigned int addr, unsigned int msk, + unsigned int val) +{ + int ret; + + ret = regmap_update_bits(m10bmc->regmap, addr, msk, val); + if (ret) + dev_err(m10bmc->dev, "fail to update raw reg %x: %d\n", + addr, ret); + + return ret; +} + +#define m10bmc_sys_update_bits(m10bmc, offset, msk, val) \ + m10bmc_raw_update_bits(m10bmc, M10BMC_SYS_BASE + (offset), msk, val) + +#define SHA256_REH_SIZE 32 +#define SHA384_REH_SIZE 48 + +static int get_root_entry_hash(struct ifpga_sec_mgr *imgr, u32 exp_magic, + u32 prog_addr, u32 hash_addr, u8 **hash, + unsigned int *hash_size) +{ + struct m10bmc_sec *sec = imgr->priv; + unsigned int stride = regmap_get_reg_stride(sec->m10bmc->regmap); + u32 magic, sha_num_bytes; + int ret; + + ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic); + if (ret) + return ret; + + dev_dbg(sec->dev, "%s magic 0x%08x\n", __func__, magic); + + if ((magic & 0xffff) != exp_magic) + return 0; + + sha_num_bytes = ((magic >> 16) & 0xffff) / 8; + + if (sha_num_bytes != SHA256_REH_SIZE && + sha_num_bytes != SHA384_REH_SIZE) { + dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__, + sha_num_bytes); + return -EINVAL; + } + + *hash = vmalloc(sizeof(u8) * sha_num_bytes); + if (!*hash) + return -ENOMEM; + + ret = m10bmc_raw_bulk_read(sec->m10bmc, hash_addr, + *hash, sha_num_bytes / stride); + if (ret) { + dev_err(sec->dev, "bulk_read of 0x%x failed %d", + hash_addr, ret); + vfree(*hash); + return ret; + } + + *hash_size = sha_num_bytes; + return 0; +} + +#define BMC_REH_ADDR 0x17ffc004 +#define BMC_PROG_ADDR 0x17ffc000 +#define BMC_PROG_MAGIC 0x5746 + +#define SR_REH_ADDR 0x17ffd004 +#define SR_PROG_ADDR 0x17ffd000 +#define SR_PROG_MAGIC 0x5253 + +#define PR_REH_ADDR 0x17ffe004 +#define PR_PROG_ADDR 0x17ffe000 +#define PR_PROG_MAGIC 0x5250 + +#define SYSFS_GET_REH(_name, _magic, _prog_addr, _hash_addr) \ + static int get_##_name##_root_entry_hash(struct ifpga_sec_mgr *imgr, \ + u8 **hash, \ + unsigned int *hash_size) \ + { \ + return get_root_entry_hash(imgr, _magic, _prog_addr, \ + _hash_addr, hash, hash_size); \ + } + +SYSFS_GET_REH(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR) +SYSFS_GET_REH(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR) +SYSFS_GET_REH(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR) + +#define FLASH_COUNT_SIZE 4096 +#define USER_FLASH_COUNT 0x17ffb000 + +static int get_qspi_flash_count(struct ifpga_sec_mgr *imgr) +{ + struct m10bmc_sec *sec = imgr->priv; + unsigned int stride = regmap_get_reg_stride(sec->m10bmc->regmap); + unsigned int cnt, num_bits = FLASH_COUNT_SIZE * 8; + u8 *flash_buf; + int ret; + + flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL); + 
if (!flash_buf) + return -ENOMEM; + + ret = m10bmc_raw_bulk_read(sec->m10bmc, USER_FLASH_COUNT, flash_buf, + FLASH_COUNT_SIZE / stride); + if (ret) { + dev_err(sec->dev, "%s failed to read %d\n", __func__, ret); + goto exit_free; + } + + cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits); + +exit_free: + kfree(flash_buf); + + return ret ? : cnt; +} + +#define CSK_BIT_LEN 128U +#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32) + +static int get_csk_vector(struct ifpga_sec_mgr *imgr, u32 addr, + unsigned long **csk_map, unsigned int *nbits) +{ + struct m10bmc_sec *sec = imgr->priv; + u32 csk32[CSK_32ARRAY_SIZE]; + int i, ret; + + *csk_map = vmalloc(sizeof(unsigned long) * BITS_TO_LONGS(CSK_BIT_LEN)); + if (!*csk_map) + return -ENOMEM; + + ret = m10bmc_raw_bulk_read(sec->m10bmc, addr, csk32, CSK_32ARRAY_SIZE); + if (ret) { + dev_err(sec->dev, "%s failed to read %d\n", __func__, ret); + vfree(*csk_map); + return ret; + } + + for (i = 0; i < CSK_32ARRAY_SIZE; i++) + csk32[i] = le32_to_cpu(csk32[i]); + + bitmap_from_arr32(*csk_map, csk32, CSK_BIT_LEN); + bitmap_complement(*csk_map, *csk_map, CSK_BIT_LEN); + + *nbits = CSK_BIT_LEN; + return 0; +} + +#define SYSFS_GET_CSK_VEC(_name, _addr) \ + static int get_##_name##_canceled_csks(struct ifpga_sec_mgr *imgr, \ + unsigned long **csk_map, \ + unsigned int *nbits) \ + { return get_csk_vector(imgr, _addr, csk_map, nbits); } + +#define CSK_VEC_OFFSET 0x34 + +SYSFS_GET_CSK_VEC(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET) +SYSFS_GET_CSK_VEC(sr, SR_PROG_ADDR + CSK_VEC_OFFSET) +SYSFS_GET_CSK_VEC(pr, PR_PROG_ADDR + CSK_VEC_OFFSET) + +static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell) +{ + u32 auth_result; + + dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell); + + if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result)) + dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result); +} + +static int rsu_check_idle(struct m10bmc_sec *sec) +{ + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); + if (ret) + return -EIO; + + if (rsu_prog(doorbell) != RSU_PROG_IDLE && + rsu_prog(doorbell) != RSU_PROG_RSU_DONE) { + log_error_regs(sec, doorbell); + return -EBUSY; + } + + return 0; +} + +static inline bool rsu_start_done(u32 doorbell) +{ + return (!(doorbell & RSU_REQUEST) && + (rsu_stat(doorbell) == RSU_STAT_ERASE_FAIL || + rsu_stat(doorbell) == RSU_STAT_WEAROUT || + (rsu_prog(doorbell) != RSU_PROG_IDLE && + rsu_prog(doorbell) != RSU_PROG_RSU_DONE))); +} + +static int rsu_update_init(struct m10bmc_sec *sec) +{ + u32 doorbell; + int ret; + + ret = m10bmc_sys_update_bits(sec->m10bmc, M10BMC_DOORBELL, + RSU_REQUEST | HOST_STATUS, RSU_REQUEST); + if (ret) + return -EIO; + + ret = regmap_read_poll_timeout(sec->m10bmc->regmap, + M10BMC_SYS_BASE + M10BMC_DOORBELL, + doorbell, + rsu_start_done(doorbell), + NIOS_HANDSHAKE_INTERVAL_US, + NIOS_HANDSHAKE_TIMEOUT_US); + + if (ret == -ETIMEDOUT) { + log_error_regs(sec, doorbell); + return ret; + } else if (ret) { + return -EIO; + } + + if (rsu_stat(doorbell) == RSU_STAT_WEAROUT) { + dev_warn(sec->dev, "Excessive flash count detected\n"); + return -EAGAIN; + } else if (rsu_stat(doorbell) == RSU_STAT_ERASE_FAIL) { + log_error_regs(sec, doorbell); + return -EINVAL; + } + + return 0; +} + +static int rsu_prog_ready(struct m10bmc_sec *sec) +{ + unsigned long poll_timeout; + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); + poll_timeout = jiffies + msecs_to_jiffies(RSU_PREP_TIMEOUT_MS); + while (!ret 
&& !time_after(jiffies, poll_timeout)) { + if (rsu_prog(doorbell) != RSU_PROG_PREPARE) + break; + msleep(RSU_PREP_INTERVAL_MS); + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); + } + + if (ret) { + return -EIO; + } else if (rsu_prog(doorbell) == RSU_PROG_PREPARE) { + log_error_regs(sec, doorbell); + return -ETIMEDOUT; + } else if (rsu_prog(doorbell) != RSU_PROG_READY) { + log_error_regs(sec, doorbell); + return -EINVAL; + } + + return 0; +} + +static int rsu_send_data(struct m10bmc_sec *sec) +{ + u32 doorbell; + int ret; + + ret = m10bmc_sys_update_bits(sec->m10bmc, M10BMC_DOORBELL, HOST_STATUS, + FIELD_PREP(HOST_STATUS, + HOST_STATUS_WRITE_DONE)); + if (ret) + return -EIO; + + ret = regmap_read_poll_timeout(sec->m10bmc->regmap, + M10BMC_SYS_BASE + M10BMC_DOORBELL, + doorbell, + rsu_prog(doorbell) != RSU_PROG_READY, + NIOS_HANDSHAKE_INTERVAL_US, + NIOS_HANDSHAKE_TIMEOUT_US); + + if (ret == -ETIMEDOUT) { + log_error_regs(sec, doorbell); + return ret; + } else if (ret) { + return -EIO; + } + + switch (rsu_stat(doorbell)) { + case RSU_STAT_NORMAL: + case RSU_STAT_NIOS_OK: + case RSU_STAT_USER_OK: + case RSU_STAT_FACTORY_OK: + break; + default: + log_error_regs(sec, doorbell); + return -EINVAL; + } + + return 0; +} + +static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell) +{ + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell); + if (ret) + return -EIO; + + switch (rsu_stat(*doorbell)) { + case RSU_STAT_NORMAL: + case RSU_STAT_NIOS_OK: + case RSU_STAT_USER_OK: + case RSU_STAT_FACTORY_OK: + case RSU_STAT_WEAROUT: + break; + default: + log_error_regs(sec, *doorbell); + return -EINVAL; + } + + switch (rsu_prog(*doorbell)) { + case RSU_PROG_IDLE: + case RSU_PROG_RSU_DONE: + return 0; + case RSU_PROG_AUTHENTICATING: + case RSU_PROG_COPYING: + case RSU_PROG_UPDATE_CANCEL: + case RSU_PROG_PROGRAM_KEY_HASH: + return -EAGAIN; + default: + log_error_regs(sec, *doorbell); + return -EINVAL; + } +} + +static int m10bmc_sec_prepare(struct ifpga_sec_mgr *imgr) +{ + struct m10bmc_sec *sec = imgr->priv; + unsigned int ret; + + ret = rsu_check_idle(sec); + if (ret) + return ret; + + ret = rsu_update_init(sec); + if (ret) + return ret; + + return rsu_prog_ready(sec); +} + +static int +m10bmc_sec_write_blk(struct ifpga_sec_mgr *imgr, u32 offset, u32 size) +{ + struct m10bmc_sec *sec = imgr->priv; + unsigned int stride = regmap_get_reg_stride(sec->m10bmc->regmap); + u32 doorbell; + int ret; + + if (offset + imgr->remaining_size > M10BMC_STAGING_SIZE) + return -EFBIG; + + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); + if (ret) { + return -EIO; + } else if (rsu_prog(doorbell) != RSU_PROG_READY) { + log_error_regs(sec, doorbell); + return -EINVAL; + } + + ret = m10bmc_raw_bulk_write(sec->m10bmc, M10BMC_STAGING_BASE + offset, + (void *)imgr->data + offset, size / stride); + + return ret ? 
-EIO : 0; +} + +static int m10bmc_sec_poll_complete(struct ifpga_sec_mgr *imgr) +{ + struct m10bmc_sec *sec = imgr->priv; + unsigned long poll_timeout; + unsigned int ret; + u32 doorbell; + + ret = rsu_send_data(sec); + if (ret) + return ret; + + ret = rsu_check_complete(sec, &doorbell); + poll_timeout = jiffies + msecs_to_jiffies(RSU_COMPLETE_TIMEOUT_MS); + while (ret == -EAGAIN && !time_after(jiffies, poll_timeout)) { + msleep(RSU_COMPLETE_INTERVAL_MS); + ret = rsu_check_complete(sec, &doorbell); + } + + if (ret == -EAGAIN) { + log_error_regs(sec, doorbell); + return -ETIMEDOUT; + } + + return 0; +} + +static int m10bmc_sec_cancel(struct ifpga_sec_mgr *imgr) +{ + struct m10bmc_sec *sec = imgr->priv; + u32 doorbell; + int ret; + + ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell); + if (ret) + return ret; + + if (rsu_prog(doorbell) != RSU_PROG_READY) + return -EBUSY; + + ret = m10bmc_sys_update_bits(sec->m10bmc, M10BMC_DOORBELL, HOST_STATUS, + FIELD_PREP(HOST_STATUS, + HOST_STATUS_ABORT_RSU)); + + return ret; +} + +static u64 m10bmc_sec_hw_errinfo(struct ifpga_sec_mgr *imgr) +{ + struct m10bmc_sec *sec = imgr->priv; + u32 doorbell = 0, auth_result = 0; + u64 hw_errinfo = 0; + + switch (-imgr->err_code) { + case EINVAL: + case ETIMEDOUT: + case EBUSY: + case EAGAIN: + + if (!m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell)) + hw_errinfo = (u64)doorbell << 32; + + if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, + &auth_result)) + hw_errinfo |= auth_result; + + return hw_errinfo; + default: + return 0; + } +} + +static const struct ifpga_sec_mgr_ops m10bmc_iops = { + .user_flash_count = get_qspi_flash_count, + .bmc_root_entry_hash = get_bmc_root_entry_hash, + .sr_root_entry_hash = get_sr_root_entry_hash, + .pr_root_entry_hash = get_pr_root_entry_hash, + .sr_canceled_csks = get_sr_canceled_csks, + .bmc_canceled_csks = get_bmc_canceled_csks, + .pr_canceled_csks = get_pr_canceled_csks, + .prepare = m10bmc_sec_prepare, + .write_blk = m10bmc_sec_write_blk, + .poll_complete = m10bmc_sec_poll_complete, + .cancel = m10bmc_sec_cancel, + .get_hw_errinfo = m10bmc_sec_hw_errinfo +}; + +static void ifpga_sec_mgr_uinit(struct m10bmc_sec *sec) +{ + ifpga_sec_mgr_unregister(sec->imgr); +} + +static int ifpga_sec_mgr_init(struct m10bmc_sec *sec) +{ + int ret; + + sec->imgr = ifpga_sec_mgr_create(sec->dev, dev_name(sec->dev), + &m10bmc_iops, sec); + if (!sec->imgr) + return -ENOMEM; + + ret = ifpga_sec_mgr_register(sec->imgr); + if (ret) { + ifpga_sec_mgr_free(sec->imgr); + sec->imgr = NULL; + } + return ret; +} + +static int m10bmc_secure_probe(struct platform_device *pdev) +{ + struct m10bmc_sec *sec; + int ret; + + sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + + sec->dev = &pdev->dev; + sec->m10bmc = dev_get_drvdata(pdev->dev.parent); + dev_set_drvdata(&pdev->dev, sec); + + ret = ifpga_sec_mgr_init(sec); + if (ret) + dev_err(&pdev->dev, + "Security manager failed to start: %d\n", ret); + + return ret; +} + +static int m10bmc_secure_remove(struct platform_device *pdev) +{ + struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev); + + ifpga_sec_mgr_uinit(sec); + return 0; +} + +static struct platform_driver intel_m10bmc_secure_driver = { + .probe = m10bmc_secure_probe, + .remove = m10bmc_secure_remove, + .driver = { + .name = INTEL_M10BMC_SEC_DRV_NAME, + }, +}; +module_platform_driver(intel_m10bmc_secure_driver); + +MODULE_ALIAS("platform:" INTEL_M10BMC_SEC_DRV_NAME); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel MAX10 BMC 
secure engine"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/intel-spi-avmm.c b/drivers/mfd/intel-spi-avmm.c new file mode 100644 index 0000000000000..0d08868d50e12 --- /dev/null +++ b/drivers/mfd/intel-spi-avmm.c @@ -0,0 +1,813 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Intel SPI Slave to AVMM Bus Bridge + * + * Copyright (C) 2018-2020 Intel Corporation. All rights reserved. + * + */ + +#include "intel-spi-avmm.h" + +/* + * This driver implements the Read/write protocol for generic SPI master to + * communicate with the "SPI slave to Avalon Master Bridge" (spi-avmm) IP. + * + * The spi-avmm IP act as a bridge to convert encoded streams of bytes from + * host to internal mmio read/write on Avalon bus. In order to issue register + * access request to the slave chip, the host should send formatted bytes that + * conform to the transfer protocol. + * The transfer protocol contains 3 layers: transaction layer, packet layer + * and physical layer. + * + * Reference Documents could be found at: + * https://www.intel.com/content/www/us/en/programmable/documentation/ + * sfo1400787952932.html + * + * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" does general + * introduction about the protocol. + * + * Chapter "Avalon-ST Serial Peripheral Interface Core" describes Physical + * layer. + * + * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores" + * describes Packet layer. + * + * Chapter "Avalon Packets to Transactions Converter Core" describes + * Transaction layer. + * + * The main function of Physical layer is the use of PHY_IDLE (4a). Host + * issues SCLK to query data from slave, but if slave is not ready to submit + * data yet, it will repeat PHY_IDLE until data is prepared. + * Because of this special char, it also needs an ESCAPE char (4d), to help + * represent data "4a". The escape rule is "4d first, following 4a ^ 20". + * So "4d, 6a" for data "4a", and "4d, 6d" for data "4d". + * + * The Packet layer defines the boundary of a whole packet. It defines the + * Start Of Packet (SOP, 7a) char and End Of Packet (EOP, 7b) char. Please + * note that the non-special byte after EOP is the last byte of the packet. + * Besides Packet layer defines a Channel char (7c) + Channel number for + * multiple channel transfer. But it is now not supported by this driver. So + * host will always send "7c, 00" when needed, and will drop the packet if + * "7c, non-zero" is received. + * Finally, a Packet layer ESCAPE char (7d) is also needed to represent data + * value same as the special chars. The escape rule is the same. + * The escape rule should be used if the last byte requires it. So if a packet + * ends up with data 7a, the last bytes should be "7b, 7d, 5a". + * + * The transaction layer defines several transaction formats, including host + * write/incrementing write request, slave write response, host + * read/incrementing read request. 
+ *
+ * +------+------------------+------------------------------+
+ * | Byte | Field            | Description                  |
+ * +------+------------------+------------------------------+
+ * |             Host transaction format                    |
+ * +------+------------------+------------------------------+
+ * |  0   | Transaction code | 0x0, Write non-incrementing  |
+ * |      |                  | 0x4, Write incrementing      |
+ * |      |                  | 0x10, Read non-incrementing  |
+ * |      |                  | 0x14, Read incrementing      |
+ * +------+------------------+------------------------------+
+ * |  1   | Reserved         |                              |
+ * +------+------------------+------------------------------+
+ * | 3:2  | Size             | Big endian                   |
+ * +------+------------------+------------------------------+
+ * | 7:4  | Address          | Big endian                   |
+ * +------+------------------+------------------------------+
+ * | n:8  | Data             | For Write only, Little endian|
+ * +------+------------------+------------------------------+
+ * |        Slave write complete response format            |
+ * +------+------------------+------------------------------+
+ * |  0   | Response code    | Transaction code ^ 0x80      |
+ * +------+------------------+------------------------------+
+ * |  1   | Reserved         |                              |
+ * +------+------------------+------------------------------+
+ * | 3:2  | Size             | Big endian                   |
+ * +------+------------------+------------------------------+
+ *
+ * For a slave read response there is no transaction header; the slave
+ * simply returns the read-out data.
+ *
+ *
+ * Here is a simple case to illustrate the protocol. The host requests the
+ * slave to do a write32 to addr 0x024b7a40. The following diagram shows how
+ * the slave parses the incoming byte stream from MOSI layer by layer.
+ *
+ *
+ * LSB                        Physical layer                          MSB
+ *
+ * |4a|7a|7c|4a|00|00|00|4a|00|04|02|4b|7d|5a|40|4d|6a|ff|03|7b|5f|
+ *  |        |           |           |           |
+ *  +--------+-----------+         Escape      Escape
+ *       |                    (escape char dropped, next byte
+ *  IDLE, Dropped              XORed with 0x20)
+ *
+ *                            Packet layer
+ *
+ * |7a|7c|00|00|00|00|04|02|4b|7d|5a|40|4a|ff|03|7b|5f|
+ *  |  |  |                 |              |  |
+ * SOP +--+               Escape          EOP |
+ *        |          (dropped, next byte      |
+ *   Channel 0        XORed with 0x20)   Last valid byte
+ *
+ *                          Transaction layer
+ *
+ * |00|00|00|04|02|4b|7a|40|4a|ff|03|5f|
+ *  |     |     |           |
+ * Write  size  Addr(BE):   Data(LE):
+ * command =4   0x024b7a40  0x5f03ff4a
+ *
+ *
+ * This is how the host and slave interact for the single write32; only
+ * transaction layer and PHY_IDLE chars are shown for simplicity:
+ *
+ * MOSI |00|00|00|04|02|4b|7a|40|4a|ff|03|5f|XX|XX|...|XX|XX|XX|XX|XX|
+ * MISO |4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|...|4a|80|00|00|04|
+ *                                                      |           |
+ *                                   Write done, 0x00 ^ 0x80        |
+ *                                                   4 bytes written
+ *
+ * Here is another case, a single read32 of addr 0x024b3a40; the slave
+ * returns 0x12345678.
+ *
+ * MOSI |10|00|00|04|02|4b|3a|40|XX|XX|...............|XX|XX|XX|XX|XX|
+ * MISO |4a|4a|4a|4a|4a|4a|4a|4a|4a|4a|...............|4a|78|56|34|12|
+ *                                                       |  |  |  |
+ *                                                       +--+--+--+
+ *                                                           |
+ *                                          the read data, returned as-is
+ */
+
+#define SPI_AVMM_XFER_TIMEOUT	(msecs_to_jiffies(200))
+
+/*
+ * Transaction Layer
+ *
+ * The Transaction layer encapsulates the transaction code (read/write),
+ * size (16 bits), addr (32 bits) and value (32 bits) into the transaction
+ * format. SEQ_READ/WRITE reads/writes a number of bytes (specified by
+ * header->size) at incrementing addresses starting from the header->addr
+ * field. The transaction header is followed by register data for write
+ * operations.
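+ *
+ * For example (hypothetical values, following the format table above), an
+ * incrementing write of two 32-bit words 0x11111111 and 0x22222222 to addr
+ * 0x00001000 would be encoded as:
+ *
+ *   04 00 00 08 00 00 10 00 11 11 11 11 22 22 22 22
+ *   |     |     |           |
+ *  code  size  addr (BE)    data words (LE)
+ *
+ * and the slave's write response would be "84 00 00 08" (0x04 ^ 0x80).
+ * Note that this driver's regmap config limits writes to a single word
+ * (see MAX_TX_CNT in intel-spi-avmm.h), but the encoding is the same.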
+ *
+ * Please note that size and addr are sent in big endian, but value is sent
+ * in little endian according to the transaction layer protocol.
+ */
+
+/* Format a transaction layer byte stream for tx_buf */
+static void trans_tx_prepare(bool is_read, u32 reg, u32 *wr_val, u16 size,
+			     char *tx_buf, unsigned int *tx_len)
+{
+	/* the size parameter must be n * VAL_SIZE */
+	u16 count = size / VAL_SIZE;
+	struct trans_header *header;
+	u8 trans_code;
+	__le32 *data;
+	int i;
+
+	trans_code = is_read ?
+		     (count == 1 ? TRANS_CODE_READ : TRANS_CODE_SEQ_READ) :
+		     (count == 1 ? TRANS_CODE_WRITE : TRANS_CODE_SEQ_WRITE);
+
+	header = (struct trans_header *)tx_buf;
+	header->trans_code = trans_code;
+	header->rsvd = 0;
+	header->size = cpu_to_be16(size);
+	header->addr = cpu_to_be32(reg);
+
+	if (is_read) {
+		*tx_len = RD_TRANS_TX_SIZE;
+	} else {
+		data = (__le32 *)(tx_buf + TRANS_HEAD_SIZE);
+
+		for (i = 0; i < count; i++)
+			*data++ = cpu_to_le32(*wr_val++);
+
+		*tx_len = WR_TRANS_TX_SIZE(count);
+	}
+}
+
+/*
+ * For a read transaction, the avmm bus directly returns the register values
+ * without a transaction response header.
+ */
+static int rd_trans_rx_parse(char *rx_buf, unsigned int rx_len, u32 *val)
+{
+	unsigned int count, i;
+	__le32 *data;
+
+	if (!rx_len || !IS_ALIGNED(rx_len, VAL_SIZE))
+		return -EINVAL;
+
+	count = rx_len / VAL_SIZE;
+
+	data = (__le32 *)rx_buf;
+	for (i = 0; i < count; i++)
+		*val++ = le32_to_cpu(*data++);
+
+	return 0;
+}
+
+/* For a write transaction, the slave returns a transaction response header. */
+static int wr_trans_rx_parse(char *rx_buf, unsigned int rx_len,
+			     u16 expected_len)
+{
+	struct trans_response *resp;
+	u8 trans_code;
+	u16 val_len;
+
+	if (rx_len != TRANS_RESP_SIZE)
+		return -EINVAL;
+
+	resp = (struct trans_response *)rx_buf;
+
+	trans_code = resp->r_trans_code ^ 0x80;
+	val_len = be16_to_cpu(resp->size);
+	if (!val_len || !IS_ALIGNED(val_len, VAL_SIZE) ||
+	    val_len != expected_len)
+		return -EINVAL;
+
+	/* error out if the trans code doesn't match what the host sent */
+	if ((val_len == VAL_SIZE && trans_code != TRANS_CODE_WRITE) ||
+	    (val_len > VAL_SIZE && trans_code != TRANS_CODE_SEQ_WRITE))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* Input a transaction layer byte stream in rx_buf, output the read-out data. */
+static int trans_rx_parse(char *rx_buf, unsigned int rx_len, bool is_read,
+			  u16 expected_len, u32 *rd_val)
+{
+	if (is_read) {
+		if (expected_len != rx_len)
+			return -EINVAL;
+
+		return rd_trans_rx_parse(rx_buf, rx_len, rd_val);
+	}
+
+	return wr_trans_rx_parse(rx_buf, rx_len, expected_len);
+}
+
+/* Packet Layer & Physical Layer */
+
+#define PKT_SOP			0x7a
+#define PKT_EOP			0x7b
+#define PKT_CHANNEL		0x7c
+#define PKT_ESC			0x7d
+
+#define PHY_IDLE		0x4a
+#define PHY_ESC			0x4d
+
+/*
+ * Input a trans stream in trans_tx_buf, format a phy stream in phy_tx_buf.
+ */
+static void pkt_phy_tx_prepare(char *trans_tx_buf, unsigned int trans_tx_len,
+			       char *phy_tx_buf, unsigned int *phy_tx_len)
+{
+	unsigned int i;
+	char *b, *p;
+
+	b = trans_tx_buf;
+	p = phy_tx_buf;
+
+	*p++ = PKT_SOP;
+
+	/*
+	 * The driver doesn't support multiple channels, so the channel
+	 * number is always 0.
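+	 * A non-zero channel number in received data is treated as an
+	 * error (see pkt_phy_rx_parse()).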
+	 */
+	*p++ = PKT_CHANNEL;
+	*p++ = 0x0;
+
+	for (i = 0; i < trans_tx_len; i++) {
+		/* EOP should be inserted before the last valid char */
+		if (i == trans_tx_len - 1)
+			*p++ = PKT_EOP;
+
+		/* insert an ESCAPE char if the data value equals a special char */
+		switch (*b) {
+		case PKT_SOP:
+		case PKT_EOP:
+		case PKT_CHANNEL:
+		case PKT_ESC:
+			*p++ = PKT_ESC;
+			*p++ = *b++ ^ 0x20;
+			break;
+		case PHY_IDLE:
+		case PHY_ESC:
+			*p++ = PHY_ESC;
+			*p++ = *b++ ^ 0x20;
+			break;
+		default:
+			*p++ = *b++;
+			break;
+		}
+	}
+
+	*phy_tx_len = p - phy_tx_buf;
+}
+
+/*
+ * Input a phy stream in phy_rx_buf, parse out a trans stream in trans_rx_buf.
+ */
+static int pkt_phy_rx_parse(struct device *dev,
+			    char *phy_rx_buf, unsigned int phy_rx_len,
+			    char *trans_rx_buf, unsigned int *trans_rx_len)
+{
+	char *b, *p, *sop = NULL;
+
+	b = phy_rx_buf;
+	p = trans_rx_buf;
+
+	/* Find the last SOP */
+	while (b < phy_rx_buf + phy_rx_len) {
+		if (*b == PKT_SOP)
+			sop = b;
+		b++;
+	}
+
+	if (!sop) {
+		dev_err(dev, "%s no SOP\n", __func__);
+		return -EINVAL;
+	}
+
+	b = sop + 1;
+
+	while (b < phy_rx_buf + phy_rx_len) {
+		switch (*b) {
+		case PHY_IDLE:
+			b++;
+			break;
+		case PKT_CHANNEL:
+			/*
+			 * We don't support multiple channels, so error out
+			 * if a non-zero channel number is found.
+			 */
+			b++;
+			if (*b != 0x0)
+				return -EINVAL;
+			b++;
+			break;
+		case PHY_ESC:
+		case PKT_ESC:
+			b++;
+			*p++ = *b++ ^ 0x20;
+			break;
+		case PKT_SOP:
+			dev_err(dev, "%s 2nd SOP\n", __func__);
+			return -EINVAL;
+		case PKT_EOP:
+			/* the char after EOP is the last valid char */
+			b++;
+
+			switch (*b) {
+			case PHY_ESC:
+			case PKT_ESC:
+				/* the last char may also be escaped */
+				b++;
+				*p++ = *b++ ^ 0x20;
+				break;
+			case PHY_IDLE:
+			case PKT_SOP:
+			case PKT_CHANNEL:
+			case PKT_EOP:
+				dev_err(dev, "%s unexpected 0x%x\n",
+					__func__, *b);
+				return -EINVAL;
+			default:
+				*p++ = *b++;
+				break;
+			}
+
+			*trans_rx_len = p - trans_rx_buf;
+
+			return 0;
+		default:
+			*p++ = *b++;
+			break;
+		}
+	}
+
+	/* We parsed all the bytes but didn't find an EOP */
+	dev_err(dev, "%s no EOP\n", __func__);
+	return -EINVAL;
+}
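+
+#ifdef SPI_AVMM_FRAME_EXAMPLE
+/*
+ * Illustrative sketch only, compiled out by default: it re-creates the
+ * write32 byte stream from the comment block at the top of this file
+ * using the two tx helpers above. SPI_AVMM_FRAME_EXAMPLE is a
+ * hypothetical guard, not a real kernel config option.
+ */
+static void spi_avmm_frame_example(void)
+{
+	char trans_buf[TRANS_TX_MAX], phy_buf[PHY_TX_MAX];
+	unsigned int trans_len, phy_len;
+	u32 val = 0x5f03ff4a;
+
+	/* Transaction layer: 00 00 00 04 02 4b 7a 40 4a ff 03 5f */
+	trans_tx_prepare(false, 0x024b7a40, &val, VAL_SIZE,
+			 trans_buf, &trans_len);
+
+	/*
+	 * Packet/physical layer, phy_len == 18:
+	 * 7a 7c 00 00 00 00 04 02 4b 7d 5a 40 4d 6a ff 03 7b 5f
+	 */
+	pkt_phy_tx_prepare(trans_buf, trans_len, phy_buf, &phy_len);
+}
+#endif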
+
+/*
+ * The tx_buf length should be aligned with the BPW of the SPI device.
+ * Spare bytes should be padded with PHY_IDLE, then the slave just drops
+ * them.
+ *
+ * The driver will not simply pad 4a at the tail. The concern is that the
+ * driver does not store MISO data during the tx phase; if the driver padded
+ * 4a at the tail, the slave might be fast enough to respond during the
+ * padding time, and those rx bytes would be lost. In the following case,
+ * 7a, 7c, 00 would be lost:
+ *
+ * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
+ * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
+ *
+ * So the driver moves the EOP and the bytes after it to the end of the
+ * aligned size, then fills the hole with PHY_IDLE. For the 13-byte stream
+ * above and a 4-byte word, that means aligning up to 16:
+ *
+ * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
+ * after pad  ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
+ *
+ * The slave then does not see the entire packet before the tx phase is
+ * over, so it cannot respond early either.
+ */
+static void phy_tx_pad(unsigned int word_len, char *phy_buf,
+		       unsigned int phy_buf_len, unsigned int *aligned_len)
+{
+	char *p = &phy_buf[phy_buf_len - 1], *dst_p;
+
+	*aligned_len = ALIGN(phy_buf_len, word_len);
+
+	if (*aligned_len == phy_buf_len)
+		return;
+
+	dst_p = &phy_buf[*aligned_len - 1];
+
+	/* move the EOP and bytes after EOP to the end of the aligned size */
+	while (p > phy_buf) {
+		*dst_p = *p;
+
+		if (*p == PKT_EOP)
+			break;
+
+		p--;
+		dst_p--;
+	}
+
+	/* fill the hole with PHY_IDLEs */
+	while (p < dst_p)
+		*p++ = PHY_IDLE;
+}
+
+static bool br_need_swap(struct spi_avmm_bridge *br)
+{
+	return (br->word_len == 4 && !(br->spi->mode & SPI_LSB_FIRST));
+}
+
+static void swap_word(u32 *p)
+{
+	*p = swab32p(p);
+}
+
+static void br_rx_swap_word(struct spi_avmm_bridge *br, char *rxbuf)
+{
+	if (!br_need_swap(br))
+		return;
+
+	swap_word((u32 *)rxbuf);
+}
+
+static void br_tx_swap(struct spi_avmm_bridge *br)
+{
+	unsigned int count;
+	u32 *p;
+
+	/*
+	 * Phy layer data is filled byte by byte from low addr to high, and
+	 * the protocol requires LSB first. If the spi device cannot do an
+	 * LSB_FIRST transfer, the driver needs to swap the byte order word
+	 * by word.
+	 */
+	if (!br_need_swap(br))
+		return;
+
+	count = br->phy_tx_len / 4;
+
+	p = (u32 *)br->phy_tx_buf;
+	while (count--) {
+		swap_word(p);
+		p++;
+	}
+}
+
+#define RX_NOT_READY_1	PHY_IDLE
+#define RX_NOT_READY_4	(PHY_IDLE << 24 | PHY_IDLE << 16 |	\
+			 PHY_IDLE << 8 | PHY_IDLE)
+
+static bool is_word_not_ready(const char *rxbuf, u32 word_len)
+{
+	return (word_len == 1 && *rxbuf == RX_NOT_READY_1) ||
+	       (word_len == 4 && *(u32 *)rxbuf == RX_NOT_READY_4);
+}
+
+static const char *find_eop(const char *rxbuf, u32 word_len)
+{
+	return memchr(rxbuf, PKT_EOP, word_len);
+}
+
+static int br_tx_all(struct spi_avmm_bridge *br)
+{
+	return spi_write(br->spi, br->phy_tx_buf, br->phy_tx_len);
+}
+
+static int br_rx_word_timeout(struct spi_avmm_bridge *br, char *rxbuf)
+{
+	unsigned long poll_timeout;
+	bool last_try = false;
+	int ret;
+
+	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
+	for (;;) {
+		ret = spi_read(br->spi, rxbuf, br->word_len);
+		if (ret)
+			return ret;
+
+		/* keep on reading if the rx word has no valid byte. */
+		if (!is_word_not_ready(rxbuf, br->word_len))
+			break;
+
+		if (last_try)
+			return -ETIMEDOUT;
+
+		/*
+		 * We time out when rx stays invalid for some time. But it is
+		 * possible that we are scheduled out for a long time after
+		 * spi_read(), and by the time we are scheduled back in, the
+		 * SW timeout has fired even though the HW may have been
+		 * ready long ago. So do one extra read after the timeout:
+		 * if we get a valid word we return it; otherwise a real HW
+		 * issue has occurred.
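+		 * (With SPI_AVMM_XFER_TIMEOUT at 200 ms, a long scheduling
+		 * delay after spi_read() would otherwise be misreported as
+		 * -ETIMEDOUT even though the slave replied in time.)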
+		 */
+		if (time_after(jiffies, poll_timeout))
+			last_try = true;
+	}
+
+	return 0;
+}
+
+static void br_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
+			  u32 *wr_val, u16 count)
+{
+	unsigned int tx_len;
+
+	trans_tx_prepare(is_read, reg, wr_val, VAL_SIZE * count,
+			 br->trans_tx_buf, &tx_len);
+	pkt_phy_tx_prepare(br->trans_tx_buf, tx_len,
+			   br->phy_tx_buf, &tx_len);
+	phy_tx_pad(br->word_len, br->phy_tx_buf, tx_len, &tx_len);
+
+	br->phy_tx_len = tx_len;
+}
+
+static int br_rx_parse(struct spi_avmm_bridge *br, bool is_read,
+		       u16 expected_count, u32 *rd_val)
+{
+	struct device *dev = &br->spi->dev;
+	unsigned int trans_rx_len;
+	int ret;
+
+	ret = pkt_phy_rx_parse(dev, br->phy_rx_buf, br->phy_rx_len,
+			       br->trans_rx_buf, &trans_rx_len);
+	if (ret) {
+		dev_err(dev, "%s pkt_phy_rx_parse failed %d\n",
+			__func__, ret);
+		goto phy_pkt_rx_err;
+	}
+
+	ret = trans_rx_parse(br->trans_rx_buf, trans_rx_len, is_read,
+			     expected_count * VAL_SIZE, rd_val);
+	if (!ret)
+		return 0;
+
+	dev_err(dev, "%s trans_rx_parse failed %d\n", __func__, ret);
+	print_hex_dump(KERN_DEBUG, "trans rx:", DUMP_PREFIX_OFFSET,
+		       16, 1, br->trans_rx_buf, trans_rx_len, true);
+
+phy_pkt_rx_err:
+	print_hex_dump(KERN_DEBUG, "phy rx:", DUMP_PREFIX_OFFSET,
+		       16, 1, br->phy_rx_buf, br->phy_rx_len, true);
+
+	return ret;
+}
+
+static void rx_max_adjust(const char *w, const char *eop, u32 word_len,
+			  u32 rx_len, u32 *rx_max)
+{
+	u32 remain_bytes = w + word_len - 1 - eop, add_bytes = 0;
+	const char *ptr;
+
+	if (remain_bytes == 0) {
+		/*
+		 * EOP is the last byte in the word, rx 2 more bytes and
+		 * finish.
+		 */
+		add_bytes = 2;
+	} else if (remain_bytes == 1) {
+		/* 1 byte left in the word after EOP. */
+		ptr = eop + 1;
+		/*
+		 * If the byte is an ESCAPE char, rx 1 more byte and
+		 * finish. Otherwise it is OK to finish rx immediately.
+		 */
+		if (*ptr == PHY_ESC || *ptr == PKT_ESC)
+			add_bytes = 1;
+	}
+	/*
+	 * With 2 or more bytes left in the word after EOP, it is OK to
+	 * finish rx immediately.
+	 */
+
+	/* Adjust rx_max, making sure we don't exceed the original rx_max. */
+	*rx_max = min(*rx_max, ALIGN(rx_len + add_bytes, word_len));
+}
+
+/*
+ * In the tx phase, the slave only returns PHY_IDLE (0x4a), so the driver
+ * ignores rx during tx.
+ *
+ * The slave may send an unknown number of PHY_IDLEs in the rx phase, so we
+ * cannot prepare a fixed-length buffer to receive all rx data in one batch.
+ * We have to read word by word and filter out pure PHY_IDLE words. The
+ * remaining words have a predictable max length, and the driver prepares a
+ * large enough buffer for them. See the comments on the definition of
+ * PHY_RX_MAX.
+ *
+ * When an EOP is detected, 1 or 2 (if the last byte is escaped) more bytes
+ * should be received, and then rx finishes.
+ */
+static int br_txrx(struct spi_avmm_bridge *br)
+{
+	u32 wl = br->word_len, rx_max = PHY_RX_MAX, rx_len = 0;
+	char *rxbuf = br->phy_rx_buf;
+	const char *eop = NULL;
+	int ret;
+
+	/* reorder words for the spi transfer */
+	br_tx_swap(br);
+
+	ret = br_tx_all(br);
+	if (ret)
+		goto out;
+
+	while (rx_len <= rx_max - wl) {
+		/* read word by word */
+		ret = br_rx_word_timeout(br, rxbuf);
+		if (ret)
+			goto out;
+
+		rx_len += wl;
+
+		/* reorder the word back now */
+		br_rx_swap_word(br, rxbuf);
+
+		if (!eop) {
+			eop = find_eop(rxbuf, wl);
+			if (eop) {
+				/*
+				 * An EOP was found in the word, so we are
+				 * about to finish rx and there is no need
+				 * to fill the whole phy rx buf. But EOP is
+				 * not the last byte in the rx stream: we
+				 * may read 1 or 2 more bytes and finish rx
+				 * early by adjusting rx_max.
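+				 * (For example, with wl == 4: if EOP is the
+				 * third byte of a word and the fourth byte
+				 * is not an escape char, rx_max shrinks to
+				 * the current rx_len and rx stops here.)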
+ */ + rx_max_adjust(rxbuf, eop, wl, rx_len, &rx_max); + } + } + + rxbuf += wl; + } + +out: + br->phy_rx_len = rx_len; + + ret = ret ? : (eop ? 0 : -EINVAL); + if (ret) { + dev_err(&br->spi->dev, "%s br txrx err %d\n", __func__, ret); + print_hex_dump(KERN_DEBUG, "phy rx:", DUMP_PREFIX_OFFSET, + 16, 1, br->phy_rx_buf, rx_len, true); + } + return ret; +} + +static int do_reg_access(void *context, bool is_read, unsigned int reg, + unsigned int *value, u16 count) +{ + struct spi_avmm_bridge *br = context; + int ret; + + br_tx_prepare(br, is_read, reg, value, count); + + ret = br_txrx(br); + if (ret) + return ret; + + return br_rx_parse(br, is_read, count, value); +} + +#define do_reg_read(_ctx, _reg, _value, _count) \ + do_reg_access(_ctx, true, _reg, _value, _count) +#define do_reg_write(_ctx, _reg, _value, _count) \ + do_reg_access(_ctx, false, _reg, _value, _count) + +static int regmap_spi_avmm_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + return do_reg_read(context, reg, val, 1); +} + +static int regmap_spi_avmm_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + return do_reg_write(context, reg, &val, 1); +} + +static int regmap_spi_avmm_gather_write(void *context, + const void *reg_buf, size_t reg_len, + const void *val_buf, size_t val_len) +{ + if (reg_len != REG_SIZE) + return -EINVAL; + + return do_reg_write(context, *(u32 *)reg_buf, (u32 *)val_buf, + (u16)(val_len / VAL_SIZE)); +} + +static int regmap_spi_avmm_write(void *context, const void *data, size_t count) +{ + if (count < REG_SIZE + VAL_SIZE) + return -EINVAL; + + return regmap_spi_avmm_gather_write(context, data, REG_SIZE, + data + REG_SIZE, count - REG_SIZE); +} + +static int regmap_spi_avmm_read(void *context, + const void *reg_buf, size_t reg_len, + void *val_buf, size_t val_len) +{ + if (reg_len != REG_SIZE) + return -EINVAL; + + return do_reg_read(context, *(u32 *)reg_buf, val_buf, + (u16)(val_len / VAL_SIZE)); +} + +static void spi_avmm_bridge_ctx_free(void *context) +{ + kfree(context); +} + +static struct spi_avmm_bridge * +spi_avmm_bridge_ctx_gen(struct spi_device *spi) +{ + struct spi_avmm_bridge *br; + + br = kzalloc(sizeof(*br), GFP_KERNEL); + if (br) { + br->spi = spi; + br->word_len = spi->bits_per_word / 8; + } + + return br; +} + +static const struct regmap_bus regmap_spi_avmm_bus = { + .write = regmap_spi_avmm_write, + .gather_write = regmap_spi_avmm_gather_write, + .read = regmap_spi_avmm_read, + .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, + .val_format_endian_default = REGMAP_ENDIAN_NATIVE, + .max_raw_read = VAL_SIZE * MAX_RX_CNT, + .max_raw_write = VAL_SIZE * MAX_TX_CNT, + + .reg_write = regmap_spi_avmm_reg_write, + .reg_read = regmap_spi_avmm_reg_read, + + .free_context = spi_avmm_bridge_ctx_free, +}; + +struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name) +{ + struct spi_avmm_bridge *bridge; + struct regmap *map; + + /* Only support BPW == 8 or 32 now */ + if (!spi || (spi->bits_per_word != 8 && spi->bits_per_word != 32)) + return ERR_PTR(-EINVAL); + + bridge = spi_avmm_bridge_ctx_gen(spi); + if (!bridge) + return ERR_PTR(-ENOMEM); + + map = __devm_regmap_init(&spi->dev, ®map_spi_avmm_bus, + bridge, config, lock_key, lock_name); + if (IS_ERR(map)) { + spi_avmm_bridge_ctx_free(bridge); + return ERR_CAST(map); + } + + return map; +} diff --git a/drivers/mfd/intel-spi-avmm.h b/drivers/mfd/intel-spi-avmm.h new file mode 100644 index 
0000000000000..a238132425ec1 --- /dev/null +++ b/drivers/mfd/intel-spi-avmm.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver Header File for Intel SPI Slave to AVMM Bus Bridge + * + * Copyright (C) 2018-2020 Intel Corporation. All rights reserved. + * + */ + +#ifndef __INTEL_SPI_AVMM_H +#define __INTEL_SPI_AVMM_H + +#include +#include + +#define TRANS_CODE_WRITE 0x0 +#define TRANS_CODE_SEQ_WRITE 0x4 +#define TRANS_CODE_READ 0x10 +#define TRANS_CODE_SEQ_READ 0x14 +#define TRANS_CODE_NO_TRANS 0x7f + +struct trans_header { + u8 trans_code; + u8 rsvd; + __be16 size; + __be32 addr; +}; + +struct trans_response { + u8 r_trans_code; + u8 rsvd; + __be16 size; +}; + +/* slave's register addr is 32 bits */ +#define REG_SIZE 4UL + +/* slave's register value is 32 bits */ +#define VAL_SIZE 4UL + +/* + * max rx size could be larger. But considering the buffer consuming, + * it is proper that we limit 1KB xfer at max. + */ +#define MAX_RX_CNT 256UL +#define MAX_TX_CNT 1UL + +#define TRANS_HEAD_SIZE (sizeof(struct trans_header)) +#define TRANS_RESP_SIZE (sizeof(struct trans_response)) + +#define WR_TRANS_TX_SIZE(n) (TRANS_HEAD_SIZE + VAL_SIZE * (n)) +#define RD_TRANS_TX_SIZE TRANS_HEAD_SIZE + +#define TRANS_TX_MAX WR_TRANS_TX_SIZE(MAX_TX_CNT) +/* + * The worst case, all chars are escaped, plus 4 special chars (SOP, CHANNEL, + * CHANNEL_NUM, EOP). Finally make sure the length is aligned to SPI BPW. + */ +#define PHY_TX_MAX ALIGN(2 * TRANS_TX_MAX + 4, 4) + +/* No additional chars in transaction layer RX, just read out data */ +#define TRANS_RX_MAX (VAL_SIZE * MAX_RX_CNT) +/* + * Unlike tx, phy rx is bothered by possible PHY_IDLE bytes from slave, + * Driver will read the word one by one and filter out pure IDLE words. + * The rest of words may still contain IDLE chars. A worse case could be + * receiving word 0x7a7a7a7a in 4 BPW transfer mode. The 4 bytes word may + * consume up to 12 bytes in rx buffer, like: + * |4a|4a|4a|7d| |5a|7d|5a|7d| |5a|7d|5a|4a| + * Besides, the packet layer header may consume up to 8 bytes, like: + * |4a|4a|4a|7a| |7c|00|4a|4a| + * So the PHY_RX_MAX is calculated as following. + */ +#define PHY_RX_MAX (TRANS_RX_MAX * 3 + 8) + +/** + * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge + * + * @spi: spi slave associated with this bridge. + * @word_len: bytes of word for spi transfer. + * @phy_tx_len: length of valid data in phy_tx_buf which will be sent by spi. + * @phy_rx_len: length of valid data in phy_rx_buf which received from spi. + * + * As device's registers are implemented on the AVMM bus address space, it + * requires driver to issue formatted requests to spi slave to AVMM bus master + * bridge to perform register access. + */ +struct spi_avmm_bridge { + struct spi_device *spi; + unsigned int word_len; + unsigned int phy_tx_len; + unsigned int phy_rx_len; + /* bridge buffer used in translation between protocol layers */ + char trans_tx_buf[TRANS_TX_MAX]; + char trans_rx_buf[TRANS_RX_MAX]; + char phy_tx_buf[PHY_TX_MAX]; + char phy_rx_buf[PHY_RX_MAX]; +}; + +struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi, + const struct regmap_config *config, + struct lock_class_key *lock_key, + const char *lock_name); + +/** + * devm_regmap_init_spi_avmm() - Initialise register map for Intel SPI Slave + * to AVMM Bus Bridge + * + * @spi: Device that will be interacted with + * @config: Configuration for register map + * + * The return value will be an ERR_PTR() on error or a valid pointer + * to a struct regmap. 
The map will be automatically freed by the + * device management code. + */ +#define devm_regmap_init_spi_avmm(spi, config) \ + __regmap_lockdep_wrapper(__devm_regmap_init_spi_avmm, #config, \ + spi, config) + +#endif /* __INTEL_SPI_AVMM_H */ diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index ad34e4335df2f..0a4d651b9cd6b 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -342,4 +342,10 @@ config IGC To compile this driver as a module, choose M here. The module will be called igc. +config INTEL_LL_10G_MAC + tristate "Control Plane Driver for Intel Low Latency 10G MAC" + help + This driver provides control plane support for an Intel + Low Latency 10G MAC. + endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 3075290063f66..4dc343c2ac17c 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_IXGB) += ixgb/ obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ +obj-$(CONFIG_INTEL_LL_10G_MAC) += intel_ll_10g_mac.o diff --git a/drivers/net/ethernet/intel/intel_ll_10g_mac.c b/drivers/net/ethernet/intel/intel_ll_10g_mac.c new file mode 100644 index 0000000000000..ec0334675057b --- /dev/null +++ b/drivers/net/ethernet/intel/intel_ll_10g_mac.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Intel(R) Low Latency 10G Network Driver + * + * Copyright (C) 2020 Intel Corporation. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct intel_ll_10g_drvdata { + struct net_device *netdev; +}; + +struct intel_ll_10g_netdata { + struct dfl_device *dfl_dev; +}; + +static int netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + netdev->mtu = new_mtu; + + return 0; +} + +static int netdev_set_features(struct net_device *dev, + netdev_features_t features) +{ + return 0; +} + +static int netdev_set_mac_address(struct net_device *ndev, void *p) +{ + struct sockaddr *addr = p; + + memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN); + + /* TODO program hardware */ + + return 0; +} + +static const struct net_device_ops netdev_ops = { + .ndo_change_mtu = netdev_change_mtu, + .ndo_set_features = netdev_set_features, + .ndo_set_mac_address = netdev_set_mac_address, +}; + +struct stat_info { + unsigned int addr; + char string[ETH_GSTRING_LEN]; +}; + +#define STAT_INFO(_addr, _string) \ + .addr = _addr, .string = _string, + +static struct stat_info stats_10g[] = { + /* TX Statistics */ + {STAT_INFO(0x142, "tx_frame_ok")}, + {STAT_INFO(0x144, "tx_frame_err")}, + {STAT_INFO(0x146, "tx_frame_crc_err")}, + {STAT_INFO(0x148, "tx_octets_ok")}, + {STAT_INFO(0x14a, "tx_pause_mac_ctrl_frames")}, + {STAT_INFO(0x14c, "tx_if_err")}, + {STAT_INFO(0x14e, "tx_unicast_frame_ok")}, + {STAT_INFO(0x150, "tx_unicast_frame_err")}, + {STAT_INFO(0x152, "tx_multicast_frame_ok")}, + {STAT_INFO(0x154, "tx_multicast_frame_err")}, + {STAT_INFO(0x156, "tx_broadcast_frame_ok")}, + {STAT_INFO(0x158, "tx_broadcast_frame_err")}, + {STAT_INFO(0x15a, "tx_ether_octets")}, + {STAT_INFO(0x15c, "tx_ether_pkts")}, + {STAT_INFO(0x15e, "tx_ether_undersize_pkts")}, + {STAT_INFO(0x160, "tx_ether_oversize_pkts")}, + {STAT_INFO(0x162, "tx_ether_pkts_64_octets")}, + {STAT_INFO(0x164, "tx_ether_pkts_65_127_octets")}, + {STAT_INFO(0x166, "tx_ether_pkts_128_255_octets")}, + {STAT_INFO(0x168, "tx_ether_pkts_256_511_octets")}, + 
{STAT_INFO(0x16a, "tx_ether_pkts_512_1023_octets")}, + {STAT_INFO(0x16c, "tx_ether_pkts_1024_1518_octets")}, + {STAT_INFO(0x16e, "tx_ether_pkts_1519_x_octets")}, + /* {STAT_INFO(0x170, "tx_ether_fragments")}, */ + /* {STAT_INFO(0x172, "tx_ether_jabbers")}, */ + /* {STAT_INFO(0x174, "tx_ether_crc_err")}, */ + {STAT_INFO(0x176, "tx_unicast_mac_ctrl_frames")}, + {STAT_INFO(0x178, "tx_multicast_mac_ctrl_frames")}, + {STAT_INFO(0x17a, "tx_broadcast_mac_ctrl_frames")}, + {STAT_INFO(0x17c, "tx_pfc_mac_ctrl_frames")}, + + /* RX Statistics */ + {STAT_INFO(0x1c2, "rx_frame_ok")}, + {STAT_INFO(0x1c4, "rx_frame_err")}, + {STAT_INFO(0x1c6, "rx_frame_crc_err")}, + {STAT_INFO(0x1c8, "rx_octets_ok")}, + {STAT_INFO(0x1ca, "rx_pause_mac_ctrl_frames")}, + {STAT_INFO(0x1cc, "rx_if_err")}, + {STAT_INFO(0x1ce, "rx_unicast_frame_ok")}, + {STAT_INFO(0x1d0, "rx_unicast_frame_err")}, + {STAT_INFO(0x1d2, "rx_multicast_frame_ok")}, + {STAT_INFO(0x1d4, "rx_multicast_frame_err")}, + {STAT_INFO(0x1d6, "rx_broadcast_frame_ok")}, + {STAT_INFO(0x1d8, "rx_broadcast_frame_err")}, + {STAT_INFO(0x1da, "rx_ether_octets")}, + {STAT_INFO(0x1dc, "rx_ether_pkts")}, + {STAT_INFO(0x1de, "rx_ether_undersize_pkts")}, + {STAT_INFO(0x1e0, "rx_ether_oversize_pkts")}, + {STAT_INFO(0x1e2, "rx_ether_pkts_64_octets")}, + {STAT_INFO(0x1e4, "rx_ether_pkts_65_127_octets")}, + {STAT_INFO(0x1e6, "rx_ether_pkts_128_255_octets")}, + {STAT_INFO(0x1e8, "rx_ether_pkts_256_511_octets")}, + {STAT_INFO(0x1ea, "rx_ether_pkts_512_1023_octets")}, + {STAT_INFO(0x1ec, "rx_ether_pkts_1024_1518_octets")}, + {STAT_INFO(0x1ee, "rx_ether_pkts_1519_x_octets")}, + {STAT_INFO(0x1f0, "rx_ether_fragments")}, + {STAT_INFO(0x1f2, "rx_ether_jabbers")}, + {STAT_INFO(0x1f4, "rx_ether_crc_err")}, + {STAT_INFO(0x1f6, "rx_unicast_mac_ctrl_frames")}, + {STAT_INFO(0x1f8, "rx_multicast_mac_ctrl_frames")}, + {STAT_INFO(0x1fa, "rx_broadcast_mac_ctrl_frames")}, + {STAT_INFO(0x1fc, "rx_pfc_mac_ctrl_frames")}, +}; + +static void ethtool_get_strings(struct net_device *netdev, u32 stringset, + u8 *s) +{ + unsigned int i, stats_num = 0; + struct stat_info *stat; + + switch (stringset) { + case ETH_SS_STATS: + stat = stats_10g; + stats_num = ARRAY_SIZE(stats_10g); + break; + default: + return; + } + + for (i = 0; i < stats_num; i++, s += ETH_GSTRING_LEN) + memcpy(s, stat[i].string, ETH_GSTRING_LEN); +} + +static int ethtool_get_sset_count(struct net_device *netdev, int stringset) +{ + switch (stringset) { + case ETH_SS_STATS: + return ARRAY_SIZE(stats_10g); + + default: + return 0; + } +} + +static void ethtool_get_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + unsigned int i, stats_num = ARRAY_SIZE(stats_10g); + struct stat_info *stat = stats_10g; + + for (i = 0; i < stats_num; i++) + data[i] = stat[i].addr; +} + +static const struct ethtool_ops ethtool_ops = { + .get_strings = ethtool_get_strings, + .get_sset_count = ethtool_get_sset_count, + .get_ethtool_stats = ethtool_get_stats, +}; + +static void intel_ll_10g_init_netdev(struct net_device *netdev) +{ + netdev->ethtool_ops = ðtool_ops; + netdev->netdev_ops = &netdev_ops; + netdev->features = 0; + netdev->hw_features |= NETIF_F_LOOPBACK; + netdev->hard_header_len = 0; + netdev->priv_flags |= IFF_NO_QUEUE; + + netdev->needs_free_netdev = true; + + ether_setup(netdev); +} + +static int intel_ll_10g_mac_probe(struct dfl_device *dfl_dev) +{ + struct intel_ll_10g_netdata *npriv; + struct intel_ll_10g_drvdata *priv; + int ret; + + priv = devm_kzalloc(&dfl_dev->dev, sizeof(*priv), GFP_KERNEL); + + if (!priv) + 
return -ENOMEM; + + dev_set_drvdata(&dfl_dev->dev, priv); + + dev_info(&dfl_dev->dev, "%s priv %p\n", __func__, priv); + + priv->netdev = alloc_netdev(sizeof(struct intel_ll_10g_netdata), + "ll_10g%d", NET_NAME_UNKNOWN, + intel_ll_10g_init_netdev); + + if (!priv->netdev) + return -ENOMEM; + + npriv = netdev_priv(priv->netdev); + + npriv->dfl_dev = dfl_dev; + + SET_NETDEV_DEV(priv->netdev, &dfl_dev->dev); + + ret = register_netdev(priv->netdev); + + if (ret) + dev_err(&dfl_dev->dev, "failed to register %s: %d", + priv->netdev->name, ret); + + return ret; +} + +static int intel_ll_10g_mac_remove(struct dfl_device *dfl_dev) +{ + struct intel_ll_10g_drvdata *priv = dev_get_drvdata(&dfl_dev->dev); + + dev_info(&dfl_dev->dev, "%s %p\n", __func__, priv); + + unregister_netdev(priv->netdev); + + return 0; +} + +#define FME_FEATURE_ID_LL_10G_MAC 0xf + +static const struct dfl_device_id intel_ll_10g_mac_ids[] = { + { FME_ID, FME_FEATURE_ID_LL_10G_MAC }, + { } +}; + +static struct dfl_driver intel_ll_10g_mac_driver = { + .drv = { + .name = "intel-ll-10g-mac", + }, + .id_table = intel_ll_10g_mac_ids, + .probe = intel_ll_10g_mac_probe, + .remove = intel_ll_10g_mac_remove, +}; + +module_dfl_driver(intel_ll_10g_mac_driver); +MODULE_DEVICE_TABLE(dfl, intel_ll_10g_mac_ids); +MODULE_DESCRIPTION("Network Device Driver for Intel(R) Low Latency 10G MAC"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 3fa33d27eebaf..da9d963d5e994 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -535,3 +535,12 @@ endif # PHYLIB config MICREL_KS8995MA tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch" depends on SPI + +config INTEL_S10_PHY + tristate "Intel HSSI configurable ethernet phy driver" + depends on FPGA_DFL + select FPGA_DFl_HSSI + help + This is the Intel HSSI configurable ethernet phy driver. It + provides the ability to view and change some of the transceiver + tuner parameters for a QSFP interface. diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 2f5c7093a65b3..f82b659fe3074 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -102,3 +102,4 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_TERANETICS_PHY) += teranetics.o obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o +obj-$(CONFIG_INTEL_S10_PHY) += intel-s10-phy.o diff --git a/drivers/net/phy/intel-s10-phy.c b/drivers/net/phy/intel-s10-phy.c new file mode 100644 index 0000000000000..5c3f714507ab7 --- /dev/null +++ b/drivers/net/phy/intel-s10-phy.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Stratix 10 HSSI Phy + * + * Copyright 2019-2020 Intel Corporation, Inc. 
+ */ + +#include +#include +#include +#include +#include +#include + +/* HSSI QSFP Control & Status Registers */ +#define HSSI_QSFP_RCFG_CMD(phy) ((phy)->phy_offset + 0x0) +#define QSFP_RCFG_CMD GENMASK_ULL(1, 0) +#define QSFP_RCFG_CMD_CLR 0 +#define QSFP_RCFG_CMD_RD 1 +#define QSFP_RCFG_CMD_WRT 2 +#define QSFP_RCFG_CMD_SEL_XCVR GENMASK_ULL(5, 4) /* XCVR 0 - 3 */ +#define QSFP_RCFG_XCVR_ADDR GENMASK_ULL(26, 16) +#define QSFP_RCFG_XCVR_ACK BIT_ULL(32) + +#define HSSI_QSFP_RCFG_DATA(phy) ((phy)->phy_offset + 0x8) +#define XCVR_RCFG_RDATA GENMASK_ULL(31, 0) /* RO: rd data */ +#define XCVR_RCFG_WDATA GENMASK_ULL(63, 32) /* RW: wrt data */ + +#define HSSI_QSFP_CTRL(phy) ((phy)->phy_offset + 0x10) +#define DATA_RATE_SEL_1G BIT_ULL(0) /* 1 = Selected */ +#define DATA_RATE_SEL_10G BIT_ULL(1) +#define DATA_RATE_SEL_25G BIT_ULL(2) +#define DATA_RATE_SEL_40G BIT_ULL(3) +#define DATA_RATE_SEL_50G BIT_ULL(4) +#define DATA_RATE_SEL_100G BIT_ULL(5) +#define DATA_RATE_SEL_200G BIT_ULL(6) +#define DATA_RATE_SEL_400G BIT_ULL(7) +#define GLOBAL_RESET BIT_ULL(8) /* 1 = Active */ +#define RECONFIG_RESET BIT_ULL(9) +#define CHAN0_RESET BIT_ULL(10) +#define CHAN1_RESET BIT_ULL(11) +#define CHAN2_RESET BIT_ULL(12) +#define CHAN3_RESET BIT_ULL(13) +#define SELECT_ATX_PLL BIT_ULL(14) /* 0 = 10G, 1 = 25G */ +#define SELECT_TX_CORE_CLK BIT_ULL(15) /* 0 = PHY, 1 = IOPLL */ +#define SELECT_RX_CORE_CLK BIT_ULL(16) /* 0 = PHY, 1 = IOPLL */ + +#define HSSI_QSFP_STAT(phy) ((phy)->phy_offset + 0x18) +#define HSSI_QSFP_STAT_CHAN0 GENMASK_ULL(15, 0) +#define HSSI_QSFP_STAT_CHAN1 GENMASK_ULL(31, 16) +#define HSSI_QSFP_STAT_CHAN2 GENMASK_ULL(47, 32) +#define HSSI_QSFP_STAT_CHAN3 GENMASK_ULL(63, 48) +#define TX_ANALOG_RST_STAT BIT_ULL(0) +#define TX_DIG_RST_STAT BIT_ULL(1) +#define RX_ANALOG_RST_STAT BIT_ULL(2) +#define RX_DIG_RST_STAT BIT_ULL(3) +#define TX_DIG_RST_TIMEOUT BIT_ULL(4) +#define RX_DIG_RST_TIMEOUT BIT_ULL(5) +#define TX_FIFO_READY BIT_ULL(6) +#define RX_FIFO_READY BIT_ULL(7) +#define TX_XFER_READY BIT_ULL(8) +#define RX_XFER_READY BIT_ULL(9) +#define TX_CAL_BUSY BIT_ULL(10) +#define RX_CAL_BUSY BIT_ULL(11) +#define RX_LOCKED_TO_DATA BIT_ULL(12) +#define RX_LOCKED_TO_REF BIT_ULL(13) +#define TX_READY BIT_ULL(14) +#define RX_READY BIT_ULL(15) + +#define HSSI_WRITE_POLL_INVL_US 10 /* Write poll interval */ +#define HSSI_WRITE_POLL_TIMEOUT_US 100000 /* Write poll timeout */ + +/* Analog preemphasis tuning parameters */ +#define PRE_TAP_ADDR 0x107 +#define PRE_TAP_MAGNITUDE_MASK GENMASK(4, 0) +#define PRE_TAP_MAX 15 +#define PRE_TAP_POLARITY BIT(5) /* 1 = negative polarity */ + +#define POST_TAP_ADDR 0x105 +#define POST_TAP_MAGNITUDE_MASK GENMASK(4, 0) +#define POST_TAP_MAX 24 +#define POST_TAP_POLARITY BIT(6) /* 1 = negative polarity */ + +#define VOD_COMP_ADDR 0x109 +#define VOD_MASK GENMASK(4, 0) +#define VOD_MIN 17 +#define VOD_MAX 31 + +#define COMPENSATION_FLAG BIT(5) /* 1 = ON; 0 = OFF */ + +struct hssi_phy { + void __iomem *csr_base; + u32 phy_offset; + struct device *dev; + struct mutex lock; /* serialize access to phy registers */ +}; + +static int hssi_await_ack(struct hssi_phy *phy) +{ + int ret; + u64 v; + + /* Poll for the expected state of acknowlege bit */ + ret = readq_poll_timeout(phy->csr_base + HSSI_QSFP_RCFG_CMD(phy), v, + v & QSFP_RCFG_XCVR_ACK, + HSSI_WRITE_POLL_INVL_US, + HSSI_WRITE_POLL_TIMEOUT_US); + if (ret) { + dev_err(phy->dev, "timeout, phy ack not received\n"); + return ret; + } + + /* Clear ACK state */ + v = readq(phy->csr_base + HSSI_QSFP_RCFG_CMD(phy)); + v &= ~QSFP_RCFG_CMD; + v |= 
FIELD_PREP(QSFP_RCFG_CMD, QSFP_RCFG_CMD_CLR); + writeq(v, phy->csr_base + HSSI_QSFP_RCFG_CMD(phy)); + + return 0; +} + +static int hssi_xcvr_read(struct hssi_phy *phy, u8 chan_num, + u16 addr, u32 *data) +{ + int ret; + u64 v; + + /* Read the desired address */ + v = FIELD_PREP(QSFP_RCFG_CMD, QSFP_RCFG_CMD_RD); + v |= FIELD_PREP(QSFP_RCFG_CMD_SEL_XCVR, chan_num); + v |= FIELD_PREP(QSFP_RCFG_XCVR_ADDR, addr); + writeq(v, phy->csr_base + HSSI_QSFP_RCFG_CMD(phy)); + + /* Poll for read complete */ + ret = hssi_await_ack(phy); + if (ret) + return ret; + + /* Return data */ + v = readq(phy->csr_base + HSSI_QSFP_RCFG_DATA(phy)); + *data = FIELD_GET(XCVR_RCFG_RDATA, v); + + return 0; +} + +static int hssi_xcvr_write(struct hssi_phy *phy, u8 chan_num, + u16 addr, u32 data) +{ + u64 v; + + /* Set up the write data */ + v = FIELD_PREP(XCVR_RCFG_WDATA, data); + writeq(v, phy->csr_base + HSSI_QSFP_RCFG_DATA(phy)); + + /* Trigger the write */ + v = FIELD_PREP(QSFP_RCFG_CMD, QSFP_RCFG_CMD_WRT); + v |= FIELD_PREP(QSFP_RCFG_CMD_SEL_XCVR, chan_num); + v |= FIELD_PREP(QSFP_RCFG_XCVR_ADDR, addr); + writeq(v, phy->csr_base + HSSI_QSFP_RCFG_CMD(phy)); + + /* Poll for write complete */ + return hssi_await_ack(phy); +} + +static int hssi_xcvr_rmw(struct hssi_phy *phy, u8 chan_num, + u16 addr, u32 mask, u32 data) +{ + u32 value; + int ret; + + ret = hssi_xcvr_read(phy, chan_num, addr, &value); + if (ret) + return ret; + + value &= ~mask; + value |= (data & mask); + + return hssi_xcvr_write(phy, chan_num, addr, value); +} + +static ssize_t tx_pre_tap_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + u8 magnitude, polarity = 0; + const char *p = buf; + unsigned long chan; + int ret; + + if ((buf[0] == '+') || (buf[0] == '-')) { + if (buf[0] == '-') + polarity = PRE_TAP_POLARITY; + p++; + } + + ret = kstrtou8(p, 0, &magnitude); + if (ret) + return ret; + + if (magnitude > PRE_TAP_MAX) { + dev_err(phy->dev, "Max pre-tap is %d\n", PRE_TAP_MAX); + return -EINVAL; + } + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_rmw(phy, (u8)chan, PRE_TAP_ADDR, + PRE_TAP_POLARITY | PRE_TAP_MAGNITUDE_MASK, + polarity | magnitude); + mutex_unlock(&phy->lock); + + return ret ? : count; +} + +static ssize_t tx_pre_tap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + char polarity = '\0'; + unsigned long chan; + u8 magnitude; + u32 pre_tap; + int ret; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_read(phy, (u8)chan, PRE_TAP_ADDR, &pre_tap); + mutex_unlock(&phy->lock); + + if (ret) + return ret; + + magnitude = pre_tap & PRE_TAP_MAGNITUDE_MASK; + if (magnitude) + polarity = pre_tap & PRE_TAP_POLARITY ? 
'-' : '+'; + + return scnprintf(buf, PAGE_SIZE, "%c%u\n", polarity, magnitude); +} + +static ssize_t tx_post_tap_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + u8 magnitude, polarity = 0; + const char *p = buf; + unsigned long chan; + int ret; + + if ((buf[0] == '+') || (buf[0] == '-')) { + if (buf[0] == '-') + polarity = POST_TAP_POLARITY; + p++; + } + + ret = kstrtou8(p, 0, &magnitude); + if (ret) + return ret; + + if (magnitude > POST_TAP_MAX) { + dev_err(phy->dev, "Max post-tap is %d\n", POST_TAP_MAX); + return -EINVAL; + } + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_rmw(phy, (u8)chan, POST_TAP_ADDR, + POST_TAP_POLARITY | POST_TAP_MAGNITUDE_MASK, + polarity | magnitude); + mutex_unlock(&phy->lock); + + return ret ? : count; +} + +static ssize_t tx_post_tap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + char polarity = '\0'; + unsigned long chan; + u8 magnitude; + u32 post_tap; + int ret; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_read(phy, (u8)chan, POST_TAP_ADDR, &post_tap); + mutex_unlock(&phy->lock); + + if (ret) + return ret; + + magnitude = post_tap & POST_TAP_MAGNITUDE_MASK; + if (magnitude) + polarity = post_tap & POST_TAP_POLARITY ? '-' : '+'; + + return scnprintf(buf, PAGE_SIZE, "%c%u\n", polarity, magnitude); +} + +static ssize_t tx_vod_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + unsigned long chan; + int ret; + u8 vod; + + ret = kstrtou8(buf, 0, &vod); + if (ret) + return ret; + + if (vod > VOD_MAX || vod < VOD_MIN) { + dev_err(phy->dev, "Valid VOD range is %d to %d\n", + VOD_MIN, VOD_MAX); + return -EINVAL; + } + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_rmw(phy, (u8)chan, VOD_COMP_ADDR, VOD_MASK, vod); + mutex_unlock(&phy->lock); + + return ret ? : count; +} + +static ssize_t tx_vod_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + unsigned long chan; + int ret; + u32 vod; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_read(phy, (u8)chan, VOD_COMP_ADDR, &vod); + mutex_unlock(&phy->lock); + + return ret ? : scnprintf(buf, PAGE_SIZE, "%lu\n", vod & VOD_MASK); +} + +static ssize_t tx_comp_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + unsigned long chan; + u8 compensation; + int ret; + + ret = kstrtou8(buf, 0, &compensation); + if (ret) + return ret; + + if (compensation > 1) { + dev_err(phy->dev, "Compensation must be 1 or 0"); + return -EINVAL; + } + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_rmw(phy, (u8)chan, VOD_COMP_ADDR, COMPENSATION_FLAG, + compensation ? 
COMPENSATION_FLAG : 0); + mutex_unlock(&phy->lock); + + return ret ? : count; +} + +static ssize_t tx_comp_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + struct dev_ext_attribute *eattr; + unsigned long chan; + u32 compensation; + int ret; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + chan = (unsigned long)eattr->var; + + mutex_lock(&phy->lock); + ret = hssi_xcvr_read(phy, (u8)chan, VOD_COMP_ADDR, &compensation); + mutex_unlock(&phy->lock); + + return ret ? : scnprintf(buf, PAGE_SIZE, "%u\n", + compensation & COMPENSATION_FLAG ? 1 : 0); +} + +#define PHY_TUNE_ATTR(_name, _chan) \ +static struct dev_ext_attribute phy_tune_##_name##_chan = { \ + .attr = __ATTR_RW(_name), \ + .var = (void *)_chan, \ +} + +#define PHY_TUNE_ATTRS(_chan) \ +PHY_TUNE_ATTR(tx_comp, _chan); \ +PHY_TUNE_ATTR(tx_post_tap, _chan); \ +PHY_TUNE_ATTR(tx_pre_tap, _chan); \ +PHY_TUNE_ATTR(tx_vod, _chan); \ +static struct attribute *chan##_chan##_attrs[] = { \ + &phy_tune_tx_pre_tap##_chan.attr.attr, \ + &phy_tune_tx_post_tap##_chan.attr.attr, \ + &phy_tune_tx_vod##_chan.attr.attr, \ + &phy_tune_tx_comp##_chan.attr.attr, \ + NULL, \ +}; \ +static struct attribute_group chan##_chan##_attr_group = { \ + .name = __stringify(chan##_chan), \ + .attrs = chan##_chan##_attrs, \ +} + +PHY_TUNE_ATTRS(0); +PHY_TUNE_ATTRS(1); +PHY_TUNE_ATTRS(2); +PHY_TUNE_ATTRS(3); + +static ssize_t ctrl_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + int ret; + u64 v; + + ret = kstrtou64(buf, 0, &v); + if (ret) + return ret; + + mutex_lock(&phy->lock); + writeq(v, phy->csr_base + HSSI_QSFP_CTRL(phy)); + mutex_unlock(&phy->lock); + + return count; +} + +static ssize_t ctrl_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + u64 v; + + mutex_lock(&phy->lock); + v = readq(phy->csr_base + HSSI_QSFP_CTRL(phy)); + mutex_unlock(&phy->lock); + + return scnprintf(buf, PAGE_SIZE, "0x%016llx\n", v); +} +static DEVICE_ATTR_RW(ctrl); + +static ssize_t stat_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct hssi_phy *phy = dev_get_drvdata(dev); + u64 v; + + mutex_lock(&phy->lock); + v = readq(phy->csr_base + HSSI_QSFP_STAT(phy)); + mutex_unlock(&phy->lock); + + return scnprintf(buf, PAGE_SIZE, "0x%016llx\n", v); +} +static DEVICE_ATTR_RO(stat); + +static struct attribute *qsfp_attrs[] = { + &dev_attr_ctrl.attr, + &dev_attr_stat.attr, + NULL, +}; + +static struct attribute_group qsfp_attr_group = { + .attrs = qsfp_attrs, +}; + +static const struct attribute_group *qsfp_attr_groups[] = { + &qsfp_attr_group, + &chan0_attr_group, + &chan1_attr_group, + &chan2_attr_group, + &chan3_attr_group, + NULL, +}; + +static int intel_s10_phy_probe(struct platform_device *pdev) +{ + struct intel_s10_platform_data *pdata; + struct device *dev = &pdev->dev; + struct hssi_phy *phy; + + pdata = dev_get_platdata(dev); + if (!pdata) + return -ENODEV; + + phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); + if (!phy) + return -ENOMEM; + + phy->csr_base = pdata->csr_base; + phy->phy_offset = pdata->phy_offset; + phy->dev = dev; + mutex_init(&phy->lock); + dev_set_drvdata(dev, phy); + + return 0; +} + +static int intel_s10_phy_remove(struct platform_device *pdev) +{ + struct hssi_phy *phy = dev_get_drvdata(&pdev->dev); + + mutex_destroy(&phy->lock); + return 0; +} + +static struct platform_driver 
intel_s10_phy_driver = { + .driver = { + .name = INTEL_S10_PHY_DRV_NAME, + .dev_groups = qsfp_attr_groups, + }, + .probe = intel_s10_phy_probe, + .remove = intel_s10_phy_remove, +}; + +module_platform_driver(intel_s10_phy_driver); + +MODULE_DESCRIPTION("Intel HSSI Ethernet Phy"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" INTEL_S10_PHY_DRV_NAME); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 741b9140992a8..8191f028152b4 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -59,6 +59,7 @@ comment "SPI Master Controller Drivers" config SPI_ALTERA tristate "Altera SPI Controller" + select REGMAP_MMIO help This is the driver for the Altera SPI Controller. diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index 41d71ba7fd328..468fbd5b6c4f2 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -14,12 +14,11 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/spi/altera.h> #include <linux/spi/spi.h> #include <linux/io.h> #include <linux/of.h> -#define DRV_NAME "spi_altera" - #define ALTERA_SPI_RXDATA 0 #define ALTERA_SPI_TXDATA 4 #define ALTERA_SPI_STATUS 8 @@ -40,19 +39,57 @@ #define ALTERA_SPI_CONTROL_IE_MSK 0x100 #define ALTERA_SPI_CONTROL_SSO_MSK 0x400 +#define ALTERA_SPI_MAX_CS 32 + struct altera_spi { - void __iomem *base; int irq; int len; int count; int bytes_per_word; - unsigned long imr; + u32 imr; /* data buffers */ const unsigned char *tx; unsigned char *rx; + + struct regmap *regmap; + u32 base; + + struct device *dev; +}; + +static const struct regmap_config spi_altera_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, }; + +static int altr_spi_writel(struct altera_spi *hw, unsigned int reg, + unsigned int val) +{ + int ret; + + ret = regmap_write(hw->regmap, hw->base + reg, val); + if (ret) + dev_err(hw->dev, "fail to write reg 0x%x val 0x%x: %d\n", + reg, val, ret); + + return ret; +} + +static int altr_spi_readl(struct altera_spi *hw, unsigned int reg, + unsigned int *val) +{ + int ret; + + ret = regmap_read(hw->regmap, hw->base + reg, val); + if (ret) + dev_err(hw->dev, "fail to read reg 0x%x: %d\n", reg, ret); + + return ret; +} + static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev) { return spi_master_get_devdata(sdev->master); @@ -64,12 +101,13 @@ static void altera_spi_set_cs(struct spi_device *spi, bool is_high) if (is_high) { hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - writel(0, hw->base + ALTERA_SPI_SLAVE_SEL); + altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr); + altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, 0); } else { - writel(BIT(spi->chip_select), hw->base + ALTERA_SPI_SLAVE_SEL); + altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, + BIT(spi->chip_select)); hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr); } } @@ -86,17 +124,24 @@ static void altera_spi_tx_word(struct altera_spi *hw) txd = (hw->tx[hw->count * 2] | (hw->tx[hw->count * 2 + 1] << 8)); break; + case 4: + txd = (hw->tx[hw->count * 4] + | (hw->tx[hw->count * 4 + 1] << 8) + | (hw->tx[hw->count * 4 + 2] << 16) + | (hw->tx[hw->count * 4 + 3] << 24)); + break; + } } - writel(txd, hw->base + ALTERA_SPI_TXDATA); + altr_spi_writel(hw, ALTERA_SPI_TXDATA, txd); } static void altera_spi_rx_word(struct altera_spi *hw) { unsigned int rxd; - rxd = readl(hw->base + ALTERA_SPI_RXDATA); + altr_spi_readl(hw, ALTERA_SPI_RXDATA, &rxd); if (hw->rx) { switch (hw->bytes_per_word) { case 1: @@ -106,6 +151,13 @@ static
void altera_spi_rx_word(struct altera_spi *hw) hw->rx[hw->count * 2] = rxd; hw->rx[hw->count * 2 + 1] = rxd >> 8; break; + case 4: + hw->rx[hw->count * 4] = rxd; + hw->rx[hw->count * 4 + 1] = rxd >> 8; + hw->rx[hw->count * 4 + 2] = rxd >> 16; + hw->rx[hw->count * 4 + 3] = rxd >> 24; + break; + } } @@ -116,6 +168,7 @@ static int altera_spi_txrx(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) { struct altera_spi *hw = spi_master_get_devdata(master); + u32 val; hw->tx = t->tx_buf; hw->rx = t->rx_buf; @@ -126,7 +179,7 @@ static int altera_spi_txrx(struct spi_master *master, if (hw->irq >= 0) { /* enable receive interrupt */ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr); /* send the first byte */ altera_spi_tx_word(hw); @@ -134,9 +187,13 @@ static int altera_spi_txrx(struct spi_master *master, while (hw->count < hw->len) { altera_spi_tx_word(hw); - while (!(readl(hw->base + ALTERA_SPI_STATUS) & - ALTERA_SPI_STATUS_RRDY_MSK)) + for (;;) { + altr_spi_readl(hw, ALTERA_SPI_STATUS, &val); + if (val & ALTERA_SPI_STATUS_RRDY_MSK) + break; + cpu_relax(); + } altera_spi_rx_word(hw); } @@ -158,7 +215,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev) } else { /* disable receive interrupt */ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK; - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); + altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr); spi_finalize_current_transfer(master); } @@ -168,9 +225,13 @@ static irqreturn_t altera_spi_irq(int irq, void *dev) static int altera_spi_probe(struct platform_device *pdev) { + struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev); struct altera_spi *hw; struct spi_master *master; + void __iomem *res; int err = -ENODEV; + u32 val; + u16 i; master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi)); if (!master) @@ -178,27 +239,61 @@ static int altera_spi_probe(struct platform_device *pdev) /* setup the master state. 
*/ master->bus_num = pdev->id; - master->num_chipselect = 16; - master->mode_bits = SPI_CS_HIGH; - master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + + if (pdata) { + if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) { + dev_err(&pdev->dev, + "Invalid number of chipselect: %hu\n", + pdata->num_chipselect); + return -EINVAL; + } + + master->num_chipselect = pdata->num_chipselect; + master->mode_bits = pdata->mode_bits; + master->bits_per_word_mask = pdata->bits_per_word_mask; + } else { + master->num_chipselect = 16; + master->mode_bits = SPI_CS_HIGH; + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16); + } + master->dev.of_node = pdev->dev.of_node; master->transfer_one = altera_spi_txrx; master->set_cs = altera_spi_set_cs; hw = spi_master_get_devdata(master); + hw->dev = &pdev->dev; /* find and map our resources */ - hw->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(hw->base)) { - err = PTR_ERR(hw->base); - goto exit; + if (pdata && pdata->use_parent_regmap) { + hw->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!hw->regmap) { + dev_err(&pdev->dev, "get regmap failed\n"); + goto exit; + } + hw->base = pdata->regoff; + } else { + res = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(res)) { + err = PTR_ERR(res); + goto exit; + } + + hw->regmap = devm_regmap_init_mmio(&pdev->dev, res, + &spi_altera_config); + if (IS_ERR(hw->regmap)) { + dev_err(&pdev->dev, "regmap mmio init failed\n"); + err = PTR_ERR(hw->regmap); + goto exit; + } } /* program defaults into the registers */ hw->imr = 0; /* disable spi interrupts */ - writel(hw->imr, hw->base + ALTERA_SPI_CONTROL); - writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */ - if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK) - readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */ + altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr); + altr_spi_writel(hw, ALTERA_SPI_STATUS, 0); /* clear status reg */ + altr_spi_readl(hw, ALTERA_SPI_STATUS, &val); + if (val & ALTERA_SPI_STATUS_RRDY_MSK) + altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */ /* irq is optional */ hw->irq = platform_get_irq(pdev, 0); if (hw->irq >= 0) { @@ -211,7 +306,17 @@ static int altera_spi_probe(struct platform_device *pdev) err = devm_spi_register_master(&pdev->dev, master); if (err) goto exit; - dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq); + + if (pdata) { + for (i = 0; i < pdata->num_devices; i++) { + if (!spi_new_device(master, pdata->devices + i)) + dev_warn(&pdev->dev, + "unable to create SPI device: %s\n", + pdata->devices[i].modalias); + } + } + + dev_info(&pdev->dev, "base %u, irq %d\n", hw->base, hw->irq); return 0; exit: @@ -231,7 +336,7 @@ MODULE_DEVICE_TABLE(of, altera_spi_match); static struct platform_driver altera_spi_driver = { .probe = altera_spi_probe, .driver = { - .name = DRV_NAME, + .name = ALTERA_SPI_DRV_NAME, .pm = NULL, .of_match_table = of_match_ptr(altera_spi_match), }, @@ -241,4 +346,4 @@ module_platform_driver(altera_spi_driver); MODULE_DESCRIPTION("Altera SPI driver"); MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:" DRV_NAME); +MODULE_ALIAS("platform:" ALTERA_SPI_DRV_NAME); diff --git a/include/linux/fpga/dfl-bus.h b/include/linux/fpga/dfl-bus.h new file mode 100644 index 0000000000000..763e6ce46930c --- /dev/null +++ b/include/linux/fpga/dfl-bus.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for DFL driver and device API + * + * Copyright (C) 2020 Intel Corporation, Inc.
+ */ + +#ifndef __FPGA_DFL_BUS_H +#define __FPGA_DFL_BUS_H + +#include <linux/device.h> +#include <linux/mod_devicetable.h> + +/** + * enum dfl_id_type - define the DFL FIU types + */ +enum dfl_id_type { + FME_ID, + PORT_ID, + DFL_ID_MAX, +}; + +/** + * struct dfl_device - represent a dfl device on the dfl bus + * + * @dev: Generic device interface. + * @type: Type of DFL FIU of the device. See enum dfl_id_type. + * @feature_id: 64-bit feature identifier local to its DFL FIU type. + * @mmio_res: MMIO resource of this dfl device. + * @irqs: List of Linux IRQ numbers of this dfl device. + * @num_irqs: number of IRQs supported by this dfl device. + * @cdev: pointer to DFL FPGA container device this dfl device belongs to. + * @id_entry: matched id entry in dfl driver's id table. + */ +struct dfl_device { + struct device dev; + unsigned int type; + unsigned long long feature_id; + struct resource mmio_res; + int *irqs; + unsigned int num_irqs; + struct dfl_fpga_cdev *cdev; + const struct dfl_device_id *id_entry; +}; + +/** + * struct dfl_driver - represent a dfl device driver + * + * @drv: Driver model structure. + * @id_table: Pointer to table of device IDs the driver is interested in. + * @probe: Callback for device binding. + * @remove: Callback for device unbinding. + */ +struct dfl_driver { + struct device_driver drv; + const struct dfl_device_id *id_table; + + int (*probe)(struct dfl_device *dfl_dev); + int (*remove)(struct dfl_device *dfl_dev); +}; + +#define to_dfl_dev(d) container_of(d, struct dfl_device, dev) +#define to_dfl_drv(d) container_of(d, struct dfl_driver, drv) + +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define dfl_driver_register(drv) \ + __dfl_driver_register(drv, THIS_MODULE) +int __dfl_driver_register(struct dfl_driver *dfl_drv, struct module *owner); +void dfl_driver_unregister(struct dfl_driver *dfl_drv); + +/* module_dfl_driver() - Helper macro for drivers that don't do + * anything special in module init/exit. This eliminates a lot of + * boilerplate. Each module may only use this macro once, and + * calling it replaces module_init() and module_exit() + */ +#define module_dfl_driver(__dfl_driver) \ + module_driver(__dfl_driver, dfl_driver_register, \ + dfl_driver_unregister) + +#endif /* __FPGA_DFL_BUS_H */ diff --git a/include/linux/fpga/ifpga-sec-mgr.h b/include/linux/fpga/ifpga-sec-mgr.h new file mode 100644 index 0000000000000..d06ba89337552 --- /dev/null +++ b/include/linux/fpga/ifpga-sec-mgr.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file for Intel FPGA Security Manager + * + * Copyright (C) 2019-2020 Intel Corporation, Inc. + */ +#ifndef _LINUX_IFPGA_SEC_MGR_H +#define _LINUX_IFPGA_SEC_MGR_H + +#include <linux/device.h> +#include <linux/mutex.h> +#include <linux/types.h> + +struct ifpga_sec_mgr; + +/** + * typedef sysfs_reh_hndlr_t - Function pointer to sysfs file handler + * for root entry hashes + * @imgr: pointer to security manager structure + * @hash: pointer to a pointer to an array of bytes comprising the hash + * @hash_size: pointer to the number of bytes in the root entry hash + * + * This datatype is used to define a sysfs file handler function to + * return root entry hash data to be displayed via sysfs. + * + * Context: No locking requirements are imposed by the security manager. + * The function is expected to vmalloc the hash array on success. + * The security manager is responsible for calling vfree.
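+ *
+ * Example: a minimal handler sketch, assuming a hypothetical device that
+ * caches a 32-byte root entry hash in its driver private data (all
+ * "example" names below are illustrative, not part of this patchset):
+ *
+ *	static int example_reh(struct ifpga_sec_mgr *imgr, u8 **hash,
+ *			       unsigned int *hash_size)
+ *	{
+ *		struct example_dev *edev = imgr->priv;
+ *		u8 *data = vmalloc(EXAMPLE_REH_SIZE);
+ *
+ *		if (!data)
+ *			return -ENOMEM;
+ *		memcpy(data, edev->reh, EXAMPLE_REH_SIZE);
+ *		*hash = data;
+ *		*hash_size = EXAMPLE_REH_SIZE;
+ *		return 0;
+ *	}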
+ * Return: 0 on success, negative errno on failure + */ +typedef int (*sysfs_reh_hndlr_t)(struct ifpga_sec_mgr *imgr, + u8 **hash, unsigned int *hash_size); + +/** + * typedef sysfs_cnt_hndlr_t - Function pointer to sysfs file handler + * for flash counts + * @imgr: pointer to security manager structure + * + * This datatype is used to define a sysfs file handler function to + * return a flash count to be displayed via sysfs. + * + * Context: No locking requirements are imposed by the security manager + * Return: flash count or negative errno + */ +typedef int (*sysfs_cnt_hndlr_t)(struct ifpga_sec_mgr *imgr); + +/** + * typedef sysfs_csk_hndlr_t - Function pointer to sysfs file handler for a + * bit vector of canceled keys + * + * @imgr: pointer to security manager structure + * @csk_map: pointer to a pointer of a cancellation key bitmap + * @nbits: number of bits in cancellation key bitmap + * + * This datatype is used to define a sysfs file handler function to + * return a bitmap of canceled keys to be displayed via sysfs. + * + * Context: No locking requirements are imposed by the security manager. + * The function is expected to vmalloc the cancellation key bitmap + * on success. The security manager is responsible for calling + * vfree. + * Return: 0 on success, negative errno on failure + */ +typedef int (*sysfs_csk_hndlr_t)(struct ifpga_sec_mgr *imgr, + unsigned long **csk_map, unsigned int *nbits); + +/** + * struct ifpga_sec_mgr_ops - device specific operations + * @user_flash_count: Optional: Return sysfs string output for FPGA + * image flash count + * @bmc_flash_count: Optional: Return sysfs string output for BMC + * image flash count + * @smbus_flash_count: Optional: Return sysfs string output for SMBus + * flash count + * @sr_root_entry_hash: Optional: Return sysfs string output for static + * region root entry hash + * @pr_root_entry_hash: Optional: Return sysfs string output for partial + * reconfiguration root entry hash + * @bmc_root_entry_hash: Optional: Return sysfs string output for BMC + * root entry hash + * @sr_canceled_csks: Optional: Return sysfs string output for static + * region canceled keys + * @pr_canceled_csks: Optional: Return sysfs string output for partial + * reconfiguration canceled keys + * @bmc_canceled_csks: Optional: Return sysfs string output for BMC + * canceled keys + * @prepare: Required: Prepare secure update + * @write_blk: Required: Write a block of data + * @poll_complete: Required: Check for the completion of the + * HW authentication/programming function + * @cancel: Required: Signal HW to cancel update + * @cleanup: Optional: Complements the prepare() + * function and is called at the completion + * of the update, whether success or failure, + * iff the prepare function succeeded. + * @get_hw_errinfo: Optional: return u64 hw specific error info. + * The software err_code may be used to determine + * whether the hw error info is applicable.
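+ *
+ * For orientation, the security manager invokes the required ops in
+ * roughly the following sequence (illustrative pseudocode only, not
+ * the verbatim update worker from this patchset):
+ *
+ *	ret = iops->prepare(imgr);
+ *	if (!ret) {
+ *		while (!ret && imgr->remaining_size && !imgr->request_cancel)
+ *			ret = iops->write_blk(imgr, offset, size);
+ *		if (!ret && !imgr->request_cancel)
+ *			ret = iops->poll_complete(imgr);
+ *		if (iops->cleanup)
+ *			iops->cleanup(imgr);
+ *	}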
+ */ +struct ifpga_sec_mgr_ops { + sysfs_cnt_hndlr_t user_flash_count; + sysfs_cnt_hndlr_t bmc_flash_count; + sysfs_cnt_hndlr_t smbus_flash_count; + sysfs_reh_hndlr_t sr_root_entry_hash; + sysfs_reh_hndlr_t pr_root_entry_hash; + sysfs_reh_hndlr_t bmc_root_entry_hash; + sysfs_csk_hndlr_t sr_canceled_csks; + sysfs_csk_hndlr_t pr_canceled_csks; + sysfs_csk_hndlr_t bmc_canceled_csks; + int (*prepare)(struct ifpga_sec_mgr *imgr); + int (*write_blk)(struct ifpga_sec_mgr *imgr, u32 offset, u32 size); + int (*poll_complete)(struct ifpga_sec_mgr *imgr); + void (*cleanup)(struct ifpga_sec_mgr *imgr); + int (*cancel)(struct ifpga_sec_mgr *imgr); + u64 (*get_hw_errinfo)(struct ifpga_sec_mgr *imgr); +}; + +/* Update progress codes */ +#define IFPGA_SEC_PROG_IDLE 0x0 +#define IFPGA_SEC_PROG_READ_FILE 0x1 +#define IFPGA_SEC_PROG_PREPARING 0x2 +#define IFPGA_SEC_PROG_WRITING 0x3 +#define IFPGA_SEC_PROG_PROGRAMMING 0x4 +#define IFPGA_SEC_PROG_MAX 0x5 + +struct ifpga_sec_mgr { + const char *name; + struct device dev; + const struct ifpga_sec_mgr_ops *iops; + struct mutex lock; /* protect data structure contents */ + struct work_struct work; + char *filename; + const u8 *data; /* pointer to update data */ + u32 remaining_size; /* size remaining to transfer */ + u32 progress; + int err_state; /* progress state at time of failure */ + int err_code; /* negative errno value on failure */ + u64 hw_errinfo; /* 64 bits of HW specific error info */ + bool request_cancel; + bool driver_unload; + void *priv; +}; + +int ifpga_sec_mgr_register(struct ifpga_sec_mgr *imgr); +void ifpga_sec_mgr_unregister(struct ifpga_sec_mgr *imgr); +struct ifpga_sec_mgr * +ifpga_sec_mgr_create(struct device *dev, const char *name, + const struct ifpga_sec_mgr_ops *iops, void *priv); +void ifpga_sec_mgr_free(struct ifpga_sec_mgr *imgr); + +#endif diff --git a/include/linux/mfd/intel-m10-bmc.h b/include/linux/mfd/intel-m10-bmc.h new file mode 100644 index 0000000000000..3dd8280e34e7a --- /dev/null +++ b/include/linux/mfd/intel-m10-bmc.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Driver Header File for Intel Max10 Board Management Controller chip. + * + * Copyright (C) 2018-2020 Intel Corporation, Inc. 
+ * + */ +#ifndef __INTEL_M10_BMC_H +#define __INTEL_M10_BMC_H + +#include <linux/regmap.h> + +#define M10BMC_LEGACY_SYS_BASE 0x300400 +#define M10BMC_SYS_BASE 0x300800 +#define M10BMC_MEM_END 0x200000fc + +#define M10BMC_STAGING_BASE 0x18000000 +#define M10BMC_STAGING_SIZE 0x3800000 + +/* Register offset of system registers */ +#define NIOS2_FW_VERSION 0x0 +#define M10BMC_MACADDR1 0x10 +#define M10BMC_MAC_BYTE4 GENMASK(7, 0) +#define M10BMC_MAC_BYTE3 GENMASK(15, 8) +#define M10BMC_MAC_BYTE2 GENMASK(23, 16) +#define M10BMC_MAC_BYTE1 GENMASK(31, 24) +#define M10BMC_MACADDR2 0x14 +#define M10BMC_MAC_BYTE6 GENMASK(7, 0) +#define M10BMC_MAC_BYTE5 GENMASK(15, 8) +#define M10BMC_MAC_COUNT GENMASK(23, 16) +#define M10BMC_TEST_REG 0x3c +#define M10BMC_BUILD_VER 0x68 +#define M10BMC_VERSION_MAJOR GENMASK(23, 16) +#define PCB_INFO GENMASK(31, 24) + +/* Secure update doorbell register, in system register region */ +#define M10BMC_DOORBELL 0x400 +#define RSU_REQUEST BIT(0) +#define RSU_PROGRESS GENMASK(7, 4) +#define HOST_STATUS GENMASK(11, 8) +#define RSU_STATUS GENMASK(23, 16) +#define PKVL_EEPROM_LOAD_SEC BIT(24) +#define PKVL1_POLL_EN BIT(25) +#define PKVL2_POLL_EN BIT(26) +#define CONFIG_SEL BIT(28) +#define REBOOT_REQ BIT(29) +#define REBOOT_DISABLED BIT(30) + +/* Progress states */ +#define RSU_PROG_IDLE 0x0 +#define RSU_PROG_PREPARE 0x1 +#define RSU_PROG_READY 0x3 +#define RSU_PROG_AUTHENTICATING 0x4 +#define RSU_PROG_COPYING 0x5 +#define RSU_PROG_UPDATE_CANCEL 0x6 +#define RSU_PROG_PROGRAM_KEY_HASH 0x7 +#define RSU_PROG_RSU_DONE 0x8 +#define RSU_PROG_PKVL_PROM_DONE 0x9 + +/* Device and error states */ +#define RSU_STAT_NORMAL 0x0 +#define RSU_STAT_TIMEOUT 0x1 +#define RSU_STAT_AUTH_FAIL 0x2 +#define RSU_STAT_COPY_FAIL 0x3 +#define RSU_STAT_FATAL 0x4 +#define RSU_STAT_PKVL_REJECT 0x5 +#define RSU_STAT_NON_INC 0x6 +#define RSU_STAT_ERASE_FAIL 0x7 +#define RSU_STAT_WEAROUT 0x8 +#define RSU_STAT_NIOS_OK 0x80 +#define RSU_STAT_USER_OK 0x81 +#define RSU_STAT_FACTORY_OK 0x82 +#define RSU_STAT_USER_FAIL 0x83 +#define RSU_STAT_FACTORY_FAIL 0x84 +#define RSU_STAT_NIOS_FLASH_ERR 0x85 +#define RSU_STAT_FPGA_FLASH_ERR 0x86 + +#define HOST_STATUS_IDLE 0x0 +#define HOST_STATUS_WRITE_DONE 0x1 +#define HOST_STATUS_ABORT_RSU 0x2 + +#define rsu_prog(doorbell) FIELD_GET(RSU_PROGRESS, doorbell) +#define rsu_stat(doorbell) FIELD_GET(RSU_STATUS, doorbell) + +/* interval 100ms and timeout 5s */ +#define NIOS_HANDSHAKE_INTERVAL_US (100 * 1000) +#define NIOS_HANDSHAKE_TIMEOUT_US (5 * 1000 * 1000) + +/* RSU PREP Timeout (2 minutes) to erase flash staging area */ +#define RSU_PREP_INTERVAL_MS 100 +#define RSU_PREP_TIMEOUT_MS (2 * 60 * 1000) + +/* RSU Complete Timeout (40 minutes) for full flash update */ +#define RSU_COMPLETE_INTERVAL_MS 1000 +#define RSU_COMPLETE_TIMEOUT_MS (40 * 60 * 1000) + +/* Authorization Result register, in system register region */ +#define M10BMC_AUTH_RESULT 0x404 + +/** + * struct intel_m10bmc - Intel Max10 BMC MFD device private data structure + * @dev: this device + * @regmap: the regmap used to access registers by m10bmc itself + * @flags: M10BMC_FLAGS_* describing this BMC instance + */ +struct intel_m10bmc { + struct device *dev; + struct regmap *regmap; + unsigned int flags; +}; + +#define M10BMC_FLAGS_SECURE BIT(2) + +/* + * register access helper functions.
+ * + * m10bmc_raw_read - read m10bmc register per addr + * m10bmc_sys_read - read m10bmc system register per offset + */ +static inline int +m10bmc_raw_read(struct intel_m10bmc *m10bmc, unsigned int addr, + unsigned int *val) +{ + int ret; + + ret = regmap_read(m10bmc->regmap, addr, val); + if (ret) + dev_err(m10bmc->dev, "fail to read raw reg %x: %d\n", + addr, ret); + + return ret; +} + +#define m10bmc_sys_read(m10bmc, offset, val) \ + m10bmc_raw_read(m10bmc, \ + ((m10bmc)->flags & M10BMC_FLAGS_SECURE ? \ + M10BMC_SYS_BASE : M10BMC_LEGACY_SYS_BASE) + (offset), val) + +/* M10BMC system sub devices for PAC N3000 */ +/* subdev hwmon */ +#define N3000BMC_HWMON_DEV_NAME "n3000bmc-hwmon" + +/* M10BMC sub devices for PAC D5005 */ +/* subdev hwmon */ +#define D5005BMC_HWMON_DEV_NAME "d5005bmc-hwmon" + +/* M10BMC subdevices for both PAC N3000 & PAC D5005 */ +/* subdev security engine */ +#define INTEL_M10BMC_SEC_DRV_NAME "m10bmc-secure" + +#endif /* __INTEL_M10_BMC_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 4c2ddd0941a75..6b4d4ba5b51da 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -832,4 +832,16 @@ struct mhi_device_id { kernel_ulong_t driver_data; }; +/** + * struct dfl_device_id - dfl device identifier + * @type: type of DFL FIU of the device. 0 for FME, 1 for PORT + * @feature_id: 64-bit feature identifier local to its DFL FIU type. + * @driver_data: Driver specific data + */ +struct dfl_device_id { + unsigned int type; + unsigned long long feature_id; + kernel_ulong_t driver_data; +}; + #endif /* LINUX_MOD_DEVICETABLE_H */ diff --git a/include/linux/phy/intel-s10-phy.h b/include/linux/phy/intel-s10-phy.h new file mode 100644 index 0000000000000..a68a5e1d2ba65 --- /dev/null +++ b/include/linux/phy/intel-s10-phy.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for Intel Stratix 10 Phy Driver. + * + * Copyright 2019-2020 Intel Corporation, Inc. + */ +#ifndef __INTEL_S10_PHY_H +#define __INTEL_S10_PHY_H + +#define INTEL_S10_PHY_DRV_NAME "intel-s10-phy" + +/** + * struct intel_s10_platform_data - Platform data of the Intel S10 Phy Driver + * @csr_base: Base address of Control & Status registers + * @phy_offset: Offset of the PHY registers from @csr_base + */ +struct intel_s10_platform_data { + void __iomem *csr_base; + u32 phy_offset; +}; + +#endif /* __INTEL_S10_PHY_H */ diff --git a/include/linux/spi/altera.h b/include/linux/spi/altera.h new file mode 100644 index 0000000000000..6539371f28c60 --- /dev/null +++ b/include/linux/spi/altera.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header File for Altera SPI Driver. + */ +#ifndef __LINUX_SPI_ALTERA_H +#define __LINUX_SPI_ALTERA_H + +#include <linux/regmap.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +#define ALTERA_SPI_DRV_NAME "spi-altera" + +/** + * struct altera_spi_platform_data - Platform data of the Altera SPI driver + * @mode_bits: Mode bits of SPI master. + * @num_chipselect: Number of chipselects. + * @bits_per_word_mask: bitmask of supported bits_per_word for transfers. + * @num_devices: Number of devices that shall be added when the driver + * is probed. + * @devices: The devices to add. + * @use_parent_regmap: If true, device uses parent regmap to access its + * registers. Otherwise try map platform mmio resources. + * @regoff: Offset of the device register base in parent regmap. + * This field is ignored when use_parent_regmap == false.
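+ *
+ * Example: a parent MFD driver sharing its regmap with this controller
+ * could pass platform data like the following (the values, the 0x40
+ * offset and the m25p80 slave are illustrative only, not taken from
+ * this patchset):
+ *
+ *	static struct spi_board_info example_devices[] = {
+ *		{
+ *			.modalias = "m25p80",
+ *			.max_speed_hz = 10000000,
+ *			.chip_select = 0,
+ *		},
+ *	};
+ *
+ *	static struct altera_spi_platform_data example_pdata = {
+ *		.mode_bits = SPI_CS_HIGH,
+ *		.num_chipselect = 1,
+ *		.bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32),
+ *		.num_devices = ARRAY_SIZE(example_devices),
+ *		.devices = example_devices,
+ *		.use_parent_regmap = true,
+ *		.regoff = 0x40,
+ *	};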
+ */ +struct altera_spi_platform_data { + u16 mode_bits; + u16 num_chipselect; + u32 bits_per_word_mask; + u16 num_devices; + struct spi_board_info *devices; + bool use_parent_regmap; + u32 regoff; +}; + +#endif /* __LINUX_SPI_ALTERA_H */ diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h index ec70a0746e59b..ec69006aaadec 100644 --- a/include/uapi/linux/fpga-dfl.h +++ b/include/uapi/linux/fpga-dfl.h @@ -151,6 +151,65 @@ struct dfl_fpga_port_dma_unmap { #define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4) +/** + * struct dfl_fpga_irq_set - the argument for DFL_FPGA_XXX_SET_IRQ ioctl. + * + * @start: Index of the first irq. + * @count: The number of eventfd handlers. + * @evtfds: Eventfd handlers. + */ +struct dfl_fpga_irq_set { + __u32 start; + __u32 count; + __s32 evtfds[]; +}; + +/** + * DFL_FPGA_PORT_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 5, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga port error reporting private + * feature. Currently hardware supports up to 1 irq. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 5, __u32) + +/** + * DFL_FPGA_PORT_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 6, + * struct dfl_fpga_irq_set) + * + * Set fpga port error reporting interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 6, \ + struct dfl_fpga_irq_set) + +/** + * DFL_FPGA_PORT_UINT_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 7, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga AFU interrupt private + * feature. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_UINT_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 7, __u32) + +/** + * DFL_FPGA_PORT_UINT_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 8, + * struct dfl_fpga_irq_set) + * + * Set fpga AFU interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_PORT_UINT_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_PORT_BASE + 8, \ + struct dfl_fpga_irq_set) + /* IOCTLs for FME file descriptor */ /** @@ -194,4 +253,27 @@ struct dfl_fpga_fme_port_pr { */ #define DFL_FPGA_FME_PORT_ASSIGN _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 2, int) +/** + * DFL_FPGA_FME_ERR_GET_IRQ_NUM - _IOR(DFL_FPGA_MAGIC, DFL_FME_BASE + 3, + * __u32 num_irqs) + * + * Get the number of irqs supported by the fpga fme error reporting private + * feature. Currently hardware supports up to 1 irq. + * Return: 0 on success, -errno on failure. + */ +#define DFL_FPGA_FME_ERR_GET_IRQ_NUM _IOR(DFL_FPGA_MAGIC, \ + DFL_FME_BASE + 3, __u32) + +/** + * DFL_FPGA_FME_ERR_SET_IRQ - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 4, + * struct dfl_fpga_irq_set) + * + * Set fpga fme error reporting interrupt trigger if evtfds[n] is valid. + * Unset related interrupt trigger if evtfds[n] is a negative value. + * Return: 0 on success, -errno on failure.
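+ *
+ * Example: an illustrative userspace snippet binding one eventfd to the
+ * first interrupt (headers and error handling omitted; not part of this
+ * patchset):
+ *
+ *	struct dfl_fpga_irq_set *irq_set;
+ *
+ *	irq_set = malloc(sizeof(*irq_set) + sizeof(__s32));
+ *	irq_set->start = 0;
+ *	irq_set->count = 1;
+ *	irq_set->evtfds[0] = eventfd(0, 0);
+ *	ioctl(fme_fd, DFL_FPGA_FME_ERR_SET_IRQ, irq_set);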
+ */ +#define DFL_FPGA_FME_ERR_SET_IRQ _IOW(DFL_FPGA_MAGIC, \ + DFL_FME_BASE + 4, \ + struct dfl_fpga_irq_set) + #endif /* _UAPI_LINUX_FPGA_DFL_H */ diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c index 010be8ba21160..b004b6d8a55f7 100644 --- a/scripts/mod/devicetable-offsets.c +++ b/scripts/mod/devicetable-offsets.c @@ -241,5 +241,9 @@ int main(void) DEVID(mhi_device_id); DEVID_FIELD(mhi_device_id, chan); + DEVID(dfl_device_id); + DEVID_FIELD(dfl_device_id, type); + DEVID_FIELD(dfl_device_id, feature_id); + return 0; } diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 02d5d79da2844..84a83c837aa24 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1362,6 +1362,17 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias) return 1; } +/* Looks like: dfl:type:feature_id */ +static int do_dfl_entry(const char *filename, void *symval, char *alias) +{ + DEF_FIELD(symval, dfl_device_id, type); + DEF_FIELD(symval, dfl_device_id, feature_id); + + sprintf(alias, "dfl:%08x:%016llx", type, feature_id); + + return 1; +} + /* Does namelen bytes of name exactly match the symbol? */ static bool sym_is(const char *name, unsigned namelen, const char *symbol) { @@ -1436,6 +1447,7 @@ static const struct devtable devtable[] = { {"tee", SIZE_tee_client_device_id, do_tee_entry}, {"wmi", SIZE_wmi_device_id, do_wmi_entry}, {"mhi", SIZE_mhi_device_id, do_mhi_entry}, + {"dfl", SIZE_dfl_device_id, do_dfl_entry}, }; /* Create MODULE_ALIAS() statements.
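For reference, a minimal consumer of the new dfl bus could look like the sketch below. The FME feature ID 0x23 and every "example" identifier are hypothetical; the sketch only illustrates how struct dfl_driver, the dfl_device_id table (exported through the "dfl:%08x:%016llx" modalias added above), and module_dfl_driver() fit together:

	#include <linux/module.h>
	#include <linux/fpga/dfl-bus.h>

	static int example_probe(struct dfl_device *dfl_dev)
	{
		/* dfl-bus has already matched type/feature_id for us */
		dev_info(&dfl_dev->dev, "bound feature id 0x%llx\n",
			 dfl_dev->feature_id);
		return 0;
	}

	static int example_remove(struct dfl_device *dfl_dev)
	{
		return 0;
	}

	/* Matches a (hypothetical) FME sub-feature with feature id 0x23 */
	static const struct dfl_device_id example_ids[] = {
		{ .type = FME_ID, .feature_id = 0x23 },
		{ }
	};
	MODULE_DEVICE_TABLE(dfl, example_ids);

	static struct dfl_driver example_driver = {
		.drv = {
			.name = "dfl-example",
		},
		.id_table = example_ids,
		.probe = example_probe,
		.remove = example_remove,
	};
	module_dfl_driver(example_driver);

	MODULE_LICENSE("GPL v2");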