diff --git a/.gitignore b/.gitignore index 97ba6b79834c..c62842976bdb 100644 --- a/.gitignore +++ b/.gitignore @@ -132,3 +132,6 @@ all.config # Kdevelop4 *.kdev4 + +# fetched Android config fragments +kernel/configs/android-*.cfg diff --git a/Documentation/ABI/testing/configfs-usb-gadget-dvctrace b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace new file mode 100644 index 000000000000..6391096ac151 --- /dev/null +++ b/Documentation/ABI/testing/configfs-usb-gadget-dvctrace @@ -0,0 +1,9 @@ +What: /config/usb-gadget/<gadget>/functions/dvctrace.<instance name>/source_dev +Date: Mar 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) The name of the source device paired with this function + instance. If, upon creation of the instance, a free source device + named <instance name> exists, that source device will be + associated with the current instance; otherwise the first free + source device will be used. diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace b/Documentation/ABI/testing/sysfs-bus-dvctrace new file mode 100644 index 000000000000..bf2b5bb2144a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-dvctrace @@ -0,0 +1,68 @@ +What: /sys/bus/dvctrace +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: Groups the devices and drivers registered + to the dvc-trace bus. +What: /sys/bus/dvctrace/devices/<device>/status +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) The status of a dvc-trace source device with + respect to a USB function driver. + Free - The device is free. + Reserved - The device is reserved by a USB + function but not in use. + In use - The device is used by a USB function. +What: /sys/bus/dvctrace/devices/<device>/protocol +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) The protocol id of a dvc-trace source device; + this will be used in the function driver interface + descriptors (u8). According to the USB debug class + specification the protocol id is vendor specific. +What: /sys/bus/dvctrace/devices/<device>/descriptors +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Hex-dump of the descriptors provided by the + source device. + e.g. a debug class output connection descriptor + 09 24 02 04 03 00 00 00 00 + ll tt ss xx xx xx xx xx ii + | | | +- iConnection string id. + | | +- Descriptor sub-type DC_OUTPUT_CONNECTION + | +- Descriptor type (USB_DT_CS_INTERFACE) + +- Descriptor length + Writing: + - is not allowed while the device is Reserved or In Use. + - will replace all the descriptors currently present. + - will remove any strings previously provided. + - should use the same format. + - accepts multiple descriptors separated by space or '\n'. +What: /sys/bus/dvctrace/devices/<device>/strings +Date: May 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Currently set USB descriptor strings in + <descriptor>.<offset>: <string> format. + <descriptor>.<offset> identifies the location where + the string id is needed. + e.g. taking the same debug class output connection descriptor + as the first descriptor. + 09 24 02 04 03 00 00 00 00 + ll tt ss xx xx xx xx xx ii + +- iConnection string id. + 0.8: My output connection - will identify the string associated + with this descriptor. + Writing: + - is not allowed while the device is Reserved or In Use. + - will replace all the strings currently present. + - should use the same format. + - accepts multiple strings separated by ";" or '\n'. + e.g.
"0.4: first string; 1.4: second string" diff --git a/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith new file mode 100644 index 000000000000..0dda3aefb89c --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith @@ -0,0 +1,68 @@ +What: /sys/bus/dvctrace/devices/dvcith-/msc +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) Symbolic link to the Intel Trace Hub MSC + (Memory Storage Controller) sub-device used to get tracing data. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_min_transfer +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Window transfer watermark, the driver will queue a + new transfer only if at least bytes + of trace data is available. Since on every switch @48 bytes + of trace data is generated, this should not be set under this + threshold. + Default 2048 + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_retry_timeout +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Read retry interval, If by the time the last usb transfer + is complete, there is no new data to be sent the driver will + sleep ms, before checking again. + Default: 2 ms + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_max_retry +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) the maximum retries to be bone before triggering a switch + and sending the currently available data regardless of the + available size. + Default: 150 + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_proc_type +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Data process type, during DvC tracing the MSC is set up in + Multi Window mode (check Intel Trace Hub Developer's Manual for + details), This attribute specifies what the dvc-trace data stream + should contain. + Available values are: + - 1 - Full blocks, + - 2 - Trimmed blocks (Block header + STP data) + - 3 - STP data only. + Default 3. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_transfer_type +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (RW) Data transfer type, This attribute specifies how the trace data + is queued in the USB requests. + Available values are: + - 1 - Auto, + - 2 - SG-List, + - 3 - Linear buffer. + Default 1. + +What: /sys/bus/dvctrace/devices/dvcith-/mdd_stats +Date: Aug 2015 +KernelVersion: 4.0 +Contact: Traian Schiau +Description: (R) Provides statistical information regarding the latest. + trace session. Available if (CONFIG_INTEL_TH_MSU_DVC_DEBUG). diff --git a/Documentation/ABI/testing/sysfs-class-rpmb b/Documentation/ABI/testing/sysfs-class-rpmb new file mode 100644 index 000000000000..d0540be7db19 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-class-rpmb @@ -0,0 +1,57 @@ +What: /sys/class/rpmb/ +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The rpmb/ class sub-directory belongs to RPMB device class. + + Few storage technologies such is EMMC, UFS, and NVMe support + Replay Protected Memory Block (RPMB) hardware partition with + common protocol and similar frame layout. + Such a partition provides authenticated and replay protected access, + hence suitable as a secure storage. + +What: /sys/class/rpmb/rpmbN/ +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN directory is created for + each RPMB registered device. 
+ +What: /sys/class/rpmb/rpmbN/type +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/type file contains the device's + underlying storage technology type: EMMC, UFS, or NVMe. + For a simulated device it will have a :SIM suffix, + e.g. EMMC:SIM. + +What: /sys/class/rpmb/rpmbN/id +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/id file contains the unique device id + in binary form, as defined by the underlying storage device. + In case of multiple RPMB devices, this lets a user determine the + correct device. + The content can be parsed according to the storage device type. + +What: /sys/class/rpmb/rpmbN/wr_cnt_max +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/wr_cnt_max file contains the + number of blocks that can be reliably written in a single request. + +What: /sys/class/rpmb/rpmbN/rd_cnt_max +Date: Jul 2018 +KernelVersion: 4.18 +Contact: Tomas Winkler +Description: + The /sys/class/rpmb/rpmbN/rd_cnt_max file contains the + number of blocks that can be read in a single request. diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons new file mode 100644 index 000000000000..acb19b91c192 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons @@ -0,0 +1,16 @@ +What: /sys/kernel/wakeup_reasons/last_resume_reason +Date: February 2014 +Contact: Ruchi Kandoi +Description: + The /sys/kernel/wakeup_reasons/last_resume_reason is + used to report wakeup reasons after the system exits suspend. + +What: /sys/kernel/wakeup_reasons/last_suspend_time +Date: March 2015 +Contact: jinqian +Description: + The /sys/kernel/wakeup_reasons/last_suspend_time is + used to report the time spent in the last suspend cycle. It contains + two numbers (in seconds) separated by a space. The first number is + the time spent in the suspend and resume processes. The second number + is the time spent in the sleep state. \ No newline at end of file diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 92eb1f42240d..1d5cb5305f8f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -880,6 +880,9 @@ dis_ucode_ldr [X86] Disable the microcode loader. + dm= [DM] Allows early creation of a device-mapper device. + See Documentation/device-mapper/boot.txt. + dma_debug=off If the kernel is compiled with DMA_API_DEBUG support, this option disables the debugging code at boot. @@ -3899,6 +3902,9 @@ reboot_cpu is s[mp]#### with #### being the processor to be used for rebooting. + reboot_panic= [KNL] + Same as the reboot parameter above, but only used in case of panic. + relax_domain_level= [KNL, SMP] Set scheduler's default relax_domain_level. See Documentation/cgroup-v1/cpusets.txt.
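To make the last_suspend_time format documented in the sysfs-kernel-wakeup_reasons entry above concrete, here is a minimal user-space sketch that parses the two space-separated values; it is illustrative only, not part of the patch set, and the error handling is deliberately simple:

    #include <stdio.h>

    /*
     * Illustrative sketch: read /sys/kernel/wakeup_reasons/last_suspend_time,
     * which (per the ABI entry above) holds two space-separated values in
     * seconds: time spent in the suspend/resume processes, and time spent
     * in the sleep state.
     */
    int main(void)
    {
            double process_time, sleep_time;
            FILE *f = fopen("/sys/kernel/wakeup_reasons/last_suspend_time", "r");

            if (!f)
                    return 1;
            if (fscanf(f, "%lf %lf", &process_time, &sleep_time) != 2) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("suspend/resume: %.3f s, asleep: %.3f s\n",
                   process_time, sleep_time);
            return 0;
    }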
diff --git a/Documentation/conf.py b/Documentation/conf.py index b691af4831fa..a57272e7820c 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -403,6 +403,8 @@ 'The kernel development community', 'manual'), ('userspace-api/index', 'userspace-api.tex', 'The Linux kernel user-space API guide', 'The kernel development community', 'manual'), + ('rpmb/index', 'rpmb.tex', 'Linux RPMB Subsystem Documentation', + 'The kernel development community', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt new file mode 100644 index 000000000000..adcaad5e5e32 --- /dev/null +++ b/Documentation/device-mapper/boot.txt @@ -0,0 +1,42 @@ +Boot time creation of mapped devices +==================================== + +It is possible to configure a device-mapper device to act as the root +device for your system in two ways. + +The first is to build an initial ramdisk which boots to a minimal +userspace which configures the device, then pivot_root(8) into it. + +For simple device-mapper configurations, it is possible to boot directly +using the following kernel command line: + +dm="<name> <uuid> <ro>,table line 1,...,table line n" + +name = the name to associate with the device; + after boot, udev, if used, will use that name to label + the device node. +uuid = may be 'none' or the UUID desired for the device. +ro = may be "ro" or "rw". If "ro", the device and device table will be + marked read-only. + +Each table line may be as normal when using the dmsetup tool except for +two variations: +1. Any use of commas will be interpreted as a newline. +2. Quotation marks cannot be escaped and cannot be used without + terminating the dm= argument. + +Unless renamed by udev, the device node created will be dm-0, as the +first minor number for the device-mapper is used during early creation. + +Example +======= + +- Booting to a linear array made up of user-mode linux block devices: + + dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \ root=/dev/dm-0 + +This will boot to a rw dm-linear target of 8192 sectors split across two +block devices identified by their major:minor numbers. After boot, udev +will rename this target to /dev/mapper/lroot (depending on the rules). +No uuid was assigned. diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt new file mode 100644 index 000000000000..c99a506c030d --- /dev/null +++ b/Documentation/devicetree/bindings/misc/memory-state-time.txt @@ -0,0 +1,8 @@ +Memory bandwidth and frequency state tracking + +Required properties: +- compatible : should be: + "memory-state-time" +- freq-tbl: Should contain entries with each frequency in Hz. +- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps. + Must match the framework power_profile.xml for the device. diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt new file mode 100644 index 000000000000..18329d39487e --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq-debugger.txt @@ -0,0 +1,8 @@ +Trusty fiq debugger interface + +Provides a single fiq for the fiq debugger. + +Required properties: +- compatible: compatible = "android,trusty-fiq-v1-*"; where * is a serial port. + +Must be a child of the node that provides fiq support ("android,trusty-fiq-v1").
diff --git a/Documentation/devicetree/bindings/trusty/trusty-fiq.txt b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt new file mode 100644 index 000000000000..de810b955bc9 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-fiq.txt @@ -0,0 +1,8 @@ +Trusty fiq interface + +Trusty provides fiq emulation. + +Required properties: +- compatible: "android,trusty-fiq-v1" + +Must be a child of the node that provides fiq support. diff --git a/Documentation/devicetree/bindings/trusty/trusty-irq.txt b/Documentation/devicetree/bindings/trusty/trusty-irq.txt new file mode 100644 index 000000000000..5aefeb8e536f --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-irq.txt @@ -0,0 +1,67 @@ +Trusty irq interface + +Trusty requires non-secure irqs to be forwarded to the secure OS. + +Required properties: +- compatible: "android,trusty-irq-v1" + +Optional properties: + +- interrupt-templates: an optional property that works together + with "interrupt-ranges" to specify the secure side to kernel IRQs mapping. + + It is a list of entries, each one of which defines a group of interrupts + having common properties, and has the following format: + < phandle irq_id_pos [templ_data]> + phandle - phandle of the interrupt controller this template is for + irq_id_pos - the position of the irq id in the interrupt specifier array + for the interrupt controller referenced by phandle. + templ_data - an array of u32 values (could be empty) in the same + format as the interrupt specifier for the interrupt controller + referenced by phandle, but with the irq id field omitted. + +- interrupt-ranges: list of entries that specifies the secure side to kernel + IRQs mapping. + + Each entry in the "interrupt-ranges" list has the following format: + < beg end templ_idx > + beg - first entry in this range + end - last entry in this range + templ_idx - index of the entry in the "interrupt-templates" property + that must be used as a template for all interrupts + in this range + +Example: +{ + gic: interrupt-controller@50041000 { + compatible = "arm,gic-400"; + #interrupt-cells = <3>; + interrupt-controller; + ... + }; + ... + IPI: interrupt-controller { + compatible = "android,CustomIPI"; + #interrupt-cells = <1>; + interrupt-controller; + }; + ... + trusty { + compatible = "android,trusty-smc-v1"; + ranges; + #address-cells = <2>; + #size-cells = <2>; + + irq { + compatible = "android,trusty-irq-v1"; + interrupt-templates = <&IPI 0>, + <&gic 1 GIC_PPI 0>, + <&gic 1 GIC_SPI 0>; + interrupt-ranges = < 0 15 0>, + <16 31 1>, + <32 223 2>; + }; + } +} + +Must be a child of the node that provides the trusty std/fast call interface. diff --git a/Documentation/devicetree/bindings/trusty/trusty-smc.txt b/Documentation/devicetree/bindings/trusty/trusty-smc.txt new file mode 100644 index 000000000000..1b39ad317c67 --- /dev/null +++ b/Documentation/devicetree/bindings/trusty/trusty-smc.txt @@ -0,0 +1,6 @@ +Trusty smc interface + +Trusty runs in secure mode on the same (ARM) CPU(s) as the current OS. + +Required properties: +- compatible: "android,trusty-smc-v1" diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index 6d9f2f9fe20e..d602f6c05972 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -53,6 +53,7 @@ available subsections can be seen below. slimbus soundwire/index fpga/index + rpmb/index ..
only:: subproject and html diff --git a/Documentation/driver-api/rpmb/conf.py b/Documentation/driver-api/rpmb/conf.py new file mode 100644 index 000000000000..15430a0b3a08 --- /dev/null +++ b/Documentation/driver-api/rpmb/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "Linux RPMB Subsystem" + +tags.add("subproject") diff --git a/Documentation/driver-api/rpmb/index.rst b/Documentation/driver-api/rpmb/index.rst new file mode 100644 index 000000000000..3813a44ad06e --- /dev/null +++ b/Documentation/driver-api/rpmb/index.rst @@ -0,0 +1,18 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +============================================== +Replay Protected Memory Block (RPMB) subsystem +============================================== + +.. toctree:: + + introduction + simulation-device.rst + rpmb-tool.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/driver-api/rpmb/introduction.rst b/Documentation/driver-api/rpmb/introduction.rst new file mode 100644 index 000000000000..403cbcf6e142 --- /dev/null +++ b/Documentation/driver-api/rpmb/introduction.rst @@ -0,0 +1,98 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +============ +Introduction +============ + +A few storage technologies, such as EMMC, UFS, and NVMe, support an RPMB +hardware partition with a common protocol and frame layout. +The RPMB partition `cannot` be accessed via the standard block layer, +but only by a set of specific commands: + +WRITE, READ, GET_WRITE_COUNTER, and PROGRAM_KEY. + +The commands and the data are embedded within :c:type:`rpmb_frame`. + +An RPMB partition provides authenticated and replay protected access, +hence it is suitable as secure storage. + +In-kernel API +------------- +The RPMB layer aims to provide an in-kernel API for Trusted Execution +Environment (TEE) devices that are capable of securely computing the block +frame signature. When a TEE device wishes to store replay protected +data, it creates an RPMB frame with the requested data, computes the HMAC of +the frame, and then requests that the storage device store the data via the +RPMB layer. + +The layer provides an API, :c:func:`rpmb_cmd_seq()`, for issuing a sequence +of raw RPMB protocol frames, which is close to the functionality provided +by the eMMC multi-ioctl interface. + +.. c:function:: int rpmb_cmd_seq(struct rpmb_dev *rdev, u8 target, struct rpmb_cmd *cmds, u32 ncmds); + + +A TEE driver can claim the RPMB interface, for example, via +:c:func:`class_interface_register`: + +.. code-block:: c + + struct class_interface tee_rpmb_intf = { + .class = &rpmb_class, + .add_dev = rpmb_add_device, + .remove_dev = rpmb_remove_device, + }; + class_interface_register(&tee_rpmb_intf); + + +RPMB device registration +------------------------ + +A storage device registers its RPMB hardware (eMMC) partition or RPMB +W-LUN (UFS) with the RPMB layer via :c:func:`rpmb_dev_register`, providing +an implementation of the :c:func:`rpmb_cmd_seq()` handler. The interface +enables sending a sequence of standard RPMB frames. + +.. code-block:: c

    struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .type = RPMB_TYPE_EMMC, + ... + }; + rpmb_dev_register(disk_to_dev(part_md->disk), &mmc_rpmb_dev_ops); + + +User space API +-------------- + +A parallel user space API is provided via the /dev/rpmbX character +device with three IOCTL commands.
+- First, ``RPMB_IOC_VER_CMD`` returns the driver protocol version; +- second, ``RPMB_IOC_CAP_CMD`` returns the capability structure; +- last, ``RPMB_IOC_SEQ_CMD`` performs a whole RPMB sequence, + including ``RESULT_READ``, supplied by the caller. +An example of user-space usage can be found at: +https://android.googlesource.com/trusty/app/storage/ + +.. code-block:: c + + struct rpmb_ioc_req_cmd ireq; + int ret; + + ireq.req_type = RPMB_WRITE_DATA; + rpmb_ioc_cmd_set(ireq.icmd, RPMB_F_WRITE, frames_in, cnt_in); + rpmb_ioc_cmd_set(ireq.ocmd, 0, frames_out, cnt_out); + + ret = ioctl(fd, RPMB_IOC_REQ_CMD, &ireq); + + +API +--- +.. kernel-doc:: include/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/core.c + +.. kernel-doc:: include/uapi/linux/rpmb.h + +.. kernel-doc:: drivers/char/rpmb/cdev.c + diff --git a/Documentation/driver-api/rpmb/rpmb-tool.rst b/Documentation/driver-api/rpmb/rpmb-tool.rst new file mode 100644 index 000000000000..3f4eed84542a --- /dev/null +++ b/Documentation/driver-api/rpmb/rpmb-tool.rst @@ -0,0 +1,19 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +========= +RPMB Tool +========= + +There is a sample rpmb tool under the tools/rpmb/ directory that exercises +RPMB devices via the RPMB character device interface (/dev/rpmbX). + +.. code-block:: none + + rpmb [-v] [-r|-s] <command> <args> + + rpmb get-info <RPMB_DEVICE> + rpmb program-key <RPMB_DEVICE> <KEY_FILE> + rpmb write-counter <RPMB_DEVICE> [KEY_FILE] + rpmb write-blocks <RPMB_DEVICE> <address> <block count> <data file> <KEY_FILE>
+ rpmb read-blocks <RPMB_DEVICE> <address> <block count>
[KEY_FILE] + + rpmb -v/--verbose: runs in verbose mode diff --git a/Documentation/driver-api/rpmb/simulation-device.rst b/Documentation/driver-api/rpmb/simulation-device.rst new file mode 100644 index 000000000000..21b7bc8bc39d --- /dev/null +++ b/Documentation/driver-api/rpmb/simulation-device.rst @@ -0,0 +1,21 @@ +.. SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +====================== +RPMB Simulation Device +====================== + +The RPMB partition simulation device is a virtual device that +simulates the RPMB protocol and uses kernel memory +as storage. + +This driver cannot promise any real security; it is suitable for testing +the RPMB subsystem itself, and was found mostly useful for testing +RPMB applications prior to RPMB key provisioning/programming, as +RPMB key programming can be performed only once in the lifetime +of the storage device. + +Implementation: +--------------- + +.. kernel-doc:: drivers/char/rpmb/rpmb_sim.c + diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index 22b4b00dee31..02ba2136a358 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -398,6 +398,8 @@ is not associated with a file: [stack] = the stack of the main process [vdso] = the "virtual dynamic shared object", the kernel system call handler + [anon:<name>] = an anonymous mapping that has been + named by userspace or if empty, the mapping is anonymous. @@ -426,6 +428,7 @@ KernelPageSize: 4 kB MMUPageSize: 4 kB Locked: 0 kB VmFlags: rd ex mr mw me dw +Name: name from userspace the first of these lines shows the same information as is displayed for the mapping in /proc/PID/maps. The remaining lines show the size of the mapping @@ -498,6 +501,9 @@ Note that there is no guarantee that every flag and associated mnemonic will be present in all further kernel releases. Things get changed, the flags may be vanished or the reverse -- new added. +The "Name" field will only be present on a mapping that has been named by +userspace, and will show the name passed in by userspace. + This file is only present if the CONFIG_MMU kernel configuration option is enabled. diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 13a7c999c04a..ae2f081324f4 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -324,6 +324,7 @@ Code Seq#(hex) Include File Comments 0xB3 00 linux/mmc/ioctl.h 0xB4 00-0F linux/gpio.h 0xB5 00-0F uapi/linux/rpmsg.h +0xB5 80-8F linux/uapi/linux/rpmb.h 0xB6 all linux/fpga-dfl.h 0xC0 00-0F linux/usb/iowarrior.h 0xCA 00-0F uapi/misc/cxl.h diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 960de8fe3f40..f234dbe3c3f1 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -630,6 +630,16 @@ tcp_fastopen_blackhole_timeout_sec - INTEGER 0 to disable the blackhole detection. By default, it is set to 1hr. +tcp_fwmark_accept - BOOLEAN + If set, incoming connections to listening sockets that do not have a + socket mark will set the mark of the accepting socket to the fwmark of + the incoming SYN packet. This will cause all packets on that connection + (starting from the first SYNACK) to be sent with that fwmark. The + listening socket's mark is unchanged. Listening sockets that already + have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are + unaffected.
+ Default: 0 + tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt will be retransmitted. Should not be higher than 127. Default value diff --git a/Documentation/scheduler/sched-tune.txt b/Documentation/scheduler/sched-tune.txt new file mode 100644 index 000000000000..5df0ea361311 --- /dev/null +++ b/Documentation/scheduler/sched-tune.txt @@ -0,0 +1,413 @@ + Central, scheduler-driven, power-performance control + (EXPERIMENTAL) +Abstract ======== The topic of a single simple power-performance tunable, that is wholly scheduler centric, and has well defined and predictable properties has come up on several occasions in the past [1,2]. With techniques such as a scheduler driven DVFS [3], we now have a good framework for implementing such a tunable. This document describes the overall ideas behind its design and implementation. Table of Contents ================= 1. Motivation 2. Introduction 3. Signal Boosting Strategy 4. OPP selection using boosted CPU utilization 5. Per task group boosting 6. Per-task wakeup-placement-strategy Selection 7. Question and Answers - What about "auto" mode? - What about boosting on a congested system? - How CPUs are boosted when we have tasks with multiple boost values? 8. References 1. Motivation ============= Sched-DVFS [3] was a new event-driven cpufreq governor which allows the scheduler to select the optimal DVFS operating point (OPP) for running a task allocated to a CPU. Later, the cpufreq maintainers introduced a similar governor, schedutil. The introduction of schedutil also enables running workloads at the most energy efficient OPPs. However, sometimes it may be desired to intentionally boost the performance of a workload even if that could imply a reasonable increase in energy consumption. For example, in order to reduce the response time of a task, we may want to run the task at a higher OPP than the one that is actually required by its CPU bandwidth demand. This last requirement is especially important if we consider that one of the main goals of the utilization-driven governor component is to replace all currently available CPUFreq policies. Since sched-DVFS and schedutil are event based, as opposed to the sampling driven governors we currently have, they are already more responsive at selecting the optimal OPP to run tasks allocated to a CPU. However, just tracking the actual task load demand may not be enough from a performance standpoint. For example, it is not possible to get behaviors similar to those provided by the "performance" and "interactive" CPUFreq governors. This document describes an implementation of a tunable, stacked on top of the utilization-driven governors, which extends their functionality to support task performance boosting. By "performance boosting" we mean the reduction of the time required to complete a task activation, i.e. the time elapsed from a task wakeup to its next deactivation (e.g. because it goes back to sleep or it terminates). For example, if we consider a simple periodic task which executes the same workload for 5[s] every 20[s] while running at a certain OPP, a boosted execution of that task must complete each of its activations in less than 5[s]. A previous attempt [5] to introduce such a boosting feature has not been successful mainly because of the complexity of the proposed solution.
Previous versions of the approach described in this document exposed a single simple interface to user-space. This single tunable knob allowed the tuning of system wide scheduler behaviours ranging from energy efficiency at one end through to incremental performance boosting at the other end. This first tunable affects all tasks. However, that is not useful for Android products, so in this version only a more advanced extension of the concept is provided, which uses CGroups to boost the performance of only selected tasks while using the energy efficient default for all others. The rest of this document introduces in more detail the proposed solution, which has been named SchedTune. 2. Introduction =============== SchedTune exposes a simple user-space interface provided through a new CGroup controller 'stune' which provides two power-performance tunables per group: <mountpoint>/<group>/schedtune.prefer_idle <mountpoint>/<group>/schedtune.boost The CGroup implementation permits arbitrary user-space defined task classification to tune the scheduler for different goals depending on the specific nature of the task, e.g. background vs interactive vs low-priority. More details are given in section 5. 2.1 Boosting ============ The boost value is expressed as an integer in the range [-100..0..100]. A value of 0 (default) configures the CFS scheduler for maximum energy efficiency. This means that sched-DVFS runs the tasks at the minimum OPP required to satisfy their workload demand. A value of 100 configures the scheduler for maximum performance, which translates to the selection of the maximum OPP on that CPU. A value of -100 configures the scheduler for minimum performance, which translates to the selection of the minimum OPP on that CPU. Values between -100 and 100 can be used to suit other scenarios, for example to satisfy interactive response requirements or to react to other system events (battery level etc). The overall design of the SchedTune module is built on top of "Per-Entity Load Tracking" (PELT) signals and sched-DVFS by introducing a bias on the Operating Performance Point (OPP) selection. Each time a task is allocated on a CPU, cpufreq is given the opportunity to tune the operating frequency of that CPU to better match the workload demand. The selection of the actual OPP being activated is influenced by the boost value for the task CGroup. This simple biasing approach leverages existing frameworks, which means minimal modifications to the scheduler, and yet it makes it possible to achieve a range of different behaviours all from a single simple tunable knob. In EAS schedulers, we use boosted task and CPU utilization for energy calculation and energy-aware task placement. 2.2 prefer_idle =============== This is a flag which indicates to the scheduler that userspace would like the scheduler to focus on energy or to focus on performance. A value of 0 (default) signals to the CFS scheduler that tasks in this group can be placed according to the energy-aware wakeup strategy. A value of 1 signals to the CFS scheduler that tasks in this group should be placed to minimise wakeup latency. The value is combined with the boost value: task placement will not be boost aware; however, CPU OPP selection is still boost aware. Android platforms typically use this flag for application tasks which the user is currently interacting with. 3.
Signal Boosting Strategy =========================== The whole PELT machinery works based on the value of a few load tracking signals which basically track the CPU bandwidth requirements for tasks and the capacity of CPUs. The basic idea behind the SchedTune knob is to artificially inflate some of these load tracking signals to make a task or RQ appear more demanding than it actually is. Which signals have to be inflated depends on the specific "consumer". However, independently from the specific (signal, consumer) pair, it is important to define a simple and possibly consistent strategy for the concept of boosting a signal. A boosting strategy defines how the "abstract" user-space defined sched_cfs_boost value is translated into an internal "margin" value to be added to a signal to get its inflated value: margin := boosting_strategy(sched_cfs_boost, signal) boosted_signal := signal + margin Different boosting strategies were identified and analyzed before selecting the one found to be most effective. Signal Proportional Compensation (SPC) -------------------------------------- In this boosting strategy the sched_cfs_boost value is used to compute a margin which is proportional to the complement of the original signal. When a signal has a maximum possible value, its complement is defined as the delta between the actual value and its possible maximum. Since the tunable implementation uses signals which have SCHED_LOAD_SCALE as the maximum possible value, the margin becomes: margin := sched_cfs_boost * (SCHED_LOAD_SCALE - signal) Using this boosting strategy: - a 100% sched_cfs_boost means that the signal is scaled to the maximum value - each value in the range of sched_cfs_boost effectively inflates the signal in question by a quantity which is proportional to the maximum value. For example, by applying the SPC boosting strategy to the selection of the OPP to run a task it is possible to achieve these behaviors: - 0% boosting: run the task at the minimum OPP required by its workload - 100% boosting: run the task at the maximum OPP available for the CPU - 50% boosting: run at the half-way OPP between minimum and maximum Which means that, at 50% boosting, a task will be scheduled to run at half of the maximum theoretically achievable performance on the specific target platform. A graphical representation of an SPC boosted signal is represented in the following figure where: a) "-" represents the original signal b) "b" represents a 50% boosted signal c) "p" represents a 100% boosted signal ^ | SCHED_LOAD_SCALE +-----------------------------------------------------------------+ |pppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppppp | | boosted_signal | bbbbbbbbbbbbbbbbbbbbbbbb | | original signal | bbbbbbbbbbbbbbbbbbbbbbbb+----------------------+ | | |bbbbbbbbbbbbbbbbbb | | | | | | | | +-----------------------+ | | | | | | |------------------+ | | +-----------------------------------------------------------------------> The plot above shows a ramped load signal (titled 'original_signal') and its boosted equivalent. For each step of the original signal the boosted signal corresponding to a 50% boost is midway between the original signal and the upper bound. Boosting by 100% generates a boosted signal which is always saturated to the upper bound. 4.
OPP selection using boosted CPU utilization ============================================== It is worth calling out that the implementation does not introduce any new load signals. Instead, it provides an API to tune existing signals. This tuning is done on demand and only in scheduler code paths where it is sensible to do so. The new API calls are defined to return either the default signal or a boosted one, depending on the value of sched_cfs_boost. This is a clean and non-invasive modification of the existing code paths. The signal representing a CPU's utilization is boosted according to the previously described SPC boosting strategy. To sched-DVFS, this allows a CPU (i.e. CFS run-queue) to appear more used than it actually is. Thus, with sched_cfs_boost enabled we have the following main functions to get the current utilization of a CPU: cpu_util() boosted_cpu_util() The new boosted_cpu_util() is similar to the first but returns a boosted utilization signal which is a function of the sched_cfs_boost value. This function is used in the CFS scheduler code paths where sched-DVFS needs to decide the OPP to run a CPU at. For example, this allows selecting the highest OPP for a CPU which has the boost value set to 100%. 5. Per task group boosting ========================== On battery powered devices there usually are many background services which are long running and need energy efficient scheduling. On the other hand, some applications are more performance sensitive and require an interactive response and/or maximum performance, regardless of the energy cost. To better service such scenarios, the SchedTune implementation has an extension that provides a more fine grained boosting interface. A new CGroup controller, namely "schedtune", can be enabled, which allows task groups with different boost values to be defined and configured. Tasks that require special performance can be put into separate CGroups. The value of the boost associated with the tasks in this group can be specified using a single knob exposed by the CGroup controller: schedtune.boost This knob allows the definition of a boost value that is to be used for SPC boosting of all tasks attached to this group. The current schedtune controller implementation is really simple and has these main characteristics: 1) It is only possible to create hierarchies one level deep The root control group defines the system-wide boost value to be applied by default to all tasks. Its direct subgroups are named "boost groups" and they define the boost value for a specific set of tasks. Further nested subgroups are not allowed since they do not have a sensible meaning from a user-space standpoint. 2) It is possible to define only a limited number of "boost groups" This number is defined at compile time and by default configured to 16. This is a design decision motivated by two main reasons: a) In a real system we do not expect utilization scenarios with more than a few boost groups. For example, a reasonable collection of groups could be just "background", "interactive" and "performance". b) It simplifies the implementation considerably, especially for the code which has to compute the per CPU boosting once there are multiple RUNNABLE tasks with different boost values. Such a simple design should allow servicing the main utilization scenarios identified so far. It provides a simple interface which can be used to manage the power-performance of all tasks or only selected tasks. Moreover, this interface can be easily integrated by user-space run-times (e.g. Android, ChromeOS) to implement a QoS solution for task boosting based on task classification, which has been a long-standing requirement.
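To make the SPC strategy from section 3 concrete, the following minimal sketch computes a boosted signal. It assumes SCHED_LOAD_SCALE is 1024 and that the boost is expressed in percent; the negative-boost branch is an assumption derived from the [-100..100] range in section 2.1, not a copy of the actual SchedTune code:

    #define SCHED_LOAD_SCALE 1024

    /*
     * Illustrative only: margin := boost * (SCHED_LOAD_SCALE - signal),
     * with boost expressed in percent. A negative boost is assumed to
     * shrink the signal proportionally.
     */
    static long schedtune_margin(long signal, long boost_pct)
    {
            long margin;

            if (boost_pct >= 0)
                    margin = (SCHED_LOAD_SCALE - signal) * boost_pct;
            else
                    margin = signal * boost_pct;

            return margin / 100;
    }

    static long boosted_signal(long signal, long boost_pct)
    {
            return signal + schedtune_margin(signal, boost_pct);
    }

For example, with a signal of 512 a 50% boost yields 512 + (1024 - 512) * 50 / 100 = 768, which is midway between the original value and the upper bound, matching the plot in section 3.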
Setup and usage --------------- 0. Use a kernel with CONFIG_SCHED_TUNE support enabled 1. Check that the "schedtune" CGroup controller is available: root@linaro-nano:~# cat /proc/cgroups #subsys_name hierarchy num_cgroups enabled cpuset 0 1 1 cpu 0 1 1 schedtune 0 1 1 2. Mount a tmpfs to create the CGroups mount point (Optional) root@linaro-nano:~# sudo mount -t tmpfs cgroups /sys/fs/cgroup 3. Mount the "schedtune" controller root@linaro-nano:~# mkdir /sys/fs/cgroup/stune root@linaro-nano:~# sudo mount -t cgroup -o schedtune stune /sys/fs/cgroup/stune 4. Create task groups and configure their specific boost value (Optional) For example, here we create a "performance" boost group configured to boost all its tasks to 100% root@linaro-nano:~# mkdir /sys/fs/cgroup/stune/performance root@linaro-nano:~# echo 100 > /sys/fs/cgroup/stune/performance/schedtune.boost 5. Move tasks into the boost group For example, the following moves the tasks with PID $TASKPID (and all its threads) into the "performance" boost group. root@linaro-nano:~# echo $TASKPID > /sys/fs/cgroup/stune/performance/cgroup.procs This simple configuration allows only the threads of the $TASKPID task to run, when needed, at the highest OPP on the most capable CPU of the system. 6. Per-task wakeup-placement-strategy Selection =============================================== Many devices have a number of CFS tasks in use which require an absolute minimum wakeup latency, and many tasks for which wakeup latency is not important. For touch-driven environments, removing additional wakeup latency can be critical. When you use the SchedTune CGroup controller, you have access to a second parameter which allows a group to be marked such that energy_aware task placement is bypassed for tasks belonging to that group. prefer_idle=0 (default - use energy-aware task placement if available) prefer_idle=1 (never use energy-aware task placement for these tasks) Since the regular wakeup task placement algorithm in CFS is biased for performance, this has the effect of restoring minimum wakeup latency for the desired tasks whilst still allowing energy-aware wakeup placement to save energy for other tasks. 7. Question and Answers ======================= What about "auto" mode? ----------------------- The 'auto' mode as described in [5] can be implemented by interfacing SchedTune with some suitable user-space element. This element could use the exposed system-wide or cgroup based interface. How are multiple groups of tasks with different boost values managed? --------------------------------------------------------------------- The current SchedTune implementation keeps track of the boosted RUNNABLE tasks on a CPU. The CPU utilization seen by the scheduler-driven cpufreq governors (and used to select an appropriate OPP) is boosted with a value which is the maximum of the boost values of the currently RUNNABLE tasks in its RQ.
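A minimal sketch of that per-CPU aggregation is shown below; the 16-slot limit mirrors the compile-time default from section 5, but the structure and function names are hypothetical rather than taken from the actual implementation, and negative-boost handling is elided for brevity:

    #define BOOST_GROUPS_COUNT 16

    /* Hypothetical per-CPU bookkeeping: one slot per boost group. */
    struct boost_groups {
            int boost[BOOST_GROUPS_COUNT];  /* boost value of each group */
            int tasks[BOOST_GROUPS_COUNT];  /* RUNNABLE tasks per group */
    };

    /*
     * Return the maximum boost value among groups that currently have
     * RUNNABLE tasks on this CPU; with no boosted tasks the CPU falls
     * back to the root (0%) boost.
     */
    static int cpu_boost_max(const struct boost_groups *bg)
    {
            int idx, boost_max = 0;

            for (idx = 0; idx < BOOST_GROUPS_COUNT; idx++) {
                    if (bg->tasks[idx] > 0 && bg->boost[idx] > boost_max)
                            boost_max = bg->boost[idx];
            }
            return boost_max;
    }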
+ +This allows cpufreq to boost a CPU only while there are boosted tasks ready +to run and switch back to the energy efficient mode as soon as the last boosted +task is dequeued. + + +8. References +============= +[1] http://lwn.net/Articles/552889 +[2] http://lkml.org/lkml/2012/5/18/91 +[3] http://lkml.org/lkml/2015/6/26/620 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 37a679501ddc..0e4a8129e86f 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -693,7 +693,8 @@ allowed to execute. perf_event_paranoid: Controls use of the performance events system by unprivileged -users (without CAP_SYS_ADMIN). The default value is 2. +users (without CAP_SYS_ADMIN). The default value is 3 if +CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise. -1: Allow use of (almost) all events by all users Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK @@ -701,6 +702,7 @@ users (without CAP_SYS_ADMIN). The default value is 2. Disallow raw tracepoint access by users without CAP_SYS_ADMIN >=1: Disallow CPU event access by users without CAP_SYS_ADMIN >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN +>=3: Disallow all event access by users without CAP_SYS_ADMIN ============================================================== diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt index 7d73882e2c27..a48baf202265 100644 --- a/Documentation/sysctl/vm.txt +++ b/Documentation/sysctl/vm.txt @@ -31,6 +31,7 @@ Currently, these files are in /proc/sys/vm: - dirty_writeback_centisecs - drop_caches - extfrag_threshold +- extra_free_kbytes - hugetlb_shm_group - laptop_mode - legacy_va_layout @@ -274,6 +275,21 @@ any throttling. ============================================================== +extra_free_kbytes + +This parameter tells the VM to keep extra free memory between the threshold +where background reclaim (kswapd) kicks in, and the threshold where direct +reclaim (by allocating processes) kicks in. + +This is useful for workloads that require low latency memory allocations +and have a bounded burstiness in memory allocations, for example a +realtime application that receives and transmits network traffic +(causing in-kernel memory allocations) with a maximum total message burst +size of 200MB may need 200MB of extra free memory to avoid direct reclaim +related latencies. + +============================================================== + hugetlb_shm_group hugetlb_shm_group contains group id that is allowed to create SysV diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX index af0d23968ee7..257aec22dbff 100644 --- a/Documentation/virtual/00-INDEX +++ b/Documentation/virtual/00-INDEX @@ -9,3 +9,6 @@ kvm/ - Kernel Virtual Machine. See also http://linux-kvm.org uml/ - User Mode Linux, builds/runs Linux kernel as a userspace program. + +acrn/ + - ACRN Project. See also http://github.com/projectacrn/ diff --git a/Documentation/virtual/acrn/00-INDEX b/Documentation/virtual/acrn/00-INDEX new file mode 100644 index 000000000000..5beb50eef9e1 --- /dev/null +++ b/Documentation/virtual/acrn/00-INDEX @@ -0,0 +1,8 @@ +00-INDEX + - this file. +index.rst + - Index. +vhm.rst + - virtio and hypervisor service module (VHM) APIs. +vbs.rst + - virtio and backend service (VBS) APIs. 
diff --git a/Documentation/virtual/acrn/conf.py b/Documentation/virtual/acrn/conf.py new file mode 100644 index 000000000000..ed247df22700 --- /dev/null +++ b/Documentation/virtual/acrn/conf.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8; mode: python -*- + +project = "ACRN Project" + +tags.add("subproject") diff --git a/Documentation/virtual/acrn/index.rst b/Documentation/virtual/acrn/index.rst new file mode 100644 index 000000000000..3630d4fe3207 --- /dev/null +++ b/Documentation/virtual/acrn/index.rst @@ -0,0 +1,17 @@ +.. -*- coding: utf-8; mode: rst -*- + +============================= +ACRN Project +============================= + +.. toctree:: + + vbs.rst + vhm.rst + +.. only:: subproject + + Indices + ======= + + * :ref:`genindex` diff --git a/Documentation/virtual/acrn/vbs.rst b/Documentation/virtual/acrn/vbs.rst new file mode 100644 index 000000000000..40a0683a1c0b --- /dev/null +++ b/Documentation/virtual/acrn/vbs.rst @@ -0,0 +1,20 @@ +================================ +Virtio and Backend Service (VBS) +================================ + +The Virtio and Backend Service (VBS) is part of the ACRN Project. + +The VBS can be further divided into two parts: VBS in user space (VBS-U) +and VBS in kernel space (VBS-K). + +Example: +-------- +A reference driver for VBS-K can be found at :c:type:`struct vbs_rng`. + +.. kernel-doc:: drivers/vbs/vbs_rng.c + +APIs: +----- + +.. kernel-doc:: include/linux/vbs/vbs.h +.. kernel-doc:: include/linux/vbs/vq.h diff --git a/Documentation/virtual/acrn/vhm.rst b/Documentation/virtual/acrn/vhm.rst new file mode 100644 index 000000000000..901cff492e2b --- /dev/null +++ b/Documentation/virtual/acrn/vhm.rst @@ -0,0 +1,13 @@ +================================== +Virtio and Hypervisor Module (VHM) +================================== + +The Virtio and Hypervisor service Module (VHM) is part of the ACRN Project. + +APIs: +----- + +.. kernel-doc:: include/linux/vhm/acrn_vhm_ioreq.h +.. kernel-doc:: include/linux/vhm/acrn_vhm_mm.h +.. kernel-doc:: include/linux/vhm/vhm_ioctl_defs.h +..
kernel-doc:: include/linux/vhm/vhm_vm_mngt.h diff --git a/MAINTAINERS b/MAINTAINERS index 48a65c3a4189..357207d5f808 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5129,6 +5129,21 @@ S: Maintained F: drivers/media/usb/dvb-usb-v2/dvb_usb* F: drivers/media/usb/dvb-usb-v2/usb_urb.c +DVC_TRACE BUS DRIVER +M: Traian Schiau +S: Maintained +F: drivers/bus/dvctrace.c +F: include/linux/dvctrace.h +F: Documentation/ABI/testing/sysfs-bus-dvctrace + +DVC_TRACE USB_GADGET DRIVER +M: Traian Schiau +S: Maintained +F: drivers/usb/gadget/function/f_dvctrace.c +F: drivers/usb/gadget/function/u_dvctrace.h +F: include/linux/usb/debug.h +F: Documentation/ABI/testing/configfs-usb-gadget-dvctrace + DYNAMIC DEBUG M: Jason Baron S: Maintained @@ -7579,6 +7594,12 @@ S: Supported F: Documentation/trace/intel_th.rst F: drivers/hwtracing/intel_th/ +INTEL(R) TRACE HUB TO USB-DVC.TRACE +M: Traian Schiau +S: Supported +F: drivers/hwtracing/intel_th/msu-dvc.c +F: Documentation/ABI/testing/sysfs-bus-dvctrace-devices-dvcith + INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) M: Ning Sun L: tboot-devel@lists.sourceforge.net @@ -12513,6 +12534,17 @@ F: include/net/rose.h F: include/uapi/linux/rose.h F: net/rose/ +RPMB SUBSYSTEM +M: Tomas Winkler +L: linux-kernel@vger.kernel.org +S: Supported +F: drivers/char/rpmb/* +F: include/uapi/linux/rpmb.h +F: include/linux/rpmb.h +F: Documentation/ABI/testing/sysfs-class-rpmb +F: Documentation/driver-api/rpmb/ +F: tools/rpmb/ + RTL2830 MEDIA DRIVER M: Antti Palosaari L: linux-media@vger.kernel.org diff --git a/Makefile b/Makefile index 9b2df076885a..9afe2ca3b29c 100644 --- a/Makefile +++ b/Makefile @@ -482,7 +482,8 @@ endif ifeq ($(cc-name),clang) ifneq ($(CROSS_COMPILE),) -CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) +CLANG_TRIPLE ?= $(CROSS_COMPILE) +CLANG_TARGET := --target=$(notdir $(CLANG_TRIPLE:%-=%)) GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
endif ifneq ($(GCC_TOOLCHAIN),) @@ -700,6 +701,7 @@ KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,) KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier) KBUILD_CFLAGS += $(call cc-disable-warning, gnu) KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member) +KBUILD_CFLAGS += $(call cc-disable-warning, duplicate-decl-specifier) # Quiet clang warning: comparison of unsigned expression < 0 is always false KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare) # CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the @@ -788,6 +790,30 @@ KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections LDFLAGS_vmlinux += --gc-sections endif +ifdef CONFIG_CFI_CLANG +cfi-clang-flags += -fsanitize=cfi +DISABLE_CFI_CLANG := -fno-sanitize=cfi +ifdef CONFIG_MODULES +cfi-clang-flags += -fsanitize-cfi-cross-dso +DISABLE_CFI_CLANG += -fno-sanitize-cfi-cross-dso +endif +ifdef CONFIG_CFI_PERMISSIVE +cfi-clang-flags += -fsanitize-recover=cfi -fno-sanitize-trap=cfi +endif + +# allow disabling only clang CFI where needed +export DISABLE_CFI_CLANG +endif + +ifdef CONFIG_CFI +# cfi-flags are re-tested in prepare-compiler-check +cfi-flags := $(cfi-clang-flags) +KBUILD_CFLAGS += $(cfi-flags) + +DISABLE_CFI := $(DISABLE_CFI_CLANG) +export DISABLE_CFI +endif + # arch Makefile may override CC so keep this after arch Makefile is included NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include) @@ -1114,6 +1140,12 @@ uapi-asm-generic: PHONY += prepare-objtool prepare-objtool: $(objtool_target) +ifdef cfi-flags + ifeq ($(call cc-option, $(cfi-flags)),) + @echo Cannot use CONFIG_CFI: $(cfi-flags) not supported by compiler >&2 && exit 1 + endif +endif + # Generate some files # --------------------------------------------------------------------------- diff --git a/arch/Kconfig b/arch/Kconfig index 6801123932a5..9379d03bd5a4 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -474,6 +474,34 @@ config STACKPROTECTOR_STRONG about 20% of all kernel functions, which increases the kernel code size by about 2%. +config CFI + bool + +config CFI_PERMISSIVE + bool "Use CFI in permissive mode" + depends on CFI + help + When selected, Control Flow Integrity (CFI) violations result in a + warning instead of a kernel panic. This option is useful for finding + CFI violations in drivers during development. + +config CFI_CLANG + bool "Use clang Control Flow Integrity (CFI) (EXPERIMENTAL)" + depends on CC_IS_CLANG + depends on KALLSYMS + select CFI + help + This option enables clang Control Flow Integrity (CFI), which adds + runtime checking for indirect function calls. + +config CFI_CLANG_SHADOW + bool "Use CFI shadow to speed up cross-module checks" + default y + depends on CFI_CLANG + help + If you select this option, the kernel builds a fast look-up table of + CFI check functions in loaded modules to reduce overhead. + config HAVE_ARCH_WITHIN_STACK_FRAMES bool help diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e8cd55a5b04c..1d67c443f9be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1839,6 +1839,21 @@ config DEPRECATED_PARAM_STRUCT This was deprecated in 2001 and announced to live on for 5 years. Some old boot loaders still use this way. +config BUILD_ARM_APPENDED_DTB_IMAGE + bool "Build a concatenated zImage/dtb by default" + depends on OF + help + Enabling this option will cause a concatenated zImage and list of + DTBs to be built by default (instead of a standalone zImage.) 
+ The image will be built in arch/arm/boot/zImage-dtb +config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES + string "Default dtb names" + depends on BUILD_ARM_APPENDED_DTB_IMAGE + help + Space separated list of names of dtbs to append when + building a concatenated zImage-dtb. + # Compressed boot loader in ROM. Yes, we really want to ask about # TEXT and BSS so we preserve their values in the config files. config ZBOOT_ROM_TEXT diff --git a/arch/arm/Makefile b/arch/arm/Makefile index d1516f85f25d..7574964b83b9 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -303,6 +303,8 @@ libs-y := arch/arm/lib/ $(libs-y) boot := arch/arm/boot ifeq ($(CONFIG_XIP_KERNEL),y) KBUILD_IMAGE := $(boot)/xipImage +else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y) +KBUILD_IMAGE := $(boot)/zImage-dtb else KBUILD_IMAGE := $(boot)/zImage endif @@ -356,6 +358,9 @@ ifeq ($(CONFIG_VDSO),y) $(Q)$(MAKE) $(build)=arch/arm/vdso $@ endif +zImage-dtb: vmlinux scripts dtbs + $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ + # We use MRPROPER_FILES and CLEAN_FILES now archclean: $(Q)$(MAKE) $(clean)=$(boot) diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore index ce1c5ff746e7..025d8aaf013d 100644 --- a/arch/arm/boot/.gitignore +++ b/arch/arm/boot/.gitignore @@ -3,3 +3,4 @@ zImage xipImage bootpImage uImage +zImage-dtb diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile index a3af4dc08c3e..3e3199ac1820 100644 --- a/arch/arm/boot/Makefile +++ b/arch/arm/boot/Makefile @@ -16,6 +16,7 @@ OBJCOPYFLAGS :=-O binary -R .comment -S ifneq ($(MACHINE),) include $(MACHINE)/Makefile.boot endif +include $(srctree)/arch/arm/boot/dts/Makefile # Note: the following conditions must always be true: # ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET) @@ -29,6 +30,14 @@ export ZRELADDR INITRD_PHYS PARAMS_PHYS targets := Image zImage xipImage bootpImage uImage +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + ifeq ($(CONFIG_XIP_KERNEL),y) cmd_deflate_xip_data = $(CONFIG_SHELL) -c \ @@ -66,6 +75,10 @@ $(obj)/compressed/vmlinux: $(obj)/Image FORCE $(obj)/zImage: $(obj)/compressed/vmlinux FORCE $(call if_changed,objcopy) +$(obj)/zImage-dtb: $(obj)/zImage $(DTB_OBJS) FORCE + $(call if_changed,cat) + @echo ' Kernel: $@ is ready' + endif ifneq ($(LOADADDR),) diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index ac6b90e9d806..75663ed4817f 100644 --- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -42,6 +42,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <990>; }; cpu1: cpu@1 { @@ -51,6 +52,7 @@ cci-control-port = <&cci_control1>; cpu-idle-states = <&CLUSTER_SLEEP_BIG>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <990>; }; cpu2: cpu@2 { @@ -60,6 +62,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient = <133>; }; cpu3: cpu@3 { @@ -69,6 +72,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient = <133>; }; cpu4: cpu@4 { @@ -78,6 +82,7 @@ cci-control-port = <&cci_control2>; cpu-idle-states = <&CLUSTER_SLEEP_LITTLE>; capacity-dmips-mhz = <516>; + dynamic-power-coefficient =
<133>; }; idle-states { diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index fc33444e94f0..2721877d5a11 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -2,6 +2,12 @@ CONFIG_SYSVIPC=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y CONFIG_EMBEDDED=y CONFIG_PERF_EVENTS=y @@ -116,6 +122,7 @@ CONFIG_PCI_ENDPOINT=y CONFIG_PCI_ENDPOINT_CONFIGFS=y CONFIG_PCI_EPF_TEST=m CONFIG_SMP=y +CONFIG_SCHED_MC=y CONFIG_NR_CPUS=16 CONFIG_SECCOMP=y CONFIG_ARM_APPENDED_DTB=y @@ -124,10 +131,10 @@ CONFIG_KEXEC=y CONFIG_EFI=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m -CONFIG_CPU_FREQ_GOV_USERSPACE=m -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_CPUFREQ_DT=y CONFIG_ARM_IMX6Q_CPUFREQ=y @@ -137,6 +144,7 @@ CONFIG_ARM_CPUIDLE=y CONFIG_ARM_ZYNQ_CPUIDLE=y CONFIG_ARM_EXYNOS_CPUIDLE=y CONFIG_KERNEL_MODE_NEON=y +CONFIG_ENERGY_MODEL=y CONFIG_NET=y CONFIG_PACKET=y CONFIG_UNIX=y diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig new file mode 100644 index 000000000000..69157c4c21fd --- /dev/null +++ b/arch/arm/configs/ranchu_defconfig @@ -0,0 +1,313 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_ARCH_MMAP_RND_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARCH_VIRT=y +CONFIG_ARM_KERNMEM_PERMS=y +CONFIG_SMP=y +CONFIG_PREEMPT=y +CONFIG_AEABI=y +CONFIG_HIGHMEM=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_VFP=y +CONFIG_NEON=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y 
+CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMSC911X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_AMBAKMI=y +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y 
+CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_PL031=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_VIRTUALIZATION=y diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 5d88d2f22b2c..201dc2011c16 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -30,9 +30,15 @@ const struct cpumask *cpu_coregroup_mask(int cpu); /* Replace task scheduler's default frequency-invariant accounting */ #define arch_scale_freq_capacity topology_get_freq_scale +/* Replace task scheduler's default max-frequency-invariant accounting */ +#define arch_scale_max_freq_capacity topology_get_max_freq_scale + /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale +/* Enable topology flag updates */ +#define arch_update_cpu_topology topology_update_cpu_topology + #else static inline void init_cpu_topology(void) { } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1b1a0e95c751..da5bc2c9392e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1239,6 +1239,23 @@ config CMDLINE entering them here. As a minimum, you should specify the the root device (e.g. root=/dev/nfs). 
+choice
+	prompt "Kernel command line type" if CMDLINE != ""
+	default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Uses the command-line options passed by the boot loader. If
+	  the boot loader doesn't provide any, the default kernel command
+	  string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
@@ -1246,6 +1263,7 @@ config CMDLINE_FORCE
 	  loader passes other arguments to the kernel. This is useful if
 	  you cannot or don't want to change the command-line options
 	  your boot loader passes to the kernel.
+endchoice
 
 config EFI_STUB
 	bool
@@ -1280,6 +1298,41 @@ config DMI
 	  However, even with this option, the resultant kernel should
 	  continue to boot on existing non-UEFI platforms.
 
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+	bool "Build a concatenated Image.gz/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenation of Image.gz and a
+	  list of DTBs to be built by default (instead of a standalone
+	  Image.gz). The image will be built in arch/arm64/boot/Image.gz-dtb.
+choice
+	prompt "Appended DTB Kernel Image name"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  This choice selects the specific kernel image, Image or Image.gz,
+	  that is used for final image creation. The image will be built in
+	  arch/arm64/boot/IMAGE-NAME-dtb.
+
+	config IMG_GZ_DTB
+		bool "Image.gz-dtb"
+	config IMG_DTB
+		bool "Image-dtb"
+endchoice
+
+config BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME
+	string
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	default "Image.gz-dtb" if IMG_GZ_DTB
+	default "Image-dtb" if IMG_DTB
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Space-separated list of names of the dtbs to append when
+	  building a concatenated Image.gz-dtb.
+
 endmenu
 
 config COMPAT
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 106039d25e2f..0b0f5b7cead1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -112,10 +112,15 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 # Default target when executing plain make
 boot		:= arch/arm64/boot
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE	:= $(boot)/$(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
+else
 KBUILD_IMAGE	:= $(boot)/Image.gz
+endif
+
 KBUILD_DTBS	:= dtbs
 
-all:	Image.gz $(KBUILD_DTBS)
+all:	Image.gz $(KBUILD_DTBS) $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_KERNEL_IMAGE_NAME))
 
 Image: vmlinux
@@ -138,6 +143,12 @@ dtbs: prepare scripts
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
 
+Image-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.gz-dtb: vmlinux scripts dtbs Image.gz
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index 8dab0bb6ae66..34e35209fc2e 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -1,2 +1,4 @@
 Image
+Image-dtb
 Image.gz
+Image.gz-dtb
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 1f012c506434..2c8cb864315e 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -14,16 +14,29 @@
 # Based on the ia64 boot/Makefile.
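#
# A usage sketch for the appended-DTB targets above; the dtb name
# "vendor/board" is purely hypothetical:
#
#	make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- Image.gz-dtb
#
# With CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES="vendor/board" this
# produces arch/arm64/boot/Image.gz-dtb, which is Image.gz with
# dts/vendor/board.dtb concatenated after it (see the if_changed,cat
# rules below).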
# +include $(srctree)/arch/arm64/boot/dts/Makefile + OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.gz +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST)) + $(obj)/Image: vmlinux FORCE $(call if_changed,objcopy) $(obj)/Image.bz2: $(obj)/Image FORCE $(call if_changed,bzip2) +$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE + $(call if_changed,cat) + $(obj)/Image.gz: $(obj)/Image FORCE $(call if_changed,gzip) @@ -36,6 +49,9 @@ $(obj)/Image.lzma: $(obj)/Image FORCE $(obj)/Image.lzo: $(obj)/Image FORCE $(call if_changed,lzo) +$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE + $(call if_changed,cat) + install: $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ $(obj)/Image System.map "$(INSTALL_PATH)" diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile index 4690364d584b..ce19f2766412 100644 --- a/arch/arm64/boot/dts/Makefile +++ b/arch/arm64/boot/dts/Makefile @@ -26,3 +26,17 @@ subdir-y += synaptics subdir-y += ti subdir-y += xilinx subdir-y += zte + +targets += dtbs + +DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES)) +ifneq ($(DTB_NAMES),) +DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES)) +else +DTB_LIST := $(dtb-y) +endif +targets += $(DTB_LIST) + +dtbs: $(addprefix $(obj)/, $(DTB_LIST)) + +clean-files := dts/*.dtb *.dtb diff --git a/arch/arm64/boot/dts/arm/juno-r2.dts b/arch/arm64/boot/dts/arm/juno-r2.dts index ab77adb4f3c2..66f0ec79c864 100644 --- a/arch/arm64/boot/dts/arm/juno-r2.dts +++ b/arch/arm64/boot/dts/arm/juno-r2.dts @@ -99,6 +99,7 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; }; A72_1: cpu@1 { @@ -116,6 +117,7 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <450>; }; A53_0: cpu@100 { @@ -133,6 +135,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_1: cpu@101 { @@ -150,6 +153,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_2: cpu@102 { @@ -167,6 +171,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A53_3: cpu@103 { @@ -184,6 +189,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <485>; + dynamic-power-coefficient = <140>; }; A72_L2: l2-cache0 { diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index 1fb5c5a0f32e..e3069e286256 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -98,6 +98,7 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; }; A57_1: cpu@1 { @@ -115,6 +116,7 @@ clocks = <&scpi_dvfs 0>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <1024>; + dynamic-power-coefficient = <530>; }; A53_0: cpu@100 { @@ -132,6 +134,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_1: 
cpu@101 { @@ -149,6 +152,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_2: cpu@102 { @@ -166,6 +170,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A53_3: cpu@103 { @@ -183,6 +188,7 @@ clocks = <&scpi_dvfs 1>; cpu-idle-states = <&CPU_SLEEP_0 &CLUSTER_SLEEP_0>; capacity-dmips-mhz = <578>; + dynamic-power-coefficient = <140>; }; A57_L2: l2-cache0 { diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index db8d364f8476..2fc45d5505a4 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -19,9 +19,13 @@ CONFIG_BLK_CGROUP=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y +CONFIG_CGROUPS=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_FREEZER=y CONFIG_USER_NS=y CONFIG_SCHED_AUTOGROUP=y CONFIG_BLK_DEV_INITRD=y @@ -101,13 +105,16 @@ CONFIG_XEN=y CONFIG_COMPAT=y CONFIG_HIBERNATION=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y +CONFIG_ENERGY_MODEL=y +CONFIG_SCHED_TUNE=y CONFIG_ARM_CPUIDLE=y CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=m +CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y CONFIG_CPUFREQ_DT=y CONFIG_ACPI_CPPC_CPUFREQ=m diff --git a/arch/arm64/configs/ranchu64_defconfig b/arch/arm64/configs/ranchu64_defconfig new file mode 100644 index 000000000000..3d2eb3275b1f --- /dev/null +++ b/arch/arm64/configs/ranchu64_defconfig @@ -0,0 +1,309 @@ +# CONFIG_LOCALVERSION_AUTO is not set +# CONFIG_SWAP is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=14 +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_ARCH_MMAP_RND_BITS=24 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_ARCH_VEXPRESS=y +CONFIG_NR_CPUS=4 +CONFIG_PREEMPT=y +CONFIG_KSM=y +CONFIG_SECCOMP=y +CONFIG_ARMV8_DEPRECATED=y +CONFIG_SWP_EMULATION=y +CONFIG_CP15_BARRIER_EMULATION=y +CONFIG_SETEND_EMULATION=y +CONFIG_CMDLINE="console=ttyAMA0" +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_COMPAT=y +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_INET_ESP=y +# CONFIG_INET_LRO is not set +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETFILTER=y 
+CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_RPFILTER=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_AH=y +CONFIG_IP6_NF_MATCH_EUI64=y +CONFIG_IP6_NF_MATCH_FRAG=y +CONFIG_IP6_NF_MATCH_OPTS=y +CONFIG_IP6_NF_MATCH_HL=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_MH=y +CONFIG_IP6_NF_MATCH_RT=y +CONFIG_IP6_NF_TARGET_HL=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_BRIDGE=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_SMC91X=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +# CONFIG_WLAN is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_INPUT_TABLET=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_SERPORT is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set 
+CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +CONFIG_BATTERY_GOLDFISH=y +# CONFIG_HWMON is not set +CONFIG_MEDIA_SUPPORT=y +CONFIG_FB=y +CONFIG_FB_GOLDFISH=y +CONFIG_FB_SIMPLE=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +# CONFIG_USB_SUPPORT is not set +CONFIG_RTC_CLASS=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_TIMED_GPIO=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SW_SYNC_USER=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_EXT2_FS=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_CUSE=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ISO8859_1=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_FS=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_PANIC_TIMEOUT=5 +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +# CONFIG_FTRACE is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_DEBUG_RODATA=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c index 17fac2889f56..c3d572af201c 100644 --- a/arch/arm64/crypto/sha1-ce-glue.c +++ b/arch/arm64/crypto/sha1-ce-glue.c @@ -29,6 +29,14 @@ struct sha1_ce_state { asmlinkage void sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src, int blocks); +#ifdef CONFIG_CFI_CLANG +static inline void __cfi_sha1_ce_transform(struct sha1_state *sst, + u8 const *src, int blocks) +{ + sha1_ce_transform((struct sha1_ce_state *)sst, src, blocks); +} +#define sha1_ce_transform __cfi_sha1_ce_transform +#endif const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count); const u32 sha1_ce_offsetof_finalize = offsetof(struct sha1_ce_state, finalize); diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index 261f5195cab7..db37282ca060 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ 
b/arch/arm64/crypto/sha2-ce-glue.c
@@ -29,6 +29,20 @@ struct sha256_ce_state {
 asmlinkage void sha2_ce_transform(struct sha256_ce_state *sst, u8 const *src,
 				  int blocks);
 
+#ifdef CONFIG_CFI_CLANG
+/*
+ * The sha256 base layer invokes the block function indirectly through a
+ * pointer typed for struct sha256_state. Clang's CFI verifies that an
+ * indirect call matches the callee's static type, so provide a wrapper
+ * with the expected prototype instead of casting the function pointer.
+ */
+static inline void __cfi_sha2_ce_transform(struct sha256_state *sst,
+					   u8 const *src, int blocks)
+{
+	sha2_ce_transform((struct sha256_ce_state *)sst, src, blocks);
+}
+#define sha2_ce_transform __cfi_sha2_ce_transform
+#endif
 
 const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
 					      sst.count);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 39ec0b8a689e..1b7aa97b9c9a 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -141,7 +141,7 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgdp)
+static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 49a0fee4f89b..8e0a96d71bd6 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -42,9 +42,15 @@ int pcibus_to_node(struct pci_bus *bus);
 /* Replace task scheduler's default frequency-invariant accounting */
 #define arch_scale_freq_capacity topology_get_freq_scale
 
+/* Replace task scheduler's default max-frequency-invariant accounting */
+#define arch_scale_max_freq_capacity topology_get_max_freq_scale
+
 /* Replace task scheduler's default cpu-invariant accounting */
 #define arch_scale_cpu_capacity topology_get_cpu_scale
 
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index e238b7932096..703ebe0de2f8 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -902,7 +902,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	return !has_cpuid_feature(entry, scope);
 }
 
-static void
+static void __nocfi
 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 {
 	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 2fabc2dc1966..d9028bba9d46 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -4,7 +4,11 @@
 #
 
 ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING \
-		$(DISABLE_STACKLEAK_PLUGIN)
+		$(DISABLE_STACKLEAK_PLUGIN) $(DISABLE_CFI)
+
+ifeq ($(cc-name),clang)
+ccflags-y += -fno-jump-tables
+endif
 
 KVM=../../../../virt/kvm
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0038a2d10a7a..466219296cd6 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -7,6 +7,8 @@ obj-$(CONFIG_KVM) += kvm/
 # Xen paravirtualization support
 obj-$(CONFIG_XEN) += xen/
 
+obj-$(CONFIG_ACRN) += acrn/
+
 # Hyper-V paravirtualization support
 obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 1a0be022f91d..dd007a2b85c9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -782,6 +782,8 @@ config QUEUED_LOCK_STAT
 	  behavior of paravirtualized queued spinlocks and report them
 	  on debugfs.
+source "arch/x86/acrn/Kconfig"
+
 source "arch/x86/xen/Kconfig"
 
 config KVM_GUEST
@@ -1491,6 +1493,14 @@ config X86_DIRECT_GBPAGES
 	  supports them), so don't confuse the user by printing
 	  that we have them enabled.
 
+config X86_CPA_STATISTICS
+	bool "Enable statistics for Change Page Attribute"
+	depends on DEBUG_FS
+	---help---
+	  Expose statistics about the Change Page Attribute mechanism, which
+	  helps to determine the effectiveness of preserving large and huge
+	  page mappings when mapping protections are changed.
+
 config ARCH_HAS_MEM_ENCRYPT
 	def_bool y
diff --git a/arch/x86/acrn/Kconfig b/arch/x86/acrn/Kconfig
new file mode 100644
index 000000000000..ce0abc8cdcad
--- /dev/null
+++ b/arch/x86/acrn/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# This Kconfig describes ACRN options
+#
+
+config ACRN
+	bool "Enable services run on ACRN hypervisor"
+	depends on X86_64
+	depends on PARAVIRT
+	help
+	  This option is needed to run a Linux-based service OS on top of
+	  the ACRN hypervisor.
diff --git a/arch/x86/acrn/Makefile b/arch/x86/acrn/Makefile
new file mode 100644
index 000000000000..d961d8c5ee93
--- /dev/null
+++ b/arch/x86/acrn/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ACRN) += acrn.o
diff --git a/arch/x86/acrn/acrn.c b/arch/x86/acrn/acrn.c
new file mode 100644
index 000000000000..eea9db84ca89
--- /dev/null
+++ b/arch/x86/acrn/acrn.c
@@ -0,0 +1,75 @@
+/*
+ * ACRN hypervisor support
+ *
+ * Copyright (C) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Jason Chen CJ
+ *
+ */
+#include <asm/hypervisor.h>
+#include <asm/paravirt.h>
+
+static uint32_t __init acrn_detect(void)
+{
+	return hypervisor_cpuid_base("ACRNACRNACRN\0\0", 0);
+}
+
+static void __init acrn_init_platform(void)
+{
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_ACRN_VHM)
+	pv_irq_ops.write_msi = acrn_write_msi_msg;
+#endif
+}
+
+static void acrn_pin_vcpu(int cpu)
+{
+	/* do nothing here now */
+}
+
+static bool acrn_x2apic_available(void)
+{
+	/* do not support x2apic */
+	return false;
+}
+
+static void __init acrn_init_mem_mapping(void)
+{
+	/* do nothing here now */
+}
+
+const struct hypervisor_x86 x86_hyper_acrn = {
+	.name			= "ACRN",
+	.detect			= acrn_detect,
+	.type			= X86_HYPER_ACRN,
+	.init.init_platform	= acrn_init_platform,
+	.runtime.pin_vcpu	= acrn_pin_vcpu,
+	.init.x2apic_available	= acrn_x2apic_available,
+	.init.init_mem_mapping	= acrn_init_mem_mapping,
+};
+EXPORT_SYMBOL(x86_hyper_acrn);
diff --git a/arch/x86/configs/abl_diffconfig b/arch/x86/configs/abl_diffconfig
new file mode 100644
index 000000000000..899be931801b
--- /dev/null
+++ b/arch/x86/configs/abl_diffconfig
@@ -0,0 +1 @@
+CONFIG_ABL_BOOTLOADER_CONTROL=y
diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig
new file mode 100644
index 000000000000..4e9dc7d49cbe
--- /dev/null
+++ b/arch/x86/configs/i386_ranchu_defconfig
@@ -0,0 +1,421 @@
+# CONFIG_64BIT is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_ARCH_MMAP_RND_BITS=16
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SMP=y
+CONFIG_X86_BIGSMP=y
+CONFIG_MCORE2=y
+CONFIG_X86_GENERIC=y
+CONFIG_HPET_TIMER=y
+CONFIG_NR_CPUS=512
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_KSM=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y 
+CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y 
+CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=2048 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_AES_586=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/configs/sbl_diffconfig b/arch/x86/configs/sbl_diffconfig new file mode 100644 index 000000000000..efbd1ae0b5e1 --- /dev/null +++ b/arch/x86/configs/sbl_diffconfig @@ -0,0 +1 @@ +CONFIG_SBL_BOOTLOADER_CONTROL=y diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig new file mode 100644 index 000000000000..db63c91b57b7 --- /dev/null +++ b/arch/x86/configs/x86_64_cuttlefish_defconfig @@ -0,0 +1,464 @@ +CONFIG_POSIX_MQUEUE=y +# CONFIG_FHANDLE is not set +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_MEMCG_SWAP=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_BPF=y +CONFIG_NAMESPACES=y +CONFIG_BLK_DEV_INITRD=y +# CONFIG_RD_LZ4 is not set +CONFIG_KALLSYMS_ALL=y +# CONFIG_PCSPKR_PLATFORM is not set +CONFIG_BPF_SYSCALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_REFCOUNT_FULL=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_SMP=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_MCORE2=y +CONFIG_PROCESSOR_SELECT=y +# CONFIG_CPU_SUP_CENTAUR is not set +CONFIG_NR_CPUS=8 +CONFIG_PREEMPT=y +# CONFIG_MICROCODE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_ZSMALLOC=y +# CONFIG_MTRR is not set +CONFIG_HZ_100=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_PHYSICAL_START=0x200000 +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_CMDLINE_BOOL=y +CONFIG_CMDLINE="console=ttyS0 reboot=p" +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_ACPI_PROCFS_POWER=y +# CONFIG_ACPI_FAN is not set +# CONFIG_ACPI_THERMAL is not set +# CONFIG_X86_PM_TIMER is not set +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_MSI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y 
+CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_VTI=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_SOCKET_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_NAT=y +CONFIG_IP_NF_TARGET_MASQUERADE=y +CONFIG_IP_NF_TARGET_NETMAP=y +CONFIG_IP_NF_TARGET_REDIRECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_NF_SOCKET_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_MATCH_IPV6HEADER=y +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_RFKILL=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y +CONFIG_DEBUG_DEVRES=y +CONFIG_OF=y +CONFIG_OF_UNITTEST=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_ZRAM=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y 
+CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_UID_SYS_STATS=y +CONFIG_MEMORY_STATE_TIME=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_VIRTIO=y +CONFIG_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE=1 +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_ANDROID_VERITY=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +# CONFIG_ETHERNET is not set +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +# CONFIG_USB_NET_CDCETHER is not set +# CONFIG_USB_NET_CDC_NCM is not set +# CONFIG_USB_NET_NET1080 is not set +# CONFIG_USB_NET_CDC_SUBSET is not set +# CONFIG_USB_NET_ZAURUS is not set +# CONFIG_WLAN_VENDOR_ADMTEK is not set +# CONFIG_WLAN_VENDOR_ATH is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +# CONFIG_WLAN_VENDOR_BROADCOM is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +# CONFIG_WLAN_VENDOR_INTEL is not set +# CONFIG_WLAN_VENDOR_INTERSIL is not set +# CONFIG_WLAN_VENDOR_MARVELL is not set +# CONFIG_WLAN_VENDOR_MEDIATEK is not set +# CONFIG_WLAN_VENDOR_RALINK is not set +# CONFIG_WLAN_VENDOR_REALTEK is not set +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +# CONFIG_WLAN_VENDOR_ZYDAS is not set +# CONFIG_WLAN_VENDOR_QUANTENNA is not set +CONFIG_MAC80211_HWSIM=y +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_KEYCHORD=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVMEM is not set +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_EXAR is not set +CONFIG_SERIAL_8250_NR_UARTS=48 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_INTEL is not set +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set +CONFIG_HW_RANDOM_VIRTIO=y +CONFIG_HPET=y +# CONFIG_HPET_MMAP_DEFAULT is not set +# CONFIG_DEVPORT is not set +# CONFIG_ACPI_I2C_OPREGION is not set +# CONFIG_I2C_COMPAT is not set +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_PTP_1588_CLOCK=y +# CONFIG_HWMON is not set +# CONFIG_X86_PKG_TEMP_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_SOFT_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +# CONFIG_VGA_ARB is not set +CONFIG_DRM=y +# CONFIG_DRM_FBDEV_EMULATION is not set +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y 
+CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_GADGET=y +CONFIG_USB_DUMMY_HCD=y +CONFIG_USB_CONFIGFS=y +CONFIG_USB_CONFIGFS_F_FS=y +CONFIG_USB_CONFIGFS_F_ACC=y +CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y +CONFIG_USB_CONFIGFS_UEVENT=y +CONFIG_USB_CONFIGFS_F_MIDI=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_SW_SYNC=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_VSOC=y +CONFIG_ION=y +# CONFIG_X86_PLATFORM_DEVICES is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_ENCRYPTION=y +CONFIG_F2FS_FS=y +CONFIG_F2FS_FS_SECURITY=y +CONFIG_F2FS_FS_ENCRYPTION=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_SDCARD_FS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_STACK_USAGE=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y +CONFIG_HARDLOCKUP_DETECTOR=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_IO_DELAY_NONE=y +CONFIG_DEBUG_BOOT_PARAMS=y +CONFIG_OPTIMIZE_INLINING=y +CONFIG_UNWINDER_FRAME_POINTER=y +CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_PATH=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1 +CONFIG_CRYPTO_RSA=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_LZ4=y +CONFIG_CRYPTO_ZSTD=y +CONFIG_CRYPTO_DEV_VIRTIO=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="verity_dev_keys.x509" diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig new file mode 100644 index 000000000000..81202e3f6ae8 --- /dev/null +++ 
b/arch/x86/configs/x86_64_ranchu_defconfig @@ -0,0 +1,416 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_CGROUPS=y +CONFIG_CGROUP_DEBUG=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_SCHED=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_ARCH_MMAP_RND_BITS=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16 +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +CONFIG_MAC_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_SGI_PARTITION=y +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_SMP=y +CONFIG_MCORE2=y +CONFIG_MAXSMP=y +CONFIG_PREEMPT=y +# CONFIG_X86_MCE is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_KSM=y +CONFIG_CMA=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_HZ_100=y +CONFIG_PHYSICAL_START=0x100000 +CONFIG_PM_AUTOSLEEP=y +CONFIG_PM_WAKELOCKS=y +CONFIG_PM_WAKELOCKS_LIMIT=0 +# CONFIG_PM_WAKELOCKS_GC is not set +CONFIG_PM_DEBUG=y +CONFIG_CPU_FREQ=y +# CONFIG_CPU_FREQ_STAT is not set +CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCIEPORTBUS=y +# CONFIG_PCIEASPM is not set +CONFIG_PCCARD=y +CONFIG_YENTA=y +CONFIG_HOTPLUG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_IA32_EMULATION=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_ESP=y +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +CONFIG_INET_DIAG_DESTROY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=y +CONFIG_INET6_ESP=y +CONFIG_INET6_IPCOMP=y +CONFIG_IPV6_MIP6=y +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_H323=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y +CONFIG_NF_CONNTRACK_TFTP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_MARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y 
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QTAGUID=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_QUOTA2=y +CONFIG_NETFILTER_XT_MATCH_SOCKET=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_NETFILTER_XT_MATCH_STATISTIC=y +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +CONFIG_IP_NF_MANGLE=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_SECURITY=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y +CONFIG_NF_CONNTRACK_IPV6=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=y +CONFIG_NET_CLS_U32=y +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_U32=y +CONFIG_NET_CLS_ACT=y +CONFIG_CFG80211=y +CONFIG_MAC80211=y +CONFIG_MAC80211_LEDS=y +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DMA_CMA=y +CONFIG_CONNECTOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_SD=y +CONFIG_BLK_DEV_SR=y +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_ISCSI_ATTRS=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_PATA_MPIIX=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_DEBUG=y +CONFIG_DM_CRYPT=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_DM_UEVENT=y +CONFIG_DM_VERITY=y +CONFIG_DM_VERITY_FEC=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_TUN=y +CONFIG_VIRTIO_NET=y +CONFIG_BNX2=y +CONFIG_TIGON3=y +CONFIG_NET_TULIP=y +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_SKY2=y +CONFIG_NE2K_PCI=y +CONFIG_FORCEDETH=y +CONFIG_8139TOO=y +# CONFIG_8139TOO_PIO is not set +CONFIG_R8169=y +CONFIG_FDDI=y +CONFIG_PPP=y +CONFIG_PPP_BSDCOMP=y +CONFIG_PPP_DEFLATE=y +CONFIG_PPP_MPPE=y +CONFIG_USB_USBNET=y +CONFIG_INPUT_POLLDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_KEYRESET=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_KEYBOARD_GOLDFISH_EVENTS=y +# CONFIG_INPUT_MOUSE is not set +CONFIG_INPUT_JOYSTICK=y +CONFIG_JOYSTICK_XPAD=y +CONFIG_JOYSTICK_XPAD_FF=y +CONFIG_JOYSTICK_XPAD_LEDS=y +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=y +CONFIG_TABLET_USB_AIPTEK=y +CONFIG_TABLET_USB_GTCO=y +CONFIG_TABLET_USB_HANWANG=y +CONFIG_TABLET_USB_KBTAB=y +CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_INPUT_MISC=y +CONFIG_INPUT_UINPUT=y +CONFIG_INPUT_GPIO=y +# CONFIG_SERIO is not set +# CONFIG_VT is not set +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_DEVMEM is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_NVRAM=y +CONFIG_I2C_I801=y +CONFIG_BATTERY_GOLDFISH=y +CONFIG_WATCHDOG=y +CONFIG_MEDIA_SUPPORT=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_FB_EFI=y +CONFIG_FB_GOLDFISH=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +# CONFIG_LCD_CLASS_DEVICE is not set 
+CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_HIDRAW=y +CONFIG_UHID=y +CONFIG_HID_A4TECH=y +CONFIG_HID_ACRUX=y +CONFIG_HID_ACRUX_FF=y +CONFIG_HID_APPLE=y +CONFIG_HID_BELKIN=y +CONFIG_HID_CHERRY=y +CONFIG_HID_CHICONY=y +CONFIG_HID_PRODIKEYS=y +CONFIG_HID_CYPRESS=y +CONFIG_HID_DRAGONRISE=y +CONFIG_DRAGONRISE_FF=y +CONFIG_HID_EMS_FF=y +CONFIG_HID_ELECOM=y +CONFIG_HID_EZKEY=y +CONFIG_HID_HOLTEK=y +CONFIG_HID_KEYTOUCH=y +CONFIG_HID_KYE=y +CONFIG_HID_UCLOGIC=y +CONFIG_HID_WALTOP=y +CONFIG_HID_GYRATION=y +CONFIG_HID_TWINHAN=y +CONFIG_HID_KENSINGTON=y +CONFIG_HID_LCPOWER=y +CONFIG_HID_LOGITECH=y +CONFIG_HID_LOGITECH_DJ=y +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y +CONFIG_HID_MICROSOFT=y +CONFIG_HID_MONTEREY=y +CONFIG_HID_MULTITOUCH=y +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_PICOLCD=y +CONFIG_HID_PRIMAX=y +CONFIG_HID_ROCCAT=y +CONFIG_HID_SAITEK=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SPEEDLINK=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_GREENASIA=y +CONFIG_GREENASIA_FF=y +CONFIG_HID_SMARTJOYPLUS=y +CONFIG_SMARTJOYPLUS_FF=y +CONFIG_HID_TIVO=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_THRUSTMASTER=y +CONFIG_HID_WACOM=y +CONFIG_HID_WIIMOTE=y +CONFIG_HID_ZEROPLUS=y +CONFIG_HID_ZYDACRON=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_PRINTER=y +CONFIG_USB_STORAGE=y +CONFIG_USB_OTG_WAKELOCK=y +CONFIG_EDAC=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRTIO_PCI=y +CONFIG_STAGING=y +CONFIG_ASHMEM=y +CONFIG_ANDROID_LOW_MEMORY_KILLER=y +CONFIG_SYNC=y +CONFIG_SW_SYNC=y +CONFIG_SYNC_FILE=y +CONFIG_ION=y +CONFIG_GOLDFISH_AUDIO=y +CONFIG_SND_HDA_INTEL=y +CONFIG_GOLDFISH=y +CONFIG_GOLDFISH_PIPE=y +CONFIG_GOLDFISH_SYNC=y +CONFIG_ANDROID=y +CONFIG_ANDROID_BINDER_IPC=y +CONFIG_ISCSI_IBFT_FIND=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_FUSE_FS=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +# CONFIG_NETWORK_FILESYSTEMS is not set +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_PRINTK_TIME=y +CONFIG_DEBUG_INFO=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_UNUSED_SYMBOLS is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_PANIC_TIMEOUT=5 +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_SCHED_TRACER=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_KEYS=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_CRYPTO_TWOFISH=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_PKCS7_TEST_KEY=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_CRC_T10DIF=y diff --git a/arch/x86/include/asm/early_intel_th.h b/arch/x86/include/asm/early_intel_th.h new file mode 100644 index 000000000000..bf93609995c8 --- /dev/null +++ b/arch/x86/include/asm/early_intel_th.h @@ -0,0 +1,20 @@ +/* + * early_intel_th.h: Intel Trace Hub early printk 
+ * + * (C) Copyright 2015 Intel Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _ASM_X86_EARLY_INTEL_TH_H +#define _ASM_X86_EARLY_INTEL_TH_H + +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK +extern struct console intel_th_early_console; +extern void early_intel_th_init(const char *); +#endif /* CONFIG_INTEL_TH_EARLY_PRINTK */ + +#endif /* _ASM_X86_EARLY_INTEL_TH_H */ + diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h index 8c5aaba6633f..50a30f6c668b 100644 --- a/arch/x86/include/asm/hypervisor.h +++ b/arch/x86/include/asm/hypervisor.h @@ -29,6 +29,7 @@ enum x86_hypervisor_type { X86_HYPER_XEN_HVM, X86_HYPER_KVM, X86_HYPER_JAILHOUSE, + X86_HYPER_ACRN, }; #ifdef CONFIG_HYPERVISOR_GUEST diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index e375d4266b53..f5af75bc2d45 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -807,6 +807,16 @@ static inline notrace unsigned long arch_local_irq_save(void) return f; } +static inline void write_msi_msg_paravirt(struct msi_desc *entry, + struct msi_msg *msg) +{ + if ((pv_irq_ops.write_msi == NULL) || + (pv_irq_ops.write_msi == paravirt_nop)) + return; + + return PVOP_VCALL2(pv_irq_ops.write_msi, entry, msg); +} + /* Make sure as little as possible of this mess escapes. */ #undef PARAVIRT_CALL diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 4b75acc23b30..06e01d87d76a 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -56,6 +56,9 @@ struct cpumask; struct flush_tlb_info; struct mmu_gather; +struct msi_desc; +struct msi_msg; + /* * Wrapper type for pointers to code which uses the non-standard * calling convention. See PV_CALL_SAVE_REGS_THUNK below. 
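The write_msi_msg_paravirt() wrapper above, together with the write_msi callback that the next hunk adds to struct pv_irq_ops, lets a hypervisor guest interpose on MSI programming without touching individual device drivers. The following is a minimal sketch of how a guest platform could claim the hook; acrn_guest_write_msi() and the hv_forward_msi() hypercall wrapper are illustrative assumptions, not functions from this patch:

#include <linux/init.h>
#include <linux/msi.h>
#include <asm/paravirt.h>

/*
 * Hypothetical guest-side override: rather than writing the MSI
 * address/data pair straight into the device, hand it to the
 * hypervisor so it can remap the interrupt on the guest's behalf.
 */
static void acrn_guest_write_msi(struct msi_desc *entry, struct msi_msg *msg)
{
	hv_forward_msi(entry, msg);	/* assumed hypercall wrapper */
}

static void __init acrn_guest_platform_setup(void)
{
	pv_irq_ops.write_msi = acrn_guest_write_msi;
}

Callers reach whatever is installed through write_msi_msg_paravirt(), which degrades to a no-op while the pointer is NULL or paravirt_nop; with CONFIG_PCI_MSI the default installed later in this patch is native_write_msi_msg, so the plain register write stays the baseline behaviour.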
@@ -196,6 +199,7 @@ struct pv_irq_ops { void (*safe_halt)(void); void (*halt)(void); + void (*write_msi)(struct msi_desc *entry, struct msi_msg *msg); } __no_randomize_layout; struct pv_mmu_ops { diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index ae13bc974416..9490cb15a275 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -4,7 +4,7 @@ #include -#define COMMAND_LINE_SIZE 2048 +#define COMMAND_LINE_SIZE 4096 #include #include diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index e84c9eb4e5b4..9ba0ac0c8c1f 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -29,6 +29,15 @@ static struct apic apic_flat; struct apic *apic __ro_after_init = &apic_flat; EXPORT_SYMBOL_GPL(apic); +int xapic_phys = 0; + +static int set_xapic_phys_mode(char *arg) +{ + xapic_phys = 1; + return 0; +} +early_param("xapic_phys", set_xapic_phys_mode); + static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 1; @@ -236,6 +245,9 @@ static void physflat_send_IPI_all(int vector) static int physflat_probe(void) { + if (xapic_phys == 1) + return 1; + if (apic == &apic_physflat || num_possible_cpus() > 8 || jailhouse_paravirt()) return 1; diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 479ca4728de0..5a6f072e6748 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c @@ -32,6 +32,7 @@ extern const struct hypervisor_x86 x86_hyper_xen_pv; extern const struct hypervisor_x86 x86_hyper_xen_hvm; extern const struct hypervisor_x86 x86_hyper_kvm; extern const struct hypervisor_x86 x86_hyper_jailhouse; +extern const struct hypervisor_x86 x86_hyper_acrn; static const __initconst struct hypervisor_x86 * const hypervisors[] = { @@ -49,6 +50,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] = #ifdef CONFIG_JAILHOUSE_GUEST &x86_hyper_jailhouse, #endif +#ifdef CONFIG_ACRN + &x86_hyper_acrn, +#endif }; enum x86_hypervisor_type x86_hyper_type; diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c index 5e801c8c8ce7..f65d32c7040c 100644 --- a/arch/x86/kernel/early_printk.c +++ b/arch/x86/kernel/early_printk.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Simple VGA output */ #define VGABASE (__ISA_IO_base + 0xb8000) @@ -387,6 +388,12 @@ static int __init setup_early_printk(char *buf) if (!strncmp(buf, "xdbc", 4)) early_xdbc_parse_parameter(buf + 4); #endif +#ifdef CONFIG_INTEL_TH_EARLY_PRINTK + if (!strncmp(buf, "intelth", 7)) { + early_intel_th_init(buf + 7); + early_console_register(&intel_th_early_console, keep); + } +#endif buf++; } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 8dc69d82567e..eaa8917dab73 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -336,6 +337,9 @@ __visible struct pv_irq_ops pv_irq_ops = { .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable), .safe_halt = native_safe_halt, .halt = native_halt, +#ifdef CONFIG_PCI_MSI + .write_msi = native_write_msi_msg, +#endif }; __visible struct pv_cpu_ops pv_cpu_ops = { diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 04adc8d60aed..9bbeec53634c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -117,6 +117,19 @@ static atomic_t stopping_cpu = ATOMIC_INIT(-1); static bool smp_no_nmi_ipi = false; +static DEFINE_PER_CPU(struct pt_regs, cpu_regs); 
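Several of the hunks above are driven by new kernel command-line switches: "xapic_phys" makes physflat_probe() select the physical-destination flat APIC driver regardless of CPU count, and an "earlyprintk=intelth" clause passes everything after the "intelth" token to early_intel_th_init() before registering the Trace Hub early console. A hedged usage sketch follows; the exact sub-options accepted after "intelth" live elsewhere in this series, so treat the spelling as an assumption:

	earlyprintk=intelth,keep xapic_phys

"keep" is the pre-existing early_printk keyword that keeps the early console registered after the real console comes up, and the COMMAND_LINE_SIZE bump from 2048 to 4096 above leaves headroom for long option strings like these.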
+ +/* Store regs of this CPU for RAM dump decoding help */ +static inline void store_regs(struct pt_regs *regs) +{ + struct pt_regs *print_regs; + print_regs = &get_cpu_var(cpu_regs); + crash_setup_regs(print_regs, regs); + + /* Flush CPU cache */ + wbinvd(); +} + /* * this function sends a 'reschedule' IPI to another CPU. * it goes straight through and wastes no time serializing @@ -163,6 +176,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) if (raw_smp_processor_id() == atomic_read(&stopping_cpu)) return NMI_HANDLED; + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); @@ -173,9 +187,10 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs) * this function calls the 'stop' function on all other CPUs in the system. */ -asmlinkage __visible void smp_reboot_interrupt(void) +__visible void smp_reboot_interrupt(struct pt_regs *regs) { ipi_entering_ack_irq(); + store_regs(regs); cpu_emergency_vmxoff(); stop_this_cpu(NULL); irq_exit(); @@ -247,6 +262,7 @@ static void native_stop_other_cpus(int wait) } finish: + store_regs(NULL); local_irq_save(flags); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index 979e0a02cbe1..142c7d9f89cc 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -923,34 +923,19 @@ static void mark_nxdata_nx(void) void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); - unsigned long size = PFN_ALIGN(_etext) - start; + unsigned long size = (unsigned long)__end_rodata - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel text: %luk\n", + pr_info("Write protecting kernel text and read-only data: %luk\n", size >> 10); kernel_set_to_readonly = 1; #ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", - start, start+size); - set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); - - printk(KERN_INFO "Testing CPA: write protecting again\n"); - set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); -#endif - - start += size; - size = (unsigned long)__end_rodata - start; - set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", - size >> 10); - -#ifdef CONFIG_CPA_DEBUG - printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); + pr_info("Testing CPA: Reverting %lx-%lx\n", start, start + size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); - printk(KERN_INFO "Testing CPA: write protecting again\n"); + pr_info("Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); #endif mark_nxdata_nx(); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 51a5a69ecac9..4e55ded01be5 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -37,11 +37,20 @@ struct cpa_data { unsigned long numpages; int flags; unsigned long pfn; - unsigned force_split : 1; + unsigned force_split : 1, + force_static_prot : 1; int curpage; struct page **pages; }; +enum cpa_warn { + CPA_CONFLICT, + CPA_PROTECT, + CPA_DETECT, +}; + +static const int cpa_warn_level = CPA_PROTECT; + /* * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings) * using cpa_lock. 
So that we don't allow any other cpu, with stale large tlb @@ -94,6 +103,87 @@ void arch_report_meminfo(struct seq_file *m) static inline void split_page_count(int level) { } #endif +#ifdef CONFIG_X86_CPA_STATISTICS + +static unsigned long cpa_1g_checked; +static unsigned long cpa_1g_sameprot; +static unsigned long cpa_1g_preserved; +static unsigned long cpa_2m_checked; +static unsigned long cpa_2m_sameprot; +static unsigned long cpa_2m_preserved; +static unsigned long cpa_4k_install; + +static inline void cpa_inc_1g_checked(void) +{ + cpa_1g_checked++; +} + +static inline void cpa_inc_2m_checked(void) +{ + cpa_2m_checked++; +} + +static inline void cpa_inc_4k_install(void) +{ + cpa_4k_install++; +} + +static inline void cpa_inc_lp_sameprot(int level) +{ + if (level == PG_LEVEL_1G) + cpa_1g_sameprot++; + else + cpa_2m_sameprot++; +} + +static inline void cpa_inc_lp_preserved(int level) +{ + if (level == PG_LEVEL_1G) + cpa_1g_preserved++; + else + cpa_2m_preserved++; +} + +static int cpastats_show(struct seq_file *m, void *p) +{ + seq_printf(m, "1G pages checked: %16lu\n", cpa_1g_checked); + seq_printf(m, "1G pages sameprot: %16lu\n", cpa_1g_sameprot); + seq_printf(m, "1G pages preserved: %16lu\n", cpa_1g_preserved); + seq_printf(m, "2M pages checked: %16lu\n", cpa_2m_checked); + seq_printf(m, "2M pages sameprot: %16lu\n", cpa_2m_sameprot); + seq_printf(m, "2M pages preserved: %16lu\n", cpa_2m_preserved); + seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install); + return 0; +} + +static int cpastats_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpastats_show, NULL); +} + +static const struct file_operations cpastats_fops = { + .open = cpastats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpa_stats_init(void) +{ + debugfs_create_file("cpa_stats", S_IRUSR, arch_debugfs_dir, NULL, + &cpastats_fops); + return 0; +} +late_initcall(cpa_stats_init); +#else +static inline void cpa_inc_1g_checked(void) { } +static inline void cpa_inc_2m_checked(void) { } +static inline void cpa_inc_4k_install(void) { } +static inline void cpa_inc_lp_sameprot(int level) { } +static inline void cpa_inc_lp_preserved(int level) { } +#endif + + static inline int within(unsigned long addr, unsigned long start, unsigned long end) { @@ -286,84 +376,179 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, } } -/* - * Certain areas of memory on x86 require very specific protection flags, - * for example the BIOS area or kernel text. Callers don't always get this - * right (again, ioremap() on BIOS memory is not uncommon) so this function - * checks and fixes these known static required protection bits. - */ -static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, - unsigned long pfn) +static bool overlaps(unsigned long r1_start, unsigned long r1_end, + unsigned long r2_start, unsigned long r2_end) { - pgprot_t forbidden = __pgprot(0); + return (r1_start <= r2_end && r1_end >= r2_start) || + (r2_start <= r1_end && r2_end >= r1_start); +} - /* - * The BIOS area between 640k and 1Mb needs to be executable for - * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. - */ #ifdef CONFIG_PCI_BIOS - if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_NX; +/* + * The BIOS area between 640k and 1Mb needs to be executable for PCI BIOS + * based config access (CONFIG_PCI_GOBIOS) support. 
+ */ +#define BIOS_PFN PFN_DOWN(BIOS_BEGIN) +#define BIOS_PFN_END PFN_DOWN(BIOS_END - 1) + +static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn) +{ + if (pcibios_enabled && overlaps(spfn, epfn, BIOS_PFN, BIOS_PFN_END)) + return _PAGE_NX; + return 0; +} +#else +static pgprotval_t protect_pci_bios(unsigned long spfn, unsigned long epfn) +{ + return 0; +} #endif - /* - * The kernel text needs to be executable for obvious reasons - * Does not cover __inittext since that is gone later on. On - * 64bit we do not enforce !NX on the low mapping - */ - if (within(address, (unsigned long)_text, (unsigned long)_etext)) - pgprot_val(forbidden) |= _PAGE_NX; +/* + * The .rodata section needs to be read-only. Using the pfn catches all + * aliases. This also includes __ro_after_init, so do not enforce until + * kernel_set_to_readonly is true. + */ +static pgprotval_t protect_rodata(unsigned long spfn, unsigned long epfn) +{ + unsigned long epfn_ro, spfn_ro = PFN_DOWN(__pa_symbol(__start_rodata)); /* - * The .rodata section needs to be read-only. Using the pfn - * catches all aliases. This also includes __ro_after_init, - * so do not enforce until kernel_set_to_readonly is true. + * Note: __end_rodata is at page aligned and not inclusive, so + * subtract 1 to get the last enforced PFN in the rodata area. */ - if (kernel_set_to_readonly && - within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT, - __pa_symbol(__end_rodata) >> PAGE_SHIFT)) - pgprot_val(forbidden) |= _PAGE_RW; + epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1; + + if (kernel_set_to_readonly && overlaps(spfn, epfn, spfn_ro, epfn_ro)) + return _PAGE_RW; + return 0; +} + +/* + * Protect kernel text against becoming non executable by forbidding + * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext) + * out of which the kernel actually executes. Do not protect the low + * mapping. + * + * This does not cover __inittext since that is gone after boot. + */ +static pgprotval_t protect_kernel_text(unsigned long start, unsigned long end) +{ + unsigned long t_end = (unsigned long)_etext - 1; + unsigned long t_start = (unsigned long)_text; + + if (overlaps(start, end, t_start, t_end)) + return _PAGE_NX; + return 0; +} #if defined(CONFIG_X86_64) +/* + * Once the kernel maps the text as RO (kernel_set_to_readonly is set), + * kernel text mappings for the large page aligned text, rodata sections + * will be always read-only. For the kernel identity mappings covering the + * holes caused by this alignment can be anything that user asks. + * + * This will preserve the large page mappings for kernel text/data at no + * extra cost. + */ +static pgprotval_t protect_kernel_text_ro(unsigned long start, + unsigned long end) +{ + unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1; + unsigned long t_start = (unsigned long)_text; + unsigned int level; + + if (!kernel_set_to_readonly || !overlaps(start, end, t_start, t_end)) + return 0; /* - * Once the kernel maps the text as RO (kernel_set_to_readonly is set), - * kernel text mappings for the large page aligned text, rodata sections - * will be always read-only. For the kernel identity mappings covering - * the holes caused by this alignment can be anything that user asks. + * Don't enforce the !RW mapping for the kernel text mapping, if + * the current mapping is already using small page mapping. No + * need to work hard to preserve large page mappings in this case. * - * This will preserve the large page mappings for kernel text/data - * at no extra cost. 
+ * This also fixes the Linux Xen paravirt guest boot failure caused + * by unexpected read-only mappings for kernel identity + * mappings. In this paravirt guest case, the kernel text mapping + * and the kernel identity mapping share the same page-table pages, + * so the protections for kernel text and identity mappings have to + * be the same. */ - if (kernel_set_to_readonly && - within(address, (unsigned long)_text, - (unsigned long)__end_rodata_hpage_align)) { - unsigned int level; - - /* - * Don't enforce the !RW mapping for the kernel text mapping, - * if the current mapping is already using small page mapping. - * No need to work hard to preserve large page mappings in this - * case. - * - * This also fixes the Linux Xen paravirt guest boot failure - * (because of unexpected read-only mappings for kernel identity - * mappings). In this paravirt guest case, the kernel text - * mapping and the kernel identity mapping share the same - * page-table pages. Thus we can't really use different - * protections for the kernel text and identity mappings. Also, - * these shared mappings are made of small page mappings. - * Thus this don't enforce !RW mapping for small page kernel - * text mapping logic will help Linux Xen parvirt guest boot - * as well. - */ - if (lookup_address(address, &level) && (level != PG_LEVEL_4K)) - pgprot_val(forbidden) |= _PAGE_RW; - } + if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) + return _PAGE_RW; + return 0; +} +#else +static pgprotval_t protect_kernel_text_ro(unsigned long start, + unsigned long end) +{ + return 0; +} #endif - prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); +static inline bool conflicts(pgprot_t prot, pgprotval_t val) +{ + return (pgprot_val(prot) & ~val) != pgprot_val(prot); +} - return prot; +static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val, + unsigned long start, unsigned long end, + unsigned long pfn, const char *txt) +{ + static const char *lvltxt[] = { + [CPA_CONFLICT] = "conflict", + [CPA_PROTECT] = "protect", + [CPA_DETECT] = "detect", + }; + + if (warnlvl > cpa_warn_level || !conflicts(prot, val)) + return; + + pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n", + lvltxt[warnlvl], txt, start, end, pfn, (unsigned long long)pgprot_val(prot), + (unsigned long long)val); +} + +/* + * Certain areas of memory on x86 require very specific protection flags, + * for example the BIOS area or kernel text. Callers don't always get this + * right (again, ioremap() on BIOS memory is not uncommon) so this function + * checks and fixes these known static required protection bits. + */ +static inline pgprot_t static_protections(pgprot_t prot, unsigned long start, + unsigned long pfn, unsigned long npg, + int warnlvl) +{ + pgprotval_t forbidden, res; + unsigned long end; + + /* + * There is no point in checking RW/NX conflicts when the requested + * mapping is setting the page !PRESENT. 
+ */ + if (!(pgprot_val(prot) & _PAGE_PRESENT)) + return prot; + + /* Operate on the virtual address */ + end = start + npg * PAGE_SIZE - 1; + + res = protect_kernel_text(start, end); + check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX"); + forbidden = res; + + res = protect_kernel_text_ro(start, end); + check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO"); + forbidden |= res; + + /* Check the PFN directly */ + res = protect_pci_bios(pfn, pfn + npg - 1); + check_conflict(warnlvl, prot, res, start, end, pfn, "PCIBIOS NX"); + forbidden |= res; + + res = protect_rodata(pfn, pfn + npg - 1); + check_conflict(warnlvl, prot, res, start, end, pfn, "Rodata RO"); + forbidden |= res; + + return __pgprot(pgprot_val(prot) & ~forbidden); } /* @@ -421,18 +606,18 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address, */ pte_t *lookup_address(unsigned long address, unsigned int *level) { - return lookup_address_in_pgd(pgd_offset_k(address), address, level); + return lookup_address_in_pgd(pgd_offset_k(address), address, level); } EXPORT_SYMBOL_GPL(lookup_address); static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, unsigned int *level) { - if (cpa->pgd) + if (cpa->pgd) return lookup_address_in_pgd(cpa->pgd + pgd_index(address), address, level); - return lookup_address(address, level); + return lookup_address(address, level); } /* @@ -549,40 +734,35 @@ static pgprot_t pgprot_clear_protnone_bits(pgprot_t prot) return prot; } -static int -try_preserve_large_page(pte_t *kpte, unsigned long address, - struct cpa_data *cpa) +static int __should_split_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) { - unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn, old_pfn; + unsigned long numpages, pmask, psize, lpaddr, pfn, old_pfn; + pgprot_t old_prot, new_prot, req_prot, chk_prot; pte_t new_pte, old_pte, *tmp; - pgprot_t old_prot, new_prot, req_prot; - int i, do_split = 1; enum pg_level level; - if (cpa->force_split) - return 1; - - spin_lock(&pgd_lock); /* * Check for races, another CPU might have split this page * up already: */ tmp = _lookup_address_cpa(cpa, address, &level); if (tmp != kpte) - goto out_unlock; + return 1; switch (level) { case PG_LEVEL_2M: old_prot = pmd_pgprot(*(pmd_t *)kpte); old_pfn = pmd_pfn(*(pmd_t *)kpte); + cpa_inc_2m_checked(); break; case PG_LEVEL_1G: old_prot = pud_pgprot(*(pud_t *)kpte); old_pfn = pud_pfn(*(pud_t *)kpte); + cpa_inc_1g_checked(); break; default: - do_split = -EINVAL; - goto out_unlock; + return -EINVAL; } psize = page_level_size(level); @@ -592,8 +772,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, * Calculate the number of pages, which fit into this large * page starting at address: */ - nextpage_addr = (address + psize) & pmask; - numpages = (nextpage_addr - address) >> PAGE_SHIFT; + lpaddr = (address + psize) & pmask; + numpages = (lpaddr - address) >> PAGE_SHIFT; if (numpages < cpa->numpages) cpa->numpages = numpages; @@ -620,71 +800,142 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, pgprot_val(req_prot) |= _PAGE_PSE; /* - * old_pfn points to the large page base pfn. So we need - * to add the offset of the virtual address: + * old_pfn points to the large page base pfn. 
So we need to add the + * offset of the virtual address: */ pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT); cpa->pfn = pfn; - new_prot = static_protections(req_prot, address, pfn); + /* + * Calculate the large page base address and the number of 4K pages + * in the large page + */ + lpaddr = address & pmask; + numpages = psize >> PAGE_SHIFT; /* - * We need to check the full range, whether - * static_protection() requires a different pgprot for one of - * the pages in the range we try to preserve: + * Sanity check that the existing mapping is correct versus the static + * protections. static_protections() guards against !PRESENT, so no + * extra conditional required here. */ - addr = address & pmask; - pfn = old_pfn; - for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { - pgprot_t chk_prot = static_protections(req_prot, addr, pfn); + chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages, + CPA_CONFLICT); - if (pgprot_val(chk_prot) != pgprot_val(new_prot)) - goto out_unlock; + if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) { + /* + * Split the large page and tell the split code to + * enforce static protections. + */ + cpa->force_static_prot = 1; + return 1; } /* - * If there are no changes, return. maxpages has been updated - * above: + * Optimization: If the requested pgprot is the same as the current + * pgprot, then the large page can be preserved and no updates are + * required independent of alignment and length of the requested + * range. The above already established that the current pgprot is + * correct, which in consequence makes the requested pgprot correct + * as well if it is the same. The static protection scan below will + * not come to a different conclusion. */ - if (pgprot_val(new_prot) == pgprot_val(old_prot)) { - do_split = 0; - goto out_unlock; + if (pgprot_val(req_prot) == pgprot_val(old_prot)) { + cpa_inc_lp_sameprot(level); + return 0; } /* - * We need to change the attributes. Check, whether we can - * change the large page in one go. We request a split, when - * the address is not aligned and the number of pages is - * smaller than the number of pages in the large page. Note - * that we limited the number of possible pages already to - * the number of pages in the large page. + * If the requested range does not cover the full page, split it up */ - if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { - /* - * The address is aligned and the number of pages - * covers the full page. - */ - new_pte = pfn_pte(old_pfn, new_prot); - __set_pmd_pte(kpte, address, new_pte); - cpa->flags |= CPA_FLUSHTLB; - do_split = 0; - } + if (address != lpaddr || cpa->numpages != numpages) + return 1; + + /* + * Check whether the requested pgprot is conflicting with a static + * protection requirement in the large page. + */ + new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages, + CPA_DETECT); + + /* + * If there is a conflict, split the large page. + * + * There used to be a 4k wise evaluation trying really hard to + * preserve the large pages, but experimentation has shown, that this + * does not help at all. There might be corner cases which would + * preserve one large page occasionally, but it's really not worth the + * extra code and cycles for the common case. + */ + if (pgprot_val(req_prot) != pgprot_val(new_prot)) + return 1; + + /* All checks passed. Update the large page mapping. 
*/ + new_pte = pfn_pte(old_pfn, new_prot); + __set_pmd_pte(kpte, address, new_pte); + cpa->flags |= CPA_FLUSHTLB; + cpa_inc_lp_preserved(level); + return 0; +} + +static int should_split_large_page(pte_t *kpte, unsigned long address, + struct cpa_data *cpa) +{ + int do_split; + + if (cpa->force_split) + return 1; -out_unlock: + spin_lock(&pgd_lock); + do_split = __should_split_large_page(kpte, address, cpa); spin_unlock(&pgd_lock); return do_split; } +static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn, + pgprot_t ref_prot, unsigned long address, + unsigned long size) +{ + unsigned int npg = PFN_DOWN(size); + pgprot_t prot; + + /* + * If should_split_large_page() discovered an inconsistent mapping, + * remove the invalid protection in the split mapping. + */ + if (!cpa->force_static_prot) + goto set; + + prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT); + + if (pgprot_val(prot) == pgprot_val(ref_prot)) + goto set; + + /* + * If this is splitting a PMD, fix it up. PUD splits cannot be + * fixed trivially as that would require to rescan the newly + * installed PMD mappings after returning from split_large_page() + * so an eventual further split can allocate the necessary PTE + * pages. Warn for now and revisit it in case this actually + * happens. + */ + if (size == PAGE_SIZE) + ref_prot = prot; + else + pr_warn_once("CPA: Cannot fixup static protections for PUD split\n"); +set: + set_pte(pte, pfn_pte(pfn, ref_prot)); +} + static int __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, struct page *base) { + unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1; pte_t *pbase = (pte_t *)page_address(base); - unsigned long ref_pfn, pfn, pfninc = 1; unsigned int i, level; - pte_t *tmp; pgprot_t ref_prot; + pte_t *tmp; spin_lock(&pgd_lock); /* @@ -707,15 +958,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, * PAT bit to correct position. 
*/ ref_prot = pgprot_large_2_4k(ref_prot); - ref_pfn = pmd_pfn(*(pmd_t *)kpte); + lpaddr = address & PMD_MASK; + lpinc = PAGE_SIZE; break; case PG_LEVEL_1G: ref_prot = pud_pgprot(*(pud_t *)kpte); ref_pfn = pud_pfn(*(pud_t *)kpte); pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; - + lpaddr = address & PUD_MASK; + lpinc = PMD_SIZE; /* * Clear the PSE flags if the PRESENT flag is not set * otherwise pmd_present/pmd_huge will return true @@ -736,8 +989,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, * Get the target pfn from the original entry: */ pfn = ref_pfn; - for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) - set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); + for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc) + split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc); if (virt_addr_valid(address)) { unsigned long pfn = PFN_DOWN(__pa(address)); @@ -1247,7 +1500,9 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); - new_prot = static_protections(new_prot, address, pfn); + cpa_inc_4k_install(); + new_prot = static_protections(new_prot, address, pfn, 1, + CPA_PROTECT); new_prot = pgprot_clear_protnone_bits(new_prot); @@ -1273,7 +1528,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) * Check, whether we can keep the large page intact * and just change the pte: */ - do_split = try_preserve_large_page(kpte, address, cpa); + do_split = should_split_large_page(kpte, address, cpa); /* * When the range fits into the existing large page, * return. cp->numpages and cpa->tlbflush have been updated in @@ -1288,23 +1543,23 @@ static int __change_page_attr(struct cpa_data *cpa, int primary) err = split_large_page(cpa, kpte, address); if (!err) { /* - * Do a global flush tlb after splitting the large page - * and before we do the actual change page attribute in the PTE. - * - * With out this, we violate the TLB application note, that says - * "The TLBs may contain both ordinary and large-page + * Do a global flush tlb after splitting the large page + * and before we do the actual change page attribute in the PTE. + * + * With out this, we violate the TLB application note, that says + * "The TLBs may contain both ordinary and large-page * translations for a 4-KByte range of linear addresses. This * may occur if software modifies the paging structures so that * the page size used for the address range changes. If the two * translations differ with respect to page frame or attributes * (e.g., permissions), processor behavior is undefined and may * be implementation-specific." - * - * We do this global tlb flush inside the cpa_lock, so that we + * + * We do this global tlb flush inside the cpa_lock, so that we * don't allow any other cpu, with stale tlb entries change the * page attribute in parallel, that also falls into the * just split large page entry. 
- */ + */ flush_tlb_all(); goto repeat; } diff --git a/block/genhd.c b/block/genhd.c index be5bab20b2ab..4c777e1b3bd9 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1284,6 +1284,7 @@ static void disk_release(struct device *dev) struct class block_class = { .name = "block", }; +EXPORT_SYMBOL_GPL(block_class); static char *block_devnode(struct device *dev, umode_t *mode, kuid_t *uid, kgid_t *gid) diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64 new file mode 100644 index 000000000000..694ed56a5f47 --- /dev/null +++ b/build.config.cuttlefish.x86_64 @@ -0,0 +1,16 @@ +ARCH=x86_64 +BRANCH=android-4.14 +CLANG_TRIPLE=x86_64-linux-gnu- +CROSS_COMPILE=x86_64-linux-androidkernel- +DEFCONFIG=x86_64_cuttlefish_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +POST_DEFCONFIG_CMDS="check_defconfig" +CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r328903/bin +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.arm b/build.config.goldfish.arm new file mode 100644 index 000000000000..ff5646ab4f40 --- /dev/null +++ b/build.config.goldfish.arm @@ -0,0 +1,13 @@ +ARCH=arm +BRANCH=android-4.4 +CROSS_COMPILE=arm-linux-androidkernel- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9/bin +FILES=" +arch/arm/boot/zImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.arm64 b/build.config.goldfish.arm64 new file mode 100644 index 000000000000..4c896a679ab9 --- /dev/null +++ b/build.config.goldfish.arm64 @@ -0,0 +1,13 @@ +ARCH=arm64 +BRANCH=android-4.4 +CROSS_COMPILE=aarch64-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin +FILES=" +arch/arm64/boot/Image +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.mips b/build.config.goldfish.mips new file mode 100644 index 000000000000..9a14a444ac14 --- /dev/null +++ b/build.config.goldfish.mips @@ -0,0 +1,12 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.mips64 b/build.config.goldfish.mips64 new file mode 100644 index 000000000000..6ad9759f5f4a --- /dev/null +++ b/build.config.goldfish.mips64 @@ -0,0 +1,12 @@ +ARCH=mips +BRANCH=android-4.4 +CROSS_COMPILE=mips64el-linux-android- +DEFCONFIG=ranchu64_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/mips/mips64el-linux-android-4.9/bin +FILES=" +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/build.config.goldfish.x86 b/build.config.goldfish.x86 new file mode 100644 index 000000000000..2266c621835e --- /dev/null +++ b/build.config.goldfish.x86 @@ -0,0 +1,13 @@ +ARCH=x86 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=i386_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git 
a/build.config.goldfish.x86_64 b/build.config.goldfish.x86_64 new file mode 100644 index 000000000000..08c42c2eba03 --- /dev/null +++ b/build.config.goldfish.x86_64 @@ -0,0 +1,13 @@ +ARCH=x86_64 +BRANCH=android-4.4 +CROSS_COMPILE=x86_64-linux-android- +DEFCONFIG=x86_64_ranchu_defconfig +EXTRA_CMDS='' +KERNEL_DIR=common +LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin +FILES=" +arch/x86/boot/bzImage +vmlinux +System.map +" +STOP_SHIP_TRACEPRINTK=1 diff --git a/certs/system_keyring.c b/certs/system_keyring.c index 81728717523d..4ba922ff3db6 100644 --- a/certs/system_keyring.c +++ b/certs/system_keyring.c @@ -264,5 +264,46 @@ int verify_pkcs7_signature(const void *data, size_t len, return ret; } EXPORT_SYMBOL_GPL(verify_pkcs7_signature); - #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */ + +/** + * verify_signature_one - Verify a signature with keys from given keyring + * @sig: The signature to be verified + * @trusted_keys: Trusted keys to use (NULL for builtin trusted keys only, + * (void *)1UL for all trusted keys). + * @keyid: key description (not partial) + */ +int verify_signature_one(const struct public_key_signature *sig, + struct key *trusted_keys, const char *keyid) +{ + key_ref_t ref; + struct key *key; + int ret; + + if (!sig) + return -EBADMSG; + if (!trusted_keys) { + trusted_keys = builtin_trusted_keys; + } else if (trusted_keys == (void *)1UL) { +#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING + trusted_keys = secondary_trusted_keys; +#else + trusted_keys = builtin_trusted_keys; +#endif + } + + ref = keyring_search(make_key_ref(trusted_keys, 1), + &key_type_asymmetric, keyid); + if (IS_ERR(ref)) { + pr_err("Asymmetric key (%s) not found in keyring(%s)\n", + keyid, trusted_keys->description); + return -ENOKEY; + } + + key = key_ref_to_ptr(ref); + ret = verify_signature(key, sig); + key_put(key); + return ret; +} +EXPORT_SYMBOL_GPL(verify_signature_one); + diff --git a/drivers/Kconfig b/drivers/Kconfig index ab4d43923c4d..3243eabb2337 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -79,6 +79,8 @@ source "drivers/hwmon/Kconfig" source "drivers/thermal/Kconfig" +source "drivers/trusty/Kconfig" + source "drivers/watchdog/Kconfig" source "drivers/ssb/Kconfig" @@ -212,6 +214,7 @@ source "drivers/tee/Kconfig" source "drivers/mux/Kconfig" source "drivers/opp/Kconfig" +source "drivers/sdw/Kconfig" source "drivers/visorbus/Kconfig" @@ -219,4 +222,11 @@ source "drivers/siox/Kconfig" source "drivers/slimbus/Kconfig" +source "drivers/vbs/Kconfig" + +source "drivers/acrn/Kconfig" + +source "drivers/vhm/Kconfig" + +source "drivers/energy_model/Kconfig" endmenu diff --git a/drivers/Makefile b/drivers/Makefile index 578f469f72fb..30b1c2ef0921 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -118,6 +118,7 @@ obj-$(CONFIG_W1) += w1/ obj-y += power/ obj-$(CONFIG_HWMON) += hwmon/ obj-$(CONFIG_THERMAL) += thermal/ +obj-$(CONFIG_TRUSTY) += trusty/ obj-$(CONFIG_WATCHDOG) += watchdog/ obj-$(CONFIG_MD) += md/ obj-$(CONFIG_BT) += bluetooth/ @@ -146,6 +147,7 @@ obj-$(CONFIG_OF) += of/ obj-$(CONFIG_SSB) += ssb/ obj-$(CONFIG_BCMA) += bcma/ obj-$(CONFIG_VHOST_RING) += vhost/ +obj-$(CONFIG_VBS) += vbs/ obj-$(CONFIG_VHOST) += vhost/ obj-$(CONFIG_VLYNQ) += vlynq/ obj-$(CONFIG_STAGING) += staging/ @@ -157,6 +159,8 @@ obj-$(CONFIG_REMOTEPROC) += remoteproc/ obj-$(CONFIG_RPMSG) += rpmsg/ obj-$(CONFIG_SOUNDWIRE) += soundwire/ +obj-$(CONFIG_ENERGY_MODEL) += energy_model/ + # Virtualization drivers obj-$(CONFIG_VIRT_DRIVERS) += virt/ obj-$(CONFIG_HYPERV) += hv/ @@ 
-183,6 +187,9 @@ obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FSI) += fsi/ obj-$(CONFIG_TEE) += tee/ obj-$(CONFIG_MULTIPLEXER) += mux/ +obj-$(CONFIG_ACRN) += acrn/ +obj-$(CONFIG_ACRN_VHM) += vhm/ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ obj-$(CONFIG_SIOX) += siox/ obj-$(CONFIG_GNSS) += gnss/ +obj-$(CONFIG_SDW) += sdw/ diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index cb97b6105f52..1cb33c95388a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -563,7 +563,7 @@ static int acpi_battery_get_state(struct acpi_battery *battery) if (!acpi_battery_present(battery)) return 0; - if (battery->update_time && + if (cache_time && time_before(jiffies, battery->update_time + msecs_to_jiffies(cache_time))) return 0; diff --git a/drivers/acrn/Kconfig b/drivers/acrn/Kconfig new file mode 100644 index 000000000000..706aba14ad00 --- /dev/null +++ b/drivers/acrn/Kconfig @@ -0,0 +1,29 @@ +config ACRN_SHARED_BUFFER + bool "Intel ACRN SHARED BUFFER" + depends on ACRN_VHM + ---help--- + Ring buffer shared between the ACRN Hypervisor and its SOS (Service OS). + Helps with ACRN performance profiling. + +config ACRN_TRACE + tristate "Intel ACRN Hypervisor Trace support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the trace driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel, or m to build + it as a module. + +config ACRN_HVLOG + bool "Intel ACRN Hypervisor Logmsg support" + depends on ACRN_SHARED_BUFFER + ---help--- + This is the log message driver for the Intel ACRN hypervisor. + You can say y to build it into the kernel. + +config ACRN_HV_NPK_LOG + bool "Intel ACRN Hypervisor NPK Log" + depends on INTEL_TH + depends on ACRN_VHM + ---help--- + This driver configures, enables, and disables the Intel ACRN + hypervisor NPK log. diff --git a/drivers/acrn/Makefile b/drivers/acrn/Makefile new file mode 100644 index 000000000000..dad6a9e8c42c --- /dev/null +++ b/drivers/acrn/Makefile @@ -0,0 +1,5 @@ +subdir-ccflags-$(CONFIG_ACRN) := -Werror +obj-$(CONFIG_ACRN_SHARED_BUFFER) += sbuf.o +obj-$(CONFIG_ACRN_TRACE) += acrn_trace.o +obj-$(CONFIG_ACRN_HVLOG) += acrn_hvlog.o +obj-$(CONFIG_ACRN_HV_NPK_LOG) += hv_npk_log.o diff --git a/drivers/acrn/acrn_hvlog.c b/drivers/acrn/acrn_hvlog.c new file mode 100644 index 000000000000..6d27b79cb1a1 --- /dev/null +++ b/drivers/acrn/acrn_hvlog.c @@ -0,0 +1,432 @@ +/* + * ACRN Hypervisor logmsg + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ +#define pr_fmt(fmt) "ACRN HVLog: " fmt + +#include +#include +#include +#include +#include +#include +#include + +#include "sbuf.h" + +#define LOG_ENTRY_SIZE 80 +#define PCPU_NRS 4 + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define foreach_hvlog_type(idx, hvlog_type) \ + for ((idx) = 0; (idx) < (hvlog_type); (idx)++) + +enum sbuf_hvlog_index { + ACRN_CURRNET_HVLOG = 0, + ACRN_LAST_HVLOG, + ACRN_HVLOG_TYPE +}; + +struct acrn_hvlog { + struct miscdevice miscdev; + shared_buf_t *sbuf; + atomic_t open_cnt; + int pcpu_num; +}; + +static unsigned long long hvlog_buf_size; +static unsigned long long hvlog_buf_base; + +static int __init early_hvlog(char *p) +{ + int ret; + + pr_debug("%s(%s)\n", __func__, p); + hvlog_buf_size = memparse(p, &p); + if (*p != '@') + return 0; + hvlog_buf_base = memparse(p + 1, &p); + + if (!!hvlog_buf_base && !!hvlog_buf_size) { + ret = memblock_reserve(hvlog_buf_base, hvlog_buf_size); + if (ret) { + pr_err("%s: Error reserving hvlog memblock\n", + __func__); + hvlog_buf_base = 0; + hvlog_buf_size = 0; + return ret; + } + } + + return 0; +} +early_param("hvlog", early_hvlog); + + +static inline shared_buf_t *hvlog_mark_unread(shared_buf_t *sbuf) +{ + /* sbuf must point to valid data. + * clear the lowest bit in the magic to indicate that + * the sbuf point to the last boot valid data, we should + * read it later. 
+ */ + if (sbuf != NULL) + sbuf->magic &= ~1; + + return sbuf; +} + +static int acrn_hvlog_open(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = container_of(filp->private_data, + struct acrn_hvlog, miscdev); + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + /* More than one reader at the same time could get data messed up */ + if (atomic_cmpxchg(&acrn_hvlog->open_cnt, 0, 1) != 0) + return -EBUSY; + + filp->private_data = acrn_hvlog; + + return 0; +} + +static int acrn_hvlog_release(struct inode *inode, struct file *filp) +{ + struct acrn_hvlog *acrn_hvlog; + + acrn_hvlog = filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + atomic_dec(&acrn_hvlog->open_cnt); + filp->private_data = NULL; + + return 0; +} + +static ssize_t acrn_hvlog_read(struct file *filp, char __user *buf, + size_t count, loff_t *offset) +{ + char data[LOG_ENTRY_SIZE]; + struct acrn_hvlog *acrn_hvlog; + int ret; + + acrn_hvlog = (struct acrn_hvlog *)filp->private_data; + + pr_debug("%s, %s\n", __func__, acrn_hvlog->miscdev.name); + + if (acrn_hvlog->pcpu_num >= PCPU_NRS) { + pr_err("%s, invalid pcpu_num: %d\n", + __func__, acrn_hvlog->pcpu_num); + return -EIO; + } + + if (acrn_hvlog->sbuf != NULL) { + ret = sbuf_get(acrn_hvlog->sbuf, (uint8_t *)&data); + if (ret > 0) { + if (copy_to_user(buf, &data, ret)) + return -EFAULT; + } + + return ret; + } + + return 0; +} + +static const struct file_operations acrn_hvlog_fops = { + .owner = THIS_MODULE, + .open = acrn_hvlog_open, + .release = acrn_hvlog_release, + .read = acrn_hvlog_read, +}; + +static struct acrn_hvlog acrn_hvlog_devs[ACRN_HVLOG_TYPE][PCPU_NRS] = { + [ACRN_CURRNET_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_cur_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_cur_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 3, + }, + }, + [ACRN_LAST_HVLOG] = { + { + .miscdev = { + .name = "acrn_hvlog_last_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 0, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 1, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 2, + }, + { + .miscdev = { + .name = "acrn_hvlog_last_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_hvlog_fops, + }, + .pcpu_num = 3, + }, + } +}; + +static int __init acrn_hvlog_init(void) +{ + int ret = 0; + int i, j, idx; + uint32_t pcpu_id; + uint64_t logbuf_base0; + uint64_t logbuf_base1; + uint64_t logbuf_size; + uint32_t ele_size; + uint32_t ele_num; + uint32_t size; + bool sbuf_constructed = false; + + shared_buf_t *sbuf0[PCPU_NRS]; + shared_buf_t *sbuf1[PCPU_NRS]; + + pr_info("%s\n", __func__); + if (!hvlog_buf_base || !hvlog_buf_size) { + pr_warn("no fixed memory 
reserve for hvlog.\n"); + return 0; + } + + logbuf_base0 = hvlog_buf_base; + logbuf_size = (hvlog_buf_size >> 1); + logbuf_base1 = hvlog_buf_base + logbuf_size; + + size = (logbuf_size / PCPU_NRS); + ele_size = LOG_ENTRY_SIZE; + ele_num = (size - SBUF_HEAD_SIZE) / ele_size; + + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf0[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + sbuf1[pcpu_id] = sbuf_check_valid(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf0[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + acrn_hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf0[pcpu_id]); + acrn_hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base1 + size * pcpu_id); + } + sbuf_constructed = true; + } + + if (sbuf_constructed == false) { + foreach_cpu(pcpu_id, PCPU_NRS) { + if (sbuf1[pcpu_id] == NULL) + continue; + + foreach_cpu(pcpu_id, PCPU_NRS) { + acrn_hvlog_devs[ACRN_LAST_HVLOG][pcpu_id].sbuf = + hvlog_mark_unread(sbuf1[pcpu_id]); + } + } + foreach_cpu(pcpu_id, PCPU_NRS) { + acrn_hvlog_devs[ACRN_CURRNET_HVLOG][pcpu_id].sbuf = + sbuf_construct(ele_num, ele_size, + logbuf_base0 + size * pcpu_id); + } + sbuf_constructed = true; + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + ret = sbuf_share_setup(pcpu_id, ACRN_HVLOG, + acrn_hvlog_devs[idx][pcpu_id].sbuf); + if (ret < 0) { + pr_err("Failed to setup %s, errno %d\n", + acrn_hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto setup_err; + } + } + } + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + atomic_set(&acrn_hvlog_devs[idx][pcpu_id].open_cnt, 0); + + ret = misc_register( + &acrn_hvlog_devs[idx][pcpu_id].miscdev); + if (ret < 0) { + pr_err("Failed to register %s, errno %d\n", + acrn_hvlog_devs[idx][pcpu_id].miscdev.name, ret); + goto reg_err; + } + } + } + + return 0; + +reg_err: + foreach_hvlog_type(i, idx) { + foreach_cpu(j, PCPU_NRS) { + misc_deregister(&acrn_hvlog_devs[i][j].miscdev); + } + } + + foreach_cpu(j, pcpu_id) { + misc_deregister(&acrn_hvlog_devs[idx][j].miscdev); + } + + pcpu_id = PCPU_NRS; +setup_err: + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(j, pcpu_id) { + sbuf_share_setup(j, ACRN_HVLOG, 0); + sbuf_deconstruct(acrn_hvlog_devs[idx][j].sbuf); + } + } + + return ret; +} + +static void __exit acrn_hvlog_exit(void) +{ + int idx; + uint32_t pcpu_id; + + pr_info("%s\n", __func__); + + foreach_hvlog_type(idx, ACRN_HVLOG_TYPE) { + foreach_cpu(pcpu_id, PCPU_NRS) { + misc_deregister(&acrn_hvlog_devs[idx][pcpu_id].miscdev); + } + } + + idx = ACRN_CURRNET_HVLOG; + { + foreach_cpu(pcpu_id, PCPU_NRS) { + sbuf_share_setup(pcpu_id, ACRN_HVLOG, 0); + sbuf_deconstruct(acrn_hvlog_devs[idx][pcpu_id].sbuf); + } + } +} + +module_init(acrn_hvlog_init); +module_exit(acrn_hvlog_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Logmsg"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/acrn_trace.c b/drivers/acrn/acrn_trace.c new file mode 100644 index 000000000000..d48b03625223 --- /dev/null +++ b/drivers/acrn/acrn_trace.c @@ -0,0 +1,304 @@ +/* +* +* ACRN Trace module +* +* This file is provided under a dual BSD/GPLv2 license.  When using or +* redistributing this file, you may do so under either license. +* +* GPL LICENSE SUMMARY +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. 
+* +* This program is free software; you can redistribute it and/or modify +* it under the terms of version 2 of the GNU General Public License as +* published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, but +* WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU +* General Public License for more details. +* +* Contact Information: Yan, Like +* +* BSD LICENSE +* +* Copyright (c) 2017 Intel Corporation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions +* are met: +* +*   * Redistributions of source code must retain the above copyright +*     notice, this list of conditions and the following disclaimer. +*   * Redistributions in binary form must reproduce the above copyright +*     notice, this list of conditions and the following disclaimer in +*     the documentation and/or other materials provided with the +*     distribution. +*   * Neither the name of Intel Corporation nor the names of its +*     contributors may be used to endorse or promote products derived +*     from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
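By contrast, the trace devices defined below implement no read() at all: the whole per-CPU shared buffer is handed to userspace through mmap(), and the consumer walks the ring itself using the shared_buf header laid out in sbuf.h later in this patch. A rough sketch of such a consumer, assuming the driver's 4 MiB TRACE_SBUF_SIZE; the struct here mirrors only the leading fields of shared_buf_t:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #define TRACE_SBUF_SIZE (4 * 1024 * 1024)	/* must match the driver */

    /* userspace mirror of the leading fields of shared_buf_t (see sbuf.h) */
    struct shared_buf {
    	uint64_t magic;
    	uint32_t ele_num;
    	uint32_t ele_size;
    	uint32_t head;	/* read offset from the element base */
    	uint32_t tail;	/* write offset from the element base */
    };

    int main(void)
    {
    	struct shared_buf *sbuf;
    	int fd = open("/dev/acrn_trace_0", O_RDONLY);

    	if (fd < 0)
    		return 1;
    	sbuf = mmap(NULL, TRACE_SBUF_SIZE, PROT_READ, MAP_SHARED, fd, 0);
    	if (sbuf == MAP_FAILED)
    		return 1;
    	/* everything between head and tail is valid trace data */
    	printf("%u elements x %u bytes, head=%u tail=%u\n",
    	       sbuf->ele_num, sbuf->ele_size, sbuf->head, sbuf->tail);
    	munmap(sbuf, TRACE_SBUF_SIZE);
    	close(fd);
    	return 0;
    }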
+* +* Like Yan +* +*/ + +#define pr_fmt(fmt) "ACRNTrace: " fmt + +#include +#include +#include +#include +#include +#include + +#include + +#include "sbuf.h" + + +#define TRACE_SBUF_SIZE (4 * 1024 * 1024) +#define TRACE_ELEMENT_SIZE 32 /* byte */ +#define TRACE_ELEMENT_NUM ((TRACE_SBUF_SIZE - SBUF_HEAD_SIZE) / \ + TRACE_ELEMENT_SIZE) + +#define foreach_cpu(cpu, cpu_num) \ + for ((cpu) = 0; (cpu) < (cpu_num); (cpu)++) + +#define MAX_NR_CPUS 4 +/* actual physical cpu number, initialized by module init */ +static int pcpu_num; + +static int nr_cpus = MAX_NR_CPUS; +module_param(nr_cpus, int, S_IRUSR | S_IWUSR); + +static atomic_t open_cnt[MAX_NR_CPUS]; +static shared_buf_t *sbuf_per_cpu[MAX_NR_CPUS]; + +static inline int get_id_from_devname(struct file *filep) +{ + uint32_t cpuid; + int err; + char id_str[16]; + struct miscdevice *dev = filep->private_data; + + strncpy(id_str, (void *)dev->name + sizeof("acrn_trace_") - 1, 16); + id_str[15] = '\0'; + err = kstrtoul(&id_str[0], 10, (unsigned long *)&cpuid); + + if (err) + return err; + + if (cpuid >= pcpu_num) { + pr_err("%s, failed to get cpuid, cpuid %d\n", + __func__, cpuid); + return -1; + } + + return cpuid; +} + +/************************************************************************ + * + * file_operations functions + * + ***********************************************************************/ +static int acrn_trace_open(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + /* More than one reader at the same time could get data messed up */ + if (atomic_read(&open_cnt[cpuid])) + return -EBUSY; + + atomic_inc(&open_cnt[cpuid]); + + return 0; +} + +static int acrn_trace_release(struct inode *inode, struct file *filep) +{ + int cpuid = get_id_from_devname(filep); + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + atomic_dec(&open_cnt[cpuid]); + + return 0; +} + +static int acrn_trace_mmap(struct file *filep, struct vm_area_struct *vma) +{ + int cpuid = get_id_from_devname(filep); + phys_addr_t paddr; + + pr_debug("%s, cpu %d\n", __func__, cpuid); + if (cpuid < 0) + return -ENXIO; + + BUG_ON(!virt_addr_valid(sbuf_per_cpu[cpuid])); + paddr = virt_to_phys(sbuf_per_cpu[cpuid]); + + if (remap_pfn_range(vma, vma->vm_start, + paddr >> PAGE_SHIFT, + vma->vm_end - vma->vm_start, + vma->vm_page_prot)) { + pr_err("Failed to mmap sbuf for cpu%d\n", cpuid); + return -EAGAIN; + } + + return 0; +} + +static const struct file_operations acrn_trace_fops = { + .owner = THIS_MODULE, + .open = acrn_trace_open, + .release = acrn_trace_release, + .mmap = acrn_trace_mmap, +}; + +static struct miscdevice acrn_trace_dev0 = { + .name = "acrn_trace_0", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_trace_fops, +}; + +static struct miscdevice acrn_trace_dev1 = { + .name = "acrn_trace_1", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_trace_fops, +}; + +static struct miscdevice acrn_trace_dev2 = { + .name = "acrn_trace_2", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_trace_fops, +}; + +static struct miscdevice acrn_trace_dev3 = { + .name = "acrn_trace_3", + .minor = MISC_DYNAMIC_MINOR, + .fops = &acrn_trace_fops, +}; + +static struct miscdevice *acrn_trace_devs[4] = { + &acrn_trace_dev0, + &acrn_trace_dev1, + &acrn_trace_dev2, + &acrn_trace_dev3, +}; + +/* + * acrn_trace_init() + */ +static int __init acrn_trace_init(void) +{ + int ret = 0; + int i, cpu; + + if (x86_hyper_type != X86_HYPER_ACRN) { + 
pr_err("acrn_trace: not support acrn hypervisor!\n"); + return -EINVAL; + } + + /* TBD: we could get the native cpu number by hypercall later */ + pr_info("%s, cpu_num %d\n", __func__, nr_cpus); + if (nr_cpus > MAX_NR_CPUS) { + pr_err("nr_cpus %d exceed MAX_NR_CPUS %d !\n", + nr_cpus, MAX_NR_CPUS); + return -EINVAL; + } + pcpu_num = nr_cpus; + + foreach_cpu(cpu, pcpu_num) { + /* allocate shared_buf */ + sbuf_per_cpu[cpu] = sbuf_allocate(TRACE_ELEMENT_NUM, + TRACE_ELEMENT_SIZE); + if (!sbuf_per_cpu[cpu]) { + pr_err("Failed alloc SBuf, cpuid %d\n", cpu); + ret = -ENOMEM; + goto out_free; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = sbuf_share_setup(cpu, ACRN_TRACE, sbuf_per_cpu[cpu]); + if (ret < 0) { + pr_err("Failed to setup SBuf, cpuid %d\n", cpu); + goto out_sbuf; + } + } + + foreach_cpu(cpu, pcpu_num) { + ret = misc_register(acrn_trace_devs[cpu]); + if (ret < 0) { + pr_err("Failed to register acrn_trace_%d, errno %d\n", + cpu, ret); + goto out_dereg; + } + } + + return ret; + +out_dereg: + for (i = --cpu; i >= 0; i--) + misc_deregister(acrn_trace_devs[i]); + cpu = pcpu_num; + +out_sbuf: + for (i = --cpu; i >= 0; i--) + sbuf_share_setup(i, ACRN_TRACE, NULL); + cpu = pcpu_num; + +out_free: + for (i = --cpu; i >= 0; i--) + sbuf_free(sbuf_per_cpu[i]); + + return ret; +} + +/* + * acrn_trace_exit() + */ +static void __exit acrn_trace_exit(void) +{ + int cpu; + + pr_info("%s, cpu_num %d\n", __func__, pcpu_num); + + foreach_cpu(cpu, pcpu_num) { + /* deregister devices */ + misc_deregister(acrn_trace_devs[cpu]); + + /* set sbuf pointer to NULL in HV */ + sbuf_share_setup(cpu, ACRN_TRACE, NULL); + + /* free sbuf, sbuf_per_cpu[cpu] should be set NULL */ + sbuf_free(sbuf_per_cpu[cpu]); + } +} + +module_init(acrn_trace_init); +module_exit(acrn_trace_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor Trace"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/hv_npk_log.c b/drivers/acrn/hv_npk_log.c new file mode 100644 index 000000000000..2303b9a72a3a --- /dev/null +++ b/drivers/acrn/hv_npk_log.c @@ -0,0 +1,384 @@ +/* + * ACRN Hypervisor NPK Log + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * The hv_npk_log driver enables, disables and configures the ACRN
+ * hypervisor NPK log. It communicates with the hypervisor via the
+ * HC_SETUP_HV_NPK_LOG hypercall, and exposes the interface to userspace
+ * via sysfs. With this driver, the user can:
+ * 1. Configure the Master/Channel used for the hypervisor NPK log.
+ * 2. Configure the log level of the hypervisor NPK log.
+ * 3. Enable/Disable the hypervisor NPK log.
+ *
+ *           +-------------------------------------+
+ *           |  Interfaces exposed by the driver   |
+ *  SOS      | U:  and used by the applications    |
+ *           |  +----------------^----------------+|
+ *           |                   |                 |
+ *           | K:      +---------v---------+       |
+ *           |         | hv_npk_log driver |       |
+ *           |         +---------^---------+       |
+ *           +------------------|------------------+
+ *                              | HC_SETUP_HV_NPK_LOG
+ *           +------------------|------------------+
+ *  HV       |          +-------v-------+          |
+ *           |          |    npk_log    |          |
+ *           |          +-------+-------+          |
+ *           +------------------|------------------+
+ *                              |
+ *           +-------------+----v----+-------------+
+ *  NPK MMIO |             | /////// |             |
+ *           |             | /////// |             |
+ *           +-------------+----+----+-------------+
+ *                              |
+ *                              +---+ The Master/Channel reserved
+ *                                    for the hypervisor NPK logs
+ */
+
+#define pr_fmt(fmt) "ACRN HV_NPK_LOG: " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "hv_npk_log.h"
+
+#define HV_NPK_LOG_USAGE \
+	"echo \"E[nable] [#M #C] [#L]\" to enable the ACRN HV NPK Log\n" \
+	"echo \"D[isable]\" to disable the ACRN HV NPK Log\n" \
+	"echo \"C[onfig] [#M #C] [#L]\" to configure the ACRN HV NPK Log\n"
+
+static struct hv_npk_log_conf *hnl_conf;
+
+/* Try to get the master/channel based on the given address */
+static int addr2mc(phys_addr_t addr, int *master, int *channel)
+{
+	phys_addr_t offset, base;
+	unsigned int start, end;
+	int c, m;
+
+	if (!hnl_conf || !master || !channel || !hnl_conf->nchan)
+		return -EINVAL;
+
+	/* check if the addr belongs to SW_BAR or FW_BAR */
+	if (addr >= hnl_conf->stmr_base && addr < hnl_conf->stmr_end) {
+		base = hnl_conf->stmr_base;
+		start = hnl_conf->sw_start;
+		end = hnl_conf->sw_end;
+	} else if (addr >= hnl_conf->ftmr_base && addr < hnl_conf->ftmr_end) {
+		base = hnl_conf->ftmr_base;
+		start = hnl_conf->fw_start;
+		end = hnl_conf->fw_end;
+	} else {
+		return -EINVAL;
+	}
+
+	offset = addr - base;
+	if (offset % NPK_CHAN_SIZE)
+		return -EINVAL;
+
+	c = offset / NPK_CHAN_SIZE;
+	m = c / hnl_conf->nchan;
+	c = c % hnl_conf->nchan;
+	if (start + m > end)
+		return -EINVAL;
+
+	*channel = c;
+	*master = m + start;
+	return 0;
+}
+
+/* Try to get the MMIO address based on the given master/channel */
+static int mc2addr(phys_addr_t *addr, unsigned int master, unsigned int channel)
+{
+	phys_addr_t base;
+	unsigned int start;
+
+	if (!hnl_conf || !addr || !hnl_conf->nchan
+		|| channel >= hnl_conf->nchan)
+		return -EINVAL;
+
+	/* check if the master belongs to SW_BAR or FW_BAR */
+	if (master >= hnl_conf->sw_start && master <= hnl_conf->sw_end) {
+		base = hnl_conf->stmr_base;
+		start = hnl_conf->sw_start;
+	} else if (master >= hnl_conf->fw_start && master <= hnl_conf->fw_end) {
+		base = hnl_conf->ftmr_base;
+		start = hnl_conf->fw_start;
+	} else {
+		return -EINVAL;
+	}
+
+	*addr = base + ((master - start) * hnl_conf->nchan + channel)
+		* NPK_CHAN_SIZE;
+	return 0;
+}
+
+static int npk_dev_match(struct device *dev, void *data)
+{
+	return 1;
+}
+
+/* Check if the NPK device/driver exists, and get info from them */
+static int load_npk_conf(void)
+{
+	u32 reg;
+	int err;
+	void __iomem *base;
+	struct device *dev;
+	struct pci_dev *pdev;
+	struct device_driver *drv;
+
+	/* check if the NPK device and driver exist */
+	drv = driver_find(NPK_DRV_NAME, &pci_bus_type);
+	if (!drv) {
+		pr_err("Cannot find the %s driver\n", NPK_DRV_NAME);
+		return -ENODEV;
+	}
+
+	dev = driver_find_device(drv, NULL, NULL, npk_dev_match);
+	if (!dev) {
+		pr_err("Cannot find the NPK device\n");
+		return -ENODEV;
+	}
+
+	hnl_conf = kzalloc(sizeof(struct hv_npk_log_conf), GFP_KERNEL);
+	if (!hnl_conf)
+		return -ENOMEM;
+
+	/* get the base address of FW_BAR */
+	pdev = to_pci_dev(dev);
+	err = pci_read_config_dword(pdev, PCI_REG_FW_LBAR, &reg);
+	if (err)
+		goto error;
+	hnl_conf->ftmr_base = reg & 0xfffc0000U;
+	err = pci_read_config_dword(pdev, PCI_REG_FW_UBAR, &reg);
+	if (err)
+		goto error;
+	hnl_conf->ftmr_base |= ((phys_addr_t)reg << 32);
+
+	/* read out some configurations of NPK */
+	base = devm_ioremap(dev, pdev->resource[TH_MMIO_CONFIG].start,
+			resource_size(&(pdev->resource[TH_MMIO_CONFIG])));
+	if (!base) {
+		pr_err("Cannot map the NPK configuration address\n");
+		goto error;
+	}
+
+	reg = ioread32(base + REG_STH_STHCAP0);
+	hnl_conf->sw_start = reg & 0xffffU;
+	hnl_conf->sw_end = reg >> 16;
+	reg = ioread32(base + REG_STH_STHCAP1);
+	hnl_conf->nchan = reg & 0xffU;
+	hnl_conf->fw_end = reg >> 24;
+	reg = ioread32(base + REG_STH_STHCAP2);
+	hnl_conf->fw_start = reg & 0xffffU;
+	devm_iounmap(dev, base);
+
+	hnl_conf->status = HV_NPK_LOG_UNKNOWN;
+	hnl_conf->master = HV_NPK_LOG_UNKNOWN;
+	hnl_conf->channel = HV_NPK_LOG_UNKNOWN;
+	hnl_conf->loglevel = HV_NPK_LOG_UNKNOWN;
+	hnl_conf->stmr_base = pdev->resource[TH_MMIO_SW].start;
+
+	if (hnl_conf->sw_end < hnl_conf->sw_start
+		|| hnl_conf->fw_end < hnl_conf->fw_start
+		|| hnl_conf->nchan == 0)
+		goto error;
+
+	hnl_conf->stmr_end = hnl_conf->stmr_base + (hnl_conf->sw_end -
+		hnl_conf->sw_start) * hnl_conf->nchan * NPK_CHAN_SIZE;
+	hnl_conf->ftmr_end = hnl_conf->ftmr_base + (hnl_conf->fw_end -
+		hnl_conf->fw_start) * hnl_conf->nchan * NPK_CHAN_SIZE;
+
+	return 0;
+
+error:
+	kfree(hnl_conf);
+	hnl_conf = NULL;
+	return -EINVAL;
+}
+
+/* User interface to set the configuration */
+static int hv_npk_log_conf_set(const char *val, const struct kernel_param *kp)
+{
+	char **argv;
+	int i, argc, ret = -EINVAL;
+	struct hv_npk_log_param cmd;
+	unsigned int args[HV_NPK_LOG_MAX_PARAM];
+
+	if (!hnl_conf && load_npk_conf() < 0)
+		return -EINVAL;
+
+	argv = argv_split(GFP_KERNEL, val, &argc);
+	if (!argv)
+		return -ENOMEM;
+	if (!argc || argc > HV_NPK_LOG_MAX_PARAM)
+		goto out;
+
+	for (i = 1; i < argc; i++)
+		if
(kstrtouint(argv[i], 10, &args[i]) < 0) + goto out; + + memset(&cmd, 0, sizeof(struct hv_npk_log_param)); + cmd.loglevel = 0xffffU; + cmd.cmd = HV_NPK_LOG_CMD_INVALID; + switch (tolower(argv[0][0])) { + case 'e': /* enable */ + case 'c': /* configure */ + if (!strncasecmp(argv[0], "enable", strlen(argv[0]))) { + cmd.cmd = HV_NPK_LOG_CMD_ENABLE; + } else if (!strncasecmp(argv[0], "configure", strlen(argv[0])) + && argc != 1) { + cmd.cmd = HV_NPK_LOG_CMD_CONF; + } else + break; + + if (argc <= 2) { + cmd.loglevel = argc == 2 ? args[1] : 0xffffU; + if (hnl_conf->master == HV_NPK_LOG_UNKNOWN) + mc2addr(&cmd.mmio_addr, HV_NPK_LOG_DFT_MASTER, + HV_NPK_LOG_DFT_CHANNEL); + } else if (argc > 2 && !mc2addr(&cmd.mmio_addr, + args[1], args[2])) { + cmd.loglevel = argc == 4 ? args[3] : 0xffffU; + } + break; + case 'd': /* disable */ + if (!strncasecmp(argv[0], "disable", strlen(argv[0])) + && argc == 1) + cmd.cmd = HV_NPK_LOG_CMD_DISABLE; + break; + default: + pr_err("Unsupported command : %s\n", argv[0]); + break; + } + + if (cmd.cmd != HV_NPK_LOG_CMD_INVALID) { + ret = hcall_setup_hv_npk_log(virt_to_phys(&cmd)); + ret = (ret < 0 || cmd.res == HV_NPK_LOG_RES_KO) ? -EINVAL : 0; + } + +out: + argv_free(argv); + if (ret < 0) + pr_err("Unsupported configuration : %s\n", val); + return ret; +} + +/* User interface to query the configuration */ +static int hv_npk_log_conf_get(char *buffer, const struct kernel_param *kp) +{ + long ret; + struct hv_npk_log_param query; + + if (!hnl_conf && load_npk_conf() < 0) + return sprintf(buffer, "%s\n", + "Failed to init the configuration."); + + memset(&query, 0, sizeof(struct hv_npk_log_param)); + query.cmd = HV_NPK_LOG_CMD_QUERY; + ret = hcall_setup_hv_npk_log(virt_to_phys(&query)); + if (ret < 0 || query.res == HV_NPK_LOG_RES_KO) + return sprintf(buffer, "%s\n", "Failed to invoke the hcall."); + + if (!addr2mc(query.mmio_addr, &hnl_conf->master, &hnl_conf->channel)) { + hnl_conf->status = query.res == HV_NPK_LOG_RES_ENABLED ? 
+ HV_NPK_LOG_ENABLED : HV_NPK_LOG_DISABLED; + } else { + hnl_conf->status = HV_NPK_LOG_UNKNOWN; + hnl_conf->master = HV_NPK_LOG_UNKNOWN; + hnl_conf->channel = HV_NPK_LOG_UNKNOWN; + } + hnl_conf->loglevel = query.loglevel; + + return scnprintf(buffer, PAGE_SIZE, "Master(SW:%d~%d FW:%d~%d):%d " + "Channel(0~%d):%d Status:%d Log Level: %d\n%s\n", + hnl_conf->sw_start, hnl_conf->sw_end, + hnl_conf->fw_start, hnl_conf->fw_end, + hnl_conf->master, hnl_conf->nchan - 1, + hnl_conf->channel, hnl_conf->status, + hnl_conf->loglevel, HV_NPK_LOG_USAGE); +} + +/* /sys/module/hv_npk_log/parameters/hv_npk_log_conf */ +static struct kernel_param_ops hv_npk_log_conf_param_ops = { + .set = hv_npk_log_conf_set, + .get = hv_npk_log_conf_get, +}; +module_param_cb(hv_npk_log_conf, &hv_npk_log_conf_param_ops, NULL, 0644); + +static struct miscdevice hv_npk_log_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hv_npk_log", +}; + +static int __init hv_npk_log_init(void) +{ + return misc_register(&hv_npk_log_misc); +} + +static void __exit hv_npk_log_exit(void) +{ + kfree(hnl_conf); + + misc_deregister(&hv_npk_log_misc); +} + +module_init(hv_npk_log_init); +module_exit(hv_npk_log_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Intel Corp., http://www.intel.com"); +MODULE_DESCRIPTION("Driver for the Intel ACRN Hypervisor NPK Log"); +MODULE_VERSION("0.1"); diff --git a/drivers/acrn/hv_npk_log.h b/drivers/acrn/hv_npk_log.h new file mode 100644 index 000000000000..68bd68286522 --- /dev/null +++ b/drivers/acrn/hv_npk_log.h @@ -0,0 +1,109 @@ +/* + * ACRN Hypervisor NPK Log + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2018 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
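Because the whole control interface is a module parameter with custom get/set ops, both configuration and status queries go through the single sysfs file registered by module_param_cb() above. A hypothetical userspace helper (the path follows from the parameter name; the command string follows HV_NPK_LOG_USAGE, with master 74 and channel 0 being the driver's defaults):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define HNL_PARAM "/sys/module/hv_npk_log/parameters/hv_npk_log_conf"

    int main(void)
    {
    	char status[512];
    	ssize_t n;
    	int fd;

    	/* enable on master 74, channel 0, log level 3 */
    	fd = open(HNL_PARAM, O_WRONLY);
    	if (fd < 0)
    		return 1;
    	if (write(fd, "enable 74 0 3", strlen("enable 74 0 3")) < 0)
    		perror("enable");
    	close(fd);

    	/* read back master/channel ranges, status and log level */
    	fd = open(HNL_PARAM, O_RDONLY);
    	if (fd < 0)
    		return 1;
    	n = read(fd, status, sizeof(status) - 1);
    	if (n > 0) {
    		status[n] = '\0';
    		fputs(status, stdout);
    	}
    	close(fd);
    	return 0;
    }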
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef _HV_NPK_LOG_H_ +#define _HV_NPK_LOG_H_ + +#define NPK_DRV_NAME "intel_th_pci" + +#define NPK_CHAN_SIZE 64 +#define TH_MMIO_CONFIG 0 +#define TH_MMIO_SW 2 + +#define PCI_REG_FW_LBAR 0x70 +#define PCI_REG_FW_UBAR 0x74 + +#define REG_STH_STHCAP0 0x4000 +#define REG_STH_STHCAP1 0x4004 +#define REG_STH_STHCAP2 0x407C + +#define HV_NPK_LOG_ENABLED 1 +#define HV_NPK_LOG_DISABLED 0 +#define HV_NPK_LOG_UNKNOWN (-1) +#define HV_NPK_LOG_MAX_PARAM 4 + +#define HV_NPK_LOG_DFT_MASTER 74 +#define HV_NPK_LOG_DFT_CHANNEL 0 + +enum { + HV_NPK_LOG_CMD_INVALID, + HV_NPK_LOG_CMD_CONF, + HV_NPK_LOG_CMD_ENABLE, + HV_NPK_LOG_CMD_DISABLE, + HV_NPK_LOG_CMD_QUERY, +}; + +enum { + HV_NPK_LOG_RES_INVALID, + HV_NPK_LOG_RES_OK, + HV_NPK_LOG_RES_KO, + HV_NPK_LOG_RES_ENABLED, + HV_NPK_LOG_RES_DISABLED, +}; + +struct hv_npk_log_conf { + int status; + int master; + int channel; + int loglevel; + unsigned int fw_start; + unsigned int fw_end; + unsigned int sw_start; + unsigned int sw_end; + unsigned int nchan; + phys_addr_t stmr_base; + phys_addr_t stmr_end; + phys_addr_t ftmr_base; + phys_addr_t ftmr_end; + phys_addr_t ch_addr; +}; + +#endif /* _HV_NPK_LOG_H_ */ diff --git a/drivers/acrn/sbuf.c b/drivers/acrn/sbuf.c new file mode 100644 index 000000000000..b51ee04e12fa --- /dev/null +++ b/drivers/acrn/sbuf.c @@ -0,0 +1,241 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Li Fei + * + */ + +#define pr_fmt(fmt) "SBuf: " fmt + +#include +#include +#include +#include +#include +#include "sbuf.h" + +static inline bool sbuf_is_empty(shared_buf_t *sbuf) +{ + return (sbuf->head == sbuf->tail); +} + +static inline uint32_t sbuf_next_ptr(uint32_t pos, + uint32_t span, uint32_t scope) +{ + pos += span; + pos = (pos >= scope) ? (pos - scope) : pos; + return pos; +} + +static inline uint32_t sbuf_calculate_allocate_size(uint32_t ele_num, + uint32_t ele_size) +{ + uint64_t sbuf_allocate_size; + + sbuf_allocate_size = ele_num * ele_size; + sbuf_allocate_size += SBUF_HEAD_SIZE; + if (sbuf_allocate_size > SBUF_MAX_SIZE) { + pr_err("num=0x%x, size=0x%x exceed 0x%llx!\n", + ele_num, ele_size, SBUF_MAX_SIZE); + return 0; + } + + /* align to PAGE_SIZE */ + return (sbuf_allocate_size + PAGE_SIZE - 1) & PAGE_MASK; +} + +shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size) +{ + shared_buf_t *sbuf; + struct page *page; + uint32_t sbuf_allocate_size; + + if (!ele_num || !ele_size) { + pr_err("invalid parameter %s!\n", __func__); + return NULL; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(ele_num, ele_size); + if (!sbuf_allocate_size) + return NULL; + + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, + get_order(sbuf_allocate_size)); + if (page == NULL) { + pr_err("failed to alloc pages!\n"); + return NULL; + } + + sbuf = phys_to_virt(page_to_phys(page)); + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + sbuf->magic = SBUF_MAGIC; + pr_info("ele_num=0x%x, ele_size=0x%x allocated!\n", + ele_num, ele_size); + return sbuf; +} +EXPORT_SYMBOL(sbuf_allocate); + +void sbuf_free(shared_buf_t *sbuf) +{ + uint32_t sbuf_allocate_size; + + if ((sbuf == NULL) || sbuf->magic != SBUF_MAGIC) { + pr_err("invalid parameter %s\n", __func__); + return; + } + + sbuf_allocate_size = sbuf_calculate_allocate_size(sbuf->ele_num, + sbuf->ele_size); + if (!sbuf_allocate_size) + return; + + sbuf->magic = 0; + __free_pages((struct page *)virt_to_page(sbuf), + get_order(sbuf_allocate_size)); +} +EXPORT_SYMBOL(sbuf_free); + +int sbuf_get(shared_buf_t *sbuf, uint8_t *data) +{ + const void *from; + + if ((sbuf == NULL) || (data == NULL)) + return -EINVAL; + + if (sbuf_is_empty(sbuf)) { + /* no data available */ + return 0; + } + + from = (void *)sbuf + SBUF_HEAD_SIZE + sbuf->head; + + memcpy(data, from, sbuf->ele_size); + + sbuf->head = sbuf_next_ptr(sbuf->head, sbuf->ele_size, sbuf->size); + + return sbuf->ele_size; +} +EXPORT_SYMBOL(sbuf_get); + +int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf) +{ + struct sbuf_setup_param ssp; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + ssp.pcpu_id = pcpu_id; + ssp.sbuf_id = 
sbuf_id; + + if (!sbuf) { + ssp.gpa = 0; + } else { + BUG_ON(!virt_addr_valid(sbuf)); + ssp.gpa = virt_to_phys(sbuf); + } + pr_info("setup phys add = 0x%llx\n", ssp.gpa); + + return hcall_setup_sbuf(virt_to_phys(&ssp)); +} +EXPORT_SYMBOL(sbuf_share_setup); + +shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + if ((sbuf->magic == SBUF_MAGIC) && + (sbuf->ele_num == ele_num) && + (sbuf->ele_size == ele_size)) { + return sbuf; + } + + return NULL; +} +EXPORT_SYMBOL(sbuf_check_valid); + +shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size, + uint64_t paddr) +{ + shared_buf_t *sbuf; + + if (!ele_num || !ele_size || !paddr) + return NULL; + + sbuf = (shared_buf_t *)phys_to_virt(paddr); + BUG_ON(!virt_addr_valid(sbuf)); + + memset(sbuf, 0, SBUF_HEAD_SIZE); + sbuf->magic = SBUF_MAGIC; + sbuf->ele_num = ele_num; + sbuf->ele_size = ele_size; + sbuf->size = ele_num * ele_size; + pr_info("construct sbuf at 0x%llx.\n", paddr); + return sbuf; +} +EXPORT_SYMBOL(sbuf_construct); + +void sbuf_deconstruct(shared_buf_t *sbuf) +{ + if (sbuf == NULL) + return; + + sbuf->magic = 0; +} +EXPORT_SYMBOL(sbuf_deconstruct); diff --git a/drivers/acrn/sbuf.h b/drivers/acrn/sbuf.h new file mode 100644 index 000000000000..4fae7a258bce --- /dev/null +++ b/drivers/acrn/sbuf.h @@ -0,0 +1,129 @@ +/* + * shared buffer + * + * This file is provided under a dual BSD/GPLv2 license.  When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * Contact Information: Li Fei + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
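Taken together, the exported helpers define a short client lifecycle: allocate the ring, hand its physical address to the hypervisor, drain it with sbuf_get(), then detach and free. A condensed sketch of that contract (error handling trimmed; the element geometry is arbitrary, and the sbuf_id must be one of the enum sbuf_type values from sbuf.h below):

    static shared_buf_t *sbuf;

    static int client_init(uint32_t pcpu_id)
    {
    	sbuf = sbuf_allocate(256, 64);	/* 256 elements of 64 bytes */
    	if (!sbuf)
    		return -ENOMEM;
    	/* tell the hypervisor where to write (GPA of the ring) */
    	return sbuf_share_setup(pcpu_id, ACRN_TRACE, sbuf);
    }

    static void client_drain(void)
    {
    	uint8_t data[64];

    	/* sbuf_get() returns ele_size per element, 0 once empty */
    	while (sbuf_get(sbuf, data) > 0)
    		;	/* consume one element */
    }

    static void client_exit(uint32_t pcpu_id)
    {
    	sbuf_share_setup(pcpu_id, ACRN_TRACE, NULL);	/* detach from HV */
    	sbuf_free(sbuf);
    }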
IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Li Fei
+ *
+ */
+
+#ifndef SHARED_BUF_H
+#define SHARED_BUF_H
+
+#include
+
+
+#define SBUF_MAGIC	0x5aa57aa71aa13aa3
+#define SBUF_MAX_SIZE	(1ULL << 22)
+#define SBUF_HEAD_SIZE	64
+
+/* sbuf flags */
+#define OVERRUN_CNT_EN	(1ULL << 0) /* whether overrun counting is enabled */
+#define OVERWRITE_EN	(1ULL << 1) /* whether overwrite is enabled */
+
+enum sbuf_type {
+	ACRN_TRACE,
+	ACRN_HVLOG,
+	ACRN_SBUF_TYPE_MAX,
+};
+/**
+ * (sbuf) head + buf (stores (ele_num - 1) elements at most)
+ * buffer empty: tail == head
+ * buffer full: (tail + ele_size) % size == head
+ *
+ *                Base of memory for elements
+ *                |
+ *                |
+ * ---------------------------------------------------------------------------------------
+ * | shared_buf_t | raw data (ele_size) | raw data (ele_size) | ... | raw data (ele_size) |
+ * ---------------------------------------------------------------------------------------
+ * |
+ * |
+ * shared_buf_t *buf
+ */
+
+/* Make sure sizeof(shared_buf_t) == SBUF_HEAD_SIZE */
+typedef struct shared_buf {
+	uint64_t magic;
+	uint32_t ele_num;	/* number of elements */
+	uint32_t ele_size;	/* size of each element */
+	uint32_t head;		/* offset from base, to read */
+	uint32_t tail;		/* offset from base, to write */
+	uint64_t flags;
+	uint32_t overrun_cnt;	/* count of overruns */
+	uint32_t size;		/* ele_num * ele_size */
+	uint32_t padding[6];
+} ____cacheline_aligned shared_buf_t;
+
+static inline void sbuf_clear_flags(shared_buf_t *sbuf, uint64_t flags)
+{
+	sbuf->flags &= ~flags;
+}
+
+static inline void sbuf_set_flags(shared_buf_t *sbuf, uint64_t flags)
+{
+	sbuf->flags = flags;
+}
+
+static inline void sbuf_add_flags(shared_buf_t *sbuf, uint64_t flags)
+{
+	sbuf->flags |= flags;
+}
+
+shared_buf_t *sbuf_allocate(uint32_t ele_num, uint32_t ele_size);
+void sbuf_free(shared_buf_t *sbuf);
+int sbuf_get(shared_buf_t *sbuf, uint8_t *data);
+int sbuf_share_setup(uint32_t pcpu_id, uint32_t sbuf_id, shared_buf_t *sbuf);
+shared_buf_t *sbuf_check_valid(uint32_t ele_num, uint32_t ele_size,
+				uint64_t gpa);
+shared_buf_t *sbuf_construct(uint32_t ele_num, uint32_t ele_size,
+				uint64_t gpa);
+void sbuf_deconstruct(shared_buf_t *sbuf);
+
+#endif /* SHARED_BUF_H */
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d58763b6b009..c0cd8b65fdf5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -73,6 +73,7 @@
 #include
 #include
+#include
 #include
@@ -350,10 +351,14 @@ struct binder_error {
  * and by @lock)
  * @has_async_transaction: async transaction to node in progress
  * (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ * (invariant after initialized)
  * @accept_fds: file descriptor operations supported for node
  * (invariant after initialized)
  * @min_priority: minimum scheduling priority
  * (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
+ * (invariant after initialized)
  * @async_todo: list of async work items
  * (protected by @proc->inner_lock)
  *
@@ -389,6 +394,8 @@
struct binder_node { /* * invariant after initialization */ + u8 sched_policy:2; + u8 inherit_rt:1; u8 accept_fds:1; u8 min_priority; }; @@ -462,6 +469,22 @@ enum binder_deferred_state { BINDER_DEFERRED_RELEASE = 0x04, }; +/** + * struct binder_priority - scheduler policy and priority + * @sched_policy scheduler policy + * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT + * + * The binder driver supports inheriting the following scheduler policies: + * SCHED_NORMAL + * SCHED_BATCH + * SCHED_FIFO + * SCHED_RR + */ +struct binder_priority { + unsigned int sched_policy; + int prio; +}; + /** * struct binder_proc - binder process bookkeeping * @proc_node: element for binder_procs list @@ -540,7 +563,7 @@ struct binder_proc { int requested_threads; int requested_threads_started; int tmp_ref; - long default_priority; + struct binder_priority default_priority; struct dentry *debugfs_entry; struct binder_alloc alloc; struct binder_context *context; @@ -590,6 +613,7 @@ enum { * @is_dead: thread is dead and awaiting free * when outstanding transactions are cleaned up * (protected by @proc->inner_lock) + * @task: struct task_struct for this thread * * Bookkeeping structure for binder threads. */ @@ -609,6 +633,7 @@ struct binder_thread { struct binder_stats stats; atomic_t tmp_ref; bool is_dead; + struct task_struct *task; }; struct binder_transaction { @@ -625,8 +650,9 @@ struct binder_transaction { struct binder_buffer *buffer; unsigned int code; unsigned int flags; - long priority; - long saved_priority; + struct binder_priority priority; + struct binder_priority saved_priority; + bool set_priority_called; kuid_t sender_euid; /** * @lock: protects @from, @to_proc, and @to_thread @@ -1107,22 +1133,145 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc) binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } -static void binder_set_nice(long nice) +static bool is_rt_policy(int policy) +{ + return policy == SCHED_FIFO || policy == SCHED_RR; +} + +static bool is_fair_policy(int policy) +{ + return policy == SCHED_NORMAL || policy == SCHED_BATCH; +} + +static bool binder_supported_policy(int policy) { - long min_nice; + return is_fair_policy(policy) || is_rt_policy(policy); +} - if (can_nice(current, nice)) { - set_user_nice(current, nice); +static int to_userspace_prio(int policy, int kernel_priority) +{ + if (is_fair_policy(policy)) + return PRIO_TO_NICE(kernel_priority); + else + return MAX_USER_RT_PRIO - 1 - kernel_priority; +} + +static int to_kernel_prio(int policy, int user_priority) +{ + if (is_fair_policy(policy)) + return NICE_TO_PRIO(user_priority); + else + return MAX_USER_RT_PRIO - 1 - user_priority; +} + +static void binder_do_set_priority(struct task_struct *task, + struct binder_priority desired, + bool verify) +{ + int priority; /* user-space prio value */ + bool has_cap_nice; + unsigned int policy = desired.sched_policy; + + if (task->policy == policy && task->normal_prio == desired.prio) return; + + has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE); + + priority = to_userspace_prio(policy, desired.prio); + + if (verify && is_rt_policy(policy) && !has_cap_nice) { + long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO); + + if (max_rtprio == 0) { + policy = SCHED_NORMAL; + priority = MIN_NICE; + } else if (priority > max_rtprio) { + priority = max_rtprio; + } + } + + if (verify && is_fair_policy(policy) && !has_cap_nice) { + long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE)); + + if (min_nice > MAX_NICE) { + binder_user_error("%d 
RLIMIT_NICE not set\n", + task->pid); + return; + } else if (priority < min_nice) { + priority = min_nice; + } + } + + if (policy != desired.sched_policy || + to_kernel_prio(policy, priority) != desired.prio) + binder_debug(BINDER_DEBUG_PRIORITY_CAP, + "%d: priority %d not allowed, using %d instead\n", + task->pid, desired.prio, + to_kernel_prio(policy, priority)); + + trace_binder_set_priority(task->tgid, task->pid, task->normal_prio, + to_kernel_prio(policy, priority), + desired.prio); + + /* Set the actual priority */ + if (task->policy != policy || is_rt_policy(policy)) { + struct sched_param params; + + params.sched_priority = is_rt_policy(policy) ? priority : 0; + + sched_setscheduler_nocheck(task, + policy | SCHED_RESET_ON_FORK, + ¶ms); } - min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); - binder_debug(BINDER_DEBUG_PRIORITY_CAP, - "%d: nice value %ld not allowed use %ld instead\n", - current->pid, nice, min_nice); - set_user_nice(current, min_nice); - if (min_nice <= MAX_NICE) + if (is_fair_policy(policy)) + set_user_nice(task, priority); +} + +static void binder_set_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ true); +} + +static void binder_restore_priority(struct task_struct *task, + struct binder_priority desired) +{ + binder_do_set_priority(task, desired, /* verify = */ false); +} + +static void binder_transaction_priority(struct task_struct *task, + struct binder_transaction *t, + struct binder_priority node_prio, + bool inherit_rt) +{ + struct binder_priority desired_prio = t->priority; + + if (t->set_priority_called) return; - binder_user_error("%d RLIMIT_NICE not set\n", current->pid); + + t->set_priority_called = true; + t->saved_priority.sched_policy = task->policy; + t->saved_priority.prio = task->normal_prio; + + if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) { + desired_prio.prio = NICE_TO_PRIO(0); + desired_prio.sched_policy = SCHED_NORMAL; + } + + if (node_prio.prio < t->priority.prio || + (node_prio.prio == t->priority.prio && + node_prio.sched_policy == SCHED_FIFO)) { + /* + * In case the minimum priority on the node is + * higher (lower value), use that priority. If + * the priority is the same, but the node uses + * SCHED_FIFO, prefer SCHED_FIFO, since it can + * run unbounded, unlike SCHED_RR. + */ + desired_prio = node_prio; + } + + binder_set_priority(task, desired_prio); } static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, @@ -1175,6 +1324,7 @@ static struct binder_node *binder_init_node_ilocked( binder_uintptr_t ptr = fp ? fp->binder : 0; binder_uintptr_t cookie = fp ? fp->cookie : 0; __u32 flags = fp ? 
fp->flags : 0; + s8 priority; assert_spin_locked(&proc->inner_lock); @@ -1207,8 +1357,12 @@ static struct binder_node *binder_init_node_ilocked( node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; - node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >> + FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT; + node->min_priority = to_kernel_prio(node->sched_policy, priority); node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -2633,11 +2787,15 @@ static bool binder_proc_transaction(struct binder_transaction *t, struct binder_thread *thread) { struct binder_node *node = t->buffer->target_node; + struct binder_priority node_prio; bool oneway = !!(t->flags & TF_ONE_WAY); bool pending_async = false; BUG_ON(!node); binder_node_lock(node); + node_prio.prio = node->min_priority; + node_prio.sched_policy = node->sched_policy; + if (oneway) { BUG_ON(thread); if (node->has_async_transaction) { @@ -2658,12 +2816,15 @@ static bool binder_proc_transaction(struct binder_transaction *t, if (!thread && !pending_async) thread = binder_select_thread_ilocked(proc); - if (thread) + if (thread) { + binder_transaction_priority(thread->task, t, node_prio, + node->inherit_rt); binder_enqueue_thread_work_ilocked(thread, &t->work); - else if (!pending_async) + } else if (!pending_async) { binder_enqueue_work_ilocked(&t->work, &proc->todo); - else + } else { binder_enqueue_work_ilocked(&t->work, &node->async_todo); + } if (!pending_async) binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */); @@ -2780,7 +2941,6 @@ static void binder_transaction(struct binder_proc *proc, } thread->transaction_stack = in_reply_to->to_parent; binder_inner_proc_unlock(proc); - binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { return_error = BR_DEAD_REPLY; @@ -2953,7 +3113,15 @@ static void binder_transaction(struct binder_proc *proc, t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; - t->priority = task_nice(current); + if (!(t->flags & TF_ONE_WAY) && + binder_supported_policy(current->policy)) { + /* Inherit supported policies for synchronous transactions */ + t->priority.sched_policy = current->policy; + t->priority.prio = current->normal_prio; + } else { + /* Otherwise, fall back to the default priority */ + t->priority = target_proc->default_priority; + } trace_binder_transaction(reply, t, target_node); @@ -3182,6 +3350,7 @@ static void binder_transaction(struct binder_proc *proc, binder_enqueue_thread_work_ilocked(target_thread, &t->work); binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); + binder_restore_priority(current, in_reply_to->saved_priority); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); @@ -3285,6 +3454,7 @@ static void binder_transaction(struct binder_proc *proc, BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { + binder_restore_priority(current, in_reply_to->saved_priority); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); @@ -3865,7 +4035,7 @@ static int 
binder_thread_read(struct binder_proc *proc, wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } - binder_set_nice(proc->default_priority); + binder_restore_priority(current, proc->default_priority); } if (non_block) { @@ -4080,16 +4250,14 @@ static int binder_thread_read(struct binder_proc *proc, BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; + struct binder_priority node_prio; tr.target.ptr = target_node->ptr; tr.cookie = target_node->cookie; - t->saved_priority = task_nice(current); - if (t->priority < target_node->min_priority && - !(t->flags & TF_ONE_WAY)) - binder_set_nice(t->priority); - else if (!(t->flags & TF_ONE_WAY) || - t->saved_priority > target_node->min_priority) - binder_set_nice(target_node->min_priority); + node_prio.sched_policy = target_node->sched_policy; + node_prio.prio = target_node->min_priority; + binder_transaction_priority(current, t, node_prio, + target_node->inherit_rt); cmd = BR_TRANSACTION; } else { tr.target.ptr = 0; @@ -4267,6 +4435,8 @@ static struct binder_thread *binder_get_thread_ilocked( binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; + get_task_struct(current); + thread->task = current; atomic_set(&thread->tmp_ref, 0); init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); @@ -4317,6 +4487,7 @@ static void binder_free_thread(struct binder_thread *thread) BUG_ON(!list_empty(&thread->todo)); binder_stats_deleted(BINDER_STAT_THREAD); binder_proc_dec_tmpref(thread->proc); + put_task_struct(thread->task); kfree(thread); } @@ -4544,6 +4715,42 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) return ret; } +static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, + struct binder_node_info_for_ref *info) +{ + struct binder_node *node; + struct binder_context *context = proc->context; + __u32 handle = info->handle; + + if (info->strong_count || info->weak_count || info->reserved1 || + info->reserved2 || info->reserved3) { + binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", + proc->pid); + return -EINVAL; + } + + /* This ioctl may only be used by the context manager */ + mutex_lock(&context->context_mgr_node_lock); + if (!context->binder_context_mgr_node || + context->binder_context_mgr_node->proc != proc) { + mutex_unlock(&context->context_mgr_node_lock); + return -EPERM; + } + mutex_unlock(&context->context_mgr_node_lock); + + node = binder_get_node_from_ref(proc, handle, true, NULL); + if (!node) + return -EINVAL; + + info->strong_count = node->local_strong_refs + + node->internal_strong_refs; + info->weak_count = node->local_weak_refs; + + binder_put_node(node); + + return 0; +} + static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, struct binder_node_debug_info *info) { @@ -4638,6 +4845,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) } break; } + case BINDER_GET_NODE_INFO_FOR_REF: { + struct binder_node_info_for_ref info; + + if (copy_from_user(&info, ubuf, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + ret = binder_ioctl_get_node_info_for_ref(proc, &info); + if (ret < 0) + goto err; + + if (copy_to_user(ubuf, &info, sizeof(info))) { + ret = -EFAULT; + goto err; + } + + break; + } case BINDER_GET_NODE_DEBUG_INFO: { struct binder_node_debug_info info; @@ -4767,7 +4993,14 @@ static int binder_open(struct inode *nodp, struct file *filp) proc->tsk = current->group_leader; 
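+	/*
+	 * Worked example for the priority bookkeeping set up below:
+	 * binder_open() seeds default_priority from current->policy and
+	 * current->normal_prio, and the to_userspace_prio()/to_kernel_prio()
+	 * helpers earlier in this patch convert between the two spaces:
+	 *
+	 *   SCHED_NORMAL/SCHED_BATCH: kernel prio 100..139 <-> nice -20..19,
+	 *     e.g. to_kernel_prio(SCHED_NORMAL, 0) == NICE_TO_PRIO(0) == 120
+	 *   SCHED_FIFO/SCHED_RR: inverted against MAX_USER_RT_PRIO - 1 (99),
+	 *     e.g. to_kernel_prio(SCHED_FIFO, 98) == 99 - 98 == 1
+	 *
+	 * (The values are standard kernel constants, shown here only to make
+	 * the mapping concrete.)
+	 */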
mutex_init(&proc->files_lock); INIT_LIST_HEAD(&proc->todo); - proc->default_priority = task_nice(current); + if (binder_supported_policy(current->policy)) { + proc->default_priority.sched_policy = current->policy; + proc->default_priority.prio = current->normal_prio; + } else { + proc->default_priority.sched_policy = SCHED_NORMAL; + proc->default_priority.prio = NICE_TO_PRIO(0); + } + binder_dev = container_of(filp->private_data, struct binder_device, miscdev); proc->context = &binder_dev->context; @@ -5061,13 +5294,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m, spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, - "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d", + "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d", prefix, t->debug_id, t, t->from ? t->from->proc->pid : 0, t->from ? t->from->pid : 0, to_proc ? to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, - t->code, t->flags, t->priority, t->need_reply); + t->code, t->flags, t->priority.sched_policy, + t->priority.prio, t->need_reply); spin_unlock(&t->lock); if (proc != to_proc) { @@ -5185,8 +5419,9 @@ static void print_binder_node_nilocked(struct seq_file *m, hlist_for_each_entry(ref, &node->refs, node_entry) count++; - seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", + seq_printf(m, " node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, + node->sched_policy, node->min_priority, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count, node->tmp_refs); diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 588eb3ec3507..7d8beb77f532 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -85,6 +85,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done); DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done); +TRACE_EVENT(binder_set_priority, + TP_PROTO(int proc, int thread, unsigned int old_prio, + unsigned int desired_prio, unsigned int new_prio), + TP_ARGS(proc, thread, old_prio, new_prio, desired_prio), + + TP_STRUCT__entry( + __field(int, proc) + __field(int, thread) + __field(unsigned int, old_prio) + __field(unsigned int, new_prio) + __field(unsigned int, desired_prio) + ), + TP_fast_assign( + __entry->proc = proc; + __entry->thread = thread; + __entry->old_prio = old_prio; + __entry->new_prio = new_prio; + __entry->desired_prio = desired_prio; + ), + TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d", + __entry->proc, __entry->thread, __entry->old_prio, + __entry->new_prio, __entry->desired_prio) +); + TRACE_EVENT(binder_wait_for_work, TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo), TP_ARGS(proc_work, transaction_stack, thread_todo), diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index e7cb0c6ade81..b5f61f2840d0 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -15,8 +15,11 @@ #include #include #include +#include DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; +DEFINE_PER_CPU(unsigned long, max_cpu_freq); +DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE; void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, unsigned long max_freq) @@ -26,8 +29,29 @@ void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, scale = (cur_freq 
<< SCHED_CAPACITY_SHIFT) / max_freq; - for_each_cpu(i, cpus) + for_each_cpu(i, cpus) { per_cpu(freq_scale, i) = scale; + per_cpu(max_cpu_freq, i) = max_freq; + } +} + +void arch_set_max_freq_scale(struct cpumask *cpus, + unsigned long policy_max_freq) +{ + unsigned long scale, max_freq; + int cpu = cpumask_first(cpus); + + if (cpu > nr_cpu_ids) + return; + + max_freq = per_cpu(max_cpu_freq, cpu); + if (!max_freq) + return; + + scale = (policy_max_freq << SCHED_CAPACITY_SHIFT) / max_freq; + + for_each_cpu(cpu, cpus) + per_cpu(max_freq_scale, cpu) = scale; } static DEFINE_MUTEX(cpu_scale_mutex); @@ -47,6 +71,9 @@ static ssize_t cpu_capacity_show(struct device *dev, return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id)); } +static void update_topology_flags_workfn(struct work_struct *work); +static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn); + static ssize_t cpu_capacity_store(struct device *dev, struct device_attribute *attr, const char *buf, @@ -72,6 +99,8 @@ static ssize_t cpu_capacity_store(struct device *dev, topology_set_cpu_scale(i, new_capacity); mutex_unlock(&cpu_scale_mutex); + schedule_work(&update_topology_flags_work); + return count; } @@ -96,6 +125,25 @@ static int register_cpu_capacity_sysctl(void) } subsys_initcall(register_cpu_capacity_sysctl); +static int update_topology; + +int topology_update_cpu_topology(void) +{ + return update_topology; +} + +/* + * Updating the sched_domains can't be done directly from cpufreq callbacks + * due to locking, so queue the work for later. + */ +static void update_topology_flags_workfn(struct work_struct *work) +{ + update_topology = 1; + rebuild_sched_domains(); + pr_debug("sched_domain hierarchy rebuilt, flags updated\n"); + update_topology = 0; +} + static u32 capacity_scale; static u32 *raw_capacity; @@ -201,6 +249,7 @@ init_cpu_capacity_callback(struct notifier_block *nb, if (cpumask_empty(cpus_to_visit)) { topology_normalize_cpu_scale(); + schedule_work(&update_topology_flags_work); free_raw_capacity(); pr_debug("cpu_capacity: parsing done\n"); schedule_work(&parsing_done_work); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a690fd400260..ee0d68f61cfb 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "../base.h" #include "power.h" @@ -1706,6 +1707,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) pm_callback_t callback = NULL; const char *info = NULL; int error = 0; + char suspend_abort[MAX_SUSPEND_ABORT_LEN]; DECLARE_DPM_WATCHDOG_ON_STACK(wd); TRACE_DEVICE(dev); @@ -1729,6 +1731,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (pm_wakeup_pending()) { dev->power.direct_complete = false; + pm_get_active_wakeup_sources(suspend_abort, + MAX_SUSPEND_ABORT_LEN); + log_suspend_abort_reason(suspend_abort); async_error = -EBUSY; goto Complete; } diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 5fa1898755a3..e75512dd9733 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include "power.h" @@ -803,6 +804,37 @@ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard) } EXPORT_SYMBOL_GPL(pm_wakeup_dev_event); +void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max) +{ + struct wakeup_source *ws, *last_active_ws = NULL; + int len = 0; + bool active = false; + + 
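+	/*
+	 * Walk the RCU-protected wakeup_sources list: concatenate the names
+	 * of all currently-active sources into the caller's buffer; if none
+	 * are active, fall back to naming the most recently active one.
+	 */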
rcu_read_lock(); + list_for_each_entry_rcu(ws, &wakeup_sources, entry) { + if (ws->active && len < max) { + if (!active) + len += scnprintf(pending_wakeup_source, max, + "Pending Wakeup Sources: "); + len += scnprintf(pending_wakeup_source + len, max - len, + "%s ", ws->name); + active = true; + } else if (!active && + (!last_active_ws || + ktime_to_ns(ws->last_time) > + ktime_to_ns(last_active_ws->last_time))) { + last_active_ws = ws; + } + } + if (!active && last_active_ws) { + scnprintf(pending_wakeup_source, max, + "Last active Wakeup Source: %s", + last_active_ws->name); + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources); + void pm_print_active_wakeup_sources(void) { struct wakeup_source *ws; diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 6ad5ef48b61e..afa9b8459572 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -49,3 +49,14 @@ config REGMAP_SOUNDWIRE config REGMAP_SCCB tristate depends on I2C + +config REGMAP_HWSPINLOCK + bool + +config REGMAP_SDW + default n + tristate "Regmap support for soundwire" + depends on SDW + help + Enable this if regmap support is required for + soundwire slave devices. diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index f5b4e8851d00..d78c51ae3da2 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o obj-$(CONFIG_REGMAP_W1) += regmap-w1.o obj-$(CONFIG_REGMAP_SOUNDWIRE) += regmap-sdw.o obj-$(CONFIG_REGMAP_SCCB) += regmap-sccb.o +obj-$(CONFIG_REGMAP_SDW) += regmap-sdwint.o diff --git a/drivers/base/regmap/regmap-sdwint.c b/drivers/base/regmap/regmap-sdwint.c new file mode 100644 index 000000000000..ed8c28db03b8 --- /dev/null +++ b/drivers/base/regmap/regmap-sdwint.c @@ -0,0 +1,252 @@ +/* + * regmap-sdw.c - Register map access API - SoundWire support + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
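The driver below splits every 32-bit SoundWire register address into a 16-bit command address plus two SCP address-page fields. Using the SDW_* shift/mask macros it defines, the decomposition can be checked standalone (the input address is arbitrary, chosen only for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define SDW_SCP_ADDRPAGE1_MASK	0xFF
    #define SDW_SCP_ADDRPAGE1_SHIFT	15
    #define SDW_SCP_ADDRPAGE2_MASK	0xFF
    #define SDW_SCP_ADDRPAGE2_SHIFT	22
    #define SDW_REGADDR_MASK	0xFFFF

    int main(void)
    {
    	uint32_t reg_addr = 0x12345678;

    	/* prints 0x5678, 0x68 and 0x48 for this address */
    	printf("reg_command = 0x%04x\n", reg_addr & SDW_REGADDR_MASK);
    	printf("scp_addr1   = 0x%02x\n",
    	       (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) & SDW_SCP_ADDRPAGE1_MASK);
    	printf("scp_addr2   = 0x%02x\n",
    	       (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) & SDW_SCP_ADDRPAGE2_MASK);
    	return 0;
    }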
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ */
+#include
+#include
+#include
+
+#include "internal.h"
+
+#define SDW_SCP_ADDRPAGE1_MASK	0xFF
+#define SDW_SCP_ADDRPAGE1_SHIFT	15
+
+#define SDW_SCP_ADDRPAGE2_MASK	0xFF
+#define SDW_SCP_ADDRPAGE2_SHIFT	22
+
+#define SDW_REGADDR_SHIFT	0x0
+#define SDW_REGADDR_MASK	0xFFFF
+
+#define SDW_MAX_REG_ADDR	65536
+
+static int regmap_sdw_read(void *context,
+			   const void *reg, size_t reg_size,
+			   void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct sdw_slv *sdw = to_sdw_slave(dev);
+	struct sdw_msg xfer;
+	int ret, scp_addr1, scp_addr2;
+	int reg_command;
+	int reg_addr = *(u32 *)reg;
+	size_t t_val_size = 0, t_size;
+	int offset;
+	u8 *t_val;
+
+	/* Register addresses are 4 bytes wide on the SoundWire bus */
+	if (reg_size != 4)
+		return -ENOTSUPP;
+
+	xfer.slave_addr = sdw->slv_number;
+	xfer.ssp_tag = 0;
+	xfer.flag = SDW_MSG_FLAG_READ;
+	xfer.len = 0;
+	t_val = val;
+
+	offset = 0;
+	reg_command = (reg_addr >> SDW_REGADDR_SHIFT) &
+			SDW_REGADDR_MASK;
+	if (val_size > SDW_MAX_REG_ADDR)
+		t_size = SDW_MAX_REG_ADDR - reg_command;
+	else
+		t_size = val_size;
+	while (t_val_size < val_size) {
+
+		scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) &
+				SDW_SCP_ADDRPAGE1_MASK;
+		scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) &
+				SDW_SCP_ADDRPAGE2_MASK;
+		xfer.addr_page1 = scp_addr1;
+		xfer.addr_page2 = scp_addr2;
+		xfer.addr = reg_command;
+		/* length of this message only, not cumulative */
+		xfer.len = t_size;
+		xfer.buf = &t_val[offset];
+		ret = sdw_slave_transfer(sdw->mstr, &xfer, 1);
+		if (ret < 0)
+			return ret;
+		else if (ret != 1)
+			return -EIO;
+
+		t_val_size += t_size;
+		offset += t_size;
+		/* advance by the size just transferred */
+		reg_addr += t_size;
+		if (val_size - t_val_size > 65535)
+			t_size = 65535;
+		else
+			t_size = val_size - t_val_size;
+		reg_command = (reg_addr >> SDW_REGADDR_SHIFT) &
+				SDW_REGADDR_MASK;
+	}
+	return 0;
+}
+
+static int regmap_sdw_gather_write(void *context,
+				   const void *reg, size_t reg_size,
+				   const void *val, size_t val_size)
+{
+	struct device *dev = context;
+	struct sdw_slv *sdw = to_sdw_slave(dev);
+	struct sdw_msg xfer;
+	int ret, scp_addr1, scp_addr2;
+	int reg_command;
+	int reg_addr = *(u32 *)reg;
+	size_t t_val_size = 0, t_size;
+	int offset;
+	u8 *t_val;
+
+	/* Register addresses are 4 bytes wide on the SoundWire bus */
+	if (reg_size != 4)
+		return -ENOTSUPP;
+
+	if (!sdw)
+		return 0;
+
+	xfer.slave_addr = sdw->slv_number;
+	xfer.ssp_tag = 0;
+	xfer.flag = SDW_MSG_FLAG_WRITE;
+	xfer.len = 0;
+	t_val = (u8 *)val;
+
+	offset = 0;
+	reg_command = (reg_addr >> SDW_REGADDR_SHIFT) &
+			SDW_REGADDR_MASK;
+	if (val_size > SDW_MAX_REG_ADDR)
+		t_size = SDW_MAX_REG_ADDR - reg_command;
+	else
+		t_size = val_size;
+	while (t_val_size < val_size) {
+
+		scp_addr1 = (reg_addr >> SDW_SCP_ADDRPAGE1_SHIFT) &
+				SDW_SCP_ADDRPAGE1_MASK;
+		scp_addr2 = (reg_addr >> SDW_SCP_ADDRPAGE2_SHIFT) &
+				SDW_SCP_ADDRPAGE2_MASK;
+		xfer.addr_page1 = scp_addr1;
+		xfer.addr_page2 = scp_addr2;
+		xfer.addr = reg_command;
+		/* length of this message only, not cumulative */
+		xfer.len = t_size;
+		xfer.buf = &t_val[offset];
+		ret = sdw_slave_transfer(sdw->mstr, &xfer, 1);
+		if (ret < 0)
+			return ret;
+		else if (ret != 1)
+			return -EIO;
+
+		t_val_size += t_size;
+		offset += t_size;
+		/* advance by the size just transferred */
+		reg_addr += t_size;
+		if (val_size - t_val_size > 65535)
+			t_size = 65535;
+		else
+			t_size = val_size - t_val_size;
+		reg_command = (reg_addr >> SDW_REGADDR_SHIFT) &
+				SDW_REGADDR_MASK;
+	}
+	return 0;
+}
+
+static inline void regmap_sdw_count_check(size_t count, u32 offset)
+{
+	BUG_ON(count <= offset);
+}
+
+static int regmap_sdw_write(void *context, const void *data, size_t count)
+{
+	/* 4-byte register address on the SoundWire bus */
+	unsigned int offset = 4;
+
+	regmap_sdw_count_check(count, offset);
+	return regmap_sdw_gather_write(context, data, 4,
+				       data + offset, count - offset);
+}
+
+static struct regmap_bus regmap_sdw = {
+	.write = regmap_sdw_write,
+	.gather_write = regmap_sdw_gather_write,
+	.read = regmap_sdw_read,
+	.reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+	.val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static int regmap_sdw_config_check(const struct regmap_config *config)
+{
+	/* All registers are 8 bits wide, per the MIPI SoundWire 1.0 spec */
+	if (config->val_bits != 8)
+		return -ENOTSUPP;
+	/*
+	 * Register addresses are 32 bits in size; based on SCP_ADDR1 and
+	 * SCP_ADDR2 the implemented address range may vary per slave.
+	 */
+	if (config->reg_bits != 32)
+		return -ENOTSUPP;
+	/* SoundWire register addresses are contiguous. */
+	if (config->reg_stride != 0)
+		return -ENOTSUPP;
+	if (config->pad_bits != 0)
+		return -ENOTSUPP;
+
+	return 0;
+}
+
+/**
+ * regmap_init_sdwint(): Initialise register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+struct regmap *regmap_init_sdwint(struct sdw_slv *sdw,
+				  const struct regmap_config *config)
+{
+	int ret;
+
+	ret = regmap_sdw_config_check(config);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return regmap_init(&sdw->dev, &regmap_sdw, &sdw->dev, config);
+}
+EXPORT_SYMBOL_GPL(regmap_init_sdwint);
+
+/**
+ * devm_regmap_init_sdwint(): Initialise managed register map
+ *
+ * @sdw: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap.  The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_sdwint(struct sdw_slv *sdw,
+				       const struct regmap_config *config)
+{
+	int ret;
+
+	ret = regmap_sdw_config_check(config);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return devm_regmap_init(&sdw->dev, &regmap_sdw, &sdw->dev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_sdwint);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 6e076f359dcc..996573ffa58e 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include
 #include
 #include
+#include
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -74,6 +75,8 @@ int syscore_suspend(void)
 	return 0;
 
  err_out:
+	log_suspend_abort_reason("System core suspend callback %pF failed",
+				 ops->suspend);
 	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1851112ccc29..16f0e0f0d59c 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -183,4 +183,28 @@ config DA8XX_MSTPRI
 
 source "drivers/bus/fsl-mc/Kconfig"
 
+config DVC_TRACE_BUS
+	bool "DvC-Trace pseudobus"
+	default n
+	depends on USB_GADGET
+	help
+	  DvC-Trace pseudobus is meant to group devices capable of sending
+	  trace data via a USB DvC-Trace gadget function.
+	  A USB function driver will be able to choose a source device and
+	  provide the means to transfer the data.
+
+	  Say Y to enable it.
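A note on the regmap glue above: regmap_sdw_config_check() pins down the only
regmap_config a SoundWire slave may pass in. As a minimal illustrative sketch
(the example_* names are hypothetical, not part of this patch), a slave driver
would attach a regmap from its probe path roughly like this, using only the
devm_regmap_init_sdwint() helper added above:

    /* Hypothetical example; mirrors the regmap_sdw_config_check() rules. */
    static const struct regmap_config example_sdw_regmap_cfg = {
    	.reg_bits   = 32,	/* SCP address pages + 16-bit register address */
    	.val_bits   = 8,	/* SoundWire registers are 8 bits wide */
    	.reg_stride = 0,	/* register addresses are contiguous */
    	.pad_bits   = 0,
    };

    static int example_attach_regmap(struct sdw_slv *sdw)
    {
    	struct regmap *map;

    	map = devm_regmap_init_sdwint(sdw, &example_sdw_regmap_cfg);
    	if (IS_ERR(map))
    		return PTR_ERR(map);

    	/* regmap_read()/regmap_write() now go through the bus ops above */
    	return 0;
    }

Any other combination of reg_bits/val_bits/reg_stride/pad_bits is rejected
with -ENOTSUPP.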
+
+config DVC_TRACE_BUS_DEBUG
+	bool "DvC-Trace pseudobus debug"
+	default n
+	depends on DVC_TRACE_BUS
+	help
+	  DvC-Trace pseudobus is meant to group devices capable of sending
+	  trace data via a USB DvC-Trace gadget function.
+	  A USB function driver will be able to choose a source device and
+	  provide the means to transfer the data.
+
+	  Say Y to enable extended debug messages in this driver.
+
 endmenu
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ca300b1914ce..cf118be0785f 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_UNIPHIER_SYSTEM_BUS)	+= uniphier-system-bus.o
 obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o
 obj-$(CONFIG_DA8XX_MSTPRI)	+= da8xx-mstpri.o
 
+obj-$(CONFIG_DVC_TRACE_BUS)	+= dvctrace.o
+subdir-ccflags-$(CONFIG_DVC_TRACE_BUS_DEBUG)	+= -DDVCT_DEBUG
diff --git a/drivers/bus/dvctrace.c b/drivers/bus/dvctrace.c
new file mode 100644
index 000000000000..a44b1b3e5490
--- /dev/null
+++ b/drivers/bus/dvctrace.c
@@ -0,0 +1,732 @@
+/*
+ * DvC-Trace (dvct) bus driver
+ *
+ * Copyright (C) 2015, Intel Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+
+#include
+#include
+#include
+#include
+
+#ifdef DVCT_DEBUG
+#define DVCT_IN() pr_debug("in\n")
+#else
+#define DVCT_IN() do {} while (0)
+#endif
+
+/* Count the number of USB descriptors in the given ASCII hex string.
+ * What we expect:
+ * ll tt ss xx xx xx
+ *  |  |  |  +- Fill up the descriptor
+ *  |  |  +- Descriptor sub-type (1-4)
+ *  |  |     DC_INPUT_CONNECTION   0x01
+ *  |  |     DC_OUTPUT_CONNECTION  0x02
+ *  |  |     DC_DEBUG_UNIT         0x03
+ *  |  |     DC_DEBUG_ATTRIBUTES   0x04
+ *  |  +- Descriptor type (USB_DT_CS_INTERFACE)
+ *  +- Descriptor length (check > 3 and we have the rest of it)
+ */
+static int count_descriptors(const char *buf, size_t size)
+{
+	size_t off = 0;
+	int j, count0 = 0, count1 = 0;
+	u8 len, type, sub_type;
+
+	if (buf == NULL)
+		return -EINVAL;
+	/* Ensure 'buf' holds only correctly formatted USB descriptors */
+	while (off < size) {
+		j = sscanf(buf + off, "%2hhx", &len);
+		if (j <= 0)
+			return -EINVAL;
+		/* skip related data and space to read next 'len': aa bb cc ... */
+		off += len * 3;
+		count0++;
+	}
+	if (off != size)
+		return -EINVAL;
+	off = 0;
+	DVCT_IN();
+	/* Check every USB descriptor type and sub-type */
+	while (count0--) {
+		/* the length, type and sub-type */
+		j = sscanf(buf + off, "%2hhx%2hhx%2hhx",
+			   &len, &type, &sub_type);
+		if (j < 3 || type != USB_DT_CS_INTERFACE ||
+		    sub_type < DC_INPUT_CONNECTION ||
+		    sub_type > DC_DEBUG_ATTRIBUTES)
+			return -EINVAL;
+		off += len * 3;
+		count1++;
+	}
+	return count1;
+}
+
+/* Parse @buf and return a pointer to the descriptor identified by @idx */
+static u8 *get_descriptor(const char *buf, size_t size, int idx)
+{
+	size_t off = 0;
+	int i, j, k, count = 0;
+	u8 len, tmp, *ret = NULL;
+
+	if (buf == NULL)
+		return ERR_PTR(-EINVAL);
+	DVCT_IN();
+	while (off < size) {
+		j = sscanf(buf + off, "%2hhx%n", &len, &i);
+		if (j < 0)
+			return ERR_PTR(-EINVAL);
+		if (!j)
+			return ERR_PTR(-ERANGE);
+
+		if (count == idx) {
+			ret = kmalloc(len, GFP_KERNEL);
+			if (!ret)
+				return ERR_PTR(-ENOMEM);
+			ret[0] = len;
+		}
+		off += i;
+		for (k = 1; k < len; k++) {
+			j = sscanf(buf + off, "%2hhx%n", &tmp, &i);
+			if (j <= 0) {
+				kfree(ret);
+				return ERR_PTR(-EINVAL);
+			}
+			if (count == idx)
+				ret[k] = tmp;
+			off += i;
+		}
+		if (count == idx)
+			break;
+		count++;
+	}
+	return ret;
+}
+
+static void free_strings(struct dvct_usb_descriptors *desc)
+{
+	struct usb_string *string;
+
+	DVCT_IN();
+	for (string = desc->str.strings; string && string->s; string++)
+		kfree(string->s);
+
+	kfree(desc->str.strings);
+	desc->str.strings = NULL;
+	kfree(desc->lk_tbl);
+	desc->lk_tbl = NULL;
+}
+
+static void free_descriptors(struct dvct_usb_descriptors *desc)
+{
+	struct usb_descriptor_header **hdr;
+
+	DVCT_IN();
+	if (desc->dvc_spec) {
+		for (hdr = desc->dvc_spec; *hdr; hdr++)
+			kfree(*hdr);
+		kfree(desc->dvc_spec);
+		desc->dvc_spec = NULL;
+	}
+	free_strings(desc);
+	kfree(desc);
+}
+
+static int alloc_strings(struct dvct_usb_descriptors *desc, int count)
+{
+	DVCT_IN();
+	desc->lk_tbl = kzalloc((count + 1) * sizeof(struct dvct_string_lookup),
+			       GFP_KERNEL);
+	if (!desc->lk_tbl)
+		goto err;
+
+	desc->str.strings = kzalloc((count + 1) * sizeof(*desc->str.strings),
+				    GFP_KERNEL);
+	if (!desc->str.strings)
+		goto err_str;
+
+	desc->str.language = 0x0409;
+
+	return count;
+err_str:
+	kfree(desc->lk_tbl);
+	desc->lk_tbl = NULL;
+err:
+	return -ENOMEM;
+}
+
+static struct dvct_usb_descriptors *alloc_descriptors(int count)
+{
+	struct dvct_usb_descriptors *desc;
+
+	DVCT_IN();
+	desc = kzalloc(sizeof(struct dvct_usb_descriptors), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	desc->dvc_spec =
+		kzalloc((count + 1) * sizeof(struct usb_descriptor_header *),
+			GFP_KERNEL);
+
+	if (!desc->dvc_spec) {
+		kfree(desc);
+		return ERR_PTR(-ENOMEM);
+	}
+	return desc;
+}
+
+static ssize_t descriptors_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+	struct usb_descriptor_header **desc;
+	int ret = 0;
+
+	DVCT_IN();
+	if (!ds_dev->desc || !ds_dev->desc->dvc_spec
+	    || !*ds_dev->desc->dvc_spec)
+		return sprintf(buf, "No Descriptors.\n");
+
+	for (desc = ds_dev->desc->dvc_spec; *desc; desc++) {
+		u8 len, *pdesc;
+		int i;
+
+		len = (*desc)->bLength;
+
+		/* Check if it fits, total output is 3 * len */
+		if ((ret + 3 * len) > PAGE_SIZE) {
+			dev_warn(dev, "Descriptors attribute page overrun\n");
+			break;
+		}
+
+		pdesc = (u8 *)(*desc);
+		for (i = 0; i < len; i++)
+			ret += snprintf(buf + ret, PAGE_SIZE - ret, "%02hhX ",
+					pdesc[i]);
+		buf[ret - 1] = '\n';
+	}
+	return ret;
+}
+
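As a worked example of the format count_descriptors() and get_descriptor()
accept (an illustrative user-space sketch; the device name in the path is
hypothetical): a single 6-byte class-specific descriptor of sub-type
DC_INPUT_CONNECTION is written as six hex byte tokens, with the trailing
newline standing in for the final separator so that off lands exactly on size:

    #include <stdio.h>

    int main(void)
    {
    	/* hypothetical device name; targets the "descriptors" attribute above */
    	FILE *f = fopen("/sys/bus/dvctrace/devices/example-0/descriptors", "w");

    	if (!f)
    		return 1;
    	/* ll tt ss xx xx xx: len 0x06, USB_DT_CS_INTERFACE 0x24,
    	 * DC_INPUT_CONNECTION 0x01, three filler bytes
    	 */
    	fputs("06 24 01 00 00 00\n", f);
    	return fclose(f) ? 1 : 0;
    }

Each byte is "XX " (three characters), so a descriptor of length ll consumes
exactly ll * 3 characters of input, which is what the off += len * 3
bookkeeping above assumes.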
+static ssize_t descriptors_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int desc_count, i; + u8 *hdr; + + DVCT_IN(); + + if (ds_dev->instance_taken) + return -EBUSY; + + /*count the new descriptors, exit if invalid input*/ + desc_count = count_descriptors(buf, size); + if (desc_count <= 0) { + dev_warn(dev, "Invalid descriptor input:[%zu] %s", size, buf); + return -EINVAL; + } + + if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) + free_descriptors(ds_dev->desc); + + ds_dev->desc = alloc_descriptors(desc_count); + if (IS_ERR_OR_NULL(ds_dev->desc)) { + ds_dev->desc = NULL; + return -ENOMEM; + } + + for (i = 0; i < desc_count; i++) { + hdr = get_descriptor(buf, size, i); + if (IS_ERR_OR_NULL(hdr)) { + dev_err(dev, "Cannot get descriptor %d, %ld\n", i, + PTR_ERR(hdr)); + free_descriptors(ds_dev->desc); + ds_dev->desc = NULL; + return -EINVAL; + } + ds_dev->desc->dvc_spec[i] = (struct usb_descriptor_header *)hdr; + } + return size; +} + +static DEVICE_ATTR_RW(descriptors); + + +/*find out at which member(offset) of which descriptor the pointer + * points to */ +static int dvctrace_string_ptr_to_offset(struct usb_descriptor_header **first, + u8 *ptr, int *desc_offset, int *offset) +{ + u8 *hdr_start, *hdr_end; + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + hdr_start = (u8 *) (*first); + hdr_end = hdr_start + ((*first)->bLength - 1); + if (ptr >= hdr_start && ptr <= hdr_end) { + *desc_offset = idx; + *offset = ptr - hdr_start; + return 0; + } + } + return -EINVAL; +} + +static u8 *dvctrace_offset_to_string_ptr(struct usb_descriptor_header **first, + int desc_offset, int offset) +{ + int idx = 0; + + DVCT_IN(); + for (; *first; first++, idx++) { + if (idx == desc_offset) { + if (offset >= (*first)->bLength) + return ERR_PTR(-ERANGE); + return ((u8 *) (*first)) + offset; + } + } + return ERR_PTR(-ERANGE); +} + +static int count_strings(const char *buf, size_t size) +{ + int count = 0; + size_t off = 0, slen; + int i = 0, j, desc_offset, offset; + + if (buf == NULL) + return -EINVAL; + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", &desc_offset, &offset, &i); + if (j < 2) + break; + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return count; +} + +static char *get_string(const char *buf, size_t size, int index, + int *desc_offset, int *offset) +{ + int count = 0; + size_t off = 0, slen; + int i, j; + char *ret = ERR_PTR(-EINVAL); + + if (buf == NULL) + return ERR_PTR(-EINVAL); + DVCT_IN(); + while (off < size) { + j = sscanf(buf + off, "%d.%d: %n", desc_offset, offset, &i); + if (j < 2) + return ERR_PTR(-EINVAL); + off += i; + slen = 0; + while (off + slen < size) { + if (buf[off + slen] == ';' || buf[off + slen] == '\n') + break; + slen++; + } + + if (count == index) { + ret = kmalloc(slen+1, GFP_KERNEL); + if (!ret) + return ERR_PTR(-ENOMEM); + memcpy(ret, buf + off, slen); + ret[slen] = 0; + return ret; + } + off += slen; + if (buf[off] == ';' || buf[off] == '\n') + off++; + count++; + } + return ERR_PTR(-EINVAL); +} + +static ssize_t strings_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct dvct_string_lookup *lk_s; + struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev); + int ret = 0; + + DVCT_IN(); + if 
(!ds_dev->desc || !ds_dev->desc->dvc_spec
+	    || !*ds_dev->desc->dvc_spec)
+		return sprintf(buf, "No Descriptors.\n");
+
+	if (!ds_dev->desc->lk_tbl)
+		return sprintf(buf, "No Strings.\n");
+
+	for (lk_s = ds_dev->desc->lk_tbl; lk_s->str && lk_s->id; lk_s++) {
+		int desc_offset, offset;
+
+		/*
+		 * Check if it fits, worst case is "Unknown(%p): %s\n"
+		 * 8 + 16 + 3 + string length + 1
+		 */
+		if ((ret + 28 + strlen(lk_s->str->s)) > PAGE_SIZE) {
+			dev_warn(dev, "Strings attribute page overrun\n");
+			break;
+		}
+
+		if (dvctrace_string_ptr_to_offset(ds_dev->desc->dvc_spec,
+						  lk_s->id, &desc_offset,
+						  &offset))
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"Unknown(%p): %s\n", lk_s->id,
+					lk_s->str->s);
+		else
+			ret += snprintf(buf + ret, PAGE_SIZE - ret,
+					"%d.%d: %s\n", desc_offset, offset,
+					lk_s->str->s);
+	}
+	return ret;
+}
+
+static ssize_t strings_store(struct device *dev, struct device_attribute *attr,
+			     const char *buf, size_t size)
+{
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+	int count, i, ret;
+
+	DVCT_IN();
+	if (ds_dev->instance_taken)
+		return -EBUSY;
+
+	count = count_strings(buf, size);
+	if (count <= 0) {
+		dev_err(dev, "Invalid input string:(%zu) %s\n", size, buf);
+		return -EINVAL;
+	}
+
+	if (ds_dev->desc == &ds_dev->static_desc) {
+		dev_warn(dev, "Cannot set strings in static descriptors\n");
+		return -EINVAL;
+	}
+
+	if (ds_dev->desc->str.strings)
+		free_strings(ds_dev->desc);
+
+	ret = alloc_strings(ds_dev->desc, count);
+	if (ret < 0) {
+		dev_err(dev, "Cannot allocate strings %d\n", ret);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < count; i++) {
+		char *tmp;
+		int d_off, off;
+		u8 *pid;
+
+		tmp = get_string(buf, size, i, &d_off, &off);
+		if (IS_ERR_OR_NULL(tmp)) {
+			free_strings(ds_dev->desc);
+			return -EINVAL;
+		}
+
+		pid = dvctrace_offset_to_string_ptr(ds_dev->desc->dvc_spec,
+						    d_off, off);
+		if (IS_ERR_OR_NULL(pid)) {
+			dev_warn(dev, "String out of bounds\n");
+			free_strings(ds_dev->desc);
+			return -EINVAL;
+		}
+
+		ds_dev->desc->lk_tbl[i].id = pid;
+		ds_dev->desc->lk_tbl[i].str = &ds_dev->desc->str.strings[i];
+		ds_dev->desc->str.strings[i].s = tmp;
+	}
+	return size;
+}
+
+static DEVICE_ATTR_RW(strings);
+
+static ssize_t protocol_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	DVCT_IN();
+	return sprintf(buf, "%d\n", dev_to_dvct_source_device(dev)->protocol);
+}
+
+static ssize_t protocol_store(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+
+	DVCT_IN();
+	if (ds_dev->instance_taken)
+		return -EBUSY;
+
+	if (!kstrtou8(buf, 10, &ds_dev->protocol))
+		return size;
+
+	return -EINVAL;
+}
+
+static DEVICE_ATTR_RW(protocol);
+
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+
+	DVCT_IN();
+	if (ds_dev->instance_taken) {
+		if (ds_dev->function_taken)
+			return sprintf(buf, "In use\n");
+		else
+			return sprintf(buf, "Reserved\n");
+	} else {
+		return sprintf(buf, "Free\n");
+	}
+}
+
+static DEVICE_ATTR_RO(status);
+
+static struct attribute *dvct_source_attrs[] = {
+	&dev_attr_protocol.attr,
+	&dev_attr_status.attr,
+	&dev_attr_strings.attr,
+	&dev_attr_descriptors.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(dvct_source);
+
+static int dvct_match(struct device *dev, struct device_driver *drv)
+{
+	const char *devname = dev_name(dev);
+
+	DVCT_IN();
+	/* a bus match callback returns 0 for "no match" */
+	if (strlen(devname) <= strlen(drv->name))
+		return 0;
+	if (strncmp(devname, drv->name, strlen(drv->name)))
+		return 0;
+	return devname[strlen(drv->name)] == '-';
+}
+
+static struct bus_type dvctrace_bus_type = {
+	.name = "dvctrace",
+	.match = dvct_match,
+	.dev_groups = dvct_source_groups,
+};
+
+static struct device dvctrace_bus = {
+	.init_name = "dvctrace-bus",
+};
+
+static int dvct_match_free(struct device *dev, void *data)
+{
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+
+	DVCT_IN();
+	return !ds_dev->instance_taken;
+}
+
+struct dvct_source_device *dvct_source_find_by_name(const char *name)
+{
+	struct device *dev;
+
+	DVCT_IN();
+	dev = bus_find_device_by_name(&dvctrace_bus_type, NULL, name);
+	if (IS_ERR_OR_NULL(dev))
+		return ERR_PTR(-ENODEV);
+	return dev_to_dvct_source_device(dev);
+}
+EXPORT_SYMBOL_GPL(dvct_source_find_by_name);
+
+struct dvct_source_device *dvct_source_find_free_by_name(const char *name)
+{
+	struct dvct_source_device *ds_dev = dvct_source_find_by_name(name);
+
+	DVCT_IN();
+	if (IS_ERR_OR_NULL(ds_dev))
+		return ERR_PTR(-ENODEV);
+
+	if (ds_dev->instance_taken)
+		return ERR_PTR(-EBUSY);
+
+	return ds_dev;
+}
+EXPORT_SYMBOL_GPL(dvct_source_find_free_by_name);
+
+struct dvct_source_device *dvct_source_find_free(void)
+{
+	struct device *dev = bus_find_device(&dvctrace_bus_type, NULL,
+					     NULL, dvct_match_free);
+	DVCT_IN();
+	if (IS_ERR_OR_NULL(dev))
+		return ERR_PTR(-ENODEV);
+
+	return dev_to_dvct_source_device(dev);
+}
+EXPORT_SYMBOL_GPL(dvct_source_find_free);
+
+static int fn_count_free(struct device *dev, void *data)
+{
+	int *count = data;
+	struct dvct_source_device *ds_dev = dev_to_dvct_source_device(dev);
+
+	DVCT_IN();
+	if (!ds_dev->instance_taken)
+		(*count)++;
+	return 0;
+}
+
+int dvct_source_count_free(void)
+{
+	int count = 0;
+
+	DVCT_IN();
+	bus_for_each_dev(&dvctrace_bus_type, NULL, &count, fn_count_free);
+	return count;
+}
+EXPORT_SYMBOL_GPL(dvct_source_count_free);
+
+struct dvct_source_driver *
+dvct_source_get_drv(struct dvct_source_device *ds_dev)
+{
+	BUG_ON(ds_dev->device.driver == NULL);
+	return drv_to_dvct_source_driver(ds_dev->device.driver);
+}
+EXPORT_SYMBOL_GPL(dvct_source_get_drv);
+
+int dvct_source_device_add(struct dvct_source_device *ds_dev,
+			   struct dvct_source_driver *ds_drv)
+{
+	int ret;
+
+	DVCT_IN();
+	if (!ds_dev)
+		return -ENODEV;
+	if (!ds_drv)
+		return -EINVAL;
+
+	spin_lock_init(&ds_dev->lock);
+	spin_lock(&ds_dev->lock);
+	ds_dev->instance_taken = 0;
+	ds_dev->function_taken = 0;
+	spin_unlock(&ds_dev->lock);
+
+	device_initialize(&ds_dev->device);
+	ds_dev->device.bus = &dvctrace_bus_type;
+
+	if (!ds_dev->device.parent)
+		ds_dev->device.parent = &dvctrace_bus;
+
+	dev_set_name(&ds_dev->device, "%s-%s", ds_drv->driver.name,
+		     ds_dev->name_add);
+
+	ret = device_add(&ds_dev->device);
+	if (ret) {
+		dev_err(&dvctrace_bus, "Cannot add device %s %d\n",
+			ds_dev->name_add, ret);
+		return ret;
+	}
+
+	if (ds_dev->static_desc.dvc_spec)
+		ds_dev->desc = &ds_dev->static_desc;
+
+	dev_notice(&dvctrace_bus, "Adding device %s\n", ds_dev->name_add);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dvct_source_device_add);
+
+void dvct_source_device_del(struct dvct_source_device *ds_dev)
+{
+	DVCT_IN();
+
+	if (ds_dev->desc && ds_dev->desc != &ds_dev->static_desc) {
+		free_descriptors(ds_dev->desc);
+		ds_dev->desc = NULL;
+	}
+
+	device_del(&ds_dev->device);
+}
+EXPORT_SYMBOL_GPL(dvct_source_device_del);
+
+int __dvct_source_driver_register(struct dvct_source_driver *ds_drv,
+				  struct module *owner)
+{
+	DVCT_IN();
+	if (!ds_drv->activate ||
+	    !ds_drv->binded
 ||
+	    !ds_drv->start_transfer ||
+	    !ds_drv->stop_transfer ||
+	    !ds_drv->unbinded ||
+	    !ds_drv->deactivate)
+		return -EINVAL;
+
+	ds_drv->driver.owner = owner;
+	ds_drv->driver.bus = &dvctrace_bus_type;
+	return driver_register(&ds_drv->driver);
+}
+EXPORT_SYMBOL_GPL(__dvct_source_driver_register);
+
+void dvct_source_driver_unregister(struct dvct_source_driver *ds_drv)
+{
+	DVCT_IN();
+	driver_unregister(&ds_drv->driver);
+}
+EXPORT_SYMBOL_GPL(dvct_source_driver_unregister);
+
+static int __init dtb_init(void)
+{
+	int ret;
+
+	DVCT_IN();
+	ret = device_register(&dvctrace_bus);
+	if (ret) {
+		pr_err("Cannot register bus device %d\n", ret);
+		return ret;
+	}
+
+	ret = bus_register(&dvctrace_bus_type);
+	if (ret) {
+		pr_err("Cannot register bus %d\n", ret);
+		device_unregister(&dvctrace_bus);
+		return ret;
+	}
+	return 0;
+}
+
+static void __exit dtb_exit(void)
+{
+	DVCT_IN();
+	bus_unregister(&dvctrace_bus_type);
+	device_unregister(&dvctrace_bus);
+}
+
+subsys_initcall(dtb_init);
+module_exit(dtb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DvC-Trace bus implementation");
+MODULE_AUTHOR("Traian Schiau ");
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 40728491f37b..26a2da8dde63 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -552,6 +552,8 @@ config ADI
 	  and SSM (Silicon Secured Memory).  Intended consumers of this
 	  driver include crash and makedumpfile.
 
+source "drivers/char/rpmb/Kconfig"
+
 endmenu
 
 config RANDOM_TRUST_CPU
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index b8d42b4e979b..5a4869c00c17 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -58,3 +58,5 @@ js-rtc-y = rtc.o
 obj-$(CONFIG_XILLYBUS)		+= xillybus/
 obj-$(CONFIG_POWERNV_OP_PANEL)	+= powernv-op-panel.o
 obj-$(CONFIG_ADI)		+= adi.o
+obj-$(CONFIG_ACRN_VHM)		+= vhm/
+obj-$(CONFIG_RPMB)		+= rpmb/
diff --git a/drivers/char/rpmb/Kconfig b/drivers/char/rpmb/Kconfig
new file mode 100644
index 000000000000..48f11c19bbda
--- /dev/null
+++ b/drivers/char/rpmb/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+config RPMB
+	tristate "RPMB partition interface"
+	help
+	  Unified RPMB partition interface for eMMC and UFS.
+	  Provides an interface for in-kernel security controllers to
+	  access the RPMB partition.
+
+	  If unsure, select N.
+
+config RPMB_INTF_DEV
+	bool "RPMB character device interface /dev/rpmbN"
+	depends on RPMB
+	help
+	  Say yes here if you want to access RPMB from user space
+	  via the character device interface /dev/rpmb%d.
+
+config RPMB_SIM
+	tristate "RPMB partition device simulator"
+	default n
+	select RPMB
+	select CRYPTO_SHA256
+	select CRYPTO_HMAC
+	help
+	  The RPMB partition simulation device is a virtual device that
+	  provides a simulation of the RPMB protocol and uses kernel
+	  memory as storage.
+
+	  Be aware it doesn't promise any real security. This driver is
+	  suitable only for testing of the RPMB subsystem or RPMB applications
+	  prior to RPMB key provisioning.
+	  Most people should say N here.
+
+config VIRTIO_RPMB
+	tristate "Virtio RPMB character device interface /dev/vrpmb"
+	default n
+	depends on VIRTIO
+	select RPMB
+	help
+	  Say yes here if you want to access virtio RPMB from user space
+	  via the character device interface /dev/vrpmb.
+	  This device interface is only for the guest/frontend virtio driver.
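To make the user-space side of RPMB_INTF_DEV concrete, here is a minimal
illustrative sketch of talking to the /dev/rpmbN character device implemented
further below. It assumes the ioctl definitions are exported through a uapi
header, named here as <linux/rpmb.h> purely for illustration:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/rpmb.h>	/* assumed uapi header for the types below */

    int main(void)
    {
    	struct rpmb_ioc_ver_cmd ver;
    	int fd = open("/dev/rpmb0", O_RDWR);	/* the device is single-open */

    	if (fd < 0)
    		return 1;
    	if (ioctl(fd, RPMB_IOC_VER_CMD, &ver) == 0)
    		printf("RPMB API version: %u\n", ver.api_version);
    	close(fd);
    	return 0;
    }

RPMB_IOC_SEQ_CMD is issued the same way but additionally requires
CAP_SYS_RAWIO, as enforced in rpmb_ioctl_seq_cmd() below.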
diff --git a/drivers/char/rpmb/Makefile b/drivers/char/rpmb/Makefile new file mode 100644 index 000000000000..281c012712ca --- /dev/null +++ b/drivers/char/rpmb/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_RPMB) += rpmb.o +rpmb-objs += core.o +rpmb-$(CONFIG_RPMB_INTF_DEV) += cdev.o +obj-$(CONFIG_RPMB_SIM) += rpmb_sim.o +obj-$(CONFIG_VIRTIO_RPMB) += virtio_rpmb.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/char/rpmb/cdev.c b/drivers/char/rpmb/cdev.c new file mode 100644 index 000000000000..028c7ecd2ac7 --- /dev/null +++ b/drivers/char/rpmb/cdev.c @@ -0,0 +1,308 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#include + +#include "rpmb-cdev.h" + +static dev_t rpmb_devt; +#define RPMB_MAX_DEVS MINORMASK + +#define RPMB_DEV_OPEN 0 /** single open bit (position) */ +/* from MMC_IOC_MAX_CMDS */ +#define RPMB_MAX_FRAMES 255 + +/** + * rpmb_open - the open function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_open(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev; + + rdev = container_of(inode->i_cdev, struct rpmb_dev, cdev); + if (!rdev) + return -ENODEV; + + /* the rpmb is single open! */ + if (test_and_set_bit(RPMB_DEV_OPEN, &rdev->status)) + return -EBUSY; + + mutex_lock(&rdev->lock); + + fp->private_data = rdev; + + mutex_unlock(&rdev->lock); + + return nonseekable_open(inode, fp); +} + +/** + * rpmb_release - the cdev release function + * + * @inode: pointer to inode structure + * @fp: pointer to file structure + * + * Return: 0 always. 
+ */ +static int rpmb_release(struct inode *inode, struct file *fp) +{ + struct rpmb_dev *rdev = fp->private_data; + + clear_bit(RPMB_DEV_OPEN, &rdev->status); + + return 0; +} + +static size_t rpmb_ioc_frames_len(struct rpmb_dev *rdev, size_t nframes) +{ + if (rdev->ops->type == RPMB_TYPE_NVME) + return rpmb_ioc_frames_len_nvme(nframes); + else + return rpmb_ioc_frames_len_jdec(nframes); +} + +/** + * rpmb_cmd_copy_from_user - copy rpmb command from the user space + * + * @rdev: rpmb device + * @cmd: internal cmd structure + * @ucmd: user space cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_from_user(struct rpmb_dev *rdev, + struct rpmb_cmd *cmd, + struct rpmb_ioc_cmd __user *ucmd) +{ + void *frames; + u64 frames_ptr; + + if (get_user(cmd->flags, &ucmd->flags)) + return -EFAULT; + + if (get_user(cmd->nframes, &ucmd->nframes)) + return -EFAULT; + + if (cmd->nframes > RPMB_MAX_FRAMES) + return -EOVERFLOW; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + frames = memdup_user(u64_to_user_ptr(frames_ptr), + rpmb_ioc_frames_len(rdev, cmd->nframes)); + if (IS_ERR(frames)) + return PTR_ERR(frames); + + cmd->frames = frames; + return 0; +} + +/** + * rpmb_cmd_copy_to_user - copy rpmb command to the user space + * + * @rdev: rpmb device + * @ucmd: user space cmd structure + * @cmd: internal cmd structure + * + * Return: 0 on success, <0 on error + */ +static int rpmb_cmd_copy_to_user(struct rpmb_dev *rdev, + struct rpmb_ioc_cmd __user *ucmd, + struct rpmb_cmd *cmd) +{ + u64 frames_ptr; + + if (copy_from_user(&frames_ptr, &ucmd->frames_ptr, sizeof(frames_ptr))) + return -EFAULT; + + /* some archs have issues with 64bit get_user */ + if (copy_to_user(u64_to_user_ptr(frames_ptr), cmd->frames, + rpmb_ioc_frames_len(rdev, cmd->nframes))) + return -EFAULT; + + return 0; +} + +/** + * rpmb_ioctl_seq_cmd - issue an rpmb command sequence + * + * @rdev: rpmb device + * @ptr: rpmb cmd sequence + * + * RPMB_IOC_SEQ_CMD handler + * + * Return: 0 on success, <0 on error + */ +static long rpmb_ioctl_seq_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_seq_cmd __user *ptr) +{ + __u64 ncmds; + struct rpmb_cmd *cmds; + struct rpmb_ioc_cmd __user *ucmds; + + int i; + int ret; + + /* The caller must have CAP_SYS_RAWIO, like mmc ioctl */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* some archs have issues with 64bit get_user */ + if (copy_from_user(&ncmds, &ptr->num_of_cmds, sizeof(ncmds))) + return -EFAULT; + + if (ncmds > 3) { + dev_err(&rdev->dev, "supporting up to 3 packets (%llu)\n", + ncmds); + return -EINVAL; + } + + cmds = kcalloc(ncmds, sizeof(*cmds), GFP_KERNEL); + if (!cmds) + return -ENOMEM; + + ucmds = (struct rpmb_ioc_cmd __user *)ptr->cmds; + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_from_user(rdev, &cmds[i], &ucmds[i]); + if (ret) + goto out; + } + + ret = rpmb_cmd_seq(rdev, cmds, ncmds); + if (ret) + goto out; + + for (i = 0; i < ncmds; i++) { + ret = rpmb_cmd_copy_to_user(rdev, &ucmds[i], &cmds[i]); + if (ret) + goto out; + } +out: + for (i = 0; i < ncmds; i++) + kfree(cmds[i].frames); + kfree(cmds); + return ret; +} + +static long rpmb_ioctl_ver_cmd(struct rpmb_dev *rdev, + struct rpmb_ioc_ver_cmd __user *ptr) +{ + struct rpmb_ioc_ver_cmd ver = { + .api_version = RPMB_API_VERSION, + }; + + return copy_to_user(ptr, &ver, sizeof(ver)) ? 
-EFAULT : 0;
+}
+
+static long rpmb_ioctl_cap_cmd(struct rpmb_dev *rdev,
+			       struct rpmb_ioc_cap_cmd __user *ptr)
+{
+	struct rpmb_ioc_cap_cmd cap;
+
+	cap.device_type = rdev->ops->type;
+	cap.target = rdev->target;
+	cap.block_size = rdev->ops->block_size;
+	cap.wr_cnt_max = rdev->ops->wr_cnt_max;
+	cap.rd_cnt_max = rdev->ops->rd_cnt_max;
+	cap.auth_method = rdev->ops->auth_method;
+	cap.capacity = rpmb_get_capacity(rdev);
+	cap.reserved = 0;
+
+	return copy_to_user(ptr, &cap, sizeof(cap)) ? -EFAULT : 0;
+}
+
+/**
+ * rpmb_ioctl - rpmb ioctl dispatcher
+ *
+ * @fp: a file pointer
+ * @cmd: ioctl command RPMB_IOC_SEQ_CMD RPMB_IOC_VER_CMD RPMB_IOC_CAP_CMD
+ * @arg: ioctl data: rpmb_ioc_ver_cmd rpmb_ioc_cap_cmd rpmb_ioc_seq_cmd
+ *
+ * Return: 0 on success; < 0 on error
+ */
+static long rpmb_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+	struct rpmb_dev *rdev = fp->private_data;
+	void __user *ptr = (void __user *)arg;
+
+	switch (cmd) {
+	case RPMB_IOC_VER_CMD:
+		return rpmb_ioctl_ver_cmd(rdev, ptr);
+	case RPMB_IOC_CAP_CMD:
+		return rpmb_ioctl_cap_cmd(rdev, ptr);
+	case RPMB_IOC_SEQ_CMD:
+		return rpmb_ioctl_seq_cmd(rdev, ptr);
+	default:
+		dev_err(&rdev->dev, "unsupported ioctl 0x%x.\n", cmd);
+		return -ENOIOCTLCMD;
+	}
+}
+
+#ifdef CONFIG_COMPAT
+static long rpmb_compat_ioctl(struct file *fp, unsigned int cmd,
+			      unsigned long arg)
+{
+	return rpmb_ioctl(fp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif /* CONFIG_COMPAT */
+
+static const struct file_operations rpmb_fops = {
+	.open		= rpmb_open,
+	.release	= rpmb_release,
+	.unlocked_ioctl	= rpmb_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= rpmb_compat_ioctl,
+#endif
+	.owner		= THIS_MODULE,
+	.llseek		= noop_llseek,
+};
+
+void rpmb_cdev_prepare(struct rpmb_dev *rdev)
+{
+	rdev->dev.devt = MKDEV(MAJOR(rpmb_devt), rdev->id);
+	rdev->cdev.owner = THIS_MODULE;
+	cdev_init(&rdev->cdev, &rpmb_fops);
+}
+
+void rpmb_cdev_add(struct rpmb_dev *rdev)
+{
+	cdev_add(&rdev->cdev, rdev->dev.devt, 1);
+}
+
+void rpmb_cdev_del(struct rpmb_dev *rdev)
+{
+	if (rdev->dev.devt)
+		cdev_del(&rdev->cdev);
+}
+
+int __init rpmb_cdev_init(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&rpmb_devt, 0, RPMB_MAX_DEVS, "rpmb");
+	if (ret < 0)
+		pr_err("unable to allocate char dev region\n");
+
+	return ret;
+}
+
+void __exit rpmb_cdev_exit(void)
+{
+	unregister_chrdev_region(rpmb_devt, RPMB_MAX_DEVS);
+}
diff --git a/drivers/char/rpmb/core.c b/drivers/char/rpmb/core.c
new file mode 100644
index 000000000000..e02c12b8046c
--- /dev/null
+++ b/drivers/char/rpmb/core.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/*
+ * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include "rpmb-cdev.h"
+
+static DEFINE_IDA(rpmb_ida);
+
+/**
+ * rpmb_dev_get - increase rpmb device ref counter
+ *
+ * @rdev: rpmb device
+ */
+struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev)
+{
+	return get_device(&rdev->dev) ? rdev : NULL;
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_get);
+
+/**
+ * rpmb_dev_put - decrease rpmb device ref counter
+ *
+ * @rdev: rpmb device
+ */
+void rpmb_dev_put(struct rpmb_dev *rdev)
+{
+	put_device(&rdev->dev);
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_put);
+
+/**
+ * rpmb_cmd_fixup - fixup rpmb command
+ *
+ * @rdev: rpmb device
+ * @cmds: rpmb command list
+ * @ncmds: number of commands
+ */
+static void rpmb_cmd_fixup(struct rpmb_dev *rdev,
+			   struct rpmb_cmd *cmds, u32 ncmds)
+{
+	int i;
+
+	if (RPMB_TYPE_HW(rdev->ops->type) != RPMB_TYPE_EMMC)
+		return;
+
+	/*
+	 * Fix up RPMB_READ_DATA, specific to eMMC:
+	 * the block count of the RPMB read operation is not indicated
+	 * in the original RPMB Data Read Request packet.
+	 * This is different from the implementation for other protocol
+	 * standards.
+	 */
+	for (i = 0; i < ncmds; i++) {
+		struct rpmb_frame_jdec *frame = cmds[i].frames;
+
+		if (frame->req_resp == cpu_to_be16(RPMB_READ_DATA)) {
+			dev_dbg(&rdev->dev, "Fixing up READ_DATA frame to block_count=0\n");
+			frame->block_count = 0;
+		}
+	}
+}
+
+/**
+ * rpmb_cmd_seq - send RPMB command sequence
+ *
+ * @rdev: rpmb device
+ * @cmds: rpmb command list
+ * @ncmds: number of commands
+ *
+ * Return: 0 on success
+ *         -EINVAL on wrong parameters
+ *         -EOPNOTSUPP if device doesn't support the requested operation
+ *         < 0 if the operation fails
+ */
+int rpmb_cmd_seq(struct rpmb_dev *rdev, struct rpmb_cmd *cmds, u32 ncmds)
+{
+	int err;
+
+	if (!rdev || !cmds || !ncmds)
+		return -EINVAL;
+
+	mutex_lock(&rdev->lock);
+	err = -EOPNOTSUPP;
+	if (rdev->ops && rdev->ops->cmd_seq) {
+		rpmb_cmd_fixup(rdev, cmds, ncmds);
+		err = rdev->ops->cmd_seq(rdev->dev.parent, rdev->target,
+					 cmds, ncmds);
+	}
+	mutex_unlock(&rdev->lock);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rpmb_cmd_seq);
+
+int rpmb_get_capacity(struct rpmb_dev *rdev)
+{
+	int err;
+
+	if (!rdev)
+		return -EINVAL;
+
+	mutex_lock(&rdev->lock);
+	err = -EOPNOTSUPP;
+	if (rdev->ops && rdev->ops->get_capacity)
+		err = rdev->ops->get_capacity(rdev->dev.parent, rdev->target);
+	mutex_unlock(&rdev->lock);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rpmb_get_capacity);
+
+static void rpmb_dev_release(struct device *dev)
+{
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+	ida_simple_remove(&rpmb_ida, rdev->id);
+	kfree(rdev);
+}
+
+struct class rpmb_class = {
+	.name = "rpmb",
+	.owner = THIS_MODULE,
+	.dev_release = rpmb_dev_release,
+};
+EXPORT_SYMBOL(rpmb_class);
+
+/**
+ * rpmb_dev_find_device - return first matching rpmb device
+ *
+ * @data: data for the match function
+ * @match: the matching function
+ *
+ * Return: matching rpmb device or NULL on failure
+ */
+static
+struct rpmb_dev *rpmb_dev_find_device(const void *data,
+				      int (*match)(struct device *dev,
+						   const void *data))
+{
+	struct device *dev;
+
+	dev = class_find_device(&rpmb_class, NULL, data, match);
+
+	return dev ? to_rpmb_dev(dev) : NULL;
+}
+
+static int match_by_type(struct device *dev, const void *data)
+{
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+	const u32 *type = data;
+
+	return (*type == RPMB_TYPE_ANY || rdev->ops->type == *type);
+}
+
+/**
+ * rpmb_dev_get_by_type - return first registered rpmb device
+ * with matching type.
+ * If run with RPMB_TYPE_ANY the first, and probably only,
+ * device is returned
+ *
+ * @type: rpmb underlying device type
+ *
+ * Return: matching rpmb device or NULL/ERR_PTR on failure
+ */
+struct rpmb_dev *rpmb_dev_get_by_type(u32 type)
+{
+	if (type > RPMB_TYPE_MAX)
+		return ERR_PTR(-EINVAL);
+
+	return rpmb_dev_find_device(&type, match_by_type);
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_get_by_type);
+
+struct device_with_target {
+	const struct device *dev;
+	u8 target;
+};
+
+static int match_by_parent(struct device *dev, const void *data)
+{
+	const struct device_with_target *d = data;
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+	return (d->dev && dev->parent == d->dev && rdev->target == d->target);
+}
+
+/**
+ * rpmb_dev_find_by_device - retrieve rpmb device from the parent device
+ *
+ * @parent: parent device of the rpmb device
+ * @target: RPMB target/region within the physical device
+ *
+ * Return: NULL if there is no rpmb device associated with the parent device
+ */
+struct rpmb_dev *rpmb_dev_find_by_device(struct device *parent, u8 target)
+{
+	struct device_with_target t;
+
+	if (!parent)
+		return NULL;
+
+	t.dev = parent;
+	t.target = target;
+
+	return rpmb_dev_find_device(&t, match_by_parent);
+}
+EXPORT_SYMBOL_GPL(rpmb_dev_find_by_device);
+
+static ssize_t type_show(struct device *dev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+	const char *sim;
+	ssize_t ret;
+
+	sim = (rdev->ops->type & RPMB_TYPE_SIM) ? ":SIM" : "";
+	switch (RPMB_TYPE_HW(rdev->ops->type)) {
+	case RPMB_TYPE_EMMC:
+		ret = sprintf(buf, "EMMC%s\n", sim);
+		break;
+	case RPMB_TYPE_UFS:
+		ret = sprintf(buf, "UFS%s\n", sim);
+		break;
+	case RPMB_TYPE_NVME:
+		ret = sprintf(buf, "NVMe%s\n", sim);
+		break;
+	default:
+		ret = sprintf(buf, "UNKNOWN\n");
+		break;
+	}
+
+	return ret;
+}
+static DEVICE_ATTR_RO(type);
+
+static ssize_t id_read(struct file *file, struct kobject *kobj,
+		       struct bin_attribute *attr, char *buf,
+		       loff_t off, size_t count)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+	size_t sz = min_t(size_t, rdev->ops->dev_id_len, PAGE_SIZE);
+
+	if (!rdev->ops->dev_id)
+		return 0;
+
+	return memory_read_from_buffer(buf, count, &off, rdev->ops->dev_id, sz);
+}
+static BIN_ATTR_RO(id, 0);
+
+static ssize_t wr_cnt_max_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+	return sprintf(buf, "%u\n", rdev->ops->wr_cnt_max);
+}
+static DEVICE_ATTR_RO(wr_cnt_max);
+
+static ssize_t rd_cnt_max_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct rpmb_dev *rdev = to_rpmb_dev(dev);
+
+	return sprintf(buf, "%u\n", rdev->ops->rd_cnt_max);
+}
+static DEVICE_ATTR_RO(rd_cnt_max);
+
+static struct attribute *rpmb_attrs[] = {
+	&dev_attr_type.attr,
+	&dev_attr_wr_cnt_max.attr,
+	&dev_attr_rd_cnt_max.attr,
+	NULL,
+};
+
+static struct bin_attribute *rpmb_bin_attributes[] = {
+	&bin_attr_id,
+	NULL,
+};
+
+static struct attribute_group rpmb_attr_group = {
+	.attrs = rpmb_attrs,
+	.bin_attrs = rpmb_bin_attributes,
+};
+
+static const struct attribute_group *rpmb_attr_groups[] = {
+	&rpmb_attr_group,
+	NULL
+};
+
+/**
+ * rpmb_dev_unregister - unregister RPMB partition from the RPMB subsystem
+ *
+ * @rdev: the rpmb device to unregister
+ */
+int rpmb_dev_unregister(struct rpmb_dev *rdev)
+{
+	if (!rdev)
+		return -EINVAL;
+
+	mutex_lock(&rdev->lock);
+	rpmb_cdev_del(rdev);
+	device_del(&rdev->dev);
+	mutex_unlock(&rdev->lock);
+
rpmb_dev_put(rdev); + + return 0; +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister); + +/** + * rpmb_dev_unregister_by_device - unregister RPMB partition + * from the RPMB subsystem + * + * @dev: the parent device of the rpmb device + * @target: RPMB target/region within the physical device + */ +int rpmb_dev_unregister_by_device(struct device *dev, u8 target) +{ + struct rpmb_dev *rdev; + + if (!dev) + return -EINVAL; + + rdev = rpmb_dev_find_by_device(dev, target); + if (!rdev) { + dev_warn(dev, "no disk found %s\n", dev_name(dev->parent)); + return -ENODEV; + } + + rpmb_dev_put(rdev); + + return rpmb_dev_unregister(rdev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_unregister_by_device); + +/** + * rpmb_dev_get_drvdata - driver data getter + * + * @rdev: rpmb device + * + * Return: driver private data + */ +void *rpmb_dev_get_drvdata(const struct rpmb_dev *rdev) +{ + return dev_get_drvdata(&rdev->dev); +} +EXPORT_SYMBOL_GPL(rpmb_dev_get_drvdata); + +/** + * rpmb_dev_set_drvdata - driver data setter + * + * @rdev: rpmb device + * @data: data to store + */ +void rpmb_dev_set_drvdata(struct rpmb_dev *rdev, void *data) +{ + dev_set_drvdata(&rdev->dev, data); +} +EXPORT_SYMBOL_GPL(rpmb_dev_set_drvdata); + +/** + * rpmb_dev_register - register RPMB partition with the RPMB subsystem + * + * @dev: storage device of the rpmb device + * @target: RPMB target/region within the physical device + * @ops: device specific operations + */ +struct rpmb_dev *rpmb_dev_register(struct device *dev, u8 target, + const struct rpmb_ops *ops) +{ + struct rpmb_dev *rdev; + int id; + int ret; + + if (!dev || !ops) + return ERR_PTR(-EINVAL); + + if (!ops->cmd_seq) + return ERR_PTR(-EINVAL); + + if (!ops->get_capacity) + return ERR_PTR(-EINVAL); + + if (ops->type == RPMB_TYPE_ANY || ops->type > RPMB_TYPE_MAX) + return ERR_PTR(-EINVAL); + + rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); + if (!rdev) + return ERR_PTR(-ENOMEM); + + id = ida_simple_get(&rpmb_ida, 0, 0, GFP_KERNEL); + if (id < 0) { + ret = id; + goto exit; + } + + mutex_init(&rdev->lock); + rdev->ops = ops; + rdev->id = id; + rdev->target = target; + + dev_set_name(&rdev->dev, "rpmb%d", id); + rdev->dev.class = &rpmb_class; + rdev->dev.parent = dev; + rdev->dev.groups = rpmb_attr_groups; + + rpmb_cdev_prepare(rdev); + + ret = device_register(&rdev->dev); + if (ret) + goto exit; + + rpmb_cdev_add(rdev); + + dev_dbg(&rdev->dev, "registered device\n"); + + return rdev; + +exit: + if (id >= 0) + ida_simple_remove(&rpmb_ida, id); + kfree(rdev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(rpmb_dev_register); + +static int __init rpmb_init(void) +{ + ida_init(&rpmb_ida); + class_register(&rpmb_class); + return rpmb_cdev_init(); +} + +static void __exit rpmb_exit(void) +{ + rpmb_cdev_exit(); + class_unregister(&rpmb_class); + ida_destroy(&rpmb_ida); +} + +subsys_initcall(rpmb_init); +module_exit(rpmb_exit); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("RPMB class"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/rpmb/rpmb-cdev.h b/drivers/char/rpmb/rpmb-cdev.h new file mode 100644 index 000000000000..e59ff0c05e9d --- /dev/null +++ b/drivers/char/rpmb/rpmb-cdev.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. 
All rights reserved + */ +#ifdef CONFIG_RPMB_INTF_DEV +int __init rpmb_cdev_init(void); +void __exit rpmb_cdev_exit(void); +void rpmb_cdev_prepare(struct rpmb_dev *rdev); +void rpmb_cdev_add(struct rpmb_dev *rdev); +void rpmb_cdev_del(struct rpmb_dev *rdev); +#else +static inline int __init rpmb_cdev_init(void) { return 0; } +static inline void __exit rpmb_cdev_exit(void) {} +static inline void rpmb_cdev_prepare(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_add(struct rpmb_dev *rdev) {} +static inline void rpmb_cdev_del(struct rpmb_dev *rdev) {} +#endif /* CONFIG_RPMB_INTF_DEV */ diff --git a/drivers/char/rpmb/rpmb_sim.c b/drivers/char/rpmb/rpmb_sim.c new file mode 100644 index 000000000000..728e25511377 --- /dev/null +++ b/drivers/char/rpmb/rpmb_sim.c @@ -0,0 +1,715 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#include + +static const char id[] = "RPMB:SIM"; +#define CAPACITY_UNIT SZ_128K +#define CAPACITY_MIN SZ_128K +#define CAPACITY_MAX SZ_16M +#define BLK_UNIT SZ_256 + +static unsigned int max_wr_blks = 2; +module_param(max_wr_blks, uint, 0644); +MODULE_PARM_DESC(max_wr_blks, "max blocks that can be written in a single command (default: 2)"); + +static unsigned int daunits = 1; +module_param(daunits, uint, 0644); +MODULE_PARM_DESC(daunits, "number of data area units of 128K (default: 1)"); + +struct blk { + u8 data[BLK_UNIT]; +}; + +/** + * struct rpmb_sim_dev + * + * @dev: back pointer device + * @rdev: rpmb device + * @auth_key: Authentication key register which is used to authenticate + * accesses when MAC is calculated; + * @auth_key_set: true if authentication key was set + * @write_counter: Counter value for the total amount of successful + * authenticated data write requests made by the host. + * The initial value of this register after production is 00000000h. + * The value will be incremented by one along with each successful + * programming access. The value cannot be reset. 
After the counter + * has reached the maximum value of FFFFFFFFh, + * it will not be incremented anymore (overflow prevention) + * @hash_desc: hmac(sha256) shash descriptor + * + * @res_frames: frame that holds the result of the last write operation + * @out_frames: next read operation result frames + * @out_frames_cnt: number of the output frames + * + * @capacity: size of the partition in bytes multiple of 128K + * @blkcnt: block count + * @da: data area in blocks + */ +struct rpmb_sim_dev { + struct device *dev; + struct rpmb_dev *rdev; + u8 auth_key[32]; + bool auth_key_set; + u32 write_counter; + struct shash_desc *hash_desc; + + struct rpmb_frame_jdec res_frames[1]; + struct rpmb_frame_jdec *out_frames; + unsigned int out_frames_cnt; + + size_t capacity; + size_t blkcnt; + struct blk *da; +}; + +static __be16 op_result(struct rpmb_sim_dev *rsdev, u16 result) +{ + if (!rsdev->auth_key_set) + return cpu_to_be16(RPMB_ERR_NO_KEY); + + if (rsdev->write_counter == 0xFFFFFFFF) + result |= RPMB_ERR_COUNTER_EXPIRED; + + return cpu_to_be16(result); +} + +static __be16 req_to_resp(u16 req) +{ + return cpu_to_be16(RPMB_REQ2RESP(req)); +} + +static int rpmb_sim_calc_hmac(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, + unsigned int blks, u8 *mac) +{ + struct shash_desc *desc = rsdev->hash_desc; + int i; + int ret; + + ret = crypto_shash_init(desc); + if (ret) + goto out; + + for (i = 0; i < blks; i++) { + ret = crypto_shash_update(desc, frames[i].data, + rpmb_jdec_hmac_data_len); + if (ret) + goto out; + } + ret = crypto_shash_final(desc, mac); +out: + if (ret) + dev_err(rsdev->dev, "digest error = %d", ret); + + return ret; +} + +static int rpmb_op_not_programmed(struct rpmb_sim_dev *rsdev, u16 req) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, RPMB_ERR_NO_KEY); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + dev_err(rsdev->dev, "not programmed\n"); + + return 0; +} + +static int rpmb_op_program_key(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct crypto_shash *tfm = rsdev->hash_desc->tfm; + u16 req; + int ret; + u16 err = RPMB_ERR_OK; + + req = be16_to_cpu(in_frame[0].req_resp); + + if (req != RPMB_PROGRAM_KEY) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + if (rsdev->auth_key_set) { + dev_err(rsdev->dev, "key already set\n"); + err = RPMB_ERR_WRITE; + goto out; + } + + ret = crypto_shash_setkey(tfm, in_frame[0].key_mac, 32); + if (ret) { + dev_err(rsdev->dev, "set key failed = %d\n", ret); + err = RPMB_ERR_GENERAL; + goto out; + } + + dev_dbg(rsdev->dev, "digest size %u\n", crypto_shash_digestsize(tfm)); + + memcpy(rsdev->auth_key, in_frame[0].key_mac, 32); + rsdev->auth_key_set = true; +out: + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + + return 0; +} + +static int rpmb_op_get_wr_counter(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *frame; + int ret = 0; + u16 req; + u16 err; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_GET_WRITE_COUNTER) + return -EINVAL; + + if (cnt != 1) { + dev_err(rsdev->dev, "wrong number of frames %d != 1\n", cnt); + return -EINVAL; + } + + frame = kcalloc(1, sizeof(*frame), GFP_KERNEL); + if (!frame) { + err 
= RPMB_ERR_READ; + ret = -ENOMEM; + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = cnt; + goto out; + } + + rsdev->out_frames = frame; + rsdev->out_frames_cnt = cnt; + + frame->req_resp = req_to_resp(req); + frame->write_counter = cpu_to_be32(rsdev->write_counter); + memcpy(frame->nonce, in_frame[0].nonce, 16); + + err = RPMB_ERR_OK; + if (rpmb_sim_calc_hmac(rsdev, frame, cnt, frame->key_mac)) + err = RPMB_ERR_READ; + +out: + rsdev->out_frames[0].req_resp = req_to_resp(req); + rsdev->out_frames[0].result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_op_write_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret = 0; + + req = be16_to_cpu(in_frame[0].req_resp); + if (req != RPMB_WRITE_DATA) + return -EINVAL; + + if (rsdev->write_counter == 0xFFFFFFFF) { + err = RPMB_ERR_WRITE; + goto out; + } + + blks = be16_to_cpu(in_frame[0].block_count); + if (blks == 0 || blks > cnt) { + dev_err(rsdev->dev, "wrong number of blocks: blks=%u cnt=%u\n", + blks, cnt); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + if (blks > max_wr_blks) { + err = RPMB_ERR_WRITE; + goto out; + } + + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (rpmb_sim_calc_hmac(rsdev, in_frame, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + /* mac is in the last frame */ + if (memcmp(mac, in_frame[blks - 1].key_mac, sizeof(mac)) != 0) { + err = RPMB_ERR_AUTH; + goto out; + } + + if (be32_to_cpu(in_frame[0].write_counter) != rsdev->write_counter) { + err = RPMB_ERR_COUNTER; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_WRITE; + goto out; + } + + dev_dbg(rsdev->dev, "Writing = %u blocks at addr = 0x%X\n", blks, addr); + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + memcpy(rsdev->da[addr + i].data, in_frame[i].data, BLK_UNIT); + + rsdev->write_counter++; + + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->write_counter = cpu_to_be32(rsdev->write_counter); + res_frame->addr = cpu_to_be16(addr); + if (rpmb_sim_calc_hmac(rsdev, res_frame, 1, res_frame->key_mac)) + err = RPMB_ERR_READ; + +out: + if (err != RPMB_ERR_OK) { + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + } + res_frame->result = op_result(rsdev, err); + + return ret; +} + +static int rpmb_do_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + struct rpmb_frame_jdec *out_frames = NULL; + u8 mac[32]; + u16 req, err, addr, blks; + unsigned int i; + int ret; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + /* eMMC intentionally set 0 here */ + blks = be16_to_cpu(in_frame->block_count); + blks = blks ?: cnt; + if (blks > cnt) { + dev_err(rsdev->dev, "wrong number of frames cnt %u\n", blks); + ret = -EINVAL; + err = RPMB_ERR_GENERAL; + goto out; + } + + out_frames = kcalloc(blks, sizeof(*out_frames), GFP_KERNEL); + if (!out_frames) { + ret = -ENOMEM; + err = RPMB_ERR_READ; + goto out; + } + + ret = 0; + addr = be16_to_cpu(in_frame[0].addr); + if (addr >= rsdev->blkcnt) { + err = RPMB_ERR_ADDRESS; + goto out; + } + + if (addr + blks > rsdev->blkcnt) { + err = RPMB_ERR_READ; + goto out; + } + + dev_dbg(rsdev->dev, "reading = %u blocks at addr = 0x%X\n", 
blks, addr); + for (i = 0; i < blks; i++) { + memcpy(out_frames[i].data, rsdev->da[addr + i].data, BLK_UNIT); + memcpy(out_frames[i].nonce, in_frame[0].nonce, 16); + out_frames[i].req_resp = req_to_resp(req); + out_frames[i].addr = in_frame[0].addr; + out_frames[i].block_count = cpu_to_be16(blks); + } + + if (rpmb_sim_calc_hmac(rsdev, out_frames, blks, mac)) { + err = RPMB_ERR_AUTH; + goto out; + } + + memcpy(out_frames[blks - 1].key_mac, mac, sizeof(mac)); + + err = RPMB_ERR_OK; + for (i = 0; i < blks; i++) + out_frames[i].result = op_result(rsdev, err); + + rsdev->out_frames = out_frames; + rsdev->out_frames_cnt = cnt; + + return 0; + +out: + memset(res_frame, 0, sizeof(*res_frame)); + res_frame->req_resp = req_to_resp(req); + res_frame->result = op_result(rsdev, err); + kfree(out_frames); + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return ret; +} + +static int rpmb_op_read_data(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *in_frame, u32 cnt) +{ + struct rpmb_frame_jdec *res_frame = rsdev->res_frames; + u16 req; + + req = be16_to_cpu(in_frame->req_resp); + if (req != RPMB_READ_DATA) + return -EINVAL; + + memcpy(res_frame, in_frame, sizeof(*res_frame)); + + rsdev->out_frames = res_frame; + rsdev->out_frames_cnt = 1; + + return 0; +} + +static int rpmb_op_result_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req = be16_to_cpu(frames[0].req_resp); + u16 blks = be16_to_cpu(frames[0].block_count); + + if (req != RPMB_RESULT_READ) + return -EINVAL; + + if (blks != 0) { + dev_err(rsdev->dev, "wrong number of frames %u != 0\n", blks); + return -EINVAL; + } + + rsdev->out_frames = rsdev->res_frames; + rsdev->out_frames_cnt = 1; + return 0; +} + +static int rpmb_sim_write(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + u16 req; + int ret; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + req = be16_to_cpu(frames[0].req_resp); + if (!rsdev->auth_key_set && req != RPMB_PROGRAM_KEY) + return rpmb_op_not_programmed(rsdev, req); + + switch (req) { + case RPMB_PROGRAM_KEY: + dev_dbg(rsdev->dev, "rpmb: program key\n"); + ret = rpmb_op_program_key(rsdev, frames, cnt); + break; + case RPMB_WRITE_DATA: + dev_dbg(rsdev->dev, "rpmb: write data\n"); + ret = rpmb_op_write_data(rsdev, frames, cnt); + break; + case RPMB_GET_WRITE_COUNTER: + dev_dbg(rsdev->dev, "rpmb: get write counter\n"); + ret = rpmb_op_get_wr_counter(rsdev, frames, cnt); + break; + case RPMB_READ_DATA: + dev_dbg(rsdev->dev, "rpmb: read data\n"); + ret = rpmb_op_read_data(rsdev, frames, cnt); + break; + case RPMB_RESULT_READ: + dev_dbg(rsdev->dev, "rpmb: result read\n"); + ret = rpmb_op_result_read(rsdev, frames, cnt); + break; + default: + dev_err(rsdev->dev, "unsupported command %u\n", req); + ret = -EINVAL; + break; + } + + dev_dbg(rsdev->dev, "rpmb: ret=%d\n", ret); + + return ret; +} + +static int rpmb_sim_read(struct rpmb_sim_dev *rsdev, + struct rpmb_frame_jdec *frames, u32 cnt) +{ + int i; + + if (!frames) + return -EINVAL; + + if (cnt == 0) + cnt = 1; + + if (!rsdev->out_frames || rsdev->out_frames_cnt == 0) { + dev_err(rsdev->dev, "out_frames are not set\n"); + return -EINVAL; + } + + if (rsdev->out_frames->req_resp == cpu_to_be16(RPMB_READ_DATA)) + rpmb_do_read_data(rsdev, rsdev->out_frames, cnt); + + for (i = 0; i < min_t(u32, rsdev->out_frames_cnt, cnt); i++) + memcpy(&frames[i], &rsdev->out_frames[i], sizeof(frames[i])); + + if (rsdev->out_frames != rsdev->res_frames) + kfree(rsdev->out_frames); + + rsdev->out_frames = 
NULL; + rsdev->out_frames_cnt = 0; + dev_dbg(rsdev->dev, "rpmb: cnt=%d\n", cnt); + + return 0; +} + +static int rpmb_sim_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct rpmb_sim_dev *rsdev; + int i; + int ret; + struct rpmb_cmd *cmd; + + if (!dev) + return -EINVAL; + + rsdev = dev_get_drvdata(dev); + + if (!rsdev) + return -EINVAL; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + if (cmd->flags & RPMB_F_WRITE) + ret = rpmb_sim_write(rsdev, cmd->frames, cmd->nframes); + else + ret = rpmb_sim_read(rsdev, cmd->frames, cmd->nframes); + } + return ret; +} + +static int rpmb_sim_get_capacity(struct device *dev, u8 target) +{ + return daunits; +} + +static struct rpmb_ops rpmb_sim_ops = { + .cmd_seq = rpmb_sim_cmd_seq, + .get_capacity = rpmb_sim_get_capacity, + .type = RPMB_TYPE_EMMC | RPMB_TYPE_SIM, +}; + +static int rpmb_sim_hmac_256_alloc(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + rsdev->hash_desc = desc; + + dev_dbg(rsdev->dev, "hmac(sha256) registered\n"); + return 0; +} + +static void rpmb_sim_hmac_256_free(struct rpmb_sim_dev *rsdev) +{ + struct shash_desc *desc = rsdev->hash_desc; + + if (desc->tfm) + crypto_free_shash(desc->tfm); + kfree(desc); + + rsdev->hash_desc = NULL; +} + +static int rpmb_sim_probe(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + int ret; + + rsdev = kzalloc(sizeof(*rsdev), GFP_KERNEL); + if (!rsdev) + return -ENOMEM; + + rsdev->dev = dev; + + ret = rpmb_sim_hmac_256_alloc(rsdev); + if (ret) + goto err; + + rsdev->capacity = CAPACITY_UNIT * daunits; + rsdev->blkcnt = rsdev->capacity / BLK_UNIT; + rsdev->da = kzalloc(rsdev->capacity, GFP_KERNEL); + if (!rsdev->da) { + ret = -ENOMEM; + goto err; + } + + rpmb_sim_ops.dev_id_len = strlen(id); + rpmb_sim_ops.dev_id = id; + rpmb_sim_ops.wr_cnt_max = max_wr_blks; + rpmb_sim_ops.rd_cnt_max = max_wr_blks; + rpmb_sim_ops.block_size = 1; + + rsdev->rdev = rpmb_dev_register(rsdev->dev, 0, &rpmb_sim_ops); + if (IS_ERR(rsdev->rdev)) { + ret = PTR_ERR(rsdev->rdev); + goto err; + } + + dev_info(dev, "registered RPMB: capacity %zu bytes, %zu blocks\n", + rsdev->capacity, rsdev->blkcnt); + + dev_set_drvdata(dev, rsdev); + + return 0; +err: + if (rsdev->hash_desc) + rpmb_sim_hmac_256_free(rsdev); + kfree(rsdev->da); + kfree(rsdev); + return ret; +} + +static int rpmb_sim_remove(struct device *dev) +{ + struct rpmb_sim_dev *rsdev; + + rsdev = dev_get_drvdata(dev); + + rpmb_dev_unregister(rsdev->rdev); + + dev_set_drvdata(dev, NULL); + + rpmb_sim_hmac_256_free(rsdev); + + kfree(rsdev->da); + kfree(rsdev); + return 0; +} + +static void rpmb_sim_shutdown(struct device *dev) +{ + rpmb_sim_remove(dev); +} + +static int rpmb_sim_match(struct device *dev, struct device_driver *drv) +{ + return 1; +} + +static struct bus_type rpmb_sim_bus = { + .name = "rpmb_sim", + .match = rpmb_sim_match, +}; + +static struct device_driver rpmb_sim_drv = { + .name = "rpmb_sim", + .probe = rpmb_sim_probe, + .remove = rpmb_sim_remove, + .shutdown = rpmb_sim_shutdown, +}; + +static void rpmb_sim_dev_release(struct device *dev) +{ +} + +static struct device rpmb_sim_dev; + +static int __init rpmb_sim_init(void) +{ + int ret; + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + ret 
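+	/*
+	 * Registration order matters here: the bus must exist before the
+	 * device and the driver that attach to it, and the error labels
+	 * below unwind in the reverse order.
+	 */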
= bus_register(&rpmb_sim_bus); + if (ret) + return ret; + + dev->bus = &rpmb_sim_bus; + dev->release = rpmb_sim_dev_release; + dev_set_name(dev, "%s", "rpmb_sim"); + ret = device_register(dev); + if (ret) { + pr_err("device register failed %d\n", ret); + goto err_device; + } + + drv->bus = &rpmb_sim_bus; + ret = driver_register(drv); + if (ret) { + pr_err("driver register failed %d\n", ret); + goto err_driver; + } + + return 0; + +err_driver: + device_unregister(dev); +err_device: + bus_unregister(&rpmb_sim_bus); + return ret; +} + +static void __exit rpmb_sim_exit(void) +{ + struct device *dev = &rpmb_sim_dev; + struct device_driver *drv = &rpmb_sim_drv; + + device_unregister(dev); + driver_unregister(drv); + bus_unregister(&rpmb_sim_bus); +} + +module_init(rpmb_sim_init); +module_exit(rpmb_sim_exit); + +MODULE_AUTHOR("Tomas Winkler +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const char id[] = "RPMB:VIRTIO"; +#ifndef VIRTIO_ID_RPMB +#define VIRTIO_ID_RPMB 0xFFFF +#endif + +#define RPMB_SEQ_CMD_MAX 3 /* support up to 3 cmds */ + +struct virtio_rpmb_info { + struct virtqueue *vq; + struct mutex lock; /* info lock */ + wait_queue_head_t have_data; + struct rpmb_dev *rdev; +}; + +struct virtio_rpmb_ioc { + unsigned int ioc_cmd; + int result; + u8 target; + u8 reserved[3]; +}; + +static void virtio_rpmb_recv_done(struct virtqueue *vq) +{ + struct virtio_rpmb_info *vi; + struct virtio_device *vdev = vq->vdev; + + vi = vq->vdev->priv; + if (!vi) { + dev_err(&vdev->dev, "Error: no found vi data.\n"); + return; + } + + wake_up(&vi->have_data); +} + +static int rpmb_virtio_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct virtio_rpmb_info *vi = vdev->priv; + unsigned int i; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_seq_cmd *seq_cmd; + size_t seq_cmd_sz; + struct scatterlist vio_ioc, vio_seq, frame[3]; + struct scatterlist *sgs[5]; + unsigned int num_out = 0, num_in = 0; + size_t sz; + int ret; + unsigned int len; + + if (ncmds > RPMB_SEQ_CMD_MAX) + return -EINVAL; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + seq_cmd_sz = sizeof(*seq_cmd) + sizeof(struct rpmb_ioc_cmd) * ncmds; + seq_cmd = kzalloc(seq_cmd_sz, GFP_KERNEL); + if (!vio_cmd || !seq_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_SEQ_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + seq_cmd->num_of_cmds = ncmds; + for (i = 0; i < ncmds; i++) { + seq_cmd->cmds[i].flags = cmds[i].flags; + seq_cmd->cmds[i].nframes = cmds[i].nframes; + seq_cmd->cmds[i].frames_ptr = i; + } + sg_init_one(&vio_seq, seq_cmd, seq_cmd_sz); + sgs[num_out + num_in++] = &vio_seq; + + for (i = 0; i < ncmds; i++) { + sz = sizeof(struct rpmb_frame_jdec) * (cmds[i].nframes ?: 1); + sg_init_one(&frame[i], cmds[i].frames, sz); + sgs[num_out + num_in++] = &frame[i]; + } + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(seq_cmd); + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_cmd_cap(struct device *dev, u8 target) +{ + struct virtio_device *vdev = dev_to_virtio(dev); + struct 
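+	/*
+	 * The capability query follows the same shape as cmd_seq above: one
+	 * scatterlist carries the ioctl header, a second one is filled in by
+	 * the backend with the rpmb_ioc_cap_cmd payload, and the caller
+	 * sleeps on have_data until the virtqueue completes.
+	 */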
virtio_rpmb_info *vi = vdev->priv; + struct virtio_rpmb_ioc *vio_cmd; + struct rpmb_ioc_cap_cmd *cap_cmd; + struct scatterlist vio_ioc, cap_ioc; + struct scatterlist *sgs[2]; + unsigned int num_out = 0, num_in = 0; + unsigned int len; + int ret; + + mutex_lock(&vi->lock); + + vio_cmd = kzalloc(sizeof(*vio_cmd), GFP_KERNEL); + cap_cmd = kzalloc(sizeof(*cap_cmd), GFP_KERNEL); + if (!vio_cmd || !cap_cmd) { + ret = -ENOMEM; + goto out; + } + + vio_cmd->ioc_cmd = RPMB_IOC_CAP_CMD; + vio_cmd->result = 0; + vio_cmd->target = target; + sg_init_one(&vio_ioc, vio_cmd, sizeof(*vio_cmd)); + sgs[num_out + num_in++] = &vio_ioc; + + sg_init_one(&cap_ioc, cap_cmd, sizeof(*cap_cmd)); + sgs[num_out + num_in++] = &cap_ioc; + + virtqueue_add_sgs(vi->vq, sgs, num_out, num_in, vi, GFP_KERNEL); + virtqueue_kick(vi->vq); + + wait_event(vi->have_data, virtqueue_get_buf(vi->vq, &len)); + + ret = 0; + + if (vio_cmd->result != 0) { + dev_err(dev, "Error: command error = %d.\n", vio_cmd->result); + ret = -EIO; + } + +out: + kfree(vio_cmd); + kfree(cap_cmd); + + mutex_unlock(&vi->lock); + return ret; +} + +static int rpmb_virtio_get_capacity(struct device *dev, u8 target) +{ + return 0; +} + +static struct rpmb_ops rpmb_virtio_ops = { + .cmd_seq = rpmb_virtio_cmd_seq, + .get_capacity = rpmb_virtio_get_capacity, + .type = RPMB_TYPE_EMMC, +}; + +static int rpmb_virtio_dev_init(struct virtio_rpmb_info *vi) +{ + int ret = 0; + struct device *dev = &vi->vq->vdev->dev; + + rpmb_virtio_ops.dev_id_len = strlen(id); + rpmb_virtio_ops.dev_id = id; + rpmb_virtio_ops.wr_cnt_max = 1; + rpmb_virtio_ops.rd_cnt_max = 1; + rpmb_virtio_ops.block_size = 1; + + vi->rdev = rpmb_dev_register(dev, 0, &rpmb_virtio_ops); + if (IS_ERR(vi->rdev)) { + ret = PTR_ERR(vi->rdev); + goto err; + } + + dev_set_drvdata(dev, vi); +err: + return ret; +} + +static int virtio_rpmb_init(struct virtio_device *vdev) +{ + int ret; + struct virtio_rpmb_info *vi; + + vi = kzalloc(sizeof(*vi), GFP_KERNEL); + if (!vi) + return -ENOMEM; + + init_waitqueue_head(&vi->have_data); + mutex_init(&vi->lock); + vdev->priv = vi; + + /* We expect a single virtqueue. */ + vi->vq = virtio_find_single_vq(vdev, virtio_rpmb_recv_done, "request"); + if (IS_ERR(vi->vq)) { + dev_err(&vdev->dev, "get single vq failed!\n"); + ret = PTR_ERR(vi->vq); + goto err; + } + + /* create vrpmb device. 
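Registering with the RPMB core publishes rpmb_virtio_ops and binds the resulting rpmb_dev to this virtio device through its drvdata. 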
*/ + ret = rpmb_virtio_dev_init(vi); + if (ret) { + dev_err(&vdev->dev, "create vrpmb device failed.\n"); + goto err; + } + + dev_info(&vdev->dev, "init done!\n"); + + return 0; + +err: + kfree(vi); + return ret; +} + +static void virtio_rpmb_remove(struct virtio_device *vdev) +{ + struct virtio_rpmb_info *vi; + + vi = vdev->priv; + if (!vi) + return; + + if (wq_has_sleeper(&vi->have_data)) + wake_up(&vi->have_data); + + rpmb_dev_unregister(vi->rdev); + + if (vdev->config->reset) + vdev->config->reset(vdev); + + if (vdev->config->del_vqs) + vdev->config->del_vqs(vdev); + + kfree(vi); +} + +static int virtio_rpmb_probe(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_rpmb_freeze(struct virtio_device *vdev) +{ + virtio_rpmb_remove(vdev); + return 0; +} + +static int virtio_rpmb_restore(struct virtio_device *vdev) +{ + return virtio_rpmb_init(vdev); +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_RPMB, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct virtio_driver virtio_rpmb_driver = { + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_rpmb_probe, + .remove = virtio_rpmb_remove, +#ifdef CONFIG_PM_SLEEP + .freeze = virtio_rpmb_freeze, + .restore = virtio_rpmb_restore, +#endif +}; + +module_virtio_driver(virtio_rpmb_driver); +MODULE_DEVICE_TABLE(virtio, id_table); + +MODULE_DESCRIPTION("Virtio rpmb frontend driver"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/char/vhm/Makefile b/drivers/char/vhm/Makefile new file mode 100644 index 000000000000..5ee68c5f7278 --- /dev/null +++ b/drivers/char/vhm/Makefile @@ -0,0 +1,2 @@ +subdir-ccflags-$(CONFIG_ACRN_VHM) := -Werror +obj-y += vhm_dev.o diff --git a/drivers/char/vhm/vhm_dev.c b/drivers/char/vhm/vhm_dev.c new file mode 100644 index 000000000000..c6a97e8305fd --- /dev/null +++ b/drivers/char/vhm/vhm_dev.c @@ -0,0 +1,873 @@ +/* + * virtio and hyperviosr service module (VHM): main framework + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (c) 2017 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright (C) 2017 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Liang Ding + * Jason Zeng + * Xiao Zheng + * Jason Chen CJ + * Jack Ren + * Mingqiang Chi + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DEVICE_NAME "acrn_vhm" +#define CLASS_NAME "vhm" + +#define VHM_API_VERSION_MAJOR 1 +#define VHM_API_VERSION_MINOR 0 + +static int major; +static struct class *vhm_class; +static struct device *vhm_device; +static struct tasklet_struct vhm_io_req_tasklet; + +struct table_iomems { + /* list node for this table_iomems */ + struct list_head list; + /* device's physical BDF */ + unsigned short phys_bdf; + /* virtual base address of MSI-X table in memory space after ioremap */ + unsigned long mmap_addr; +}; +static LIST_HEAD(table_iomems_list); +static DEFINE_MUTEX(table_iomems_lock); + +static int vhm_dev_open(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm; + int i; + + vm = kzalloc(sizeof(struct vhm_vm), GFP_KERNEL); + pr_info("vhm_dev_open: opening device node\n"); + + if (!vm) + return -ENOMEM; + vm->vmid = ACRN_INVALID_VMID; + vm->dev = vhm_device; + + for (i = 0; i < HUGEPAGE_HLIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&vm->hugepage_hlist[i]); + mutex_init(&vm->hugepage_lock); + + INIT_LIST_HEAD(&vm->ioreq_client_list); + spin_lock_init(&vm->ioreq_client_lock); + + vm_mutex_lock(&vhm_vm_list_lock); + vm->refcnt = 1; + vm_list_add(&vm->list); + vm_mutex_unlock(&vhm_vm_list_lock); + filep->private_data = vm; + return 0; +} + +static ssize_t vhm_dev_read(struct file *filep, char *buffer, size_t len, + loff_t *offset) +{ + /* Does nothing */ + pr_info("vhm_dev_read: reading device node\n"); + return 0; +} + +static ssize_t vhm_dev_write(struct file *filep, const char *buffer, + size_t len, loff_t *offset) +{ + /* Does nothing */ + pr_info("vhm_dev_write: writing device node\n"); + return 0; +} + +static long vhm_dev_ioctl(struct file *filep, + unsigned int ioctl_num, unsigned long ioctl_param) +{ + long ret = 0; + struct vhm_vm *vm; + struct ic_ptdev_irq ic_pt_irq; + struct hc_ptdev_irq hc_pt_irq; + + pr_debug("[%s] ioctl_num=0x%x\n", __func__, ioctl_num); + + if (ioctl_num == IC_GET_API_VERSION) { + struct api_version api_version; + + api_version.major_version = VHM_API_VERSION_MAJOR; + api_version.minor_version = VHM_API_VERSION_MINOR; + + if (copy_to_user((void *)ioctl_param, &api_version, + sizeof(struct api_version))) + return -EFAULT; + + return 0; + } else if (ioctl_num == IC_PM_SET_SSTATE_DATA) { + struct acpi_sstate_data host_sstate_data; + + if (copy_from_user(&host_sstate_data, 
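+		/*
+		 * Every command below follows the same pattern: copy the
+		 * argument from user space into a kernel buffer, then hand
+		 * the buffer's physical address (via virt_to_phys()) to the
+		 * matching hypercall.
+		 */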
(void *)ioctl_param, sizeof(host_sstate_data))) + return -EFAULT; + + ret = hcall_set_sstate_data(virt_to_phys(&host_sstate_data)); + if (ret < 0) { + pr_err("vhm: failed to set host Sstate data!"); + return -EFAULT; + } + return 0; + } + + memset(&hc_pt_irq, 0, sizeof(hc_pt_irq)); + memset(&ic_pt_irq, 0, sizeof(ic_pt_irq)); + vm = (struct vhm_vm *)filep->private_data; + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + if ((vm->vmid == ACRN_INVALID_VMID) && (ioctl_num != IC_CREATE_VM)) { + pr_err("vhm: invalid VM ID !\n"); + return -EFAULT; + } + + switch (ioctl_num) { + case IC_CREATE_VM: { + struct acrn_create_vm created_vm; + + if (copy_from_user(&created_vm, (void *)ioctl_param, + sizeof(struct acrn_create_vm))) + return -EFAULT; + + ret = hcall_create_vm(virt_to_phys(&created_vm)); + if ((ret < 0) || + (created_vm.vmid == ACRN_INVALID_VMID)) { + pr_err("vhm: failed to create VM from Hypervisor !\n"); + return -EFAULT; + } + + if (copy_to_user((void *)ioctl_param, &created_vm, + sizeof(struct acrn_create_vm))) { + ret = -EFAULT; + goto create_vm_fail; + } + vm->vmid = created_vm.vmid; + + if (created_vm.vm_flag & SECURE_WORLD_ENABLED) { + ret = init_trusty(vm); + if (ret < 0) { + pr_err("vhm: failed to init trusty for VM!\n"); + goto create_vm_fail; + } + } + + if (created_vm.req_buf) { + ret = acrn_ioreq_init(vm, created_vm.req_buf); + if (ret < 0) + goto ioreq_buf_fail; + } + + acrn_ioeventfd_init(vm->vmid); + acrn_irqfd_init(vm->vmid); + + pr_info("vhm: VM %d created\n", created_vm.vmid); + break; +ioreq_buf_fail: + if (created_vm.vm_flag & SECURE_WORLD_ENABLED) + deinit_trusty(vm); +create_vm_fail: + hcall_destroy_vm(created_vm.vmid); + vm->vmid = ACRN_INVALID_VMID; + break; + + } + + case IC_START_VM: { + ret = hcall_start_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to start VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_PAUSE_VM: { + ret = hcall_pause_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to pause VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_RESET_VM: { + ret = hcall_reset_vm(vm->vmid); + if (ret < 0) { + pr_err("vhm: failed to restart VM %ld!\n", vm->vmid); + return -EFAULT; + } + break; + } + + case IC_DESTROY_VM: { + acrn_ioeventfd_deinit(vm->vmid); + acrn_irqfd_deinit(vm->vmid); + ret = hcall_destroy_vm(vm->vmid); + if (ret < 0) { + pr_err("failed to destroy VM %ld\n", vm->vmid); + return -EFAULT; + } + if (vm->trusty_host_gpa) + deinit_trusty(vm); + vm->vmid = ACRN_INVALID_VMID; + break; + } + + case IC_CREATE_VCPU: { + struct acrn_create_vcpu cv; + + if (copy_from_user(&cv, (void *)ioctl_param, + sizeof(struct acrn_create_vcpu))) + return -EFAULT; + + ret = acrn_hypercall2(HC_CREATE_VCPU, vm->vmid, + virt_to_phys(&cv)); + if (ret < 0) { + pr_err("vhm: failed to create vcpu %d!\n", cv.vcpu_id); + return -EFAULT; + } + atomic_inc(&vm->vcpu_num); + + return ret; + } + + case IC_SET_VCPU_REGS: { + struct acrn_set_vcpu_regs asvr; + + if (copy_from_user(&asvr, (void *)ioctl_param, sizeof(asvr))) + return -EFAULT; + + ret = acrn_hypercall2(HC_SET_VCPU_REGS, vm->vmid, + virt_to_phys(&asvr)); + if (ret < 0) { + pr_err("vhm: failed to set bsp state of vm %ld!\n", + vm->vmid); + return -EFAULT; + } + + return ret; + } + + case IC_SET_MEMSEG: { + struct vm_memmap memmap; + + if (copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = map_guest_memseg(vm, &memmap); + break; + } + + case IC_UNSET_MEMSEG: { + struct vm_memmap memmap; + + if 
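+		/* IC_UNSET_MEMSEG takes the same vm_memmap payload that
+		 * IC_SET_MEMSEG used when the region was mapped */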
(copy_from_user(&memmap, (void *)ioctl_param, + sizeof(struct vm_memmap))) + return -EFAULT; + + ret = unmap_guest_memseg(vm, &memmap); + break; + } + + case IC_SET_IOREQ_BUFFER: { + /* init ioreq buffer */ + ret = acrn_ioreq_init(vm, (unsigned long)ioctl_param); + if (ret < 0 && ret != -EEXIST) + return ret; + ret = 0; + break; + } + + case IC_CREATE_IOREQ_CLIENT: { + int client_id; + + client_id = acrn_ioreq_create_fallback_client(vm->vmid, "acrndm"); + if (client_id < 0) + return -EFAULT; + return client_id; + } + + case IC_DESTROY_IOREQ_CLIENT: { + int client = ioctl_param; + + acrn_ioreq_destroy_client(client); + break; + } + + case IC_ATTACH_IOREQ_CLIENT: { + int client = ioctl_param; + + return acrn_ioreq_attach_client(client, 0); + } + + case IC_NOTIFY_REQUEST_FINISH: { + struct ioreq_notify notify; + + if (copy_from_user(¬ify, (void *)ioctl_param, + sizeof(notify))) + return -EFAULT; + + ret = acrn_ioreq_complete_request(notify.client_id, notify.vcpu); + if (ret < 0) + return -EFAULT; + break; + } + + case IC_ASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_assert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSERT_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_deassert_irqline(vm->vmid, virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to deassert irq!\n"); + return -EFAULT; + } + break; + } + case IC_PULSE_IRQLINE: { + struct acrn_irqline irq; + + if (copy_from_user(&irq, (void *)ioctl_param, sizeof(irq))) + return -EFAULT; + + ret = hcall_pulse_irqline(vm->vmid, + virt_to_phys(&irq)); + if (ret < 0) { + pr_err("vhm: failed to assert irq!\n"); + return -EFAULT; + } + break; + } + + case IC_SET_IRQLINE: { + ret = hcall_set_irqline(vm->vmid, ioctl_param); + if (ret < 0) { + pr_err("vhm: failed to set irqline!\n"); + return -EFAULT; + } + break; + } + + case IC_INJECT_MSI: { + struct acrn_msi_entry msi; + + if (copy_from_user(&msi, (void *)ioctl_param, sizeof(msi))) + return -EFAULT; + + ret = hcall_inject_msi(vm->vmid, virt_to_phys(&msi)); + if (ret < 0) { + pr_err("vhm: failed to inject!\n"); + return -EFAULT; + } + break; + } + + case IC_ASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_assign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to assign ptdev!\n"); + return -EFAULT; + } + break; + } + case IC_DEASSIGN_PTDEV: { + uint16_t bdf; + + if (copy_from_user(&bdf, + (void *)ioctl_param, sizeof(uint16_t))) + return -EFAULT; + + ret = hcall_deassign_ptdev(vm->vmid, virt_to_phys(&bdf)); + if (ret < 0) { + pr_err("vhm: failed to deassign ptdev!\n"); + return -EFAULT; + } + break; + } + + case IC_SET_PTDEV_INTR_INFO: { + struct table_iomems *new; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_set_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to set intr info for ptdev!\n"); + return -EFAULT; + } + + if ((ic_pt_irq.type == IRQ_MSIX) && + ic_pt_irq.msix.table_paddr) { + new = kmalloc(sizeof(struct table_iomems), GFP_KERNEL); + if (new == NULL) + return -EFAULT; + new->phys_bdf = ic_pt_irq.phys_bdf; + new->mmap_addr = 
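+			/*
+			 * Map the pass-through device's MSI-X table uncached
+			 * so IC_VM_PCI_MSIX_REMAP can rewrite individual
+			 * entries later; the mapping is kept on
+			 * table_iomems_list, keyed by the physical BDF.
+			 */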
(unsigned long) + ioremap_nocache(ic_pt_irq.msix.table_paddr, + ic_pt_irq.msix.table_size); + + mutex_lock(&table_iomems_lock); + list_add(&new->list, &table_iomems_list); + mutex_unlock(&table_iomems_lock); + } + + break; + } + case IC_RESET_PTDEV_INTR_INFO: { + struct table_iomems *ptr; + int dev_found = 0; + + if (copy_from_user(&ic_pt_irq, + (void *)ioctl_param, sizeof(ic_pt_irq))) + return -EFAULT; + + memcpy(&hc_pt_irq, &ic_pt_irq, sizeof(hc_pt_irq)); + + ret = hcall_reset_ptdev_intr_info(vm->vmid, + virt_to_phys(&hc_pt_irq)); + if (ret < 0) { + pr_err("vhm: failed to reset intr info for ptdev!\n"); + return -EFAULT; + } + + if (ic_pt_irq.type == IRQ_MSIX) { + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == ic_pt_irq.phys_bdf) { + dev_found = 1; + break; + } + } + if (dev_found) { + iounmap((void __iomem *)ptr->mmap_addr); + list_del(&ptr->list); + } + mutex_unlock(&table_iomems_lock); + } + + break; + } + + case IC_VM_PCI_MSIX_REMAP: { + struct acrn_vm_pci_msix_remap msix_remap; + + if (copy_from_user(&msix_remap, + (void *)ioctl_param, sizeof(msix_remap))) + return -EFAULT; + + ret = hcall_remap_pci_msix(vm->vmid, virt_to_phys(&msix_remap)); + + if (copy_to_user((void *)ioctl_param, + &msix_remap, sizeof(msix_remap))) + return -EFAULT; + + if (msix_remap.msix) { + void __iomem *msix_entry; + struct table_iomems *ptr; + int dev_found = 0; + + mutex_lock(&table_iomems_lock); + list_for_each_entry(ptr, &table_iomems_list, list) { + if (ptr->phys_bdf == msix_remap.phys_bdf) { + dev_found = 1; + break; + } + } + mutex_unlock(&table_iomems_lock); + + if (!dev_found || !ptr->mmap_addr) + return -EFAULT; + + msix_entry = (void __iomem *) (ptr->mmap_addr + + msix_remap.msix_entry_index * + PCI_MSIX_ENTRY_SIZE); + + /* mask the entry when setup */ + writel(PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + + /* setup the msi entry */ + writel((uint32_t)msix_remap.msi_addr, + msix_entry + PCI_MSIX_ENTRY_LOWER_ADDR); + writel((uint32_t)(msix_remap.msi_addr >> 32), + msix_entry + PCI_MSIX_ENTRY_UPPER_ADDR); + writel(msix_remap.msi_data, + msix_entry + PCI_MSIX_ENTRY_DATA); + + /* unmask the entry */ + writel(msix_remap.vector_ctl & + PCI_MSIX_ENTRY_CTRL_MASKBIT, + msix_entry + PCI_MSIX_ENTRY_VECTOR_CTRL); + } + break; + } + + case IC_PM_GET_CPU_STATE: { + uint64_t cmd; + + if (copy_from_user(&cmd, + (void *)ioctl_param, sizeof(cmd))) + return -EFAULT; + + switch (cmd & PMCMD_TYPE_MASK) { + case PMCMD_GET_PX_CNT: + case PMCMD_GET_CX_CNT: { + uint64_t pm_info; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&pm_info)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &pm_info, sizeof(pm_info))) + ret = -EFAULT; + + break; + } + case PMCMD_GET_PX_DATA: { + struct cpu_px_data px_data; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&px_data)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &px_data, sizeof(px_data))) + ret = -EFAULT; + break; + } + case PMCMD_GET_CX_DATA: { + struct cpu_cx_data cx_data; + + ret = hcall_get_cpu_state(cmd, virt_to_phys(&cx_data)); + if (ret < 0) + return -EFAULT; + + if (copy_to_user((void *)ioctl_param, + &cx_data, sizeof(cx_data))) + ret = -EFAULT; + break; + } + default: + ret = -EFAULT; + break; + } + break; + } + + case IC_VM_INTR_MONITOR: { + struct page *page; + + ret = get_user_pages_fast(ioctl_param, 1, 1, &page); + if (unlikely(ret != 1) || (page == NULL)) { + pr_err("vhm-dev: failed to pin intr hdr buffer!\n"); + 
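+			/*
+			 * The page pinned by get_user_pages_fast() stays
+			 * resident so the hypervisor can write its
+			 * interrupt-monitoring data into it; if pinning
+			 * fails there is nothing safe to hand out.
+			 */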
return -ENOMEM; + } + + ret = hcall_vm_intr_monitor(vm->vmid, page_to_phys(page)); + if (ret < 0) { + pr_err("vhm-dev: monitor intr data err=%ld\n", ret); + return -EFAULT; + } + break; + } + + case IC_EVENT_IOEVENTFD: { + struct acrn_ioeventfd args; + + if (copy_from_user(&args, (void *)ioctl_param, sizeof(args))) + return -EFAULT; + ret = acrn_ioeventfd(vm->vmid, &args); + break; + } + + case IC_EVENT_IRQFD: { + struct acrn_irqfd args; + + if (copy_from_user(&args, (void *)ioctl_param, sizeof(args))) + return -EFAULT; + ret = acrn_irqfd(vm->vmid, &args); + break; + } + + default: + pr_warn("Unknown IOCTL 0x%x\n", ioctl_num); + ret = 0; + break; + } + + return ret; +} + +static void io_req_tasklet(unsigned long data) +{ + struct vhm_vm *vm; + + list_for_each_entry(vm, &vhm_vm_list, list) { + if (!vm || !vm->req_buf) + break; + + acrn_ioreq_distribute_request(vm); + } +} + +static void vhm_intr_handler(void) +{ + tasklet_schedule(&vhm_io_req_tasklet); +} + +static int vhm_dev_release(struct inode *inodep, struct file *filep) +{ + struct vhm_vm *vm = filep->private_data; + + if (vm == NULL) { + pr_err("vhm: invalid VM !\n"); + return -EFAULT; + } + put_vm(vm); + filep->private_data = NULL; + return 0; +} + +static const struct file_operations fops = { + .open = vhm_dev_open, + .read = vhm_dev_read, + .write = vhm_dev_write, + .release = vhm_dev_release, + .unlocked_ioctl = vhm_dev_ioctl, + .poll = vhm_dev_poll, +}; + +static ssize_t +store_offline_cpu(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ +#ifdef CONFIG_X86 + u64 cpu, lapicid; + + if (kstrtoull(buf, 0, &cpu) < 0) + return -EINVAL; + + if (cpu_possible(cpu)) { + lapicid = cpu_data(cpu).apicid; + pr_info("vhm: try to offline cpu %lld with lapicid %lld\n", + cpu, lapicid); + if (hcall_sos_offline_cpu(lapicid) < 0) { + pr_err("vhm: failed to offline cpu from Hypervisor!\n"); + return -EINVAL; + } + } +#endif + return count; +} + +static DEVICE_ATTR(offline_cpu, S_IWUSR, NULL, store_offline_cpu); + +static struct attribute *vhm_attrs[] = { + &dev_attr_offline_cpu.attr, + NULL +}; + +static struct attribute_group vhm_attr_group = { + .attrs = vhm_attrs, +}; + +#define SUPPORT_HV_API_VERSION_MAJOR 1 +#define SUPPORT_HV_API_VERSION_MINOR 0 +static int __init vhm_init(void) +{ + unsigned long flag; + struct hc_api_version api_version = {0, 0}; + + if (x86_hyper_type != X86_HYPER_ACRN) + return -ENODEV; + + pr_info("vhm: initializing\n"); + + if (hcall_get_api_version(virt_to_phys(&api_version)) < 0) { + pr_err("vhm: failed to get api version from Hypervisor !\n"); + return -EINVAL; + } + + if (api_version.major_version == SUPPORT_HV_API_VERSION_MAJOR && + api_version.minor_version == SUPPORT_HV_API_VERSION_MINOR) { + pr_info("vhm: hv api version %d.%d\n", + api_version.major_version, api_version.minor_version); + } else { + pr_err("vhm: not support hv api version %d.%d!\n", + api_version.major_version, api_version.minor_version); + return -EINVAL; + } + + /* Try to dynamically allocate a major number for the device */ + major = register_chrdev(0, DEVICE_NAME, &fops); + if (major < 0) { + pr_warn("vhm: failed to register a major number\n"); + return major; + } + pr_info("vhm: registered correctly with major number %d\n", major); + + /* Register the device class */ + vhm_class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(vhm_class)) { + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to register device class\n"); + return PTR_ERR(vhm_class); + } + pr_info("vhm: device class 
registered correctly\n"); + + /* Register the device driver */ + vhm_device = device_create(vhm_class, NULL, MKDEV(major, 0), + NULL, DEVICE_NAME); + if (IS_ERR(vhm_device)) { + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + pr_warn("vhm: failed to create the device\n"); + return PTR_ERR(vhm_device); + } + pr_info("register IPI handler\n"); + tasklet_init(&vhm_io_req_tasklet, io_req_tasklet, 0); + if (x86_platform_ipi_callback) { + pr_warn("vhm: ipi callback was occupied\n"); + return -EINVAL; + } + local_irq_save(flag); + x86_platform_ipi_callback = vhm_intr_handler; + local_irq_restore(flag); + + if (sysfs_create_group(&vhm_device->kobj, &vhm_attr_group)) { + pr_warn("vhm: sysfs create failed\n"); + return -EINVAL; + } + + pr_info("vhm: Virtio & Hypervisor service module initialized\n"); + return 0; +} +static void __exit vhm_exit(void) +{ + tasklet_kill(&vhm_io_req_tasklet); + sysfs_remove_group(&vhm_device->kobj, &vhm_attr_group); + device_destroy(vhm_class, MKDEV(major, 0)); + class_destroy(vhm_class); + unregister_chrdev(major, DEVICE_NAME); + pr_info("vhm: exit\n"); +} + +module_init(vhm_init); +module_exit(vhm_exit); + +MODULE_AUTHOR("Intel"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Char device driver that routes IO requests " + "between other modules, in user space or in the kernel, " + "and the hypervisor"); +MODULE_VERSION("0.1"); diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 608af20a3494..e1312374725b 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -37,6 +37,13 @@ config CPU_FREQ_STAT If in doubt, say N. +config CPU_FREQ_TIMES + bool "CPU frequency time-in-state statistics" + help + Export CPU time-in-state information through procfs. + + If in doubt, say N. 
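+# Example of the interface this exposes (a sketch; the actual frequencies
+# and tick counts depend on the platform's OPP table and workload):
+#   $ cat /proc/uid_time_in_state
+#   uid: 500000 1000000 1500000
+#   0: 1024 2048 512
+#   1000: 0 4096 128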
+ choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index c1ffeabe4ecf..648beca8ad41 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -5,7 +5,10 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o -# CPUfreq governors +# CPUfreq times +obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o + +# CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index cf62a1f64dd7..803d41c629c3 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -456,6 +457,7 @@ static int get_cluster_clk_and_freq_table(struct device *cpu_dev, /* Per-CPU initialization */ static int bL_cpufreq_init(struct cpufreq_policy *policy) { + struct em_data_callback em_cb = EM_DATA_CB(of_dev_pm_opp_get_cpu_power); u32 cur_cluster = cpu_to_cluster(policy->cpu); struct device *cpu_dev; int ret; @@ -487,6 +489,14 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = arm_bL_ops->get_transition_latency(cpu_dev); + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); + return -EPROBE_DEFER; + } + + em_register_perf_domain(policy->cpus, ret, &em_cb); + if (is_bL_switching_enabled()) per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 0a9ebf00be46..83ad2a60991c 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -151,6 +152,7 @@ static int resources_available(void) static int cpufreq_init(struct cpufreq_policy *policy) { + struct em_data_callback em_cb = EM_DATA_CB(of_dev_pm_opp_get_cpu_power); struct cpufreq_frequency_table *freq_table; struct opp_table *opp_table = NULL; struct private_data *priv; @@ -159,7 +161,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) unsigned int transition_latency; bool fallback = false; const char *name; - int ret; + int ret, nr_opp; cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -226,6 +228,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) ret = -EPROBE_DEFER; goto out_free_opp; } + nr_opp = ret; if (fallback) { cpumask_setall(policy->cpus); @@ -278,6 +281,8 @@ static int cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = transition_latency; policy->dvfs_possible_from_any_cpu = true; + em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + return 0; out_free_cpufreq_table: diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index f53fb41efb7b..b3c384ac6064 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -19,12 +19,14 @@ #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -158,6 +160,12 @@ __weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq, } EXPORT_SYMBOL_GPL(arch_set_freq_scale); +__weak void arch_set_max_freq_scale(struct cpumask 
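+/*
+ * Weak no-op: architectures that implement frequency-invariant load
+ * tracking can override this to fold the policy's max-frequency cap
+ * into their scale factor.
+ */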
*cpus, + unsigned long policy_max_freq) +{ +} +EXPORT_SYMBOL_GPL(arch_set_max_freq_scale); + /* * This is a generic cpufreq init() routine which can be used by cpufreq * drivers of SMP systems. It will do following: @@ -349,6 +357,7 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy, } cpufreq_stats_record_transition(policy, freqs->new); + cpufreq_times_record_transition(freqs); policy->cur = freqs->new; } } @@ -1295,6 +1304,7 @@ static int cpufreq_online(unsigned int cpu) goto out_destroy_policy; cpufreq_stats_create_table(policy); + cpufreq_times_create_policy(policy); write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); @@ -2243,6 +2253,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->max = new_policy->max; trace_cpu_frequency_limits(policy); + arch_set_max_freq_scale(policy->cpus, policy->max); + policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", @@ -2277,6 +2289,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, ret = cpufreq_start_governor(policy); if (!ret) { pr_debug("cpufreq: governor change\n"); + sched_cpufreq_governor_change(policy, old_gov); return 0; } cpufreq_exit_governor(policy); diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c new file mode 100644 index 000000000000..a43eeee30e8e --- /dev/null +++ b/drivers/cpufreq/cpufreq_times.c @@ -0,0 +1,464 @@ +/* drivers/cpufreq/cpufreq_times.c + * + * Copyright (C) 2018 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define UID_HASH_BITS 10 + +static DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS); + +static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */ +static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */ + +struct uid_entry { + uid_t uid; + unsigned int max_state; + struct hlist_node hash; + struct rcu_head rcu; + u64 time_in_state[0]; +}; + +/** + * struct cpu_freqs - per-cpu frequency information + * @offset: start of these freqs' stats in task time_in_state array + * @max_state: number of entries in freq_table + * @last_index: index in freq_table of last frequency switched to + * @freq_table: list of available frequencies + */ +struct cpu_freqs { + unsigned int offset; + unsigned int max_state; + unsigned int last_index; + unsigned int freq_table[0]; +}; + +static struct cpu_freqs *all_freqs[NR_CPUS]; + +static unsigned int next_offset; + + +/* Caller must hold rcu_read_lock() */ +static struct uid_entry *find_uid_entry_rcu(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_uid_entry_locked(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_or_register_uid_locked(uid_t uid) +{ + struct uid_entry *uid_entry, *temp; + unsigned int max_state = READ_ONCE(next_offset); + size_t alloc_size = sizeof(*uid_entry) + max_state * + sizeof(uid_entry->time_in_state[0]); + + uid_entry = find_uid_entry_locked(uid); + if (uid_entry) { + if (uid_entry->max_state == max_state) + return uid_entry; + /* uid_entry->time_in_state is too small to track all freqs, so + * expand it. 
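+	 * __krealloc() is used instead of krealloc() because it never
+	 * frees the old allocation: concurrent RCU readers may still be
+	 * walking the old entry, which is only released through
+	 * kfree_rcu() once the hash node has been swapped with
+	 * hlist_replace_rcu().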
+ */ + temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC); + if (!temp) + return uid_entry; + temp->max_state = max_state; + memset(temp->time_in_state + uid_entry->max_state, 0, + (max_state - uid_entry->max_state) * + sizeof(uid_entry->time_in_state[0])); + if (temp != uid_entry) { + hlist_replace_rcu(&uid_entry->hash, &temp->hash); + kfree_rcu(uid_entry, rcu); + } + return temp; + } + + uid_entry = kzalloc(alloc_size, GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; + uid_entry->max_state = max_state; + + hash_add_rcu(uid_hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static bool freq_index_invalid(unsigned int index) +{ + unsigned int cpu; + struct cpu_freqs *freqs; + + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || index < freqs->offset || + freqs->offset + freqs->max_state <= index) + continue; + return freqs->freq_table[index - freqs->offset] == + CPUFREQ_ENTRY_INVALID; + } + return true; +} + +static int single_uid_time_in_state_show(struct seq_file *m, void *ptr) +{ + struct uid_entry *uid_entry; + unsigned int i; + u64 time; + uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private); + + if (uid == overflowuid) + return -EINVAL; + + rcu_read_lock(); + + uid_entry = find_uid_entry_rcu(uid); + if (!uid_entry) { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + time = nsec_to_clock_t(uid_entry->time_in_state[i]); + seq_write(m, &time, sizeof(time)); + } + + rcu_read_unlock(); + + return 0; +} + +static void *uid_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void uid_seq_stop(struct seq_file *seq, void *v) { } + +static int uid_time_in_state_seq_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + struct cpu_freqs *freqs, *last_freqs = NULL; + int i, cpu; + + if (v == uid_hash_table) { + seq_puts(m, "uid:"); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == + CPUFREQ_ENTRY_INVALID) + continue; + seq_printf(m, " %d", freqs->freq_table[i]); + } + } + seq_putc(m, '\n'); + } + + rcu_read_lock(); + + hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { + if (uid_entry->max_state) + seq_printf(m, "%d:", uid_entry->uid); + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t( + uid_entry->time_in_state[i])); + } + if (uid_entry->max_state) + seq_putc(m, '\n'); + } + + rcu_read_unlock(); + return 0; +} + +void cpufreq_task_times_init(struct task_struct *p) +{ + unsigned long flags; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = 0; +} + +void cpufreq_task_times_alloc(struct task_struct *p) +{ + void *temp; + unsigned long flags; + unsigned int max_state = READ_ONCE(next_offset); + + /* We use one array to avoid multiple allocs per task */ + temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC); + if (!temp) + return; + + spin_lock_irqsave(&task_time_in_state_lock, 
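+	/* publish the freshly allocated array under the lock so the
+	 * accounting path never sees a half-initialized pointer */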
flags); + p->time_in_state = temp; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = max_state; +} + +/* Caller must hold task_time_in_state_lock */ +static int cpufreq_task_times_realloc_locked(struct task_struct *p) +{ + void *temp; + unsigned int max_state = READ_ONCE(next_offset); + + temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC); + if (!temp) + return -ENOMEM; + p->time_in_state = temp; + memset(p->time_in_state + p->max_state, 0, + (max_state - p->max_state) * sizeof(u64)); + p->max_state = max_state; + return 0; +} + +void cpufreq_task_times_exit(struct task_struct *p) +{ + unsigned long flags; + void *temp; + + if (!p->time_in_state) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + temp = p->time_in_state; + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + kfree(temp); +} + +int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *p) +{ + unsigned int cpu, i; + u64 cputime; + unsigned long flags; + struct cpu_freqs *freqs; + struct cpu_freqs *last_freqs = NULL; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + + seq_printf(m, "cpu%u\n", cpu); + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID) + continue; + cputime = 0; + if (freqs->offset + i < p->max_state && + p->time_in_state) + cputime = p->time_in_state[freqs->offset + i]; + seq_printf(m, "%u %lu\n", freqs->freq_table[i], + (unsigned long)nsec_to_clock_t(cputime)); + } + } + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + return 0; +} + +void cpufreq_acct_update_power(struct task_struct *p, u64 cputime) +{ + unsigned long flags; + unsigned int state; + struct uid_entry *uid_entry; + struct cpu_freqs *freqs = all_freqs[task_cpu(p)]; + uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); + + if (!freqs || p->flags & PF_EXITING) + return; + + state = freqs->offset + READ_ONCE(freqs->last_index); + + spin_lock_irqsave(&task_time_in_state_lock, flags); + if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) && + p->time_in_state) + p->time_in_state[state] += cputime; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + + spin_lock_irqsave(&uid_lock, flags); + uid_entry = find_or_register_uid_locked(uid); + if (uid_entry && state < uid_entry->max_state) + uid_entry->time_in_state[state] += cputime; + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_create_policy(struct cpufreq_policy *policy) +{ + int cpu, index; + unsigned int count = 0; + struct cpufreq_frequency_table *pos, *table; + struct cpu_freqs *freqs; + void *tmp; + + if (all_freqs[policy->cpu]) + return; + + table = policy->freq_table; + if (!table) + return; + + cpufreq_for_each_entry(pos, table) + count++; + + tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count, + GFP_KERNEL); + if (!tmp) + return; + + freqs = tmp; + freqs->max_state = count; + + index = cpufreq_frequency_table_get_index(policy, policy->cur); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_for_each_entry(pos, table) + freqs->freq_table[pos - table] = pos->frequency; + + freqs->offset = next_offset; + WRITE_ONCE(next_offset, freqs->offset + count); + for_each_cpu(cpu, policy->related_cpus) + all_freqs[cpu] = freqs; +} + +void cpufreq_task_times_remove_uids(uid_t 
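+/*
+ * Drops the accumulated time-in-state for every uid in the inclusive
+ * [uid_start, uid_end] range; entries vanish for readers only after an
+ * RCU grace period, via kfree_rcu().
+ */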
uid_start, uid_t uid_end) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + unsigned long flags; + + spin_lock_irqsave(&uid_lock, flags); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp, + hash, uid_start) { + if (uid_start == uid_entry->uid) { + hash_del_rcu(&uid_entry->hash); + kfree_rcu(uid_entry, rcu); + } + } + } + + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_record_transition(struct cpufreq_freqs *freq) +{ + int index; + struct cpu_freqs *freqs = all_freqs[freq->cpu]; + struct cpufreq_policy *policy; + + if (!freqs) + return; + + policy = cpufreq_cpu_get(freq->cpu); + if (!policy) + return; + + index = cpufreq_frequency_table_get_index(policy, freq->new); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_cpu_put(policy); +} + +static const struct seq_operations uid_time_in_state_seq_ops = { + .start = uid_seq_start, + .next = uid_seq_next, + .stop = uid_seq_stop, + .show = uid_time_in_state_seq_show, +}; + +static int uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &uid_time_in_state_seq_ops); +} + +int single_uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, single_uid_time_in_state_show, + &(inode->i_uid)); +} + +static const struct file_operations uid_time_in_state_fops = { + .open = uid_time_in_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init cpufreq_times_init(void) +{ + proc_create_data("uid_time_in_state", 0444, NULL, + &uid_time_in_state_fops, NULL); + + return 0; +} + +early_initcall(cpufreq_times_init); diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 50b1551ba894..80a7f8da7e74 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -103,13 +104,42 @@ scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) return 0; } +static int __maybe_unused +scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) +{ + struct device *cpu_dev = get_cpu_device(cpu); + unsigned long Hz; + int ret, domain; + + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", cpu); + return -ENODEV; + } + + domain = handle->perf_ops->device_domain_id(cpu_dev); + if (domain < 0) + return domain; + + /* Get the power cost of the performance domain. */ + Hz = *KHz * 1000; + ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power); + if (ret) + return ret; + + /* The EM framework specifies the frequency in KHz. 
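est_power_get() works in Hz, so convert the result back before returning it to the energy model. 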
*/ + *KHz = Hz / 1000; + + return 0; +} + static int scmi_cpufreq_init(struct cpufreq_policy *policy) { - int ret; + int ret, nr_opp; unsigned int latency; struct device *cpu_dev; struct scmi_data *priv; struct cpufreq_frequency_table *freq_table; + struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -142,6 +172,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) ret = -EPROBE_DEFER; goto out_free_opp; } + nr_opp = ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { @@ -171,6 +202,9 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = true; + + em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + return 0; out_free_priv: diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 87a98ec77773..05fc7448f5cb 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -98,11 +99,12 @@ scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) static int scpi_cpufreq_init(struct cpufreq_policy *policy) { - int ret; + int ret, nr_opp; unsigned int latency; struct device *cpu_dev; struct scpi_data *priv; struct cpufreq_frequency_table *freq_table; + struct em_data_callback em_cb = EM_DATA_CB(of_dev_pm_opp_get_cpu_power); cpu_dev = get_cpu_device(policy->cpu); if (!cpu_dev) { @@ -135,6 +137,7 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) ret = -EPROBE_DEFER; goto out_free_opp; } + nr_opp = ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { @@ -170,6 +173,9 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy) policy->cpuinfo.transition_latency = latency; policy->fast_switch_possible = false; + + em_register_perf_domain(policy->cpus, nr_opp, &em_cb); + return 0; out_free_cpufreq_table: diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 6df894d65d9e..96a3a9bf8b12 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -221,7 +221,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, } /* Take note of the planned idle state. */ - sched_idle_set_state(target_state); + sched_idle_set_state(target_state, index); trace_cpu_idle_rcuidle(index, dev->cpu); time_start = ns_to_ktime(local_clock()); @@ -235,7 +235,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu); /* The cpu is no longer idle or about to enter idle. */ - sched_idle_set_state(NULL); + sched_idle_set_state(NULL, -1); if (broadcast) { if (WARN_ON_ONCE(!irqs_disabled())) diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index e26a40971b26..f9374dd071be 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c @@ -179,7 +179,12 @@ static inline int performance_multiplier(unsigned long nr_iowaiters, unsigned lo /* for higher loadavg, we are more reluctant */ - mult += 2 * get_loadavg(load); + /* + * this doesn't work as intended - it is almost always 0, but can + * sometimes, depending on workload, spike very high into the hundreds + * even when the average cpu load is under 10%. + */ + /* mult += 2 * get_loadavg(); */ /* for IO wait tasks (per cpu!) 
we add 5x each */ mult += 10 * nr_iowaiters; diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index ed3b785bae37..09ccac1768e3 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -30,4 +30,6 @@ config SW_SYNC WARNING: improper use of this can result in deadlocking kernel drivers from userspace. Intended for test and debug only. +source "drivers/dma-buf/hyper_dmabuf/Kconfig" + endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index c33bf8863147..3f15a841502e 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -1,3 +1,4 @@ obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o +obj-$(CONFIG_HYPER_DMABUF) += hyper_dmabuf/ diff --git a/drivers/dma-buf/hyper_dmabuf/Kconfig b/drivers/dma-buf/hyper_dmabuf/Kconfig new file mode 100644 index 000000000000..1d91a114ba61 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Kconfig @@ -0,0 +1,72 @@ +menu "hyper_dmabuf options" + +config HYPER_DMABUF + bool "Enables hyper dmabuf driver" + default y + depends on (X86=y || X86_64=y) + +choice + prompt "Hypervisor" + depends on HYPER_DMABUF + default HYPER_DMABUF_XEN + +config HYPER_DMABUF_XEN + bool "Configure hyper_dmabuf for XEN hypervisor" + depends on HYPER_DMABUF && XEN + help + Configuring hyper_dmabuf driver for XEN hypervisor + +config HYPER_DMABUF_ACRN + bool "Configure hyper_dmabuf for ACRN hypervisor" + depends on HYPER_DMABUF && ACRN_VIRTIO_DEVICES + help + Configuring hyper_dmabuf driver for ACRN hypervisor +endchoice + +choice + prompt "Virtio driver type" + depends on HYPER_DMABUF && HYPER_DMABUF_ACRN + default HYPER_DMABUF_VIRTIO_BE + +config HYPER_DMABUF_VIRTIO_BE + depends on VBS && DRM_I915_GVT + bool "Configure hyper_dmabuf as virtio backend" + help + Configuring hyper_dmabuf driver as virtio backend + +config HYPER_DMABUF_VIRTIO_FE + depends on ACRN_VIRTIO_DEVICES + bool "Configure hyper_dmabuf as virtio frontend" + help + Configuring hyper_dmabuf driver as virtio frontend +endchoice + +config HYPER_DMABUF_SYSFS + bool "Enable sysfs information about hyper DMA buffers" + default y + depends on HYPER_DMABUF + help + Expose information about imported and exported buffers using + hyper_dmabuf driver + +config HYPER_DMABUF_EVENT_GEN + bool "Enable event-generation and polling operation" + default n + depends on HYPER_DMABUF + help + With this config enabled, hyper_dmabuf driver on the importer side + generates events and queue those up in the event list whenever a new + shared DMA-BUF is available. Events in the list can be retrieved by + read operation. + +config HYPER_DMABUF_XEN_AUTO_RX_CH_ADD + bool "Enable automatic rx-ch add with 10 secs interval" + default y + depends on HYPER_DMABUF && HYPER_DMABUF_XEN + help + If enabled, driver reads a node in xenstore every 10 seconds + to check whether there is any tx comm ch configured by another + domain then initialize matched rx comm ch automatically for any + existing tx comm chs. 
+ +endmenu diff --git a/drivers/dma-buf/hyper_dmabuf/Makefile b/drivers/dma-buf/hyper_dmabuf/Makefile new file mode 100644 index 000000000000..f63967cc99f6 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/Makefile @@ -0,0 +1,57 @@ +TARGET_MODULE:=hyper_dmabuf + +# If we running by kernel building system +ifneq ($(KERNELRELEASE),) + $(TARGET_MODULE)-objs := hyper_dmabuf_drv.o \ + hyper_dmabuf_ioctl.o \ + hyper_dmabuf_list.o \ + hyper_dmabuf_sgl_proc.o \ + hyper_dmabuf_ops.o \ + hyper_dmabuf_msg.o \ + hyper_dmabuf_id.o \ + hyper_dmabuf_remote_sync.o \ + hyper_dmabuf_query.o \ + +ifeq ($(CONFIG_HYPER_DMABUF_EVENT_GEN), y) + $(TARGET_MODULE)-objs += hyper_dmabuf_event.o +endif + +ifeq ($(CONFIG_HYPER_DMABUF_XEN), y) + $(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \ + xen/hyper_dmabuf_xen_comm_list.o \ + xen/hyper_dmabuf_xen_shm.o \ + xen/hyper_dmabuf_xen_drv.o +else ifeq ($(CONFIG_HYPER_DMABUF_ACRN), y) + ifeq ($(CONFIG_HYPER_DMABUF_VIRTIO_BE), y) + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_be_drv.o \ + virtio/hyper_dmabuf_virtio_fe_list.o + else + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_fe_drv.o + endif + $(TARGET_MODULE)-objs += virtio/hyper_dmabuf_virtio_common.o \ + virtio/hyper_dmabuf_virtio_shm.o \ + virtio/hyper_dmabuf_virtio_comm_ring.o +endif + +obj-$(CONFIG_HYPER_DMABUF) := $(TARGET_MODULE).o + +# If we are running without kernel build system +else +BUILDSYSTEM_DIR?=../../../ +PWD:=$(shell pwd) + +all : +# run kernel build system to make module + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) modules + +clean: +# run kernel build system to cleanup in current directory + $(MAKE) -C $(BUILDSYSTEM_DIR) M=$(PWD) clean + +load: + insmod ./$(TARGET_MODULE).ko + +unload: + rmmod ./$(TARGET_MODULE).ko + +endif diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c new file mode 100644 index 000000000000..f1afce29d6af --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.c @@ -0,0 +1,411 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_event.h" + +#ifdef CONFIG_HYPER_DMABUF_XEN +#include "xen/hyper_dmabuf_xen_drv.h" +#elif defined (CONFIG_HYPER_DMABUF_ACRN) +#include "virtio/hyper_dmabuf_virtio_common.h" +#endif + +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Intel Corporation"); + +struct hyper_dmabuf_private *hy_drv_priv; + +static void force_free(struct exported_sgt_info *exported, + void *attr) +{ + struct ioctl_hyper_dmabuf_unexport unexport_attr; + struct file *filp = (struct file *)attr; + + if (!filp || !exported) + return; + + if (exported->filp == filp) { + dev_dbg(hy_drv_priv->dev, + "Forcefully releasing buffer {id:%d key:%d %d %d}\n", + exported->hid.id, exported->hid.rng_key[0], + exported->hid.rng_key[1], exported->hid.rng_key[2]); + + unexport_attr.hid = exported->hid; + unexport_attr.delay_ms = 0; + + hyper_dmabuf_unexport_ioctl(filp, &unexport_attr); + } +} + +static int hyper_dmabuf_open(struct inode *inode, struct file *filp) +{ + int ret = 0; + + /* Do not allow exclusive open */ + if (filp->f_flags & O_EXCL) + return -EBUSY; + + return ret; +} + +static int hyper_dmabuf_release(struct inode *inode, struct file *filp) +{ + hyper_dmabuf_foreach_exported(force_free, filp); + + return 0; +} + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + +static unsigned int hyper_dmabuf_event_poll(struct file *filp, + struct poll_table_struct *wait) +{ + poll_wait(filp, &hy_drv_priv->event_wait, wait); + + if (!list_empty(&hy_drv_priv->event_list)) + return POLLIN | POLLRDNORM; + + return 0; +} + +static ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer, + size_t count, loff_t *offset) +{ + int ret; + + /* only root can read events */ + if (!capable(CAP_DAC_OVERRIDE)) { + dev_err(hy_drv_priv->dev, + "Only root can read events\n"); + return -EPERM; + } + + /* make sure user buffer can be written */ + if (!access_ok(VERIFY_WRITE, buffer, count)) { + dev_err(hy_drv_priv->dev, + "User buffer can't be written.\n"); + return -EINVAL; + } + + ret = mutex_lock_interruptible(&hy_drv_priv->event_read_lock); + if (ret) + return ret; + + while (1) { + struct hyper_dmabuf_event *e = NULL; + + spin_lock_irq(&hy_drv_priv->event_lock); + if (!list_empty(&hy_drv_priv->event_list)) { + e = list_first_entry(&hy_drv_priv->event_list, + struct hyper_dmabuf_event, link); + list_del(&e->link); + } + spin_unlock_irq(&hy_drv_priv->event_lock); + + if (!e) { + if (ret) + break; + + if (filp->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + ret = wait_event_interruptible(hy_drv_priv->event_wait, + !list_empty(&hy_drv_priv->event_list)); + + if (ret == 0) + ret = mutex_lock_interruptible( + &hy_drv_priv->event_read_lock); + + if (ret) + return ret; + } else { + unsigned int length = (sizeof(e->event_data.hdr) + + e->event_data.hdr.size); + + if (length > count - ret) { +put_back_event: + spin_lock_irq(&hy_drv_priv->event_lock); + list_add(&e->link, &hy_drv_priv->event_list); + spin_unlock_irq(&hy_drv_priv->event_lock); + break; + } + + if (copy_to_user(buffer + ret, &e->event_data.hdr, + sizeof(e->event_data.hdr))) { + if (ret == 0) + ret = -EFAULT; + + goto put_back_event; + } + + ret += sizeof(e->event_data.hdr); + + if (copy_to_user(buffer + ret, 
e->event_data.data, + e->event_data.hdr.size)) { + /* error while copying void *data */ + + struct hyper_dmabuf_event_hdr dummy_hdr = {0}; + + ret -= sizeof(e->event_data.hdr); + + /* nullifying hdr of the event in user buffer */ + if (copy_to_user(buffer + ret, &dummy_hdr, + sizeof(dummy_hdr))) { + dev_err(hy_drv_priv->dev, + "failed to nullify invalid hdr already in userspace\n"); + } + + ret = -EFAULT; + + goto put_back_event; + } + + ret += e->event_data.hdr.size; + hy_drv_priv->pending--; + kfree(e); + } + } + + mutex_unlock(&hy_drv_priv->event_read_lock); + + return ret; +} + +#endif + +static const struct file_operations hyper_dmabuf_driver_fops = { + .owner = THIS_MODULE, + .open = hyper_dmabuf_open, + .release = hyper_dmabuf_release, + +/* poll and read interfaces are needed only for event-polling */ +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + .read = hyper_dmabuf_event_read, + .poll = hyper_dmabuf_event_poll, +#endif + + .unlocked_ioctl = hyper_dmabuf_ioctl, +}; + +static struct miscdevice hyper_dmabuf_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "hyper_dmabuf", + .fops = &hyper_dmabuf_driver_fops, +}; + +static int register_device(void) +{ + int ret = 0; + + ret = misc_register(&hyper_dmabuf_miscdev); + + if (ret) { + printk(KERN_ERR "hyper_dmabuf: driver can't be registered\n"); + return ret; + } + + hy_drv_priv->dev = hyper_dmabuf_miscdev.this_device; + + /* TODO: Check if there is a different way to initialize dma mask */ + dma_coerce_mask_and_coherent(hy_drv_priv->dev, DMA_BIT_MASK(64)); + + return ret; +} + +static void unregister_device(void) +{ + dev_info(hy_drv_priv->dev, + "hyper_dmabuf: unregister_device() is called\n"); + + misc_deregister(&hyper_dmabuf_miscdev); +} + +static int __init hyper_dmabuf_drv_init(void) +{ + int ret = 0; + + printk(KERN_NOTICE "hyper_dmabuf_starting: Initialization started\n"); + + hy_drv_priv = kcalloc(1, sizeof(struct hyper_dmabuf_private), + GFP_KERNEL); + + if (!hy_drv_priv) + return -ENOMEM; + + ret = register_device(); + if (ret < 0) + return ret; + +/* currently only supports XEN hypervisor */ +#ifdef CONFIG_HYPER_DMABUF_XEN + hy_drv_priv->bknd_ops = &xen_bknd_ops; +#elif defined (CONFIG_HYPER_DMABUF_ACRN) + hy_drv_priv->bknd_ops = &virtio_bknd_ops; +#else + hy_drv_priv->bknd_ops = NULL; + printk(KERN_ERR "No backend configured for hyper_dmabuf in kernel config\n"); +#endif + + if (hy_drv_priv->bknd_ops == NULL) { + printk(KERN_ERR "Hyper_dmabuf: no backend found\n"); + return -1; + } + + mutex_init(&hy_drv_priv->lock); + + mutex_lock(&hy_drv_priv->lock); + + hy_drv_priv->initialized = false; + + dev_info(hy_drv_priv->dev, + "initializing database for imported/exported dmabufs\n"); + + hy_drv_priv->work_queue = create_workqueue("hyper_dmabuf_wqueue"); + + ret = hyper_dmabuf_table_init(); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "fail to init table for exported/imported entries\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } + +#ifdef CONFIG_HYPER_DMABUF_SYSFS + ret = hyper_dmabuf_register_sysfs(hy_drv_priv->dev); + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to initialize sysfs\n"); + mutex_unlock(&hy_drv_priv->lock); + kfree(hy_drv_priv); + return ret; + } +#endif + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + mutex_init(&hy_drv_priv->event_read_lock); + spin_lock_init(&hy_drv_priv->event_lock); + + /* Initialize event queue */ + INIT_LIST_HEAD(&hy_drv_priv->event_list); + init_waitqueue_head(&hy_drv_priv->event_wait); + + /* resetting number of pending events */ + hy_drv_priv->pending 
= 0;
+#endif
+
+	if (hy_drv_priv->bknd_ops->init) {
+		ret = hy_drv_priv->bknd_ops->init();
+
+		if (ret < 0) {
+			dev_dbg(hy_drv_priv->dev,
+				"failed to initialize backend.\n");
+			mutex_unlock(&hy_drv_priv->lock);
+			return ret;
+		}
+	}
+
+	hy_drv_priv->domid = hy_drv_priv->bknd_ops->get_vm_id();
+
+	hy_drv_priv->initialized = true;
+	if (hy_drv_priv->bknd_ops->init_comm_env) {
+		ret = hy_drv_priv->bknd_ops->init_comm_env();
+		if (ret < 0) {
+			hy_drv_priv->initialized = false;
+			dev_dbg(hy_drv_priv->dev,
+				"failed to initialize comm-env.\n");
+		}
+	}
+
+	mutex_unlock(&hy_drv_priv->lock);
+
+	dev_info(hy_drv_priv->dev,
+		 "Finishing up initialization of hyper_dmabuf drv\n");
+
+	/* interrupts for comm should be registered here */
+	return ret;
+}
+
+static void hyper_dmabuf_drv_exit(void)
+{
+#ifdef CONFIG_HYPER_DMABUF_SYSFS
+	hyper_dmabuf_unregister_sysfs(hy_drv_priv->dev);
+#endif
+
+	mutex_lock(&hy_drv_priv->lock);
+
+	/* hash tables for export/import entries and ring_infos */
+	hyper_dmabuf_table_destroy();
+
+	if (hy_drv_priv->bknd_ops->destroy_comm)
+		hy_drv_priv->bknd_ops->destroy_comm();
+
+	if (hy_drv_priv->bknd_ops->cleanup)
+		hy_drv_priv->bknd_ops->cleanup();
+
+	/* destroy workqueue */
+	if (hy_drv_priv->work_queue)
+		destroy_workqueue(hy_drv_priv->work_queue);
+
+	/* destroy id_queue */
+	if (hy_drv_priv->id_queue)
+		hyper_dmabuf_free_hid_list();
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+	/* clean up event queue */
+	hyper_dmabuf_events_release();
+#endif
+
+	mutex_unlock(&hy_drv_priv->lock);
+
+	dev_info(hy_drv_priv->dev,
+		 "hyper_dmabuf driver: Exiting\n");
+
+	/* the misc device logs through hy_drv_priv->dev, so deregister
+	 * it before the private data is freed
+	 */
+	unregister_device();
+
+	kfree(hy_drv_priv);
+}
+
+module_init(hyper_dmabuf_drv_init);
+module_exit(hyper_dmabuf_drv_exit);
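Since the driver registers as a misc device, the node appears as /dev/hyper_dmabuf. With CONFIG_HYPER_DMABUF_EVENT_GEN set, a privileged daemon can block on poll() and pick up import events with read(), as in this sketch; the struct definitions below are local mirrors assumed for illustration, the authoritative layout lives in the driver's headers:

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* assumed mirror of the driver's event header layout */
	typedef struct { int id; int rng_key[3]; } hyper_dmabuf_id_t;
	struct hyper_dmabuf_event_hdr {
		int event_type;		/* HYPER_DMABUF_NEW_IMPORT */
		hyper_dmabuf_id_t hid;	/* id of the newly shared buffer */
		int size;		/* length of the private data that follows */
	};

	int main(void)
	{
		char buf[4096];
		struct hyper_dmabuf_event_hdr hdr;
		/* reads are root-only: the driver checks CAP_DAC_OVERRIDE */
		int fd = open("/dev/hyper_dmabuf", O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (fd < 0) {
			perror("open");
			return 1;
		}

		while (poll(&pfd, 1, -1) > 0) {
			ssize_t n = read(fd, buf, sizeof(buf));

			if (n < (ssize_t)sizeof(hdr))
				continue;
			memcpy(&hdr, buf, sizeof(hdr));
			printf("new import: hid %d, %d byte(s) of private data\n",
			       hdr.hid.id, hdr.size);
		}
		close(fd);
		return 0;
	}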
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
new file mode 100644
index 000000000000..ad4839b9c0f2
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_drv.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+
+#include
+#include
+
+struct hyper_dmabuf_req;
+
+struct hyper_dmabuf_event {
+	struct hyper_dmabuf_event_data event_data;
+	struct list_head link;
+};
+
+struct hyper_dmabuf_private {
+	struct device *dev;
+
+	/* VM(domain) id of current VM instance */
+	int domid;
+
+	/* workqueue dedicated to hyper_dmabuf driver */
+	struct workqueue_struct *work_queue;
+
+	/* list of reusable hyper_dmabuf_ids */
+	struct list_reusable_id *id_queue;
+
+	/* backend ops - hypervisor specific */
+	struct hyper_dmabuf_bknd_ops *bknd_ops;
+
+	/* device global lock */
+	/* TODO: might need a lock per resource (e.g. EXPORT LIST) */
+	struct mutex lock;
+
+	/* flag that shows whether backend is initialized */
+	bool initialized;
+
+	wait_queue_head_t event_wait;
+	struct list_head event_list;
+
+	spinlock_t event_lock;
+	struct mutex event_read_lock;
+
+	/* # of pending events */
+	int pending;
+};
+
+struct list_reusable_id {
+	hyper_dmabuf_id_t hid;
+	struct list_head list;
+};
+
+struct hyper_dmabuf_bknd_ops {
+	/* backend initialization routine (optional) */
+	int (*init)(void);
+
+	/* backend cleanup routine (optional) */
+	void (*cleanup)(void);
+
+	/* retrieving id of current virtual machine */
+	int (*get_vm_id)(void);
+
+	/* get pages shared via hypervisor-specific method */
+	long (*share_pages)(struct page **, int, int, void **);
+
+	/* make shared pages unshared via hypervisor specific method */
+	int (*unshare_pages)(void **, int);
+
+	/* map remotely shared pages on importer's side via
+	 * hypervisor-specific method
+	 */
+	struct page ** (*map_shared_pages)(unsigned long, int, int, void **);
+
+	/* unmap and free shared pages on importer's side via
+	 * hypervisor-specific method
+	 */
+	int (*unmap_shared_pages)(void **, int);
+
+	/* initialize communication environment */
+	int (*init_comm_env)(void);
+
+	void (*destroy_comm)(void);
+
+	/* upstream ch setup (receiving and responding) */
+	int (*init_rx_ch)(int);
+
+	/* downstream ch setup (transmitting and parsing responses) */
+	int (*init_tx_ch)(int);
+
+	int (*send_req)(int, struct hyper_dmabuf_req *, int);
+};
+
+/* exporting global drv private info */
+extern struct hyper_dmabuf_private *hy_drv_priv;
+
+#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
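To make the division of labor concrete, here is a skeletal, purely hypothetical backend wired against the ops table above. The core calls get_vm_id() and send_req() without NULL checks, and the page-sharing hooks are likewise used unconditionally on the export and import paths, so those are effectively mandatory; init(), cleanup(), init_comm_env() and the channel hooks are tested before use and may stay unset:

	/* sketch of a minimal hypervisor backend; every stub is hypothetical */
	#include <linux/errno.h>
	#include "hyper_dmabuf_drv.h"

	static int stub_get_vm_id(void)
	{
		/* would return e.g. the Xen domid or the ACRN VM id */
		return 0;
	}

	static int stub_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
	{
		/* transport-specific message delivery would go here */
		return -ENODEV;
	}

	struct hyper_dmabuf_bknd_ops stub_bknd_ops = {
		.get_vm_id = stub_get_vm_id,
		.send_req = stub_send_req,
		/* .share_pages, .unshare_pages, .map_shared_pages and
		 * .unmap_shared_pages are equally mandatory in a real
		 * backend; they are omitted here only to keep the sketch
		 * short
		 */
	};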
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
new file mode 100644
index 000000000000..392ea99e0784
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim
+ * Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_event.h"
+
+static void send_event(struct hyper_dmabuf_event *e)
+{
+	struct hyper_dmabuf_event *oldest;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
+
+	/* check the current number of events; if the maximum allowed
+	 * has been reached, remove the oldest event in the list
+	 */
+	if (hy_drv_priv->pending > MAX_DEPTH_EVENT_QUEUE - 1) {
+		oldest = list_first_entry(&hy_drv_priv->event_list,
+					  struct hyper_dmabuf_event, link);
+		list_del(&oldest->link);
+		hy_drv_priv->pending--;
+		kfree(oldest);
+	}
+
+	list_add_tail(&e->link,
+		      &hy_drv_priv->event_list);
+
+	hy_drv_priv->pending++;
+
+	wake_up_interruptible(&hy_drv_priv->event_wait);
+
+	spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
+}
+
+void hyper_dmabuf_events_release(void)
+{
+	struct hyper_dmabuf_event *e, *et;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&hy_drv_priv->event_lock, irqflags);
+
+	list_for_each_entry_safe(e, et, &hy_drv_priv->event_list,
+				 link) {
+		list_del(&e->link);
+		kfree(e);
+		hy_drv_priv->pending--;
+	}
+
+	if (hy_drv_priv->pending) {
+		dev_err(hy_drv_priv->dev,
+			"possible leak on event_list\n");
+	}
+
+	spin_unlock_irqrestore(&hy_drv_priv->event_lock, irqflags);
+}
+
+int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
+{
+	struct hyper_dmabuf_event *e;
+	struct imported_sgt_info *imported;
+
+	imported = hyper_dmabuf_find_imported(hid);
+
+	if (!imported) {
+		dev_err(hy_drv_priv->dev,
+			"can't find imported_sgt_info in the list\n");
+		return -EINVAL;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+	if (!e)
+		return -ENOMEM;
+
+	e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
+	e->event_data.hdr.hid = hid;
+	e->event_data.data = (void *)imported->priv;
+	e->event_data.hdr.size = imported->sz_priv;
+
+	send_event(e);
+
+	dev_dbg(hy_drv_priv->dev,
+		"event number = %d\n", hy_drv_priv->pending);
+
+	dev_dbg(hy_drv_priv->dev,
+		"generating events for {%d, %d, %d, %d}\n",
+		imported->hid.id, imported->hid.rng_key[0],
+		imported->hid.rng_key[1], imported->hid.rng_key[2]);
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h
new file mode 100644
index 000000000000..50db04faf222
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_event.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_EVENT_H__ +#define __HYPER_DMABUF_EVENT_H__ + +#define MAX_DEPTH_EVENT_QUEUE 32 + +enum hyper_dmabuf_event_type { + HYPER_DMABUF_NEW_IMPORT = 0x10000, +}; + +void hyper_dmabuf_events_release(void); + +int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid); + +#endif /* __HYPER_DMABUF_EVENT_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c new file mode 100644 index 000000000000..e67b84a7e64c --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.c @@ -0,0 +1,133 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ * Dongwon Kim
+ * Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
+
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	struct list_reusable_id *new_reusable;
+
+	new_reusable = kmalloc(sizeof(*new_reusable), GFP_KERNEL);
+
+	if (!new_reusable)
+		return;
+
+	new_reusable->hid = hid;
+
+	list_add(&new_reusable->list, &reusable_head->list);
+}
+
+static hyper_dmabuf_id_t get_reusable_hid(void)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	hyper_dmabuf_id_t hid = {-1, {0, 0, 0} };
+
+	/* check whether there is a reusable id */
+	if (!list_empty(&reusable_head->list)) {
+		reusable_head = list_first_entry(&reusable_head->list,
+						 struct list_reusable_id,
+						 list);
+
+		list_del(&reusable_head->list);
+		hid = reusable_head->hid;
+		kfree(reusable_head);
+	}
+
+	return hid;
+}
+
+void hyper_dmabuf_free_hid_list(void)
+{
+	struct list_reusable_id *reusable_head = hy_drv_priv->id_queue;
+	struct list_reusable_id *temp_head;
+
+	if (reusable_head) {
+		/* freeing memory of all reusable ids in the stack */
+		while (!list_empty(&reusable_head->list)) {
+			temp_head = list_first_entry(&reusable_head->list,
+						     struct list_reusable_id,
+						     list);
+			list_del(&temp_head->list);
+			kfree(temp_head);
+		}
+
+		/* freeing head */
+		kfree(reusable_head);
+	}
+}
+
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
+{
+	static int count;
+	hyper_dmabuf_id_t hid;
+	struct list_reusable_id *reusable_head;
+
+	/* first call to hyper_dmabuf_get_hid */
+	if (count == 0) {
+		reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
+
+		if (!reusable_head)
+			return (hyper_dmabuf_id_t){-1, {0, 0, 0} };
+
+		/* list head carries an invalid id */
+		reusable_head->hid.id = -1;
+		INIT_LIST_HEAD(&reusable_head->list);
+		hy_drv_priv->id_queue = reusable_head;
+	}
+
+	hid = get_reusable_hid();
+
+	/* creating a new H-ID only if nothing is in the reusable id queue
+	 * and count is less than the maximum allowed
+	 */
+	if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX)
+		hid.id = HYPER_DMABUF_ID_CREATE(hy_drv_priv->domid, count++);
+
+	/* random data embedded in the id for security */
+	get_random_bytes(&hid.rng_key[0], 12);
+
+	return hid;
+}
+
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
+{
+	int i;
+
+	/* compare keys */
+	for (i = 0; i < 3; i++) {
+		if (hid1.rng_key[i] != hid2.rng_key[i])
+			return false;
+	}
+
+	return true;
+}
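The id packing macro used above is defined in hyper_dmabuf_id.h, which follows. A quick standalone check of the bit layout (illustrative; the macros below are userspace variants that take the raw integer rather than the hid struct):

	#include <assert.h>

	#define ID_CREATE(domid, cnt)	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
	#define DOM_ID(id)		(((id) >> 24) & 0xFF)

	int main(void)
	{
		/* domain 3, sixth buffer: the domid lands in the top byte,
		 * the counter in the low 24 bits
		 */
		assert(ID_CREATE(3, 5) == 0x03000005);
		assert(DOM_ID(0x03000005) == 3);
		return 0;
	}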
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
new file mode 100644
index 000000000000..ed690f3a478c
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_id.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_ID_H__
+#define __HYPER_DMABUF_ID_H__
+
+#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
+	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
+
+#define HYPER_DMABUF_DOM_ID(hid) \
+	(((hid.id) >> 24) & 0xFF)
+
+/* currently maximum number of buffers shared
+ * at any given moment is limited to 1000
+ */
+#define HYPER_DMABUF_ID_MAX 1000
+
+/* adding freed hid to the reusable list */
+void hyper_dmabuf_store_hid(hyper_dmabuf_id_t hid);
+
+/* freeing the reusable list */
+void hyper_dmabuf_free_hid_list(void);
+
+/* getting an available hid to use */
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void);
+
+/* comparing the random keys of two hids */
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2);
+
+#endif /* __HYPER_DMABUF_ID_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
new file mode 100644
index 000000000000..62f83cc45f36
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_id.h" +#include "hyper_dmabuf_struct.h" +#include "hyper_dmabuf_ioctl.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_msg.h" +#include "hyper_dmabuf_sgl_proc.h" +#include "hyper_dmabuf_ops.h" +#include "hyper_dmabuf_query.h" + +static int hyper_dmabuf_tx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_tx_ch_setup *tx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data; + + if (bknd_ops->init_tx_ch) { + ret = bknd_ops->init_tx_ch(tx_ch_attr->remote_domain); + } + + return ret; +} + +static int hyper_dmabuf_rx_ch_setup_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_rx_ch_setup *rx_ch_attr; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int ret = 0; + + if (!data) { + dev_err(hy_drv_priv->dev, "user data is NULL\n"); + return -EINVAL; + } + + rx_ch_attr = (struct ioctl_hyper_dmabuf_rx_ch_setup *)data; + + if (bknd_ops->init_rx_ch) + ret = bknd_ops->init_rx_ch(rx_ch_attr->source_domain); + + return ret; +} + +static int send_export_msg(struct exported_sgt_info *exported, + struct pages_info *pg_info) +{ + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct hyper_dmabuf_req *req; + int op[MAX_NUMBER_OF_OPERANDS] = {0}; + int ret, i; + long tmp; + + /* now create request for importer via ring */ + op[0] = exported->hid.id; + + for (i = 0; i < 3; i++) + op[i+1] = exported->hid.rng_key[i]; + + if (pg_info) { + op[4] = pg_info->nents; + op[5] = pg_info->frst_ofst; + op[6] = pg_info->last_len; + tmp = bknd_ops->share_pages(pg_info->pgs, exported->rdomid, + pg_info->nents, &exported->refs_info); + if (tmp < 0) { + dev_err(hy_drv_priv->dev, "pages sharing failed\n"); + return tmp; + } + op[7] = tmp & 0xffffffff; + op[8] = (tmp >> 32) & 0xffffffff; + } + + op[9] = exported->sz_priv; + + /* driver/application specific private info */ + memcpy(&op[10], exported->priv, op[9]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) + return -ENOMEM; + + /* composing a message to the importer */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]); + + ret = bknd_ops->send_req(exported->rdomid, req, true); + + kfree(req); + + return ret; +} + +/* Fast path exporting routine in case same buffer is already exported. + * In this function, we skip normal exporting process and just update + * private data on both VMs (importer and exporter) + * + * return '1' if reexport is needed, return '0' if succeeds, return + * Kernel error code if something goes wrong + */ +static int fastpath_export(hyper_dmabuf_id_t hid, int sz_priv, char *priv) +{ + int reexport = 1; + int ret = 0; + struct exported_sgt_info *exported; + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) + return reexport; + + if (exported->valid == false) + return reexport; + + /* + * Check if unexport is already scheduled for that buffer, + * if so try to cancel it. If that will fail, buffer needs + * to be reexport once again. + */ + if (exported->unexport_sched) { + if (!cancel_delayed_work_sync(&exported->unexport)) + return reexport; + + exported->unexport_sched = false; + } + + /* if there's any change in size of private data. 
+ * we reallocate space for private data with new size + */ + if (sz_priv != exported->sz_priv) { + kfree(exported->priv); + + /* truncating size */ + if (sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = sz_priv; + + exported->priv = kcalloc(1, exported->sz_priv, + GFP_KERNEL); + + if (!exported->priv) { + hyper_dmabuf_remove_exported(exported->hid); + hyper_dmabuf_cleanup_sgt_info(exported, true); + kfree(exported); + return -ENOMEM; + } + } + + /* update private data in sgt_info with new ones */ + ret = copy_from_user(exported->priv, priv, exported->sz_priv); + if (ret) { + dev_err(hy_drv_priv->dev, + "Failed to load a new private data\n"); + ret = -EINVAL; + } else { + /* send an export msg for updating priv in importer */ + ret = send_export_msg(exported, NULL); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to send a new private data\n"); + ret = -EBUSY; + } + } + + return ret; +} + +static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_remote *export_remote_attr = + (struct ioctl_hyper_dmabuf_export_remote *)data; + struct dma_buf *dma_buf; + struct dma_buf_attachment *attachment; + struct sg_table *sgt; + struct pages_info *pg_info; + struct exported_sgt_info *exported; + hyper_dmabuf_id_t hid; + int ret = 0; + + if (hy_drv_priv->domid == export_remote_attr->remote_domain) { + dev_err(hy_drv_priv->dev, + "exporting to the same VM is not permitted\n"); + return -EINVAL; + } + + dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd); + + if (IS_ERR(dma_buf)) { + dev_err(hy_drv_priv->dev, "Cannot get dma buf\n"); + return PTR_ERR(dma_buf); + } + + /* we check if this specific attachment was already exported + * to the same domain and if yes and it's valid sgt_info, + * it returns hyper_dmabuf_id of pre-exported sgt_info + */ + hid = hyper_dmabuf_find_hid_exported(dma_buf, + export_remote_attr->remote_domain); + + if (hid.id != -1) { + ret = fastpath_export(hid, export_remote_attr->sz_priv, + export_remote_attr->priv); + + /* return if fastpath_export succeeds or + * gets some fatal error + */ + if (ret <= 0) { + dma_buf_put(dma_buf); + export_remote_attr->hid = hid; + return ret; + } + } + + attachment = dma_buf_attach(dma_buf, hy_drv_priv->dev); + if (IS_ERR(attachment)) { + dev_err(hy_drv_priv->dev, "cannot get attachment\n"); + ret = PTR_ERR(attachment); + goto fail_attach; + } + + sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL); + + if (IS_ERR(sgt)) { + dev_err(hy_drv_priv->dev, "cannot map attachment\n"); + ret = PTR_ERR(sgt); + goto fail_map_attachment; + } + + exported = kcalloc(1, sizeof(*exported), GFP_KERNEL); + + if (!exported) { + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + /* possible truncation */ + if (export_remote_attr->sz_priv > MAX_SIZE_PRIV_DATA) + exported->sz_priv = MAX_SIZE_PRIV_DATA; + else + exported->sz_priv = export_remote_attr->sz_priv; + + /* creating buffer for private data of buffer */ + if (exported->sz_priv != 0) { + exported->priv = kcalloc(1, exported->sz_priv, GFP_KERNEL); + + if (!exported->priv) { + ret = -ENOMEM; + goto fail_priv_creation; + } + } else { + dev_err(hy_drv_priv->dev, "size is 0\n"); + } + + exported->hid = hyper_dmabuf_get_hid(); + + /* no more exported dmabuf allowed */ + if (exported->hid.id == -1) { + dev_err(hy_drv_priv->dev, + "exceeds allowed number of dmabuf to be exported\n"); + ret = -ENOMEM; + goto fail_sgt_info_creation; + } + + exported->rdomid = export_remote_attr->remote_domain; + 
exported->dma_buf = dma_buf; + exported->valid = true; + + exported->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL); + if (!exported->active_sgts) { + ret = -ENOMEM; + goto fail_map_active_sgts; + } + + exported->active_attached = kmalloc(sizeof(struct attachment_list), + GFP_KERNEL); + if (!exported->active_attached) { + ret = -ENOMEM; + goto fail_map_active_attached; + } + + exported->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_kmapped) { + ret = -ENOMEM; + goto fail_map_va_kmapped; + } + + exported->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), + GFP_KERNEL); + if (!exported->va_vmapped) { + ret = -ENOMEM; + goto fail_map_va_vmapped; + } + + exported->active_sgts->sgt = sgt; + exported->active_attached->attach = attachment; + exported->va_kmapped->vaddr = NULL; + exported->va_vmapped->vaddr = NULL; + + /* initialize list of sgt, attachment and vaddr for dmabuf sync + * via shadow dma-buf + */ + INIT_LIST_HEAD(&exported->active_sgts->list); + INIT_LIST_HEAD(&exported->active_attached->list); + INIT_LIST_HEAD(&exported->va_kmapped->list); + INIT_LIST_HEAD(&exported->va_vmapped->list); + + /* copy private data to sgt_info */ + ret = copy_from_user(exported->priv, export_remote_attr->priv, + exported->sz_priv); + + if (ret) { + dev_err(hy_drv_priv->dev, + "failed to load private data\n"); + ret = -EINVAL; + goto fail_export; + } + + pg_info = hyper_dmabuf_ext_pgs(sgt); + if (!pg_info) { + dev_err(hy_drv_priv->dev, + "failed to construct pg_info\n"); + ret = -ENOMEM; + goto fail_export; + } + + exported->nents = pg_info->nents; + + /* now register it to export list */ + hyper_dmabuf_register_exported(exported); + + export_remote_attr->hid = exported->hid; + + ret = send_export_msg(exported, pg_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "failed to send out the export request\n"); + goto fail_send_request; + } + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + + exported->filp = filp; + + return ret; + +/* Clean-up if error occurs */ + +fail_send_request: + hyper_dmabuf_remove_exported(exported->hid); + + /* free pg_info */ + kfree(pg_info->pgs); + kfree(pg_info); + +fail_export: + kfree(exported->va_vmapped); + +fail_map_va_vmapped: + kfree(exported->va_kmapped); + +fail_map_va_kmapped: + kfree(exported->active_attached); + +fail_map_active_attached: + kfree(exported->active_sgts); + kfree(exported->priv); + +fail_priv_creation: + kfree(exported); + +fail_map_active_sgts: +fail_sgt_info_creation: + dma_buf_unmap_attachment(attachment, sgt, + DMA_BIDIRECTIONAL); + +fail_map_attachment: + dma_buf_detach(dma_buf, attachment); + +fail_attach: + dma_buf_put(dma_buf); + + return ret; +} + +static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_export_fd *export_fd_attr = + (struct ioctl_hyper_dmabuf_export_fd *)data; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + struct imported_sgt_info *imported; + struct hyper_dmabuf_req *req; + struct page **data_pgs; + int op[4]; + int i; + int ret = 0; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* look for dmabuf for the id */ + imported = hyper_dmabuf_find_imported(export_fd_attr->hid); + + /* can't find sgt from the table */ + if (!imported) { + dev_err(hy_drv_priv->dev, "can't find the entry\n"); + return -ENOENT; + } + + mutex_lock(&hy_drv_priv->lock); + + imported->importers++; + + /* send notification for export_fd to exporter */ + op[0] = imported->hid.id; + + for (i = 0; i < 3; 
i++) + op[i+1] = imported->hid.rng_key[i]; + + dev_dbg(hy_drv_priv->dev, "Export FD of buffer {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD, &op[0]); + + ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, true); + + if (ret < 0) { + /* in case of timeout other end eventually will receive request, + * so we need to undo it + */ + hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, false); + kfree(req); + dev_err(hy_drv_priv->dev, + "Failed to create sgt or notify exporter\n"); + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return ret; + } + + kfree(req); + + if (ret == HYPER_DMABUF_REQ_ERROR) { + dev_err(hy_drv_priv->dev, + "Buffer invalid {id:%d key:%d %d %d}, cannot import\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + imported->importers--; + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + ret = 0; + + dev_dbg(hy_drv_priv->dev, + "Found buffer gref 0x%lx off %d\n", + imported->ref_handle, imported->frst_ofst); + + dev_dbg(hy_drv_priv->dev, + "last len %d nents %d domain %d\n", + imported->last_len, imported->nents, + HYPER_DMABUF_DOM_ID(imported->hid)); + + if (!imported->sgt) { + dev_dbg(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} pages not mapped yet\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], imported->hid.rng_key[2]); + + data_pgs = bknd_ops->map_shared_pages(imported->ref_handle, + HYPER_DMABUF_DOM_ID(imported->hid), + imported->nents, + &imported->refs_info); + + if (!data_pgs) { + dev_err(hy_drv_priv->dev, + "can't map pages hid {id:%d key:%d %d %d}\n", + imported->hid.id, imported->hid.rng_key[0], + imported->hid.rng_key[1], + imported->hid.rng_key[2]); + + imported->importers--; + + req = kcalloc(1, sizeof(*req), GFP_KERNEL); + + if (!req) { + mutex_unlock(&hy_drv_priv->lock); + return -ENOMEM; + } + + hyper_dmabuf_create_req(req, + HYPER_DMABUF_EXPORT_FD_FAILED, + &op[0]); + bknd_ops->send_req(HYPER_DMABUF_DOM_ID(imported->hid), req, + false); + kfree(req); + mutex_unlock(&hy_drv_priv->lock); + return -EINVAL; + } + + imported->sgt = hyper_dmabuf_create_sgt(data_pgs, + imported->frst_ofst, + imported->last_len, + imported->nents); + + } + + export_fd_attr->fd = hyper_dmabuf_export_fd(imported, + export_fd_attr->flags); + + if (export_fd_attr->fd < 0) { + /* fail to get fd */ + ret = export_fd_attr->fd; + } + + mutex_unlock(&hy_drv_priv->lock); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return ret; +} + +/* unexport dmabuf from the database and send int req to the source domain + * to unmap it. 
+ */
+static void delayed_unexport(struct work_struct *work)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	struct exported_sgt_info *exported;
+	int op[4];
+	int i, ret;
+
+	if (!work)
+		return;
+
+	exported = container_of(work, struct exported_sgt_info, unexport.work);
+
+	dev_dbg(hy_drv_priv->dev,
+		"Marking buffer {id:%d key:%d %d %d} as invalid\n",
+		exported->hid.id, exported->hid.rng_key[0],
+		exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+	/* no longer valid */
+	exported->valid = false;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return;
+
+	op[0] = exported->hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = exported->hid.rng_key[i];
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &op[0]);
+
+	/* Now send unexport request to remote domain, marking
+	 * that the buffer should not be used anymore
+	 */
+	ret = bknd_ops->send_req(exported->rdomid, req, true);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"unexport message for buffer {id:%d key:%d %d %d} failed\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+	}
+
+	kfree(req);
+	exported->unexport_sched = false;
+
+	/* Clean up immediately if the importer has never mapped the buffer
+	 * (so no SGT was constructed on the importer side). Otherwise it is
+	 * cleaned up later in remote sync, when the final release op is
+	 * called (the importer does that only once no consumer of locally
+	 * exported FDs is left)
+	 */
+	if (exported->active == 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"cleaning up buffer {id:%d key:%d %d %d} completely\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		hyper_dmabuf_cleanup_sgt_info(exported, false);
+		hyper_dmabuf_remove_exported(exported->hid);
+
+		/* register hyper_dmabuf_id to the list for reuse */
+		hyper_dmabuf_store_hid(exported->hid);
+
+		/* free the private data whenever it was allocated */
+		if (exported->sz_priv > 0 && exported->priv)
+			kfree(exported->priv);
+
+		kfree(exported);
+	}
+}
+
+/* Schedule unexport of dmabuf.
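+ *
+ * From userspace, scheduling an unexport is a single ioctl on the open
+ * /dev/hyper_dmabuf descriptor. A hedged sketch (the 500 ms grace period
+ * is arbitrary; the struct and the IOCTL_HYPER_DMABUF_UNEXPORT request
+ * code come from the driver's UAPI header):
+ *
+ *	struct ioctl_hyper_dmabuf_unexport arg = { .hid = hid,
+ *						   .delay_ms = 500 };
+ *
+ *	if (ioctl(fd, IOCTL_HYPER_DMABUF_UNEXPORT, &arg) < 0)
+ *		perror("unexport");
+ *
+ * arg.status is written back by the driver on completion.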
+ */ +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_unexport *unexport_attr = + (struct ioctl_hyper_dmabuf_unexport *)data; + struct exported_sgt_info *exported; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + /* find dmabuf in export list */ + exported = hyper_dmabuf_find_exported(unexport_attr->hid); + + dev_dbg(hy_drv_priv->dev, + "scheduling unexport of buffer {id:%d key:%d %d %d}\n", + unexport_attr->hid.id, unexport_attr->hid.rng_key[0], + unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]); + + /* failed to find corresponding entry in export list */ + if (exported == NULL) { + unexport_attr->status = -ENOENT; + return -ENOENT; + } + + if (exported->unexport_sched) + return 0; + + exported->unexport_sched = true; + INIT_DELAYED_WORK(&exported->unexport, delayed_unexport); + schedule_delayed_work(&exported->unexport, + msecs_to_jiffies(unexport_attr->delay_ms)); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +static int hyper_dmabuf_query_ioctl(struct file *filp, void *data) +{ + struct ioctl_hyper_dmabuf_query *query_attr = + (struct ioctl_hyper_dmabuf_query *)data; + struct exported_sgt_info *exported = NULL; + struct imported_sgt_info *imported = NULL; + int ret = 0; + + if (HYPER_DMABUF_DOM_ID(query_attr->hid) == hy_drv_priv->domid) { + /* query for exported dmabuf */ + exported = hyper_dmabuf_find_exported(query_attr->hid); + if (exported) { + ret = hyper_dmabuf_query_exported(exported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in exp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } else { + /* query for imported dmabuf */ + imported = hyper_dmabuf_find_imported(query_attr->hid); + if (imported) { + ret = hyper_dmabuf_query_imported(imported, + query_attr->item, + &query_attr->info); + } else { + dev_err(hy_drv_priv->dev, + "hid {id:%d key:%d %d %d} not in imp list\n", + query_attr->hid.id, + query_attr->hid.rng_key[0], + query_attr->hid.rng_key[1], + query_attr->hid.rng_key[2]); + return -ENOENT; + } + } + + return ret; +} + +const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = { + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_TX_CH_SETUP, + hyper_dmabuf_tx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_RX_CH_SETUP, + hyper_dmabuf_rx_ch_setup_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, + hyper_dmabuf_export_remote_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, + hyper_dmabuf_export_fd_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, + hyper_dmabuf_unexport_ioctl, 0), + HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, + hyper_dmabuf_query_ioctl, 0), +}; + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param) +{ + const struct hyper_dmabuf_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + int ret; + hyper_dmabuf_ioctl_t func; + char *kdata; + + if (nr >= ARRAY_SIZE(hyper_dmabuf_ioctls)) { + dev_err(hy_drv_priv->dev, "invalid ioctl\n"); + return -EINVAL; + } + + ioctl = &hyper_dmabuf_ioctls[nr]; + + func = ioctl->func; + + if (unlikely(!func)) { + dev_err(hy_drv_priv->dev, "no function\n"); + return -EINVAL; + } + + kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL); + if (!kdata) + return -ENOMEM; + + if (copy_from_user(kdata, (void __user *)param, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + 
"failed to copy from user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + + ret = func(filp, kdata); + + if (copy_to_user((void __user *)param, kdata, + _IOC_SIZE(cmd)) != 0) { + dev_err(hy_drv_priv->dev, + "failed to copy to user arguments\n"); + ret = -EFAULT; + goto ioctl_error; + } + +ioctl_error: + kfree(kdata); + + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h new file mode 100644 index 000000000000..5991a87b194f --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ioctl.h @@ -0,0 +1,50 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_IOCTL_H__ +#define __HYPER_DMABUF_IOCTL_H__ + +typedef int (*hyper_dmabuf_ioctl_t)(struct file *filp, void *data); + +struct hyper_dmabuf_ioctl_desc { + unsigned int cmd; + int flags; + hyper_dmabuf_ioctl_t func; + const char *name; +}; + +#define HYPER_DMABUF_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = { \ + .cmd = ioctl, \ + .func = _func, \ + .flags = _flags, \ + .name = #ioctl \ + } + +long hyper_dmabuf_ioctl(struct file *filp, + unsigned int cmd, unsigned long param); + +int hyper_dmabuf_unexport_ioctl(struct file *filp, void *data); + +#endif //__HYPER_DMABUF_IOCTL_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c new file mode 100644 index 000000000000..84cfb065bddd --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.c @@ -0,0 +1,292 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include "hyper_dmabuf_drv.h" +#include "hyper_dmabuf_list.h" +#include "hyper_dmabuf_id.h" + +DECLARE_HASHTABLE(hyper_dmabuf_hash_imported, MAX_ENTRY_IMPORTED); +DECLARE_HASHTABLE(hyper_dmabuf_hash_exported, MAX_ENTRY_EXPORTED); + +#ifdef CONFIG_HYPER_DMABUF_SYSFS +static ssize_t hyper_dmabuf_imported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_imported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->imported->hid; + int nents = info_entry->imported->nents; + bool valid = info_entry->imported->valid; + int num_importers = info_entry->imported->importers; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, numi:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 't' : 'f'), + num_importers); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static ssize_t hyper_dmabuf_exported_show(struct device *drv, + struct device_attribute *attr, + char *buf) +{ + struct list_entry_exported *info_entry; + int bkt; + ssize_t count = 0; + size_t total = 0; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) { + hyper_dmabuf_id_t hid = info_entry->exported->hid; + int nents = info_entry->exported->nents; + bool valid = info_entry->exported->valid; + int importer_exported = info_entry->exported->active; + + total += nents; + count += scnprintf(buf + count, PAGE_SIZE - count, + "hid:{%d %d %d %d}, nent:%d, v:%c, ie:%d\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2], nents, (valid ? 
't' : 'f'), + importer_exported); + } + count += scnprintf(buf + count, PAGE_SIZE - count, + "total nents: %lu\n", total); + + return count; +} + +static DEVICE_ATTR(imported, 0400, hyper_dmabuf_imported_show, NULL); +static DEVICE_ATTR(exported, 0400, hyper_dmabuf_exported_show, NULL); + +int hyper_dmabuf_register_sysfs(struct device *dev) +{ + int err; + + err = device_create_file(dev, &dev_attr_imported); + if (err < 0) + goto err1; + err = device_create_file(dev, &dev_attr_exported); + if (err < 0) + goto err2; + + return 0; +err2: + device_remove_file(dev, &dev_attr_imported); +err1: + return -1; +} + +int hyper_dmabuf_unregister_sysfs(struct device *dev) +{ + device_remove_file(dev, &dev_attr_imported); + device_remove_file(dev, &dev_attr_exported); + return 0; +} + +#endif + +int hyper_dmabuf_table_init(void) +{ + hash_init(hyper_dmabuf_hash_imported); + hash_init(hyper_dmabuf_hash_exported); + return 0; +} + +int hyper_dmabuf_table_destroy(void) +{ + /* TODO: cleanup hyper_dmabuf_hash_imported + * and hyper_dmabuf_hash_exported + */ + return 0; +} + +int hyper_dmabuf_register_exported(struct exported_sgt_info *exported) +{ + struct list_entry_exported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->exported = exported; + + hash_add(hyper_dmabuf_hash_exported, &info_entry->node, + info_entry->exported->hid.id); + + return 0; +} + +int hyper_dmabuf_register_imported(struct imported_sgt_info *imported) +{ + struct list_entry_imported *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->imported = imported; + + hash_add(hyper_dmabuf_hash_imported, &info_entry->node, + info_entry->imported->hid.id); + + return 0; +} + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) + return info_entry->exported; + + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid) +{ + struct list_entry_exported *info_entry; + hyper_dmabuf_id_t hid = {-1, {0, 0, 0} }; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + if (info_entry->exported->dma_buf == dmabuf && + info_entry->exported->rdomid == domid) + return info_entry->exported->hid; + + return hid; +} + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) + return info_entry->imported; + /* if key is unmatched, given HID is invalid, + * so returning NULL + */ + break; + } + + return NULL; +} + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid) +{ + struct list_entry_exported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->exported->hid.id 
== hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->exported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid) +{ + struct list_entry_imported *info_entry; + int bkt; + + hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) + /* checking hid.id first */ + if (info_entry->imported->hid.id == hid.id) { + /* then key is compared */ + if (hyper_dmabuf_hid_keycomp(info_entry->imported->hid, + hid)) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + break; + } + + return -ENOENT; +} + +void hyper_dmabuf_foreach_exported( + void (*func)(struct exported_sgt_info *, void *attr), + void *attr) +{ + struct list_entry_exported *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(hyper_dmabuf_hash_exported, bkt, tmp, + info_entry, node) { + func(info_entry->exported, attr); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h new file mode 100644 index 000000000000..f7102f5db75d --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_list.h @@ -0,0 +1,71 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
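Both hash tables are keyed on hid.id alone (with 7-bit tables, i.e. 2^7 = 128 buckets each), and a lookup only succeeds when the 96-bit random key also matches; an id hit with a key mismatch is treated as an invalid handle rather than searching on. A standalone model of that policy, for illustration only:

	#include <stdbool.h>
	#include <string.h>

	typedef struct { int id; int rng_key[3]; } hyper_dmabuf_id_t;

	/* the id selects the entry; the random key authenticates it */
	static bool hid_matches(hyper_dmabuf_id_t a, hyper_dmabuf_id_t b)
	{
		if (a.id != b.id)
			return false;	/* a different buffer altogether */
		/* same id but wrong key: a forged or stale handle, reject it */
		return memcmp(a.rng_key, b.rng_key, sizeof(a.rng_key)) == 0;
	}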
+ * + */ + +#ifndef __HYPER_DMABUF_LIST_H__ +#define __HYPER_DMABUF_LIST_H__ + +#include "hyper_dmabuf_struct.h" + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_EXPORTED 7 +/* number of bits to be used for imported dmabufs hash table */ +#define MAX_ENTRY_IMPORTED 7 + +struct list_entry_exported { + struct exported_sgt_info *exported; + struct hlist_node node; +}; + +struct list_entry_imported { + struct imported_sgt_info *imported; + struct hlist_node node; +}; + +int hyper_dmabuf_table_init(void); + +int hyper_dmabuf_table_destroy(void); + +int hyper_dmabuf_register_exported(struct exported_sgt_info *info); + +/* search for pre-exported sgt and return id of it if it exist */ +hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, + int domid); + +int hyper_dmabuf_register_imported(struct imported_sgt_info *info); + +struct exported_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid); + +struct imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid); + +int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid); + +void hyper_dmabuf_foreach_exported(void (*func)(struct exported_sgt_info *, + void *attr), void *attr); + +int hyper_dmabuf_register_sysfs(struct device *dev); +int hyper_dmabuf_unregister_sysfs(struct device *dev); + +#endif /* __HYPER_DMABUF_LIST_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c new file mode 100644 index 000000000000..fe9e4e2339a1 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.c @@ -0,0 +1,414 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_remote_sync.h"
+#include "hyper_dmabuf_event.h"
+#include "hyper_dmabuf_list.h"
+
+struct cmd_process {
+	struct work_struct work;
+	struct hyper_dmabuf_req *rq;
+	int domid;
+};
+
+void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req,
+			     enum hyper_dmabuf_command cmd, int *op)
+{
+	int i;
+
+	req->stat = HYPER_DMABUF_REQ_NOT_RESPONDED;
+	req->cmd = cmd;
+
+	switch (cmd) {
+	/* as exporter, commands to importer */
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : 32 LSB of top-level reference number for shared pages
+		 * op8 : 32 MSB of top-level reference number for shared pages
+		 * op9 : size of private data (from op10)
+		 * op10 ~ : Driver-specific private data
+		 *	    (e.g. graphic buffer's meta info)
+		 */
+
+		memcpy(&req->op[0], &op[0], 10 * sizeof(int) + op[9]);
+		break;
+
+	case HYPER_DMABUF_NOTIFY_UNEXPORT:
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
+		 * op0~op3 : hyper_dmabuf_id_t hid
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_EXPORT_FD:
+	case HYPER_DMABUF_EXPORT_FD_FAILED:
+		/* dmabuf fd is being created on importer side or importing
+		 * failed
+		 *
+		 * command : HYPER_DMABUF_EXPORT_FD or
+		 *	     HYPER_DMABUF_EXPORT_FD_FAILED,
+		 * op0~op3 : hyper_dmabuf_id
+		 */
+
+		for (i = 0; i < 4; i++)
+			req->op[i] = op[i];
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer (probably not needed)
+		 * for dmabuf synchronization
+		 */
+		break;
+
+	case HYPER_DMABUF_OPS_TO_SOURCE:
+		/* notifying dmabuf map/unmap to exporter; map makes
+		 * the driver do shadow mapping or unmapping for
+		 * synchronization with the original exporter (e.g. i915)
+		 *
+		 * command : DMABUF_OPS_TO_SOURCE.
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
+		 */
+		for (i = 0; i < 5; i++)
+			req->op[i] = op[i];
+		break;
+
+	default:
+		/* no command found */
+		return;
+	}
+}
+
+static void cmd_process_work(struct work_struct *work)
+{
+	struct imported_sgt_info *imported;
+	struct cmd_process *proc = container_of(work,
+						struct cmd_process, work);
+	struct hyper_dmabuf_req *req;
+	hyper_dmabuf_id_t hid;
+	int i;
+
+	req = proc->rq;
+
+	switch (req->cmd) {
+	case HYPER_DMABUF_EXPORT:
+		/* exporting pages for dmabuf */
+		/* command : HYPER_DMABUF_EXPORT,
+		 * op0~op3 : hyper_dmabuf_id
+		 * op4 : number of pages to be shared
+		 * op5 : offset of data in the first page
+		 * op6 : length of data in the last page
+		 * op7 : 32 LSB of top-level reference number for shared pages
+		 * op8 : 32 MSB of top-level reference number for shared pages
+		 * op9 : size of private data (from op10)
+		 * op10 ~ : Driver-specific private data
+		 *	    (e.g. graphic buffer's meta info)
+		 */
+
+		/* if nents == 0, it means it is a message only for
+		 * priv synchronization. 
for existing imported_sgt_info + * so not creating a new one + */ + if (req->op[4] == 0) { + hyper_dmabuf_id_t exist = {req->op[0], + {req->op[1], req->op[2], + req->op[3] } }; + + imported = hyper_dmabuf_find_imported(exist); + + if (!imported) { + dev_err(hy_drv_priv->dev, + "Can't find imported sgt_info\n"); + break; + } + + /* if size of new private data is different, + * we reallocate it. + */ + if (imported->sz_priv != req->op[9]) { + kfree(imported->priv); + imported->sz_priv = req->op[9]; + imported->priv = kcalloc(1, req->op[9], + GFP_KERNEL); + if (!imported->priv) { + /* set it invalid */ + imported->valid = 0; + break; + } + } + + /* updating priv data */ + memcpy(imported->priv, &req->op[10], req->op[9]); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + } + + imported = kcalloc(1, sizeof(*imported), GFP_KERNEL); + + if (!imported) + break; + + imported->sz_priv = req->op[9]; + imported->priv = kcalloc(1, req->op[9], GFP_KERNEL); + + if (!imported->priv) { + kfree(imported); + break; + } + + imported->hid.id = req->op[0]; + + for (i = 0; i < 3; i++) + imported->hid.rng_key[i] = req->op[i+1]; + + imported->nents = req->op[4]; + imported->frst_ofst = req->op[5]; + imported->last_len = req->op[6]; + imported->ref_handle = (u64)req->op[8] << 32 | req->op[7]; + + dev_dbg(hy_drv_priv->dev, "DMABUF was exported\n"); + dev_dbg(hy_drv_priv->dev, "\thid{id:%d key:%d %d %d}\n", + req->op[0], req->op[1], req->op[2], + req->op[3]); + dev_dbg(hy_drv_priv->dev, "\tnents %d\n", req->op[4]); + dev_dbg(hy_drv_priv->dev, "\tfirst offset %d\n", req->op[5]); + dev_dbg(hy_drv_priv->dev, "\tlast len %d\n", req->op[6]); + dev_dbg(hy_drv_priv->dev, "\tgrefid 0x%llx\n", + (u64)req->op[8] << 32 | req->op[7]); + + memcpy(imported->priv, &req->op[10], req->op[9]); + + imported->valid = true; + hyper_dmabuf_register_imported(imported); + +#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN + /* generating import event */ + hyper_dmabuf_import_event(imported->hid); +#endif + + break; + + case HYPER_DMABUF_OPS_TO_SOURCE: + /* notifying dmabuf map/unmap to exporter, map will + * make the driver to do shadow mapping + * or unmapping for synchronization with original + * exporter (e.g. i915) + * + * command : DMABUF_OPS_TO_SOURCE. 
+		 * op0~3 : hyper_dmabuf_id
+		 * op4 : enum hyper_dmabuf_ops {....}
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
+
+		hid.id = req->op[0];
+		hid.rng_key[0] = req->op[1];
+		hid.rng_key[1] = req->op[2];
+		hid.rng_key[2] = req->op[3];
+		hyper_dmabuf_remote_sync(hid, req->op[4]);
+
+		break;
+
+	case HYPER_DMABUF_OPS_TO_REMOTE:
+		/* notifying dmabuf map/unmap to importer
+		 * (probably not needed) for dmabuf synchronization
+		 */
+		break;
+
+	default:
+		/* shouldn't get here */
+		break;
+	}
+
+	kfree(req);
+	kfree(proc);
+}
+
+int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
+{
+	struct cmd_process *proc;
+	struct hyper_dmabuf_req *temp_req;
+	struct imported_sgt_info *imported;
+	struct exported_sgt_info *exported;
+	hyper_dmabuf_id_t hid;
+
+	if (!req) {
+		dev_err(hy_drv_priv->dev, "request is NULL\n");
+		return -EINVAL;
+	}
+
+	hid.id = req->op[0];
+	hid.rng_key[0] = req->op[1];
+	hid.rng_key[1] = req->op[2];
+	hid.rng_key[2] = req->op[3];
+
+	if ((req->cmd < HYPER_DMABUF_EXPORT) ||
+	    (req->cmd > HYPER_DMABUF_OPS_TO_SOURCE)) {
+		dev_err(hy_drv_priv->dev, "invalid command\n");
+		return -EINVAL;
+	}
+
+	req->stat = HYPER_DMABUF_REQ_PROCESSED;
+
+	/* HYPER_DMABUF_NOTIFY_UNEXPORT requires immediate
+	 * follow up so can't be processed in workqueue
+	 */
+	if (req->cmd == HYPER_DMABUF_NOTIFY_UNEXPORT) {
+		/* destroy sg_list for hyper_dmabuf_id on remote side */
+		/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
+		 * op0~3 : hyper_dmabuf_id
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"processing HYPER_DMABUF_NOTIFY_UNEXPORT\n");
+
+		imported = hyper_dmabuf_find_imported(hid);
+
+		if (imported) {
+			/* if anything is still using dma_buf */
+			if (imported->importers) {
+				/* Buffer is still in use, just mark that
+				 * it should not be allowed to export its fd
+				 * anymore.
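+				 * The actual teardown then happens from
+				 * hyper_dmabuf_ops_release() once the last
+				 * importer closes its fd.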
+ */ + imported->valid = false; + } else { + /* No one is using buffer, remove it from + * imported list + */ + hyper_dmabuf_remove_imported(hid); + kfree(imported->priv); + kfree(imported); + } + } else { + req->stat = HYPER_DMABUF_REQ_ERROR; + } + + return req->cmd; + } + + /* synchronous dma_buf_fd export */ + if (req->cmd == HYPER_DMABUF_EXPORT_FD) { + /* find a corresponding SGT for the id */ + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else if (!exported->valid) { + dev_dbg(hy_drv_priv->dev, + "Buffer no longer valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + dev_dbg(hy_drv_priv->dev, + "Buffer still valid {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + exported->active++; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + if (req->cmd == HYPER_DMABUF_EXPORT_FD_FAILED) { + dev_dbg(hy_drv_priv->dev, + "HYPER_DMABUF_EXPORT_FD_FAILED for {id:%d key:%d %d %d}\n", + hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]); + + exported = hyper_dmabuf_find_exported(hid); + + if (!exported) { + dev_err(hy_drv_priv->dev, + "buffer {id:%d key:%d %d %d} not found\n", + hid.id, hid.rng_key[0], hid.rng_key[1], + hid.rng_key[2]); + + req->stat = HYPER_DMABUF_REQ_ERROR; + } else { + exported->active--; + req->stat = HYPER_DMABUF_REQ_PROCESSED; + } + return req->cmd; + } + + dev_dbg(hy_drv_priv->dev, + "%s: putting request to workqueue\n", __func__); + temp_req = kmalloc(sizeof(*temp_req), GFP_ATOMIC); + + if (!temp_req) + return -ENOMEM; + + memcpy(temp_req, req, sizeof(*temp_req)); + + proc = kcalloc(1, sizeof(struct cmd_process), GFP_ATOMIC); + + if (!proc) { + kfree(temp_req); + return -ENOMEM; + } + + proc->rq = temp_req; + proc->domid = domid; + + INIT_WORK(&(proc->work), cmd_process_work); + + queue_work(hy_drv_priv->work_queue, &(proc->work)); + + return req->cmd; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h new file mode 100644 index 000000000000..9c8a76bf261e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_msg.h @@ -0,0 +1,87 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_MSG_H__ +#define __HYPER_DMABUF_MSG_H__ + +#define MAX_NUMBER_OF_OPERANDS 64 + +struct hyper_dmabuf_req { + unsigned int req_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +struct hyper_dmabuf_resp { + unsigned int resp_id; + unsigned int stat; + unsigned int cmd; + unsigned int op[MAX_NUMBER_OF_OPERANDS]; +}; + +enum hyper_dmabuf_command { + HYPER_DMABUF_EXPORT = 0x10, + HYPER_DMABUF_EXPORT_FD, + HYPER_DMABUF_EXPORT_FD_FAILED, + HYPER_DMABUF_NOTIFY_UNEXPORT, + HYPER_DMABUF_OPS_TO_REMOTE, + HYPER_DMABUF_OPS_TO_SOURCE, +}; + +enum hyper_dmabuf_ops { + HYPER_DMABUF_OPS_ATTACH = 0x1000, + HYPER_DMABUF_OPS_DETACH, + HYPER_DMABUF_OPS_MAP, + HYPER_DMABUF_OPS_UNMAP, + HYPER_DMABUF_OPS_RELEASE, + HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS, + HYPER_DMABUF_OPS_END_CPU_ACCESS, + HYPER_DMABUF_OPS_KMAP_ATOMIC, + HYPER_DMABUF_OPS_KUNMAP_ATOMIC, + HYPER_DMABUF_OPS_KMAP, + HYPER_DMABUF_OPS_KUNMAP, + HYPER_DMABUF_OPS_MMAP, + HYPER_DMABUF_OPS_VMAP, + HYPER_DMABUF_OPS_VUNMAP, +}; + +enum hyper_dmabuf_req_feedback { + HYPER_DMABUF_REQ_PROCESSED = 0x100, + HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP, + HYPER_DMABUF_REQ_ERROR, + HYPER_DMABUF_REQ_NOT_RESPONDED +}; + +/* create a request packet with given command and operands */ +void hyper_dmabuf_create_req(struct hyper_dmabuf_req *req, + enum hyper_dmabuf_command command, + int *operands); + +/* parse incoming request packet (or response) and take + * appropriate actions for those + */ +int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req); + +#endif // __HYPER_DMABUF_MSG_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c new file mode 100644 index 000000000000..3bd13c584ffc --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.c @@ -0,0 +1,409 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_ops.h"
+#include "hyper_dmabuf_sgl_proc.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
+
+#define WAIT_AFTER_SYNC_REQ 0
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+static int dmabuf_refcount(struct dma_buf *dma_buf)
+{
+	if ((dma_buf != NULL) && (dma_buf->file != NULL))
+		return file_count(dma_buf->file);
+
+	return -EINVAL;
+}
+
+static int sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
+{
+	struct hyper_dmabuf_req *req;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+	int op[5];
+	int i;
+	int ret;
+
+	op[0] = hid.id;
+
+	for (i = 0; i < 3; i++)
+		op[i+1] = hid.rng_key[i];
+
+	op[4] = dmabuf_ops;
+
+	req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+
+	if (!req)
+		return -ENOMEM;
+
+	hyper_dmabuf_create_req(req, HYPER_DMABUF_OPS_TO_SOURCE, &op[0]);
+
+	/* send request and wait for a response */
+	ret = bknd_ops->send_req(HYPER_DMABUF_DOM_ID(hid), req,
+				 WAIT_AFTER_SYNC_REQ);
+
+	if (ret < 0) {
+		dev_dbg(hy_drv_priv->dev,
+			"dmabuf sync request failed:%d\n", req->op[4]);
+	}
+
+	kfree(req);
+
+	return ret;
+}
+
+static int hyper_dmabuf_ops_attach(struct dma_buf *dmabuf,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+				   struct device *dev,
+#endif
+				   struct dma_buf_attachment *attach)
+{
+	struct imported_sgt_info *imported;
+	int ret;
+
+	if (!attach->dmabuf->priv)
+		return -EINVAL;
+
+	imported = (struct imported_sgt_info *)attach->dmabuf->priv;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_ATTACH);
+
+	return ret;
+}
+
+static void hyper_dmabuf_ops_detach(struct dma_buf *dmabuf,
+				    struct dma_buf_attachment *attach)
+{
+	struct imported_sgt_info *imported;
+
+	if (!attach->dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)attach->dmabuf->priv;
+
+	sync_request(imported->hid, HYPER_DMABUF_OPS_DETACH);
+}
+
+static struct sg_table *hyper_dmabuf_ops_map(
+				struct dma_buf_attachment *attachment,
+				enum dma_data_direction dir)
+{
+	struct sg_table *st;
+	struct imported_sgt_info *imported;
+	struct pages_info *pg_info;
+	int ret;
+
+	if (!attachment->dmabuf->priv)
+		return NULL;
+
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
+
+	/* extract pages from sgt */
+	pg_info = hyper_dmabuf_ext_pgs(imported->sgt);
+
+	if (!pg_info)
+		return NULL;
+
+	/* create a new sg_table with extracted pages */
+	st = hyper_dmabuf_create_sgt(pg_info->pgs, pg_info->frst_ofst,
+				     pg_info->last_len, pg_info->nents);
+	if (!st)
+		goto err_free_sg;
+
+	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir))
+		goto err_free_sg;
+
+	ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MAP);
+
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+	return st;
+
+err_free_sg:
+	if (st) {
+		sg_free_table(st);
+		kfree(st);
+	}
+
+	kfree(pg_info->pgs);
+	kfree(pg_info);
+
+	return NULL;
+}
+
+static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
+				   struct sg_table *sg,
+				   enum dma_data_direction dir)
+{
+	struct imported_sgt_info *imported;
+
+	if (!attachment->dmabuf->priv)
+		return;
+
+	imported = (struct imported_sgt_info *)attachment->dmabuf->priv;
+
+	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+
+	sg_free_table(sg);
+	kfree(sg);
+
+	sync_request(imported->hid, HYPER_DMABUF_OPS_UNMAP);
+}
+
+static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
+{
+	struct 
imported_sgt_info *imported; + struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops; + int finish; + + if (!dma_buf->priv) + return; + + imported = (struct imported_sgt_info *)dma_buf->priv; + + if (!dmabuf_refcount(imported->dma_buf)) + imported->dma_buf = NULL; + + imported->importers--; + + if (imported->importers == 0) { + bknd_ops->unmap_shared_pages(&imported->refs_info, + imported->nents); + + if (imported->sgt) { + sg_free_table(imported->sgt); + kfree(imported->sgt); + imported->sgt = NULL; + } + } + + finish = imported && !imported->valid && + !imported->importers; + + sync_request(imported->hid, HYPER_DMABUF_OPS_RELEASE); + + /* + * Check if buffer is still valid and if not remove it + * from imported list. That has to be done after sending + * sync request + */ + if (finish) { + hyper_dmabuf_remove_imported(imported->hid); + kfree(imported->priv); + kfree(imported); + } +} + +static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS); + + return ret; +} + +static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, + enum dma_data_direction dir) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + return sync_request(imported->hid, HYPER_DMABUF_OPS_END_CPU_ACCESS); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) +static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP_ATOMIC); + + /* TODO: NULL for now. Need to return the addr of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, + unsigned long pgnum, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP_ATOMIC); +} +#endif + +static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KMAP); + + /* for now NULL.. 
need to return the address of mapped region */ + return NULL; +} + +static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum, + void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_KUNMAP); +} + +static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct imported_sgt_info *imported; + int ret; + + if (!dmabuf->priv) + return -EINVAL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + ret = sync_request(imported->hid, HYPER_DMABUF_OPS_MMAP); + + return ret; +} + +static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return NULL; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VMAP); + + return NULL; +} + +static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr) +{ + struct imported_sgt_info *imported; + + if (!dmabuf->priv) + return; + + imported = (struct imported_sgt_info *)dmabuf->priv; + + sync_request(imported->hid, HYPER_DMABUF_OPS_VUNMAP); +} + +static const struct dma_buf_ops hyper_dmabuf_ops = { + .attach = hyper_dmabuf_ops_attach, + .detach = hyper_dmabuf_ops_detach, + .map_dma_buf = hyper_dmabuf_ops_map, + .unmap_dma_buf = hyper_dmabuf_ops_unmap, + .release = hyper_dmabuf_ops_release, + .begin_cpu_access = hyper_dmabuf_ops_begin_cpu_access, + .end_cpu_access = hyper_dmabuf_ops_end_cpu_access, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0) + .map_atomic = hyper_dmabuf_ops_kmap_atomic, + .unmap_atomic = hyper_dmabuf_ops_kunmap_atomic, +#endif + .map = hyper_dmabuf_ops_kmap, + .unmap = hyper_dmabuf_ops_kunmap, + .mmap = hyper_dmabuf_ops_mmap, + .vmap = hyper_dmabuf_ops_vmap, + .vunmap = hyper_dmabuf_ops_vunmap, +}; + +/* exporting dmabuf as fd */ +int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags) +{ + int fd = -1; + + /* call hyper_dmabuf_export_dmabuf and create + * and bind a handle for it then release + */ + hyper_dmabuf_export_dma_buf(imported); + + if (imported->dma_buf) + fd = dma_buf_fd(imported->dma_buf, flags); + + return fd; +} + +void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported) +{ + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + exp_info.ops = &hyper_dmabuf_ops; + + /* multiple of PAGE_SIZE, not considering offset */ + exp_info.size = imported->sgt->nents * PAGE_SIZE; + exp_info.flags = /* not sure about flag */ 0; + exp_info.priv = imported; + + imported->dma_buf = dma_buf_export(&exp_info); +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h new file mode 100644 index 000000000000..c5505a41f0fe --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_ops.h @@ -0,0 +1,32 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial 
portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_OPS_H__
+#define __HYPER_DMABUF_OPS_H__
+
+int hyper_dmabuf_export_fd(struct imported_sgt_info *imported, int flags);
+
+void hyper_dmabuf_export_dma_buf(struct imported_sgt_info *imported);
+
+#endif /* __HYPER_DMABUF_OPS_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
new file mode 100644
index 000000000000..1f2f56b1162d
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/uaccess.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_id.h"
+
+#define HYPER_DMABUF_SIZE(nents, first_offset, last_len) \
+	((nents)*PAGE_SIZE - (first_offset) - PAGE_SIZE + (last_len))
+
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = EXPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(exported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = exported->rdomid;
+		break;
+
+	/* size of dmabuf in bytes */
+	case HYPER_DMABUF_QUERY_SIZE:
+		*info = exported->dma_buf->size;
+		break;
+
+	/* whether the buffer is used by importer */
+	case HYPER_DMABUF_QUERY_BUSY:
+		*info = (exported->active > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !exported->valid;
+		break;
+
+	/* whether the buffer is scheduled to be unexported */
+	case HYPER_DMABUF_QUERY_DELAYED_UNEXPORTED:
+		*info = !exported->unexport_sched;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = exported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (exported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *) *info,
+					 exported->priv,
+					 exported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+				int query, unsigned long *info)
+{
+	switch (query) {
+	case HYPER_DMABUF_QUERY_TYPE:
+		*info = IMPORTED;
+		break;
+
+	/* exporting domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_EXPORTER:
+		*info = HYPER_DMABUF_DOM_ID(imported->hid);
+		break;
+
+	/* importing domain of this specific dmabuf */
+	case HYPER_DMABUF_QUERY_IMPORTER:
+		*info = hy_drv_priv->domid;
+		break;
+
+	/* size of dmabuf in bytes */
+	case HYPER_DMABUF_QUERY_SIZE:
+		if (imported->dma_buf) {
+			/* if local dma_buf is created (if it's
+			 * ever mapped), retrieve it directly
+			 * from struct dma_buf *
+			 */
+			*info = imported->dma_buf->size;
+		} else {
+			/* calculate it from given nents, frst_ofst
+			 * and last_len
+			 */
+			*info = HYPER_DMABUF_SIZE(imported->nents,
+						  imported->frst_ofst,
+						  imported->last_len);
+		}
+		break;
+
+	/* whether the buffer is used or not */
+	case HYPER_DMABUF_QUERY_BUSY:
+		/* checks if it's used by importer */
+		*info = (imported->importers > 0);
+		break;
+
+	/* whether the buffer is unexported */
+	case HYPER_DMABUF_QUERY_UNEXPORTED:
+		*info = !imported->valid;
+		break;
+
+	/* size of private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO_SIZE:
+		*info = imported->sz_priv;
+		break;
+
+	/* copy private info attached to buffer */
+	case HYPER_DMABUF_QUERY_PRIV_INFO:
+		if (imported->sz_priv > 0) {
+			int n;
+
+			n = copy_to_user((void __user *)*info,
+					 imported->priv,
+					 imported->sz_priv);
+			if (n != 0)
+				return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
new file mode 100644
index 000000000000..65ae738f8f53
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_query.h
@@ -0,0 +1,10 @@
+#ifndef __HYPER_DMABUF_QUERY_H__
+#define __HYPER_DMABUF_QUERY_H__
+
+int hyper_dmabuf_query_imported(struct imported_sgt_info *imported,
+				int query, unsigned long *info);
+
+int hyper_dmabuf_query_exported(struct exported_sgt_info *exported,
+				int query, unsigned long *info);
+
+#endif /* __HYPER_DMABUF_QUERY_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
new file mode 100644
index 000000000000..3cd3d6c98c33
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+/* Whenever importer does dma operations from remote domain,
+ * a notification is sent to the exporter so that exporter
+ * issues equivalent dma operation on the original dma buf
+ * for indirect synchronization via shadow operations.
+ *
+ * All ptrs and references (e.g. struct sg_table*,
+ * struct dma_buf_attachment) created via these operations on
+ * exporter's side are kept in stacks (implemented as circular
+ * linked-lists) separately so that those can be re-referenced
+ * later when unmapping operations are invoked to free those.
+ *
+ * The very first element on the bottom of each stack
+ * is what is created when initial exporting is issued so it
+ * should not be modified or released by this function.
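+ *
+ * For example, when the importer maps the buffer, its sync_request()
+ * sends HYPER_DMABUF_OPS_TO_SOURCE with op0~op3 holding the
+ * hyper_dmabuf_id and op4 = HYPER_DMABUF_OPS_MAP; the message is then
+ * parsed on this side and hyper_dmabuf_remote_sync(hid,
+ * HYPER_DMABUF_OPS_MAP) is invoked from the command workqueue, which
+ * performs the shadow dma_buf_map_attachment() below.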
+ */
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
+{
+	struct exported_sgt_info *exported;
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	int ret;
+
+	/* find a corresponding SGT for the id */
+	exported = hyper_dmabuf_find_exported(hid);
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev,
+			"dmabuf remote sync::can't find exported list\n");
+		return -ENOENT;
+	}
+
+	switch (ops) {
+	case HYPER_DMABUF_OPS_ATTACH:
+		attachl = kcalloc(1, sizeof(*attachl), GFP_KERNEL);
+
+		if (!attachl)
+			return -ENOMEM;
+
+		attachl->attach = dma_buf_attach(exported->dma_buf,
+						 hy_drv_priv->dev);
+
+		if (!attachl->attach) {
+			kfree(attachl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_ATTACH\n");
+			return -ENOMEM;
+		}
+
+		list_add(&attachl->list, &exported->active_attached->list);
+		break;
+
+	case HYPER_DMABUF_OPS_DETACH:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_DETACH\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be detached\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		dma_buf_detach(exported->dma_buf, attachl->attach);
+		list_del(&attachl->list);
+		kfree(attachl);
+		break;
+
+	case HYPER_DMABUF_OPS_MAP:
+		if (list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf attachment left to be mapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+
+		sgtl = kcalloc(1, sizeof(*sgtl), GFP_KERNEL);
+
+		if (!sgtl)
+			return -ENOMEM;
+
+		sgtl->sgt = dma_buf_map_attachment(attachl->attach,
+						   DMA_BIDIRECTIONAL);
+		if (!sgtl->sgt) {
+			kfree(sgtl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_MAP\n");
+			return -ENOMEM;
+		}
+		list_add(&sgtl->list, &exported->active_sgts->list);
+		break;
+
+	case HYPER_DMABUF_OPS_UNMAP:
+		if (list_empty(&exported->active_sgts->list) ||
+		    list_empty(&exported->active_attached->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_UNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no SGT or attach left to be unmapped\n");
+			return -EFAULT;
+		}
+
+		attachl = list_first_entry(&exported->active_attached->list,
+					   struct attachment_list, list);
+		sgtl = list_first_entry(&exported->active_sgts->list,
+					struct sgt_list, list);
+
+		dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+					 DMA_BIDIRECTIONAL);
+		list_del(&sgtl->list);
+		kfree(sgtl);
+		break;
+
+	case HYPER_DMABUF_OPS_RELEASE:
+		dev_dbg(hy_drv_priv->dev,
+			"{id:%d key:%d %d %d} released, ref left: %d\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2],
+			exported->active - 1);
+
+		exported->active--;
+
+		/* If there are still importers just break, if no then
+		 * continue with final cleanup
+		 */
+		if (exported->active)
+			break;
+
+		/* Importer just released buffer fd, check if there is
+		 * any other importer still using it.
+		 * If not and buffer was unexported, clean up shared
+		 * data and remove that buffer.
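+		 * (exported->valid is cleared when the buffer is
+		 * unexported.)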
+		 */
+		dev_dbg(hy_drv_priv->dev,
+			"Buffer {id:%d key:%d %d %d} final released\n",
+			exported->hid.id, exported->hid.rng_key[0],
+			exported->hid.rng_key[1], exported->hid.rng_key[2]);
+
+		if (!exported->valid && !exported->active &&
+		    !exported->unexport_sched) {
+			hyper_dmabuf_cleanup_sgt_info(exported, false);
+			hyper_dmabuf_remove_exported(hid);
+			kfree(exported);
+			/* store hyper_dmabuf_id in the list for reuse */
+			hyper_dmabuf_store_hid(hid);
+		}
+
+		break;
+
+	case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
+		ret = dma_buf_begin_cpu_access(exported->dma_buf,
+					       DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_END_CPU_ACCESS:
+		ret = dma_buf_end_cpu_access(exported->dma_buf,
+					     DMA_BIDIRECTIONAL);
+		if (ret) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+			return ret;
+		}
+		break;
+
+	case HYPER_DMABUF_OPS_KMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KMAP:
+		va_kmapl = kcalloc(1, sizeof(*va_kmapl), GFP_KERNEL);
+		if (!va_kmapl)
+			return -ENOMEM;
+
+		/* dummy kmapping of 1 page */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+		if (ops == HYPER_DMABUF_OPS_KMAP_ATOMIC)
+			va_kmapl->vaddr = dma_buf_kmap_atomic(
+						exported->dma_buf, 1);
+		else
+#endif
+			va_kmapl->vaddr = dma_buf_kmap(
+						exported->dma_buf, 1);
+
+		if (!va_kmapl->vaddr) {
+			kfree(va_kmapl);
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+			return -ENOMEM;
+		}
+		list_add(&va_kmapl->list, &exported->va_kmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
+	case HYPER_DMABUF_OPS_KUNMAP:
+		if (list_empty(&exported->va_kmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+
+		va_kmapl = list_first_entry(&exported->va_kmapped->list,
+					    struct kmap_vaddr_list, list);
+		if (!va_kmapl->vaddr) {
+			dev_err(hy_drv_priv->dev,
+				"HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+			return PTR_ERR(va_kmapl->vaddr);
+		}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
+		/* unmapping 1 page */
+		if (ops == HYPER_DMABUF_OPS_KUNMAP_ATOMIC)
+			dma_buf_kunmap_atomic(exported->dma_buf,
+					      1, va_kmapl->vaddr);
+		else
+#endif
+			dma_buf_kunmap(exported->dma_buf,
+				       1, va_kmapl->vaddr);
+
+		list_del(&va_kmapl->list);
+		kfree(va_kmapl);
+		break;
+
+	case HYPER_DMABUF_OPS_MMAP:
+		/* currently not supported: looking for a way to create
+		 * a dummy vma
+		 */
+		dev_warn(hy_drv_priv->dev,
+			 "remote sync::synchronized mmap is not supported\n");
+		break;
+
+	case HYPER_DMABUF_OPS_VMAP:
+		va_vmapl = kcalloc(1, sizeof(*va_vmapl), GFP_KERNEL);
+
+		if (!va_vmapl)
+			return -ENOMEM;
+
+		/* dummy vmapping */
+		va_vmapl->vaddr = dma_buf_vmap(exported->dma_buf);
+
+		if (!va_vmapl->vaddr) {
+			kfree(va_vmapl);
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VMAP\n");
+			return -ENOMEM;
+		}
+		list_add(&va_vmapl->list, &exported->va_vmapped->list);
+		break;
+
+	case HYPER_DMABUF_OPS_VUNMAP:
+		if (list_empty(&exported->va_vmapped->list)) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			dev_err(hy_drv_priv->dev,
+				"no more dmabuf VA to be freed\n");
+			return -EFAULT;
+		}
+		va_vmapl = list_first_entry(&exported->va_vmapped->list,
+					    struct vmap_vaddr_list, list);
+		if (!va_vmapl || va_vmapl->vaddr == NULL) {
+			dev_err(hy_drv_priv->dev,
+				"remote sync::HYPER_DMABUF_OPS_VUNMAP\n");
+			return -EFAULT;
+		}
+
+		dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr);
+
+		
list_del(&va_vmapl->list); + kfree(va_vmapl); + break; + + default: + /* program should not get here */ + break; + } + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h new file mode 100644 index 000000000000..366389287f4e --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_remote_sync.h @@ -0,0 +1,30 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__ +#define __HYPER_DMABUF_REMOTE_SYNC_H__ + +int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops); + +#endif // __HYPER_DMABUF_REMOTE_SYNC_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c new file mode 100644 index 000000000000..c1887d1ad709 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.c @@ -0,0 +1,261 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ *
+ * Authors:
+ *    Dongwon Kim
+ *    Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_sgl_proc.h"
+
+#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+
+/* return total number of pages referenced by a sgt
+ * for pre-calculation of # of pages behind a given sgt
+ */
+static int get_num_pgs(struct sg_table *sgt)
+{
+	struct scatterlist *sgl;
+	int length, i;
+	/* at least one page */
+	int num_pages = 1;
+
+	sgl = sgt->sgl;
+
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+
+	/* round-up */
+	num_pages += ((length + PAGE_SIZE - 1)/PAGE_SIZE);
+
+	for (i = 1; i < sgt->nents; i++) {
+		sgl = sg_next(sgl);
+
+		/* round-up */
+		num_pages += ((sgl->length + PAGE_SIZE - 1) /
+			     PAGE_SIZE);
+	}
+
+	return num_pages;
+}
+
+/* extract pages directly from struct sg_table */
+struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt)
+{
+	struct pages_info *pg_info;
+	int i, j, k;
+	int length;
+	struct scatterlist *sgl;
+
+	pg_info = kmalloc(sizeof(*pg_info), GFP_KERNEL);
+	if (!pg_info)
+		return NULL;
+
+	pg_info->pgs = kmalloc_array(get_num_pgs(sgt),
+				     sizeof(struct page *),
+				     GFP_KERNEL);
+
+	if (!pg_info->pgs) {
+		kfree(pg_info);
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	pg_info->nents = 1;
+	pg_info->frst_ofst = sgl->offset;
+	pg_info->pgs[0] = sg_page(sgl);
+	length = sgl->length - PAGE_SIZE + sgl->offset;
+	i = 1;
+
+	while (length > 0) {
+		pg_info->pgs[i] = nth_page(sg_page(sgl), i);
+		length -= PAGE_SIZE;
+		pg_info->nents++;
+		i++;
+	}
+
+	for (j = 1; j < sgt->nents; j++) {
+		sgl = sg_next(sgl);
+		pg_info->pgs[i++] = sg_page(sgl);
+		length = sgl->length - PAGE_SIZE;
+		pg_info->nents++;
+		k = 1;
+
+		while (length > 0) {
+			pg_info->pgs[i++] = nth_page(sg_page(sgl), k++);
+			length -= PAGE_SIZE;
+			pg_info->nents++;
+		}
+	}
+
+	/*
+	 * length at that point will be 0 or negative,
+	 * so to calculate last page size just add it to PAGE_SIZE
+	 */
+	pg_info->last_len = PAGE_SIZE + length;
+
+	return pg_info;
+}
+
+/* create sg_table with given pages and other parameters */
+struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs,
+					 int frst_ofst, int last_len,
+					 int nents)
+{
+	struct sg_table *sgt;
+	struct scatterlist *sgl;
+	int i, ret;
+
+	sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!sgt)
+		return NULL;
+
+	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+	if (ret) {
+		if (sgt) {
+			sg_free_table(sgt);
+			kfree(sgt);
+		}
+
+		return NULL;
+	}
+
+	sgl = sgt->sgl;
+
+	sg_set_page(sgl, pgs[0], PAGE_SIZE-frst_ofst, frst_ofst);
+
+	for (i = 1; i < nents-1; i++) {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], PAGE_SIZE, 0);
+	}
+
+	if (nents > 1) /* more than one page */ {
+		sgl = sg_next(sgl);
+		sg_set_page(sgl, pgs[i], last_len, 0);
+	}
+
+	return sgt;
+}
+
+int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported,
+				  int force)
+{
+	struct sgt_list *sgtl;
+	struct attachment_list *attachl;
+	struct kmap_vaddr_list *va_kmapl;
+	struct vmap_vaddr_list *va_vmapl;
+	struct hyper_dmabuf_bknd_ops *bknd_ops = hy_drv_priv->bknd_ops;
+
+	if (!exported) {
+		dev_err(hy_drv_priv->dev, "invalid hyper_dmabuf_id\n");
+		return -EINVAL;
+	}
+
+	/* if force != 1, sgt_info can be released only if
+	 * there's no activity on exported dma-buf on importer
+	 * side.
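+	 * force == 1 tears everything down even while the importer
+	 * still uses the buffer, so it is only meant for forced
+	 * cleanup paths.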
+ */ + if (!force && + exported->active) { + dev_warn(hy_drv_priv->dev, + "dma-buf is used by importer\n"); + + return -EPERM; + } + + /* force == 1 is not recommended */ + while (!list_empty(&exported->va_kmapped->list)) { + va_kmapl = list_first_entry(&exported->va_kmapped->list, + struct kmap_vaddr_list, list); + + dma_buf_kunmap(exported->dma_buf, 1, va_kmapl->vaddr); + list_del(&va_kmapl->list); + kfree(va_kmapl); + } + + while (!list_empty(&exported->va_vmapped->list)) { + va_vmapl = list_first_entry(&exported->va_vmapped->list, + struct vmap_vaddr_list, list); + + dma_buf_vunmap(exported->dma_buf, va_vmapl->vaddr); + list_del(&va_vmapl->list); + kfree(va_vmapl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + sgtl = list_first_entry(&exported->active_sgts->list, + struct sgt_list, list); + + dma_buf_unmap_attachment(attachl->attach, sgtl->sgt, + DMA_BIDIRECTIONAL); + list_del(&sgtl->list); + kfree(sgtl); + } + + while (!list_empty(&exported->active_sgts->list)) { + attachl = list_first_entry(&exported->active_attached->list, + struct attachment_list, list); + + dma_buf_detach(exported->dma_buf, attachl->attach); + list_del(&attachl->list); + kfree(attachl); + } + + /* Start cleanup of buffer in reverse order to exporting */ + bknd_ops->unshare_pages(&exported->refs_info, exported->nents); + + /* unmap dma-buf */ + dma_buf_unmap_attachment(exported->active_attached->attach, + exported->active_sgts->sgt, + DMA_BIDIRECTIONAL); + + /* detatch dma-buf */ + dma_buf_detach(exported->dma_buf, exported->active_attached->attach); + + /* close connection to dma-buf completely */ + dma_buf_put(exported->dma_buf); + exported->dma_buf = NULL; + + kfree(exported->active_sgts); + kfree(exported->active_attached); + kfree(exported->va_kmapped); + kfree(exported->va_vmapped); + kfree(exported->priv); + + exported->active_sgts = NULL; + exported->active_attached = NULL; + exported->va_kmapped = NULL; + exported->va_vmapped = NULL; + exported->priv = NULL; + + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h new file mode 100644 index 000000000000..869d98204e03 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_sgl_proc.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_IMP_H__ +#define __HYPER_DMABUF_IMP_H__ + +/* extract pages directly from struct sg_table */ +struct pages_info *hyper_dmabuf_ext_pgs(struct sg_table *sgt); + +/* create sg_table with given pages and other parameters */ +struct sg_table *hyper_dmabuf_create_sgt(struct page **pgs, + int frst_ofst, int last_len, + int nents); + +int hyper_dmabuf_cleanup_sgt_info(struct exported_sgt_info *exported, + int force); + +void hyper_dmabuf_free_sgt(struct sg_table *sgt); + +#endif /* __HYPER_DMABUF_IMP_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h new file mode 100644 index 000000000000..f7b7de0e1432 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/hyper_dmabuf_struct.h @@ -0,0 +1,141 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_STRUCT_H__ +#define __HYPER_DMABUF_STRUCT_H__ + +/* stack of mapped sgts */ +struct sgt_list { + struct sg_table *sgt; + struct list_head list; +}; + +/* stack of attachments */ +struct attachment_list { + struct dma_buf_attachment *attach; + struct list_head list; +}; + +/* stack of vaddr mapped via kmap */ +struct kmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* stack of vaddr mapped via vmap */ +struct vmap_vaddr_list { + void *vaddr; + struct list_head list; +}; + +/* Exporter builds pages_info before sharing pages */ +struct pages_info { + int frst_ofst; + int last_len; + int nents; + struct page **pgs; +}; + + +/* Exporter stores references to sgt in a hash table + * Exporter keeps these references for synchronization + * and tracking purposes + */ +struct exported_sgt_info { + hyper_dmabuf_id_t hid; + + /* VM ID of importer */ + int rdomid; + + struct dma_buf *dma_buf; + int nents; + + /* list for tracking activities on dma_buf */ + struct sgt_list *active_sgts; + struct attachment_list *active_attached; + struct kmap_vaddr_list *va_kmapped; + struct vmap_vaddr_list *va_vmapped; + + /* set to 0 when unexported. 
Importer doesn't
+	 * do a new mapping of buffer if valid == false
+	 */
+	bool valid;
+
+	/* nonzero while the buffer is actively used
+	 * (mapped) by importer
+	 */
+	int active;
+
+	/* hypervisor specific reference data for shared pages */
+	void *refs_info;
+
+	struct delayed_work unexport;
+	bool unexport_sched;
+
+	/* list for file pointers associated with all user space
+	 * applications that have exported this same buffer to
+	 * another VM. This needs to be tracked to know whether
+	 * the buffer can be completely freed.
+	 */
+	struct file *filp;
+
+	/* size of private */
+	size_t sz_priv;
+
+	/* private data associated with the exported buffer */
+	char *priv;
+};
+
+/* imported_sgt_info contains information about an imported DMA_BUF.
+ * This info is kept in the IMPORT list and asynchronously retrieved and
+ * used to map the DMA_BUF on importer VM's side upon export fd ioctl
+ * request from user-space
+ */
+
+struct imported_sgt_info {
+	hyper_dmabuf_id_t hid;	/* unique id for shared dmabuf imported */
+
+	/* hypervisor-specific handle to pages */
+	unsigned long ref_handle;
+
+	/* offset and size info of DMA_BUF */
+	int frst_ofst;
+	int last_len;
+	int nents;
+
+	struct dma_buf *dma_buf;
+	struct sg_table *sgt;
+
+	void *refs_info;
+	bool valid;
+	int importers;
+
+	/* size of private */
+	size_t sz_priv;
+
+	/* private data associated with the exported buffer */
+	char *priv;
+};
+
+#endif /* __HYPER_DMABUF_STRUCT_H__ */
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c
new file mode 100644
index 000000000000..b308d7e00d18
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_be_drv.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Mateusz Polrola
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "../hyper_dmabuf_msg.h"
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_virtio_common.h"
+#include "hyper_dmabuf_virtio_fe_list.h"
+#include "hyper_dmabuf_virtio_shm.h"
+#include "hyper_dmabuf_virtio_comm_ring.h"
+
+/*
+ * Identifies which queue is used for TX and RX.
+ * Note: it is the opposite of the frontend definition.
+ */
+enum virtio_queue_type {
+	HDMA_VIRTIO_RX_QUEUE = 0,
+	HDMA_VIRTIO_TX_QUEUE,
+	HDMA_VIRTIO_QUEUE_MAX
+};
+
+/* Data required for sending TX messages using virtqueues */
+struct virtio_be_tx_data {
+	struct iovec tx_iov;
+	uint16_t tx_idx;
+};
+
+struct virtio_be_priv {
+	struct virtio_dev_info dev;
+	struct virtio_vq_info vqs[HDMA_VIRTIO_QUEUE_MAX];
+	bool busy;
+	struct hyper_dmabuf_req *pending_tx_req;
+	struct virtio_comm_ring tx_ring;
+	struct mutex lock;
+};
+
+
+/*
+ * Received response to TX request,
+ * or empty buffer to be used for TX requests in future
+ */
+static void virtio_be_handle_tx_kick(struct virtio_vq_info *vq,
+				     struct virtio_fe_info *fe_info)
+{
+	struct virtio_be_priv *priv = fe_info->priv;
+	/* Fill last used buffer with received buffer details */
+	struct virtio_be_tx_data *tx_data =
+		(struct virtio_be_tx_data *)
+		virtio_comm_ring_pop(&priv->tx_ring);
+
+	virtio_vq_getchain(vq, &tx_data->tx_idx, &tx_data->tx_iov, 1, NULL);
+
+	/* Copy response if request was synchronous */
+	if (priv->busy) {
+		memcpy(priv->pending_tx_req,
+		       tx_data->tx_iov.iov_base,
+		       tx_data->tx_iov.iov_len);
+		priv->busy = false;
+	}
+}
+
+/*
+ * Received request from frontend
+ */
+static void virtio_be_handle_rx_kick(struct virtio_vq_info *vq,
+				     struct virtio_fe_info *fe_info)
+{
+	struct iovec iov;
+	uint16_t idx;
+	struct hyper_dmabuf_req *req = NULL;
+	int len;
+	int ret;
+
+	/* Make sure we will process all pending requests */
+	while (virtio_vq_has_descs(vq)) {
+		virtio_vq_getchain(vq, &idx, &iov, 1, NULL);
+
+		if (iov.iov_len != sizeof(struct hyper_dmabuf_req)) {
+			/* HACK: if int size buffer was provided,
+			 * treat that as request to get frontend vmid
+			 */
+			if (iov.iov_len == sizeof(int)) {
+				*((int *)iov.iov_base) = fe_info->vmid;
+				len = iov.iov_len;
+			} else {
+				len = 0;
+				dev_warn(hy_drv_priv->dev,
+					 "received request with wrong size");
+				dev_warn(hy_drv_priv->dev,
+					 "%zu != %zu\n",
+					 iov.iov_len,
+					 sizeof(struct hyper_dmabuf_req));
+			}
+
+			virtio_vq_relchain(vq, idx, len);
+			continue;
+		}
+
+		req = (struct hyper_dmabuf_req *)iov.iov_base;
+
+		ret = hyper_dmabuf_msg_parse(1, req);
+
+		len = iov.iov_len;
+
+		virtio_vq_relchain(vq, idx, len);
+	}
+	virtio_vq_endchains(vq, 1);
+}
+
+/*
+ * Check in which virtqueue the buffer was received and process it
+ * accordingly.
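+ * The RX queue carries requests coming from the frontend; the TX
+ * queue carries responses and spare buffers that are reused for
+ * backend-initiated requests.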
+ */ +static void virtio_be_handle_vq_kick( + int vq_idx, struct virtio_fe_info *fe_info) +{ + struct virtio_vq_info *vq; + + vq = &fe_info->priv->vqs[vq_idx]; + + if (vq_idx == HDMA_VIRTIO_RX_QUEUE) + virtio_be_handle_rx_kick(vq, fe_info); + else + virtio_be_handle_tx_kick(vq, fe_info); +} + +/* + * Received new buffer in virtqueue + */ +static int virtio_be_handle_kick(int client_id, unsigned long *ioreqs_map) +{ + int val = -1; + struct vhm_request *req; + struct virtio_fe_info *fe_info; + int vcpu; + + fe_info = virtio_fe_find(client_id); + if (fe_info == NULL) { + dev_warn(hy_drv_priv->dev, "Client %d not found\n", client_id); + return -EINVAL; + } + + while (1) { + vcpu = find_first_bit(ioreqs_map, fe_info->max_vcpu); + if (vcpu == fe_info->max_vcpu) + break; + req = &fe_info->req_buf[vcpu]; + if (atomic_read(&req->processed) == REQ_STATE_PROCESSING && + req->client == fe_info->client_id) { + if (req->reqs.pio_request.direction == REQUEST_READ) + req->reqs.pio_request.value = 0; + else + val = req->reqs.pio_request.value; + + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + acrn_ioreq_complete_request(fe_info->client_id, vcpu); + } + } + + if (val >= 0) + virtio_be_handle_vq_kick(val, fe_info); + + return 0; +} + +/* + * New frontend is connecting to backend. + * Creates virtqueues for it and registers internally. + */ +static int virtio_be_register_vhm_client(struct virtio_dev_info *d) +{ + unsigned int vmid; + struct vm_info info; + struct virtio_fe_info *fe_info; + int ret; + + fe_info = kcalloc(1, sizeof(*fe_info), GFP_KERNEL); + if (fe_info == NULL) + return -ENOMEM; + + fe_info->priv = + container_of(d, struct virtio_be_priv, dev); + vmid = d->_ctx.vmid; + fe_info->vmid = vmid; + + dev_dbg(hy_drv_priv->dev, + "Virtio frontend from vm %d connected\n", vmid); + + fe_info->client_id = + acrn_ioreq_create_client(vmid, + virtio_be_handle_kick, + "hyper dmabuf kick"); + if (fe_info->client_id < 0) { + dev_err(hy_drv_priv->dev, + "Failed to create client of ACRN ioreq\n"); + goto err; + } + + ret = acrn_ioreq_add_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to add iorange to acrn ioreq\n"); + goto err; + } + + ret = vhm_get_vm_info(vmid, &info); + if (ret < 0) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + dev_err(hy_drv_priv->dev, "Failed in vhm_get_vm_info\n"); + goto err; + } + + fe_info->max_vcpu = info.max_vcpu; + + fe_info->req_buf = acrn_ioreq_get_reqbuf(fe_info->client_id); + if (fe_info->req_buf == NULL) { + acrn_ioreq_del_iorange(fe_info->client_id, + d->io_range_type ? REQ_MMIO : REQ_PORTIO, + d->io_range_start, + d->io_range_start + d->io_range_len - 1); + + dev_err(hy_drv_priv->dev, "Failed in acrn_ioreq_get_reqbuf\n"); + goto err; + } + + acrn_ioreq_attach_client(fe_info->client_id, 0); + + virtio_fe_add(fe_info); + + return 0; + +err: + acrn_ioreq_destroy_client(fe_info->client_id); + kfree(fe_info); + + return -EINVAL; +} + +/* + * DM is opening our VBS interface to create new frontend instance. 
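 */

/*
 * Illustrative flow, not part of the patch: how a device model might
 * drive this interface from user space. VBS_SET_VQ and its argument
 * layout come from the VBS-K framework headers and are only assumed here.
 */
#if 0
fd = open("/dev/vbs_hyper_dmabuf", O_RDWR);	/* vbs_k_open() */

/* wires up the guest's virtqueues and, in this driver's override,
 * also registers the frontend via virtio_be_register_vhm_client()
 */
ioctl(fd, VBS_SET_VQ, &vq_cfg);

close(fd);					/* vbs_k_release() */
#endif

/*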
+ */ +static int vbs_k_open(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv; + struct virtio_dev_info *dev; + struct virtio_vq_info *vqs; + int i; + + priv = kcalloc(1, sizeof(*priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + vqs = &priv->vqs[0]; + + dev = &priv->dev; + + for (i = 0; i < HDMA_VIRTIO_QUEUE_MAX; i++) { + vqs[i].dev = dev; + vqs[i].vq_notify = NULL; + } + dev->vqs = vqs; + + virtio_dev_init(dev, vqs, HDMA_VIRTIO_QUEUE_MAX); + + priv->pending_tx_req = + kcalloc(1, sizeof(struct hyper_dmabuf_req), GFP_KERNEL); + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct virtio_be_tx_data), + REQ_RING_SIZE); + + mutex_init(&priv->lock); + + f->private_data = priv; + + return 0; +} + +static void cleanup_fe(struct virtio_fe_info *fe_info, void *attr) +{ + struct virtio_be_priv *priv = attr; + if (fe_info->priv == priv) { + acrn_ioreq_del_iorange(fe_info->client_id, + priv->dev.io_range_type ? REQ_MMIO : REQ_PORTIO, + priv->dev.io_range_start, + priv->dev.io_range_start + priv->dev.io_range_len - 1); + + acrn_ioreq_destroy_client(fe_info->client_id); + virtio_fe_remove(fe_info->client_id); + kfree(fe_info); + } +} + +static int vbs_k_release(struct inode *inode, struct file *f) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + + kfree(priv->pending_tx_req); + virtio_comm_ring_free(&priv->tx_ring); + + /* + * Find and cleanup virtio frontend that + * has been using released vbs k file + */ + virtio_fe_foreach(cleanup_fe, priv); + + virtio_dev_reset(&priv->dev); + + kfree(priv); + return 0; +} + +static int vbs_k_reset(struct virtio_be_priv *priv) +{ + virtio_comm_ring_free(&priv->tx_ring); + + virtio_fe_foreach(cleanup_fe, priv); + + virtio_dev_reset(&priv->dev); + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct virtio_be_tx_data), + REQ_RING_SIZE); + + return 0; +} + +static long vbs_k_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct virtio_be_priv *priv = + (struct virtio_be_priv *) f->private_data; + void __user *argp = (void __user *)arg; + int r = 0; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No backend private data\n"); + + return -EINVAL; + } + + switch(ioctl) { + case VBS_SET_VQ: + /* Overridden to call additionally + * virtio_be_register_vhm_client */ + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + return -EFAULT; + + if (virtio_be_register_vhm_client(&priv->dev) < 0) + return -EFAULT; + break; + case VBS_RESET_DEV: + vbs_k_reset(priv); + break; + default: + r = virtio_dev_ioctl(&priv->dev, ioctl, argp); + if (r == -ENOIOCTLCMD) + r = virtio_vqs_ioctl(&priv->dev, ioctl, argp); + break; + } + + return r; +} + +static const struct file_operations vbs_hyper_dmabuf_fops = { + .owner = THIS_MODULE, + .open = vbs_k_open, + .release = vbs_k_release, + .unlocked_ioctl = vbs_k_ioctl, + .llseek = noop_llseek, +}; + +static struct miscdevice vbs_hyper_dmabuf_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "vbs_hyper_dmabuf", + .fops = &vbs_hyper_dmabuf_fops, +}; + +static int virtio_be_register(void) +{ + return misc_register(&vbs_hyper_dmabuf_misc); +} + +static void virtio_be_unregister(void) +{ + misc_deregister(&vbs_hyper_dmabuf_misc); +} + +/* + * ACRN SOS will always has vmid 0 + * TODO: check if that always will be true + */ +static int virtio_be_get_vmid(void) +{ + return 0; +} + +static int virtio_be_send_req(int vmid, struct hyper_dmabuf_req *req, + int wait) +{ + int timeout = 1000; + struct virtio_fe_info *fe_info; + struct 
virtio_be_priv *priv;
+	struct virtio_be_tx_data *tx_data;
+	struct virtio_vq_info *vq;
+	int len;
+
+	fe_info = virtio_fe_find_by_vmid(vmid);
+
+	if (fe_info == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"No frontend registered for vmid %d\n", vmid);
+		return -ENOENT;
+	}
+
+	priv = fe_info->priv;
+
+	mutex_lock(&priv->lock);
+
+	/* Check if we have any free buffers for sending new request */
+	while (virtio_comm_ring_full(&priv->tx_ring) &&
+	       timeout--) {
+		usleep_range(100, 120);
+	}
+
+	if (timeout < 0) {
+		dev_warn(hy_drv_priv->dev, "Requests ring full\n");
+		mutex_unlock(&priv->lock);
+		return -EBUSY;
+	}
+
+	/* Get free buffer for sending request from ring */
+	tx_data = (struct virtio_be_tx_data *)
+			virtio_comm_ring_push(&priv->tx_ring);
+
+	vq = &priv->vqs[HDMA_VIRTIO_TX_QUEUE];
+
+	if (tx_data->tx_iov.iov_len != sizeof(struct hyper_dmabuf_req)) {
+		dev_warn(hy_drv_priv->dev,
+			 "TX buffer has wrong size\n");
+		virtio_vq_relchain(vq, tx_data->tx_idx, 0);
+		mutex_unlock(&priv->lock);
+		return -EINVAL;
+	}
+
+	req->req_id = hyper_dmabuf_virtio_get_next_req_id();
+
+	/* Copy request data to virtqueue buffer */
+	memcpy(tx_data->tx_iov.iov_base, req, sizeof(*req));
+	len = tx_data->tx_iov.iov_len;
+
+	/* update req_pending with current request */
+	if (wait) {
+		priv->busy = true;
+		memcpy(priv->pending_tx_req, req, sizeof(*req));
+	}
+
+	virtio_vq_relchain(vq, tx_data->tx_idx, len);
+
+	virtio_vq_endchains(vq, 1);
+
+	if (wait) {
+		/* restart the timeout budget for the response wait */
+		timeout = 1000;
+
+		while (timeout--) {
+			if (priv->pending_tx_req->stat !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0) {
+			mutex_unlock(&priv->lock);
+			dev_err(hy_drv_priv->dev, "request timed-out\n");
+			return -EBUSY;
+		}
+	}
+
+	mutex_unlock(&priv->lock);
+	return 0;
+}
+
+struct hyper_dmabuf_bknd_ops virtio_bknd_ops = {
+	.init = virtio_be_register,
+	.cleanup = virtio_be_unregister,
+	.get_vm_id = virtio_be_get_vmid,
+	.share_pages = virtio_share_pages,
+	.unshare_pages = virtio_unshare_pages,
+	.map_shared_pages = virtio_map_shared_pages,
+	.unmap_shared_pages = virtio_unmap_shared_pages,
+	.init_comm_env = NULL,
+	.destroy_comm = NULL,
+	.init_rx_ch = NULL,
+	.init_tx_ch = NULL,
+	.send_req = virtio_be_send_req,
+};
+
+MODULE_DESCRIPTION("Hyper dmabuf virtio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c
new file mode 100644
index 000000000000..d73bcbcc8e87
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include "hyper_dmabuf_virtio_comm_ring.h" + +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries) +{ + ring->data = kcalloc(num_entries, entry_size, GFP_KERNEL); + + if (!ring->data) + return -ENOMEM; + + ring->head = 0; + ring->tail = 0; + ring->used = 0; + ring->num_entries = num_entries; + ring->entry_size = entry_size; + + return 0; +} + +void virtio_comm_ring_free(struct virtio_comm_ring *ring) +{ + kfree(ring->data); + ring->data = NULL; +} + +bool virtio_comm_ring_full(struct virtio_comm_ring *ring) +{ + if (ring->used == ring->num_entries) + return true; + + return false; +} + +void *virtio_comm_ring_push(struct virtio_comm_ring *ring) +{ + int old_head; + + if (virtio_comm_ring_full(ring)) + return NULL; + + old_head = ring->head; + + ring->head++; + ring->head %= ring->num_entries; + ring->used++; + + return ring->data + (ring->entry_size * old_head); +} + +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring) +{ + int old_tail = ring->tail; + + ring->tail++; + ring->tail %= ring->num_entries; + ring->used--; + + return ring->data + (ring->entry_size * old_tail); +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h new file mode 100644 index 000000000000..a95a63af2ba0 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_comm_ring.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMM_RING_H__ +#define __HYPER_DMABUF_VIRTIO_COMM_RING_H__ + +/* Generic ring buffer */ +struct virtio_comm_ring { + /* Buffer allocated for keeping ring entries */ + void *data; + + /* Index pointing to next free element in ring */ + int head; + + /* Index pointing to last released element in ring */ + int tail; + + /* Total number of elements that ring can contain */ + int num_entries; + + /* Size of single ring element in bytes */ + int entry_size; + + /* Number of currently used elements */ + int used; +}; + +/* Initializes given ring for keeping given a + * number of entries of specific size */ +int virtio_comm_ring_init(struct virtio_comm_ring *ring, + int entry_size, + int num_entries); + +/* Frees buffer used for storing ring entries */ +void virtio_comm_ring_free(struct virtio_comm_ring *ring); + +/* Checks if ring is full */ +bool virtio_comm_ring_full(struct virtio_comm_ring *ring); + +/* Gets next free element from ring and marks it as used + * or NULL if ring is full */ +void *virtio_comm_ring_push(struct virtio_comm_ring *ring); + +/* Pops oldest element from ring and marks it as free */ +void *virtio_comm_ring_pop(struct virtio_comm_ring *ring); + +#endif /* __HYPER_DMABUF_VIRTIO_COMM_RING_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c new file mode 100644 index 000000000000..05be74358a74 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.c @@ -0,0 +1,35 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
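 */

/*
 * Usage sketch for the ring API above (illustrative; entry type and
 * capacity are arbitrary). Note that virtio_comm_ring_pop() does not
 * check for an empty ring, so callers must only pop entries they have
 * previously pushed.
 */
#if 0
struct virtio_comm_ring ring;
struct hyper_dmabuf_req *slot;

virtio_comm_ring_init(&ring, sizeof(*slot), 16);

if (!virtio_comm_ring_full(&ring)) {
	/* claim the next free slot and hand it to the transport */
	slot = virtio_comm_ring_push(&ring);
}

/* when the transport completes the oldest entry, release it (FIFO) */
slot = virtio_comm_ring_pop(&ring);

virtio_comm_ring_free(&ring);
#endif

/*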
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include "hyper_dmabuf_virtio_common.h" + +int hyper_dmabuf_virtio_get_next_req_id(void) +{ + static int req_id; + + return req_id++; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h new file mode 100644 index 000000000000..24a652ef54c0 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_common.h @@ -0,0 +1,55 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_COMMON_H__ +#define __HYPER_DMABUF_VIRTIO_COMMON_H__ + +/* + * ACRN uses physicall addresses for memory sharing, + * so size of one page ref will be 64-bits + */ + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(u64)) + +/* Defines size of requests circular buffer */ +#define REQ_RING_SIZE 128 + +extern struct hyper_dmabuf_bknd_ops virtio_bknd_ops; +struct virtio_be_priv; +struct vhm_request; + +/* Entry describing each connected frontend */ +struct virtio_fe_info { + struct virtio_be_priv *priv; + int client_id; + int vmid; + int max_vcpu; + struct vhm_request *req_buf; +}; + +extern struct hyper_dmabuf_private hyper_dmabuf_private; + +int hyper_dmabuf_virtio_get_next_req_id(void); + +#endif /* __HYPER_DMABUF_VIRTIO_COMMON_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c new file mode 100644 index 000000000000..e0c811135699 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_drv.c @@ -0,0 +1,439 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_msg.h" +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_comm_ring.h" + +/* + * Identifies which queue is used for TX and RX + * Note: it is opposite regarding to backend definition + */ +enum virio_queue_type { + HDMA_VIRTIO_TX_QUEUE = 0, + HDMA_VIRTIO_RX_QUEUE, + HDMA_VIRTIO_QUEUE_MAX +}; + +struct virtio_hdma_fe_priv { + struct virtqueue *vqs[HDMA_VIRTIO_QUEUE_MAX]; + struct virtio_comm_ring tx_ring; + struct virtio_comm_ring rx_ring; + int vmid; + /* + * Lock to protect operations on virtqueue + * which are not safe to run concurrently + */ + spinlock_t lock; +}; + +/* Assuming there will be one FE instance per VM */ +static struct virtio_hdma_fe_priv *hyper_dmabuf_virtio_fe; + +/* + * Received response for request. + * No need for copying request with updated result, + * as backend is processing original request data directly. + */ +static void virtio_hdma_fe_tx_done(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + int len; + unsigned long flags; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + spin_lock_irqsave(&priv->lock, flags); + /* Make sure that all pending responses are processed */ + while (virtqueue_get_buf(vq, &len)) { + if (len == sizeof(struct hyper_dmabuf_req)) { + /* Mark that response was received + * and buffer can be reused */ + virtio_comm_ring_pop(&priv->tx_ring); + } + } + spin_unlock_irqrestore(&priv->lock, flags); +} + +/* + * Sends given data buffer via given virtqueue. 
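 *
 * Note that both queues are fed with virtqueue_add_inbuf(), i.e. the
 * buffers stay device-writable: the backend writes its response into
 * the very buffer it received, which is why virtio_hdma_fe_tx_done()
 * above does not need to copy anything back.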
+ */ +static void virtio_hdma_fe_queue_buffer(struct virtio_hdma_fe_priv *priv, + unsigned int queue_nr, + void *buf, size_t size) +{ + struct scatterlist sg; + + if (queue_nr >= HDMA_VIRTIO_QUEUE_MAX) { + dev_dbg(hy_drv_priv->dev, + "queue_nr exceeding max queue number\n"); + return; + } + + sg_init_one(&sg, buf, size); + + virtqueue_add_inbuf(priv->vqs[queue_nr], &sg, 1, buf, GFP_KERNEL); + + virtqueue_kick(priv->vqs[queue_nr]); +} + +/* + * Handle requests coming from other VMs + */ +static void virtio_hdma_fe_handle_rx(struct virtqueue *vq) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vq->vdev->priv; + struct hyper_dmabuf_req *rx_req; + int size, ret; + + if (priv == NULL) { + dev_dbg(hy_drv_priv->dev, + "No frontend private data\n"); + return; + } + + /* Make sure all pending requests will be processed */ + while (virtqueue_get_buf(vq, &size)) { + + /* Get next request from ring */ + rx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_pop(&priv->rx_ring); + + if (size != sizeof(struct hyper_dmabuf_req)) { + dev_dbg(hy_drv_priv->dev, + "Received malformed request\n"); + } else { + ret = hyper_dmabuf_msg_parse(1, rx_req); + } + + /* Send updated request back to virtqueue as a response.*/ + virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE, + rx_req, sizeof(*rx_req)); + } +} + +static int virtio_hdma_fe_probe_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv; + vq_callback_t *callbacks[] = {virtio_hdma_fe_tx_done, + virtio_hdma_fe_handle_rx}; + static const char *names[] = {"txqueue", "rxqueue"}; + int ret; + + priv = kzalloc(sizeof(struct virtio_hdma_fe_priv), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + + virtio_comm_ring_init(&priv->tx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + virtio_comm_ring_init(&priv->rx_ring, + sizeof(struct hyper_dmabuf_req), + REQ_RING_SIZE); + + /* Set vmid to -1 to mark that it is not initialized yet */ + priv->vmid = -1; + + spin_lock_init(&priv->lock); + + vdev->priv = priv; + + ret = virtio_find_vqs(vdev, HDMA_VIRTIO_QUEUE_MAX, + priv->vqs, callbacks, names, NULL); + if (ret) + goto err; + + hyper_dmabuf_virtio_fe = priv; + + return 0; +err: + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + return ret; +} + +static void virtio_hdma_fe_remove_common(struct virtio_device *vdev) +{ + struct virtio_hdma_fe_priv *priv = + (struct virtio_hdma_fe_priv *) vdev->priv; + + if (priv == NULL) { + dev_err(hy_drv_priv->dev, + "No frontend private data\n"); + + return; + } + + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + virtio_comm_ring_free(&priv->tx_ring); + virtio_comm_ring_free(&priv->rx_ring); + kfree(priv); + hyper_dmabuf_virtio_fe = NULL; +} + +static int virtio_hdma_fe_probe(struct virtio_device *vdev) +{ + return virtio_hdma_fe_probe_common(vdev); +} + +static void virtio_hdma_fe_remove(struct virtio_device *vdev) +{ + virtio_hdma_fe_remove_common(vdev); +} + +struct virtio_hdma_restore_work +{ + struct work_struct work; + struct virtio_device *dev; +}; + +/* + * Queues empty requests buffers to backend, + * which will be used by it to send requests back to frontend. 
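 *
 * Before priming those buffers, it also queries this guest's vmid: an
 * int-sized buffer is queued on the TX queue and, per the convention
 * implemented in the backend's RX handler (see the HACK there), the
 * backend fills it with the sender's vmid instead of parsing it as a
 * request.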
 */
+static void virtio_hdma_query_vmid(struct virtio_device *vdev)
+{
+	struct virtio_hdma_fe_priv *priv =
+		(struct virtio_hdma_fe_priv *) vdev->priv;
+	struct hyper_dmabuf_req *rx_req;
+	int timeout = 1000;
+
+	if (priv == NULL) {
+		dev_dbg(hy_drv_priv->dev,
+			"No frontend private data\n");
+
+		return;
+	}
+
+	/* Send a request to query the vmid; ACRN guest instances don't
+	 * know their own ids, but the host does. A small hack is used
+	 * here: a buffer of int size is sent to the backend, and in that
+	 * case the backend fills it with the vmid of the instance that
+	 * sent the request.
+	 */
+	virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_TX_QUEUE,
+		&priv->vmid, sizeof(priv->vmid));
+
+	while (timeout--) {
+		if (priv->vmid > 0)
+			break;
+		usleep_range(100, 120);
+	}
+
+	if (timeout < 0)
+		dev_err(hy_drv_priv->dev,
+			"Cannot query vmid\n");
+
+	while (!virtio_comm_ring_full(&priv->rx_ring)) {
+		rx_req = virtio_comm_ring_push(&priv->rx_ring);
+
+		virtio_hdma_fe_queue_buffer(priv, HDMA_VIRTIO_RX_QUEUE,
+			rx_req, sizeof(*rx_req));
+	}
+}
+
+/*
+ * Queues empty request buffers to the backend, which it will use to
+ * send requests back to the frontend.
+ */
+static void virtio_hdma_fe_scan(struct virtio_device *vdev)
+{
+	virtio_hdma_query_vmid(vdev);
+}
+
+static void virtio_hdma_restore_bh(struct work_struct *w)
+{
+	struct virtio_hdma_restore_work *work =
+		(struct virtio_hdma_restore_work *) w;
+
+	while (!(VIRTIO_CONFIG_S_DRIVER_OK &
+		work->dev->config->get_status(work->dev))) {
+		usleep_range(100, 120);
+	}
+
+	virtio_hdma_query_vmid(work->dev);
+	kfree(w);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int virtio_hdma_fe_freeze(struct virtio_device *vdev)
+{
+	virtio_hdma_fe_remove_common(vdev);
+	return 0;
+}
+
+static int virtio_hdma_fe_restore(struct virtio_device *vdev)
+{
+	struct virtio_hdma_restore_work *work;
+	int ret;
+
+	ret = virtio_hdma_fe_probe_common(vdev);
+	if (!ret) {
+		work = kmalloc(sizeof(*work), GFP_KERNEL);
+		if (!work)
+			return -ENOMEM;
+		INIT_WORK(&work->work, virtio_hdma_restore_bh);
+		work->dev = vdev;
+		schedule_work(&work->work);
+	}
+
+	return ret;
+}
+#endif
+
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_HYPERDMABUF, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static struct virtio_driver virtio_hdma_fe_driver = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = virtio_hdma_fe_probe,
+	.remove = virtio_hdma_fe_remove,
+	.scan = virtio_hdma_fe_scan,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtio_hdma_fe_freeze,
+	.restore = virtio_hdma_fe_restore,
+#endif
+};
+
+int virtio_hdma_fe_register(void)
+{
+	return register_virtio_driver(&virtio_hdma_fe_driver);
+}
+
+void virtio_hdma_fe_unregister(void)
+{
+	unregister_virtio_driver(&virtio_hdma_fe_driver);
+}
+
+static int virtio_hdma_fe_get_vmid(void)
+{
+	struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe;
+
+	if (hyper_dmabuf_virtio_fe == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"Backend not connected\n");
+		return -1;
+	}
+
+	return priv->vmid;
+}
+
+static int virtio_hdma_fe_send_req(int vmid, struct hyper_dmabuf_req *req,
+				   int wait)
+{
+	struct virtio_hdma_fe_priv *priv = hyper_dmabuf_virtio_fe;
+	struct hyper_dmabuf_req *tx_req;
+	int timeout = 1000;
+	unsigned long flags;
+
+	if (priv == NULL) {
+		dev_err(hy_drv_priv->dev,
+			"Backend not connected\n");
+		return -ENOENT;
+	}
+
+	/* Check if there are any free buffers in ring */
+	while (timeout--) {
+		if (!virtio_comm_ring_full(&priv->tx_ring))
+			break;
+		usleep_range(100, 120);
+	}
+
+	if (timeout < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Timed out while waiting
for free request buffers\n"); + return -EBUSY; + } + + spin_lock_irqsave(&priv->lock, flags); + /* Get free buffer for sending request from ring */ + tx_req = (struct hyper_dmabuf_req *) + virtio_comm_ring_push(&priv->tx_ring); + req->req_id = hyper_dmabuf_virtio_get_next_req_id(); + + /* copy request to buffer that will be used in virtqueue */ + memcpy(tx_req, req, sizeof(*req)); + + virtio_hdma_fe_queue_buffer(hyper_dmabuf_virtio_fe, + HDMA_VIRTIO_TX_QUEUE, + tx_req, sizeof(*tx_req)); + spin_unlock_irqrestore(&priv->lock, flags); + + if (wait) { + while (timeout--) { + if (tx_req->stat != + HYPER_DMABUF_REQ_NOT_RESPONDED) + break; + usleep_range(100, 120); + } + + if (timeout < 0) + return -EBUSY; + } + + return 0; +} + +struct hyper_dmabuf_bknd_ops virtio_bknd_ops = { + .init = virtio_hdma_fe_register, + .cleanup = virtio_hdma_fe_unregister, + .get_vm_id = virtio_hdma_fe_get_vmid, + .share_pages = virtio_share_pages, + .unshare_pages = virtio_unshare_pages, + .map_shared_pages = virtio_map_shared_pages, + .unmap_shared_pages = virtio_unmap_shared_pages, + .send_req = virtio_hdma_fe_send_req, + .init_comm_env = NULL, + .destroy_comm = NULL, + .init_rx_ch = NULL, + .init_tx_ch = NULL, +}; + + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Hyper dmabuf virtio driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c new file mode 100644 index 000000000000..84b6ed5e96c1 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.c @@ -0,0 +1,113 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
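 */

/*
 * Illustrative call flow, not part of the patch: the hyper_dmabuf core
 * is expected to reach this transport only through the ops table above.
 * hyper_dmabuf_create_req() and the command name follow
 * ../hyper_dmabuf_msg.h and are assumed here.
 */
#if 0
struct hyper_dmabuf_req *req = kcalloc(1, sizeof(*req), GFP_KERNEL);

hyper_dmabuf_create_req(req, HYPER_DMABUF_EXPORT, &op[0]);

/* wait == true: block until the backend updates req->stat */
bknd_ops->send_req(vmid, req, true);
#endif

/*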
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_common.h" +#include "hyper_dmabuf_virtio_fe_list.h" + +DECLARE_HASHTABLE(virtio_fe_hash, MAX_ENTRY_FE); + +void virtio_fe_table_init(void) +{ + hash_init(virtio_fe_hash); +} + +int virtio_fe_add(struct virtio_fe_info *fe_info) +{ + struct virtio_fe_info_entry *info_entry; + + info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL); + + if (!info_entry) + return -ENOMEM; + + info_entry->info = fe_info; + + hash_add(virtio_fe_hash, &info_entry->node, + info_entry->info->client_id); + + return 0; +} + +struct virtio_fe_info *virtio_fe_find(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) + return info_entry->info; + + return NULL; +} + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->vmid == vmid) + return info_entry->info; + + return NULL; +} + +int virtio_fe_remove(int client_id) +{ + struct virtio_fe_info_entry *info_entry; + int bkt; + + hash_for_each(virtio_fe_hash, bkt, info_entry, node) + if (info_entry->info->client_id == client_id) { + hash_del(&info_entry->node); + kfree(info_entry); + return 0; + } + + return -ENOENT; +} + +void virtio_fe_foreach( + void (*func)(struct virtio_fe_info *, void *attr), + void *attr) +{ + struct virtio_fe_info_entry *info_entry; + struct hlist_node *tmp; + int bkt; + + hash_for_each_safe(virtio_fe_hash, bkt, tmp, + info_entry, node) { + func(info_entry->info, attr); + } +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h new file mode 100644 index 000000000000..c353c1e5baa1 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_fe_list.h @@ -0,0 +1,51 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
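 */

/*
 * Usage sketch for virtio_fe_foreach() above (illustrative): counting
 * the registered frontends with a caller-defined callback and attr.
 */
#if 0
static void count_fe(struct virtio_fe_info *fe_info, void *attr)
{
	(*(int *)attr)++;
}

int count = 0;

virtio_fe_foreach(count_fe, &count);
#endif

/*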
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_FE_LIST_H__ +#define __HYPER_DMABUF_VIRTIO_FE_LIST_H__ + +/* number of bits to be used for exported dmabufs hash table */ +#define MAX_ENTRY_FE 7 + +struct virtio_fe_info; + +struct virtio_fe_info_entry { + struct virtio_fe_info *info; + struct hlist_node node; +}; + +void virtio_fe_table_init(void); + +int virtio_fe_add(struct virtio_fe_info *fe_info); + +int virtio_fe_remove(int client_id); + +struct virtio_fe_info *virtio_fe_find(int client_id); + +struct virtio_fe_info *virtio_fe_find_by_vmid(int vmid); + +void virtio_fe_foreach(void (*func)(struct virtio_fe_info *, + void *attr), void *attr); + +#endif /* __HYPER_DMABUF_VIRTIO_FE_LIST_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c new file mode 100644 index 000000000000..b18f7cae0115 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.c @@ -0,0 +1,343 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
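 */

/*
 * Layout recap with a worked example (illustrative): pages are shared
 * through a two-level table of guest-physical addresses reached via a
 * single root reference:
 *
 *   lvl3_gref -> lvl3 page: [ addr of lvl2 page 0, addr of lvl2 page 1, ... ]
 *   lvl2 page ->            [ addr of data page 0 ... addr of data page 511 ]
 *
 * With 4 KiB pages, REFS_PER_PAGE = 4096 / sizeof(u64) = 512, so sharing
 * e.g. nents = 1000 pages takes two lvl2 pages (512 + 488 refs), and one
 * lvl3 page can describe up to 512 * 512 pages (1 GiB) per buffer.
 */

/*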
+ * + * Authors: + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +#include +#endif +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_virtio_shm.h" +#include "hyper_dmabuf_virtio_common.h" + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 **lvl2_table; + u64 lvl3_gref; + struct page **data_pages; + int n_lvl2_refs; + int nents_last; + int vmid; +}; +#else +struct virtio_shared_pages_info { + u64 *lvl3_table; + u64 *lvl2_table; + u64 lvl3_gref; +}; +#endif + +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE +static long virtio_be_share_pages(struct page **pages, + int vmid, + int nents, + void **refs_info) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static int virtio_be_unshare_pages(void **refs_info, + int nents) +{ + dev_err(hy_drv_priv->dev, + "Pages sharing not available with ACRN backend in SOS\n"); + + return -EINVAL; +} + +static struct page **virtio_be_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + u64 *lvl3_table = NULL; + u64 **lvl2_table = NULL; + struct page **data_pages = NULL; + struct virtio_shared_pages_info *sh_pages_info = NULL; + void *pageaddr; + + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_refs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + if (sh_pages_info == NULL) + goto map_failed; + + *refs_info = (void *) sh_pages_info; + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + if (data_pages == NULL) + goto map_failed; + + lvl2_table = kcalloc(n_lvl2_refs, sizeof(u64 *), GFP_KERNEL); + if (lvl2_table == NULL) + goto map_failed; + + lvl3_table = (u64 *)map_guest_phys(vmid, lvl3_gref, PAGE_SIZE); + if (lvl3_table == NULL) + goto map_failed; + + for (i = 0; i < n_lvl2_refs; i++) { + lvl2_table[i] = (u64 *)map_guest_phys(vmid, + lvl3_table[i], + PAGE_SIZE); + if (lvl2_table[i] == NULL) + goto map_failed; + } + + k = 0; + for (i = 0; i < n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + } + + for (j = 0; j < nents_last; j++) { + pageaddr = map_guest_phys(vmid, + lvl2_table[i][j], + PAGE_SIZE); + if (pageaddr == NULL) + goto map_failed; + + data_pages[k] = virt_to_page(pageaddr); + k++; + } + + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl3_gref = lvl3_gref; + sh_pages_info->n_lvl2_refs = n_lvl2_refs; + sh_pages_info->nents_last = nents_last; + sh_pages_info->data_pages = data_pages; + sh_pages_info->vmid = vmid; + + return data_pages; + +map_failed: + dev_err(hy_drv_priv->dev, + "Cannot map guest memory\n"); + + kfree(lvl2_table); + kfree(data_pages); + kfree(sh_pages_info); + + return NULL; +} + +/* + * TODO: In theory pages don't need to be unmaped, + * as ACRN is just translating memory addresses, + * but not sure if that will work the same way in future + */ +static int virtio_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int vmid; + int i, j; + + sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info); + + if (sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + 
"Imported pages already cleaned up"); + dev_warn(hy_drv_priv->dev, + "or buffer was not imported yet\n"); + return 0; + } + vmid = sh_pages_info->vmid; + + for (i = 0; i < sh_pages_info->n_lvl2_refs - 1; i++) { + for (j = 0; j < REFS_PER_PAGE; j++) + unmap_guest_phys(vmid, + sh_pages_info->lvl2_table[i][j]); + } + + for (j = 0; j < sh_pages_info->nents_last; j++) + unmap_guest_phys(vmid, sh_pages_info->lvl2_table[i][j]); + + for (i = 0; i < sh_pages_info->n_lvl2_refs; i++) + unmap_guest_phys(vmid, sh_pages_info->lvl3_table[i]); + + unmap_guest_phys(vmid, sh_pages_info->lvl3_gref); + + kfree(sh_pages_info->lvl2_table); + kfree(sh_pages_info->data_pages); + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + return 0; +} +#else +static long virtio_fe_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + struct virtio_shared_pages_info *sh_pages_info; + u64 lvl3_gref; + u64 *lvl2_table; + u64 *lvl3_table; + int i; + + /* + * Calculate number of pages needed for 2nd level addresing: + */ + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 1 : 0)); + + lvl3_table = (u64 *)__get_free_pages(GFP_KERNEL, 1); + lvl2_table = (u64 *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + + if (sh_pages_info == NULL) + return -ENOMEM; + + *refs_info = (void *)sh_pages_info; + + /* Share physical address of pages */ + for (i = 0; i < nents; i++) + lvl2_table[i] = page_to_phys(pages[i]); + + for (i = 0; i < n_lvl2_grefs; i++) + lvl3_table[i] = + virt_to_phys((void *)lvl2_table + i * PAGE_SIZE); + + lvl3_gref = virt_to_phys(lvl3_table); + + sh_pages_info->lvl3_table = lvl3_table; + sh_pages_info->lvl2_table = lvl2_table; + sh_pages_info->lvl3_gref = lvl3_gref; + + return lvl3_gref; +} + +static int virtio_fe_unshare_pages(void **refs_info, + int nents) +{ + struct virtio_shared_pages_info *sh_pages_info; + int n_lvl2_grefs = (nents/REFS_PER_PAGE + + ((nents % REFS_PER_PAGE) ? 
1 : 0)); + + sh_pages_info = (struct virtio_shared_pages_info *)(*refs_info); + + if (sh_pages_info == NULL) { + dev_err(hy_drv_priv->dev, + "No pages info\n"); + return -EINVAL; + } + + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + kfree(sh_pages_info); + + return 0; +} + +static struct page **virtio_fe_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return NULL; +} + +static int virtio_fe_unmap_shared_pages(void **refs_info, int nents) +{ + dev_dbg(hy_drv_priv->dev, + "Virtio frontend not supporting currently page mapping\n"); + return -EINVAL; +} + +#endif + +long virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info) +{ + long ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_share_pages(pages, domid, nents, refs_info); +#else + ret = virtio_fe_share_pages(pages, domid, nents, refs_info); +#endif + return ret; +} + +int virtio_unshare_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unshare_pages(refs_info, nents); +#else + ret = virtio_fe_unshare_pages(refs_info, nents); +#endif + return ret; +} + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info) +{ + struct page **ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#else + ret = virtio_fe_map_shared_pages(lvl3_gref, vmid, + nents, refs_info); +#endif + return ret; +} + +int virtio_unmap_shared_pages(void **refs_info, int nents) +{ + int ret; +#ifdef CONFIG_HYPER_DMABUF_VIRTIO_BE + ret = virtio_be_unmap_shared_pages(refs_info, nents); +#else + ret = virtio_fe_unmap_shared_pages(refs_info, nents); +#endif + return ret; +} diff --git a/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h new file mode 100644 index 000000000000..55f3e13ef2df --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/virtio/hyper_dmabuf_virtio_shm.h @@ -0,0 +1,40 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#ifndef __HYPER_DMABUF_VIRTIO_SHM_H__ +#define __HYPER_DMABUF_VIRTIO_SHM_H__ + +long virtio_share_pages(struct page **pages, + int domid, int nents, + void **refs_info); + +int virtio_unshare_pages(void **refs_info, int nents); + +struct page **virtio_map_shared_pages(unsigned long lvl3_gref, + int vmid, int nents, + void **refs_info); + +int virtio_unmap_shared_pages(void **refs_info, int nents); + +#endif /* __HYPER_DMABUF_VIRTIO_SHM_H__*/ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c new file mode 100644 index 000000000000..3dd49db66e31 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c @@ -0,0 +1,951 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_comm_list.h" +#include "../hyper_dmabuf_drv.h" + +static int export_req_id; + +struct hyper_dmabuf_req req_pending = {0}; + +static void xen_get_domid_delayed(struct work_struct *unused); +static void xen_init_comm_env_delayed(struct work_struct *unused); + +static DECLARE_DELAYED_WORK(get_vm_id_work, xen_get_domid_delayed); +static DECLARE_DELAYED_WORK(xen_init_comm_env_work, xen_init_comm_env_delayed); + +/* Creates entry in xen store that will keep details of all + * exporter rings created by this domain + */ +static int xen_comm_setup_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_mkdir(XBT_NIL, buf, ""); +} + +/* Removes entry from xenstore with exporter ring details. + * Other domains that has connected to any of exporter rings + * created by this domain, will be notified about removal of + * this entry and will treat that as signal to cleanup importer + * rings created for this domain + */ +static int xen_comm_destroy_data_dir(void) +{ + char buf[255]; + + sprintf(buf, "/local/domain/%d/data/hyper_dmabuf", + hy_drv_priv->domid); + + return xenbus_rm(XBT_NIL, buf, ""); +} + +/* Adds xenstore entries with details of exporter ring created + * for given remote domain. It requires special daemon running + * in dom0 to make sure that given remote domain will have right + * permissions to access that data. 
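 *
 * Resulting xenstore layout (illustrative), for exporter domain E and
 * remote domain R:
 *
 *   /local/domain/E/data/hyper_dmabuf/R/grefid = <grant ref of ring page>
 *   /local/domain/E/data/hyper_dmabuf/R/port   = <event channel port>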
 */
+static int xen_comm_expose_ring_details(int domid, int rdomid,
+					int gref, int port)
+{
+	char buf[255];
+	int ret;
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		domid, rdomid);
+
+	ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to write xenbus entry %s: %d\n",
+			buf, ret);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Queries details of ring exposed by remote domain.
+ */
+static int xen_comm_get_ring_details(int domid, int rdomid,
+				     int *grefid, int *port)
+{
+	char buf[255];
+	int ret;
+
+	sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+		rdomid, domid);
+
+	ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		return 1;
+	}
+
+	ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
+
+	if (ret <= 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to read xenbus entry %s: %d\n",
+			buf, ret);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+static void xen_get_domid_delayed(struct work_struct *unused)
+{
+	struct xenbus_transaction xbt;
+	int domid, ret;
+
+	/* schedule another attempt if the driver is still running
+	 * and xenstore has not been initialized yet
+	 */
+	if (likely(xenstored_ready == 0)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore is not ready yet. Will retry in 500ms\n");
+		schedule_delayed_work(&get_vm_id_work, msecs_to_jiffies(500));
+	} else {
+		xenbus_transaction_start(&xbt);
+
+		ret = xenbus_scanf(xbt, "domid", "", "%d", &domid);
+
+		if (ret <= 0)
+			domid = -1;
+
+		xenbus_transaction_end(xbt, 0);
+
+		/* try again since -1 is an invalid id for a domain
+		 * (but only if the driver is still running)
+		 */
+		if (unlikely(domid == -1)) {
+			dev_dbg(hy_drv_priv->dev,
+				"domid==-1 is invalid. Will retry it in 500ms\n");
+			schedule_delayed_work(&get_vm_id_work,
+					      msecs_to_jiffies(500));
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully retrieved domid from Xenstore:%d\n",
+				 domid);
+			hy_drv_priv->domid = domid;
+		}
+	}
+}
+
+int xen_be_get_domid(void)
+{
+	struct xenbus_transaction xbt;
+	int domid;
+
+	if (unlikely(xenstored_ready == 0)) {
+		xen_get_domid_delayed(NULL);
+		return -1;
+	}
+
+	xenbus_transaction_start(&xbt);
+
+	if (!xenbus_scanf(xbt, "domid", "", "%d", &domid))
+		domid = -1;
+
+	xenbus_transaction_end(xbt, 0);
+
+	return domid;
+}
+
+static int xen_comm_next_req_id(void)
+{
+	export_req_id++;
+	return export_req_id;
+}
+
+/* For now cache the latest rings as global variables. TODO: keep them in a list */
+static irqreturn_t front_ring_isr(int irq, void *info);
+static irqreturn_t back_ring_isr(int irq, void *info);
+
+/* Callback function that will be called on any change of the xenbus path
+ * being watched. Used for detecting creation/destruction of the remote
+ * domain's exporter ring.
+ *
+ * When a remote domain's exporter ring is detected, an importer ring
+ * on this domain will be created.
+ *
+ * When destruction of the remote domain's exporter ring is detected,
+ * this domain's importer ring will be cleaned up.
+ *
+ * Destruction can be caused by the remote domain unloading the module,
+ * or by its crash/force shutdown.
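 *
 * In short, combining the two checks in the body below:
 *
 *   ring details readable, no local importer ring -> create importer ring
 *   ring details gone, local importer ring exists -> clean importer ring up
 *   anything else                                 -> nothing to do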
+ */ +static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch, + const char *path, const char *token) +{ + int rdom, ret; + uint32_t grefid, port; + struct xen_comm_rx_ring_info *ring_info; + + /* Check which domain has changed its exporter rings */ + ret = sscanf(watch->node, "/local/domain/%d/", &rdom); + if (ret <= 0) + return; + + /* Check if we have importer ring for given remote domain already + * created + */ + ring_info = xen_comm_find_rx_ring(rdom); + + /* Try to query remote domain exporter ring details - if + * that will fail and we have importer ring that means remote + * domains has cleanup its exporter ring, so our importer ring + * is no longer useful. + * + * If querying details will succeed and we don't have importer ring, + * it means that remote domain has setup it for us and we should + * connect to it. + */ + + ret = xen_comm_get_ring_details(xen_be_get_domid(), + rdom, &grefid, &port); + + if (ring_info && ret != 0) { + dev_info(hy_drv_priv->dev, + "Remote exporter closed, cleaninup importer\n"); + xen_be_cleanup_rx_rbuf(rdom); + } else if (!ring_info && ret == 0) { + dev_info(hy_drv_priv->dev, + "Registering importer\n"); + xen_be_init_rx_rbuf(rdom); + } +} + +/* exporter needs to generated info for page sharing */ +int xen_be_init_tx_rbuf(int domid) +{ + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_sring *sring; + struct evtchn_alloc_unbound alloc_unbound; + struct evtchn_close close; + + void *shared_ring; + int ret; + + /* check if there's any existing tx channel in the table */ + ring_info = xen_comm_find_tx_ring(domid); + + if (ring_info) { + dev_info(hy_drv_priv->dev, + "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n", + ring_info->rdomain, ring_info->gref_ring, ring_info->port); + return 0; + } + + ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL); + + if (!ring_info) + return -ENOMEM; + + /* from exporter to importer */ + shared_ring = (void *)__get_free_pages(GFP_KERNEL, 1); + if (shared_ring == 0) { + kfree(ring_info); + return -ENOMEM; + } + + sring = (struct xen_comm_sring *) shared_ring; + + SHARED_RING_INIT(sring); + + FRONT_RING_INIT(&(ring_info->ring_front), sring, PAGE_SIZE); + + ring_info->gref_ring = gnttab_grant_foreign_access(domid, + virt_to_mfn(shared_ring), + 0); + if (ring_info->gref_ring < 0) { + /* fail to get gref */ + kfree(ring_info); + return -EFAULT; + } + + alloc_unbound.dom = DOMID_SELF; + alloc_unbound.remote_dom = domid; + ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, + &alloc_unbound); + if (ret) { + dev_err(hy_drv_priv->dev, + "Cannot allocate event channel\n"); + kfree(ring_info); + return -EIO; + } + + /* setting up interrupt */ + ret = bind_evtchn_to_irqhandler(alloc_unbound.port, + front_ring_isr, 0, + NULL, (void *) ring_info); + + if (ret < 0) { + dev_err(hy_drv_priv->dev, + "Failed to setup event channel\n"); + close.port = alloc_unbound.port; + HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); + gnttab_end_foreign_access(ring_info->gref_ring, 0, + virt_to_mfn(shared_ring)); + kfree(ring_info); + return -EIO; + } + + ring_info->rdomain = domid; + ring_info->irq = ret; + ring_info->port = alloc_unbound.port; + + mutex_init(&ring_info->lock); + + dev_dbg(hy_drv_priv->dev, + "%s: allocated eventchannel gref %d port: %d irq: %d\n", + __func__, + ring_info->gref_ring, + ring_info->port, + ring_info->irq); + + ret = xen_comm_add_tx_ring(ring_info); + + if (ret < 0) { + kfree(ring_info); + return -ENOMEM; + } + + ret = xen_comm_expose_ring_details(xen_be_get_domid(), + 
domid, + ring_info->gref_ring, + ring_info->port); + + /* Register watch for remote domain exporter ring. + * When remote domain will setup its exporter ring, + * we will automatically connect our importer ring to it. + */ + ring_info->watch.callback = remote_dom_exporter_watch_cb; + ring_info->watch.node = kmalloc(255, GFP_KERNEL); + + if (!ring_info->watch.node) { + kfree(ring_info); + return -ENOMEM; + } + + sprintf((char *)ring_info->watch.node, + "/local/domain/%d/data/hyper_dmabuf/%d/port", + domid, xen_be_get_domid()); + + register_xenbus_watch(&ring_info->watch); + + return ret; +} + +/* cleans up exporter ring created for given remote domain */ +void xen_be_cleanup_tx_rbuf(int domid) +{ + struct xen_comm_tx_ring_info *ring_info; + struct xen_comm_rx_ring_info *rx_ring_info; + + /* check if we at all have exporter ring for given rdomain */ + ring_info = xen_comm_find_tx_ring(domid); + + if (!ring_info) + return; + + xen_comm_remove_tx_ring(domid); + + unregister_xenbus_watch(&ring_info->watch); + kfree(ring_info->watch.node); + + /* No need to close communication channel, will be done by + * this function + */ + unbind_from_irqhandler(ring_info->irq, (void *) ring_info); + + /* No need to free sring page, will be freed by this function + * when other side will end its access + */ + gnttab_end_foreign_access(ring_info->gref_ring, 0, + (unsigned long) ring_info->ring_front.sring); + + kfree(ring_info); + + rx_ring_info = xen_comm_find_rx_ring(domid); + if (!rx_ring_info) + return; + + BACK_RING_INIT(&(rx_ring_info->ring_back), + rx_ring_info->ring_back.sring, + PAGE_SIZE); +} + +/* importer needs to know about shared page and port numbers for + * ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid) +{ + struct xen_comm_rx_ring_info *ring_info; + struct xen_comm_sring *sring; + + struct page *shared_ring; + + struct gnttab_map_grant_ref *map_ops; + + int ret; + int rx_gref, rx_port; + + /* check if there's existing rx ring channel */ + ring_info = xen_comm_find_rx_ring(domid); + + if (ring_info) { + dev_info(hy_drv_priv->dev, + "rx ring ch from domid = %d already exist\n", + ring_info->sdomain); + + return 0; + } + + ret = xen_comm_get_ring_details(xen_be_get_domid(), domid, + &rx_gref, &rx_port); + + if (ret) { + dev_err(hy_drv_priv->dev, + "Domain %d has not created exporter ring for current domain\n", + domid); + + return ret; + } + + ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL); + + if (!ring_info) + return -ENOMEM; + + ring_info->sdomain = domid; + ring_info->evtchn = rx_port; + + map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL); + + if (!map_ops) { + ret = -ENOMEM; + goto fail_no_map_ops; + } + + if (gnttab_alloc_pages(1, &shared_ring)) { + ret = -ENOMEM; + goto fail_others; + } + + gnttab_set_map_op(&map_ops[0], + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, rx_gref, domid); + + gnttab_set_unmap_op(&ring_info->unmap_op, + (unsigned long)pfn_to_kaddr( + page_to_pfn(shared_ring)), + GNTMAP_host_map, -1); + + ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1); + if (ret < 0) { + dev_err(hy_drv_priv->dev, "Cannot map ring\n"); + ret = -EFAULT; + goto fail_others; + } + + if (map_ops[0].status) { + dev_err(hy_drv_priv->dev, "Ring mapping failed\n"); + ret = -EFAULT; + goto fail_others; + } else { + ring_info->unmap_op.handle = map_ops[0].handle; + } + + kfree(map_ops); + + sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring)); + + BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE); + + ret = 
+	BACK_RING_INIT(&(rx_ring_info->ring_back),
+		       rx_ring_info->ring_back.sring,
+		       PAGE_SIZE);
+}
+
+/* importer needs to know about shared page and port numbers for
+ * ring buffer and event channel
+ */
+int xen_be_init_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_sring *sring;
+
+	struct page *shared_ring;
+
+	struct gnttab_map_grant_ref *map_ops;
+
+	int ret;
+	int rx_gref, rx_port;
+
+	/* check if there's an existing rx ring channel */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (ring_info) {
+		dev_info(hy_drv_priv->dev,
+			 "rx ring ch from domid = %d already exists\n",
+			 ring_info->sdomain);
+
+		return 0;
+	}
+
+	ret = xen_comm_get_ring_details(xen_be_get_domid(), domid,
+					&rx_gref, &rx_port);
+
+	if (ret) {
+		dev_err(hy_drv_priv->dev,
+			"Domain %d has not created exporter ring for current domain\n",
+			domid);
+
+		return ret;
+	}
+
+	ring_info = kmalloc(sizeof(*ring_info), GFP_KERNEL);
+
+	if (!ring_info)
+		return -ENOMEM;
+
+	ring_info->sdomain = domid;
+	ring_info->evtchn = rx_port;
+
+	map_ops = kmalloc(sizeof(*map_ops), GFP_KERNEL);
+
+	if (!map_ops) {
+		ret = -ENOMEM;
+		goto fail_no_map_ops;
+	}
+
+	if (gnttab_alloc_pages(1, &shared_ring)) {
+		ret = -ENOMEM;
+		goto fail_others;
+	}
+
+	gnttab_set_map_op(&map_ops[0],
+			  (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			  GNTMAP_host_map, rx_gref, domid);
+
+	gnttab_set_unmap_op(&ring_info->unmap_op,
+			    (unsigned long)pfn_to_kaddr(
+					page_to_pfn(shared_ring)),
+			    GNTMAP_host_map, -1);
+
+	ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev, "Cannot map ring\n");
+		ret = -EFAULT;
+		goto fail_others;
+	}
+
+	if (map_ops[0].status) {
+		dev_err(hy_drv_priv->dev, "Ring mapping failed\n");
+		ret = -EFAULT;
+		goto fail_others;
+	} else {
+		ring_info->unmap_op.handle = map_ops[0].handle;
+	}
+
+	kfree(map_ops);
+	/* avoid a double free in the error paths below */
+	map_ops = NULL;
+
+	sring = (struct xen_comm_sring *)pfn_to_kaddr(page_to_pfn(shared_ring));
+
+	BACK_RING_INIT(&ring_info->ring_back, sring, PAGE_SIZE);
+
+	ret = bind_interdomain_evtchn_to_irq(domid, rx_port);
+
+	if (ret < 0) {
+		ret = -EIO;
+		goto fail_others;
+	}
+
+	ring_info->irq = ret;
+
+	dev_dbg(hy_drv_priv->dev,
+		"%s: bound to eventchannel port: %d irq: %d\n", __func__,
+		rx_port,
+		ring_info->irq);
+
+	ret = xen_comm_add_rx_ring(ring_info);
+
+	if (ret < 0) {
+		ret = -ENOMEM;
+		goto fail_others;
+	}
+
+	/* Set up a communication channel in the opposite direction */
+	if (!xen_comm_find_tx_ring(domid))
+		ret = xen_be_init_tx_rbuf(domid);
+
+	ret = request_irq(ring_info->irq,
+			  back_ring_isr, 0,
+			  NULL, (void *)ring_info);
+
+	return ret;
+
+fail_others:
+	kfree(map_ops);
+
+fail_no_map_ops:
+	kfree(ring_info);
+
+	return ret;
+}
+
+/* cleans up importer ring created for given source domain */
+void xen_be_cleanup_rx_rbuf(int domid)
+{
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_tx_ring_info *tx_ring_info;
+	struct page *shared_ring;
+
+	/* check if we have an importer ring created for the given sdomain */
+	ring_info = xen_comm_find_rx_ring(domid);
+
+	if (!ring_info)
+		return;
+
+	xen_comm_remove_rx_ring(domid);
+
+	/* no need to close the event channel; it is done by this function */
+	unbind_from_irqhandler(ring_info->irq, (void *)ring_info);
+
+	/* unmapping shared ring page */
+	shared_ring = virt_to_page(ring_info->ring_back.sring);
+	gnttab_unmap_refs(&ring_info->unmap_op, NULL, &shared_ring, 1);
+	gnttab_free_pages(1, &shared_ring);
+
+	kfree(ring_info);
+
+	tx_ring_info = xen_comm_find_tx_ring(domid);
+	if (!tx_ring_info)
+		return;
+
+	/* reset the paired tx front ring so it can be re-used */
+	SHARED_RING_INIT(tx_ring_info->ring_front.sring);
+	FRONT_RING_INIT(&(tx_ring_info->ring_front),
+			tx_ring_info->ring_front.sring,
+			PAGE_SIZE);
+}
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused);
+
+static DECLARE_DELAYED_WORK(xen_rx_ch_auto_add_work, xen_rx_ch_add_delayed);
+
+#define DOMID_SCAN_START	1	/* domid = 1 */
+#define DOMID_SCAN_END		10	/* domid = 10 */
+
+static void xen_rx_ch_add_delayed(struct work_struct *unused)
+{
+	int ret;
+	char buf[128];
+	int i, dummy;
+
+	dev_dbg(hy_drv_priv->dev,
+		"Scanning for new tx channels coming from other domains\n");
+
+	/* check other domains and schedule another work if driver
+	 * is still running and backend is valid
+	 */
+	if (hy_drv_priv &&
+	    hy_drv_priv->initialized) {
+		for (i = DOMID_SCAN_START; i < DOMID_SCAN_END + 1; i++) {
+			if (i == hy_drv_priv->domid)
+				continue;
+
+			sprintf(buf, "/local/domain/%d/data/hyper_dmabuf/%d",
+				i, hy_drv_priv->domid);
+
+			ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", &dummy);
+
+			if (ret > 0) {
+				if (xen_comm_find_rx_ring(i) != NULL)
+					continue;
+
+				ret = xen_be_init_rx_rbuf(i);
+
+				if (!ret)
+					dev_info(hy_drv_priv->dev,
+						 "Done rx ch init for VM %d\n",
+						 i);
+			}
+		}
+
+		/* check every 10 seconds */
+		schedule_delayed_work(&xen_rx_ch_auto_add_work,
+				      msecs_to_jiffies(10000));
+	}
+}
+
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+void xen_init_comm_env_delayed(struct work_struct *unused)
+{
+	int ret;
+
+	/* schedule another work item if the driver is still running
+	 * and xenstore hasn't been initialized or dom_id hasn't
+	 * been correctly retrieved yet
+	 */
+	if (likely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		dev_dbg(hy_drv_priv->dev,
+			"Xenstore not ready, will re-try in 500ms\n");
+		schedule_delayed_work(&xen_init_comm_env_work,
+				      msecs_to_jiffies(500));
+	} else {
+		ret = xen_comm_setup_data_dir();
+		if (ret < 0) {
+			dev_err(hy_drv_priv->dev,
+				"Failed to create data dir in Xenstore\n");
+		} else {
+			dev_info(hy_drv_priv->dev,
+				 "Successfully finished comm env init\n");
+			hy_drv_priv->initialized = true;
+
+#ifdef CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD
+			xen_rx_ch_add_delayed(NULL);
+#endif /* CONFIG_HYPER_DMABUF_XEN_AUTO_RX_CH_ADD */
+		}
+	}
+}
+
+int xen_be_init_comm_env(void)
+{
+	int ret;
+
+	xen_comm_ring_table_init();
+
+	if (unlikely(xenstored_ready == 0 ||
+	    hy_drv_priv->domid == -1)) {
+		xen_init_comm_env_delayed(NULL);
+		return -1;
+	}
+
+	ret = xen_comm_setup_data_dir();
+	if (ret < 0) {
+		dev_err(hy_drv_priv->dev,
+			"Failed to create data dir in Xenstore\n");
+	} else {
+		dev_info(hy_drv_priv->dev,
+			 "Successfully finished comm env initialization\n");
+
+		hy_drv_priv->initialized = true;
+	}
+
+	return ret;
+}
+
+/* cleans up all tx/rx rings */
+static void xen_be_cleanup_all_rbufs(void)
+{
+	xen_comm_foreach_tx_ring(xen_be_cleanup_tx_rbuf);
+	xen_comm_foreach_rx_ring(xen_be_cleanup_rx_rbuf);
+}
+
+void xen_be_destroy_comm(void)
+{
+	xen_be_cleanup_all_rbufs();
+	xen_comm_destroy_data_dir();
+}
+
+int xen_be_send_req(int domid, struct hyper_dmabuf_req *req,
+		    int wait)
+{
+	struct xen_comm_front_ring *ring;
+	struct hyper_dmabuf_req *new_req;
+	struct xen_comm_tx_ring_info *ring_info;
+	int notify;
+
+	struct timeval tv_start, tv_end;
+	struct timeval tv_diff;
+
+	int timeout = 1000;
+
+	/* find a ring info for the channel */
+	ring_info = xen_comm_find_tx_ring(domid);
+	if (!ring_info) {
+		dev_err(hy_drv_priv->dev,
+			"Can't find ring info for the channel\n");
+		return -ENOENT;
+	}
+
+	ring = &ring_info->ring_front;
+
+	do_gettimeofday(&tv_start);
+
+	while (RING_FULL(ring)) {
+		dev_dbg(hy_drv_priv->dev, "RING_FULL\n");
+
+		if (timeout == 0) {
+			dev_err(hy_drv_priv->dev,
+				"Timeout while waiting for an entry in the ring\n");
+			return -EIO;
+		}
+		usleep_range(100, 120);
+		timeout--;
+	}
+
+	timeout = 1000;
+
+	mutex_lock(&ring_info->lock);
+
+	new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
+	if (!new_req) {
+		mutex_unlock(&ring_info->lock);
+		dev_err(hy_drv_priv->dev,
+			"NULL REQUEST\n");
+		return -EIO;
+	}
+
+	req->req_id = xen_comm_next_req_id();
+
+	/* update req_pending with current request */
+	memcpy(&req_pending, req, sizeof(req_pending));
+
+	/* pass current request to the ring */
+	memcpy(new_req, req, sizeof(*new_req));
+
+	ring->req_prod_pvt++;
+
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
+	if (notify)
+		notify_remote_via_irq(ring_info->irq);
+
+	if (wait) {
+		while (timeout--) {
+			if (req_pending.stat !=
+			    HYPER_DMABUF_REQ_NOT_RESPONDED)
+				break;
+			usleep_range(100, 120);
+		}
+
+		if (timeout < 0) {
+			mutex_unlock(&ring_info->lock);
+			dev_err(hy_drv_priv->dev,
+				"request timed out\n");
+			return -EBUSY;
+		}
+
+		do_gettimeofday(&tv_end);
+
+		/* checking time duration for round-trip of a request
+		 * for debugging
+		 */
+		if (tv_end.tv_usec >= tv_start.tv_usec) {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
+			tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
+		} else {
+			tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
+			tv_diff.tv_usec = tv_end.tv_usec+1000000-
+				tv_start.tv_usec;
+		}
+
+		if (tv_diff.tv_sec != 0 && tv_diff.tv_usec > 16000)
+			dev_dbg(hy_drv_priv->dev,
+				"send_req:time diff: %ld sec, %ld usec\n",
+				tv_diff.tv_sec, tv_diff.tv_usec);
+	}
+
+	mutex_unlock(&ring_info->lock);
+
+	return 0;
+}
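+/* Worst-case wait budget of xen_be_send_req(), derived from the
+ * constants above for reference: both the RING_FULL loop and the
+ * response-wait loop poll up to 1000 times with a 100-120us sleep,
+ * i.e. roughly 100-120ms each before giving up with -EIO/-EBUSY.
+ */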
+/* ISR for handling requests */
+static irqreturn_t back_ring_isr(int irq, void *info)
+{
+	RING_IDX rc, rp;
+	struct hyper_dmabuf_req req;
+	struct hyper_dmabuf_resp resp;
+
+	int notify, more_to_do;
+	int ret;
+
+	struct xen_comm_rx_ring_info *ring_info;
+	struct xen_comm_back_ring *ring;
+
+	ring_info = (struct xen_comm_rx_ring_info *)info;
+	ring = &ring_info->ring_back;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		rc = ring->req_cons;
+		rp = ring->sring->req_prod;
+		more_to_do = 0;
+		while (rc != rp) {
+			if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
+				break;
+
+			memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
+			ring->req_cons = ++rc;
+
+			ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
+
+			if (ret > 0) {
+				/* preparing a response for the request and
+				 * send it to the requester
+				 */
+				memcpy(&resp, &req, sizeof(resp));
+				memcpy(RING_GET_RESPONSE(ring,
+							 ring->rsp_prod_pvt),
+				       &resp, sizeof(resp));
+				ring->rsp_prod_pvt++;
+
+				dev_dbg(hy_drv_priv->dev,
+					"responding to exporter for req:%d\n",
+					resp.resp_id);
+
+				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring,
+								     notify);
+
+				if (notify)
+					notify_remote_via_irq(ring_info->irq);
+			}
+
+			RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
+		}
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
+
+/* ISR for handling responses */
+static irqreturn_t front_ring_isr(int irq, void *info)
+{
+	/* the front ring only cares about responses from the back */
+	struct hyper_dmabuf_resp *resp;
+	RING_IDX i, rp;
+	int more_to_do, ret;
+
+	struct xen_comm_tx_ring_info *ring_info;
+	struct xen_comm_front_ring *ring;
+
+	ring_info = (struct xen_comm_tx_ring_info *)info;
+	ring = &ring_info->ring_front;
+
+	dev_dbg(hy_drv_priv->dev, "%s\n", __func__);
+
+	do {
+		more_to_do = 0;
+		rp = ring->sring->rsp_prod;
+		for (i = ring->rsp_cons; i != rp; i++) {
+			resp = RING_GET_RESPONSE(ring, i);
+
+			/* update pending request's status with what is
+			 * in the response
+			 */
+
+			dev_dbg(hy_drv_priv->dev,
+				"getting response from importer\n");
+
+			if (req_pending.req_id == resp->resp_id)
+				req_pending.stat = resp->stat;
+
+			if (resp->stat == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
+				/* parsing response */
+				ret = hyper_dmabuf_msg_parse(ring_info->rdomain,
+					(struct hyper_dmabuf_req *)resp);
+
+				if (ret < 0) {
+					dev_err(hy_drv_priv->dev,
+						"err while parsing resp\n");
+				}
+			} else if (resp->stat == HYPER_DMABUF_REQ_PROCESSED) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_PROCESSED\n");
+			} else if (resp->stat == HYPER_DMABUF_REQ_ERROR) {
+				/* for debugging dma_buf remote synch */
+				dev_dbg(hy_drv_priv->dev,
+					"original request = 0x%x\n", resp->cmd);
+				dev_dbg(hy_drv_priv->dev,
+					"got HYPER_DMABUF_REQ_ERROR\n");
+			}
+		}
+
+		ring->rsp_cons = i;
+
+		if (i != ring->req_prod_pvt)
+			RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
+		else
+			ring->sring->rsp_event = i+1;
+
+	} while (more_to_do);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
new file mode 100644
index 000000000000..70a2b704badd
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to
any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_COMM_H__ +#define __HYPER_DMABUF_XEN_COMM_H__ + +#include "xen/interface/io/ring.h" +#include "xen/xenbus.h" +#include "../hyper_dmabuf_msg.h" + +extern int xenstored_ready; + +DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp); + +struct xen_comm_tx_ring_info { + struct xen_comm_front_ring ring_front; + int rdomain; + int gref_ring; + int irq; + int port; + struct mutex lock; + struct xenbus_watch watch; +}; + +struct xen_comm_rx_ring_info { + int sdomain; + int irq; + int evtchn; + struct xen_comm_back_ring ring_back; + struct gnttab_unmap_grant_ref unmap_op; +}; + +int xen_be_get_domid(void); + +int xen_be_init_comm_env(void); + +/* exporter needs to generated info for page sharing */ +int xen_be_init_tx_rbuf(int domid); + +/* importer needs to know about shared page and port numbers + * for ring buffer and event channel + */ +int xen_be_init_rx_rbuf(int domid); + +/* cleans up exporter ring created for given domain */ +void xen_be_cleanup_tx_rbuf(int domid); + +/* cleans up importer ring created for given domain */ +void xen_be_cleanup_rx_rbuf(int domid); + +void xen_be_destroy_comm(void); + +/* send request to the remote domain */ +int xen_be_send_req(int domid, struct hyper_dmabuf_req *req, + int wait); + +#endif /* __HYPER_DMABUF_XEN_COMM_H__ */ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c new file mode 100644 index 000000000000..15023dbc8ced --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.c @@ -0,0 +1,158 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *	Dongwon Kim
+ *	Mateusz Polrola
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/hashtable.h>
+#include <xen/grant_table.h>
+#include "../hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_xen_comm.h"
+#include "hyper_dmabuf_xen_comm_list.h"
+
+DECLARE_HASHTABLE(xen_comm_tx_ring_hash, MAX_ENTRY_TX_RING);
+DECLARE_HASHTABLE(xen_comm_rx_ring_hash, MAX_ENTRY_RX_RING);
+
+void xen_comm_ring_table_init(void)
+{
+	hash_init(xen_comm_rx_ring_hash);
+	hash_init(xen_comm_tx_ring_hash);
+}
+
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info)
+{
+	struct xen_comm_tx_ring_info_entry *info_entry;
+
+	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
+
+	if (!info_entry)
+		return -ENOMEM;
+
+	info_entry->info = ring_info;
+
+	hash_add(xen_comm_tx_ring_hash, &info_entry->node,
+		 info_entry->info->rdomain);
+
+	return 0;
+}
+
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info)
+{
+	struct xen_comm_rx_ring_info_entry *info_entry;
+
+	info_entry = kmalloc(sizeof(*info_entry), GFP_KERNEL);
+
+	if (!info_entry)
+		return -ENOMEM;
+
+	info_entry->info = ring_info;
+
+	hash_add(xen_comm_rx_ring_hash, &info_entry->node,
+		 info_entry->info->sdomain);
+
+	return 0;
+}
+
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid)
+{
+	struct xen_comm_tx_ring_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
+		if (info_entry->info->rdomain == domid)
+			return info_entry->info;
+
+	return NULL;
+}
+
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid)
+{
+	struct xen_comm_rx_ring_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
+		if (info_entry->info->sdomain == domid)
+			return info_entry->info;
+
+	return NULL;
+}
+
+int xen_comm_remove_tx_ring(int domid)
+{
+	struct xen_comm_tx_ring_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(xen_comm_tx_ring_hash, bkt, info_entry, node)
+		if (info_entry->info->rdomain == domid) {
+			hash_del(&info_entry->node);
+			kfree(info_entry);
+			return 0;
+		}
+
+	return -ENOENT;
+}
+
+int xen_comm_remove_rx_ring(int domid)
+{
+	struct xen_comm_rx_ring_info_entry *info_entry;
+	int bkt;
+
+	hash_for_each(xen_comm_rx_ring_hash, bkt, info_entry, node)
+		if (info_entry->info->sdomain == domid) {
+			hash_del(&info_entry->node);
+			kfree(info_entry);
+			return 0;
+		}
+
+	return -ENOENT;
+}
+
+void xen_comm_foreach_tx_ring(void (*func)(int domid))
+{
+	struct xen_comm_tx_ring_info_entry *info_entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	hash_for_each_safe(xen_comm_tx_ring_hash, bkt, tmp,
+			   info_entry, node) {
+		func(info_entry->info->rdomain);
+	}
+}
+
+void xen_comm_foreach_rx_ring(void (*func)(int domid))
+{
+	struct xen_comm_rx_ring_info_entry *info_entry;
+	struct hlist_node *tmp;
+	int bkt;
+
+	hash_for_each_safe(xen_comm_rx_ring_hash, bkt, tmp,
+			   info_entry, node) {
+		func(info_entry->info->sdomain);
+	}
}
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
new file mode 100644
index 000000000000..8502fe7df578
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_comm_list.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_COMM_LIST_H__
+#define __HYPER_DMABUF_XEN_COMM_LIST_H__
+
+/* number of hash-table bits (128 buckets) for exporter (tx) rings */
+#define MAX_ENTRY_TX_RING	7
+/* number of hash-table bits (128 buckets) for importer (rx) rings */
+#define MAX_ENTRY_RX_RING	7
+
+struct xen_comm_tx_ring_info_entry {
+	struct xen_comm_tx_ring_info *info;
+	struct hlist_node node;
+};
+
+struct xen_comm_rx_ring_info_entry {
+	struct xen_comm_rx_ring_info *info;
+	struct hlist_node node;
+};
+
+void xen_comm_ring_table_init(void);
+
+int xen_comm_add_tx_ring(struct xen_comm_tx_ring_info *ring_info);
+
+int xen_comm_add_rx_ring(struct xen_comm_rx_ring_info *ring_info);
+
+int xen_comm_remove_tx_ring(int domid);
+
+int xen_comm_remove_rx_ring(int domid);
+
+struct xen_comm_tx_ring_info *xen_comm_find_tx_ring(int domid);
+
+struct xen_comm_rx_ring_info *xen_comm_find_rx_ring(int domid);
+
+/* iterates over all exporter rings and calls the provided
+ * function for each of them
+ */
+void xen_comm_foreach_tx_ring(void (*func)(int domid));
+
+/* iterates over all importer rings and calls the provided
+ * function for each of them
+ */
+void xen_comm_foreach_rx_ring(void (*func)(int domid));
+
+#endif // __HYPER_DMABUF_XEN_COMM_LIST_H__
diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
new file mode 100644
index 000000000000..14ed3bc51e6a
--- /dev/null
+++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include "../hyper_dmabuf_drv.h" +#include "hyper_dmabuf_xen_comm.h" +#include "hyper_dmabuf_xen_shm.h" + +struct hyper_dmabuf_bknd_ops xen_bknd_ops = { + .init = NULL, /* not needed for xen */ + .cleanup = NULL, /* not needed for xen */ + .get_vm_id = xen_be_get_domid, + .share_pages = xen_be_share_pages, + .unshare_pages = xen_be_unshare_pages, + .map_shared_pages = (void *)xen_be_map_shared_pages, + .unmap_shared_pages = xen_be_unmap_shared_pages, + .init_comm_env = xen_be_init_comm_env, + .destroy_comm = xen_be_destroy_comm, + .init_rx_ch = xen_be_init_rx_rbuf, + .init_tx_ch = xen_be_init_tx_rbuf, + .send_req = xen_be_send_req, +}; diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h new file mode 100644 index 000000000000..a4902b747a87 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_drv.h @@ -0,0 +1,53 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef __HYPER_DMABUF_XEN_DRV_H__ +#define __HYPER_DMABUF_XEN_DRV_H__ +#include + +extern struct hyper_dmabuf_bknd_ops xen_bknd_ops; + +/* Main purpose of this structure is to keep + * all references created or acquired for sharing + * pages with another domain for freeing those later + * when unsharing. 
+ */ +struct xen_shared_pages_info { + /* top level refid */ + grant_ref_t lvl3_gref; + + /* page of top level addressing, it contains refids of 2nd lvl pages */ + grant_ref_t *lvl3_table; + + /* table of 2nd level pages, that contains refids to data pages */ + grant_ref_t *lvl2_table; + + /* unmap ops for mapped pages */ + struct gnttab_unmap_grant_ref *unmap_ops; + + /* data pages to be unmapped */ + struct page **data_pages; +}; + +#endif // __HYPER_DMABUF_XEN_COMM_H__ diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c new file mode 100644 index 000000000000..5889485125e0 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.c @@ -0,0 +1,525 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Dongwon Kim + * Mateusz Polrola + * + */ + +#include +#include +#include +#include "hyper_dmabuf_xen_drv.h" +#include "../hyper_dmabuf_drv.h" + +#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t)) + +/* + * Creates 2 level page directory structure for referencing shared pages. + * Top level page is a single page that contains up to 1024 refids that + * point to 2nd level pages. + * + * Each 2nd level page contains up to 1024 refids that point to shared + * data pages. + * + * There will always be one top level page and number of 2nd level pages + * depends on number of shared data pages. + * + * 3rd level page 2nd level pages Data pages + * +-------------------------+ ┌>+--------------------+ ┌>+------------+ + * |2nd level page 0 refid |---┘ |Data page 0 refid |-┘ |Data page 0 | + * |2nd level page 1 refid |---┐ |Data page 1 refid |-┐ +------------+ + * | ... | | | .... | | + * |2nd level page 1023 refid|-┐ | |Data page 1023 refid| └>+------------+ + * +-------------------------+ | | +--------------------+ |Data page 1 | + * | | +------------+ + * | └>+--------------------+ + * | |Data page 1024 refid| + * | |Data page 1025 refid| + * | | ... | + * | |Data page 2047 refid| + * | +--------------------+ + * | + * | ..... + * └-->+-----------------------+ + * |Data page 1047552 refid| + * |Data page 1047553 refid| + * | ... | + * |Data page 1048575 refid| + * +-----------------------+ + * + * Using such 2 level structure it is possible to reference up to 4GB of + * shared data using single refid pointing to top level page. + * + * Returns refid of top level page. 
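+ *
+ * Worked example (derived from the constants here): with 4 KiB pages
+ * and a 4-byte grant_ref_t, REFS_PER_PAGE = 1024. Sharing a 16 MiB
+ * buffer (4096 data pages) therefore needs 4096/1024 = 4 second-level
+ * pages plus the single top-level page, i.e. 5 extra shared pages and
+ * one refid handed to the importer.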
+ */
+long xen_be_share_pages(struct page **pages, int domid, int nents,
+			void **refs_info)
+{
+	grant_ref_t lvl3_gref;
+	grant_ref_t *lvl2_table;
+	grant_ref_t *lvl3_table;
+
+	/*
+	 * Calculate number of pages needed for 2nd level addressing:
+	 */
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			   ((nents % REFS_PER_PAGE) ? 1 : 0));
+
+	struct xen_shared_pages_info *sh_pages_info;
+	int i;
+
+	lvl3_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, 1);
+	lvl2_table = (grant_ref_t *)__get_free_pages(GFP_KERNEL, n_lvl2_grefs);
+
+	if (!lvl3_table || !lvl2_table) {
+		if (lvl2_table)
+			free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+		if (lvl3_table)
+			free_pages((unsigned long)lvl3_table, 1);
+		return -ENOMEM;
+	}
+
+	sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
+
+	if (!sh_pages_info) {
+		free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+		free_pages((unsigned long)lvl3_table, 1);
+		return -ENOMEM;
+	}
+
+	*refs_info = (void *)sh_pages_info;
+
+	/* share data pages in readonly mode for security */
+	for (i = 0; i < nents; i++) {
+		lvl2_table[i] = gnttab_grant_foreign_access(domid,
+					pfn_to_mfn(page_to_pfn(pages[i])),
+					true /* read only */);
+		if (lvl2_table[i] == -ENOSPC) {
+			dev_err(hy_drv_priv->dev,
+				"No more space left in grant table\n");
+
+			/* Unshare all already shared pages for lvl2 */
+			while (i--) {
+				gnttab_end_foreign_access_ref(lvl2_table[i], 0);
+				gnttab_free_grant_reference(lvl2_table[i]);
+			}
+			goto err_cleanup;
+		}
+	}
+
+	/* Share 2nd level addressing pages in readonly mode */
+	for (i = 0; i < n_lvl2_grefs; i++) {
+		lvl3_table[i] = gnttab_grant_foreign_access(domid,
+					virt_to_mfn(
+					(unsigned long)lvl2_table+i*PAGE_SIZE),
+					true);
+
+		if (lvl3_table[i] == -ENOSPC) {
+			dev_err(hy_drv_priv->dev,
+				"No more space left in grant table\n");
+
+			/* Unshare all already shared pages for lvl3 */
+			while (i--) {
+				gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+				gnttab_free_grant_reference(lvl3_table[i]);
+			}
+
+			/* Unshare all pages for lvl2 */
+			while (nents--) {
+				gnttab_end_foreign_access_ref(
+							lvl2_table[nents], 0);
+				gnttab_free_grant_reference(lvl2_table[nents]);
+			}
+
+			goto err_cleanup;
+		}
+	}
+
+	/* Share lvl3_table in readonly mode */
+	lvl3_gref = gnttab_grant_foreign_access(domid,
+			virt_to_mfn((unsigned long)lvl3_table),
+			true);
+
+	if (lvl3_gref == -ENOSPC) {
+		dev_err(hy_drv_priv->dev,
+			"No more space left in grant table\n");
+
+		/* Unshare all pages for lvl3 */
+		while (i--) {
+			gnttab_end_foreign_access_ref(lvl3_table[i], 1);
+			gnttab_free_grant_reference(lvl3_table[i]);
+		}
+
+		/* Unshare all pages for lvl2 */
+		while (nents--) {
+			gnttab_end_foreign_access_ref(lvl2_table[nents], 0);
+			gnttab_free_grant_reference(lvl2_table[nents]);
+		}
+
+		goto err_cleanup;
+	}
+
+	/* Store lvl3_table page to be freed later */
+	sh_pages_info->lvl3_table = lvl3_table;
+
+	/* Store lvl2_table pages to be freed later */
+	sh_pages_info->lvl2_table = lvl2_table;
+
+	/* Store exported pages refid to be unshared later */
+	sh_pages_info->lvl3_gref = lvl3_gref;
+
+	dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__);
+	return lvl3_gref;
+
+err_cleanup:
+	free_pages((unsigned long)lvl2_table, n_lvl2_grefs);
+	free_pages((unsigned long)lvl3_table, 1);
+	kfree(sh_pages_info);
+
+	return -ENOSPC;
+}
+
+int xen_be_unshare_pages(void **refs_info, int nents)
+{
+	struct xen_shared_pages_info *sh_pages_info;
+	int n_lvl2_grefs = (nents/REFS_PER_PAGE +
+			    ((nents % REFS_PER_PAGE) ?
1 : 0)); + int i; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->lvl3_table == NULL || + sh_pages_info->lvl2_table == NULL || + sh_pages_info->lvl3_gref == -1) { + dev_warn(hy_drv_priv->dev, + "gref table for hyper_dmabuf already cleaned up\n"); + return 0; + } + + /* End foreign access for data pages, but do not free them */ + for (i = 0; i < nents; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0); + gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]); + } + + /* End foreign access for 2nd level addressing pages */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) + dev_warn(hy_drv_priv->dev, "refid not shared !!\n"); + + if (!gnttab_end_foreign_access_ref( + sh_pages_info->lvl3_table[i], 1)) + dev_warn(hy_drv_priv->dev, "refid still in use!!!\n"); + + gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]); + } + + /* End foreign access for top level addressing page */ + if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) + dev_warn(hy_drv_priv->dev, "gref not shared !!\n"); + + gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1); + gnttab_free_grant_reference(sh_pages_info->lvl3_gref); + + /* freeing all pages used for 2 level addressing */ + free_pages((unsigned long)sh_pages_info->lvl2_table, n_lvl2_grefs); + free_pages((unsigned long)sh_pages_info->lvl3_table, 1); + + sh_pages_info->lvl3_gref = -1; + sh_pages_info->lvl2_table = NULL; + sh_pages_info->lvl3_table = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} + +/* Maps provided top level ref id and then return array of pages + * containing data refs. + */ +struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid, + int nents, void **refs_info) +{ + struct page *lvl3_table_page; + struct page **lvl2_table_pages; + struct page **data_pages; + struct xen_shared_pages_info *sh_pages_info; + + grant_ref_t *lvl3_table; + grant_ref_t *lvl2_table; + + struct gnttab_map_grant_ref lvl3_map_ops; + struct gnttab_unmap_grant_ref lvl3_unmap_ops; + + struct gnttab_map_grant_ref *lvl2_map_ops; + struct gnttab_unmap_grant_ref *lvl2_unmap_ops; + + struct gnttab_map_grant_ref *data_map_ops; + struct gnttab_unmap_grant_ref *data_unmap_ops; + + /* # of grefs in the last page of lvl2 table */ + int nents_last = (nents - 1) % REFS_PER_PAGE + 1; + int n_lvl2_grefs = (nents / REFS_PER_PAGE) + + ((nents_last > 0) ? 
1 : 0) - + (nents_last == REFS_PER_PAGE); + int i, j, k; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL); + *refs_info = (void *) sh_pages_info; + + lvl2_table_pages = kcalloc(n_lvl2_grefs, sizeof(struct page *), + GFP_KERNEL); + + data_pages = kcalloc(nents, sizeof(struct page *), GFP_KERNEL); + + lvl2_map_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_map_ops), + GFP_KERNEL); + + lvl2_unmap_ops = kcalloc(n_lvl2_grefs, sizeof(*lvl2_unmap_ops), + GFP_KERNEL); + + data_map_ops = kcalloc(nents, sizeof(*data_map_ops), GFP_KERNEL); + data_unmap_ops = kcalloc(nents, sizeof(*data_unmap_ops), GFP_KERNEL); + + /* Map top level addressing page */ + if (gnttab_alloc_pages(1, &lvl3_table_page)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + return NULL; + } + + lvl3_table = (grant_ref_t *)pfn_to_kaddr(page_to_pfn(lvl3_table_page)); + + gnttab_set_map_op(&lvl3_map_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, + (grant_ref_t)lvl3_gref, domid); + + gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, + GNTMAP_host_map | GNTMAP_readonly, -1); + + if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + if (lvl3_map_ops.status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl3_map_ops.status); + + goto error_cleanup_lvl3; + } else { + lvl3_unmap_ops.handle = lvl3_map_ops.handle; + } + + /* Map all second level pages */ + if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) { + dev_err(hy_drv_priv->dev, "Cannot allocate pages\n"); + goto error_cleanup_lvl3; + } + + for (i = 0; i < n_lvl2_grefs; i++) { + lvl2_table = (grant_ref_t *)pfn_to_kaddr( + page_to_pfn(lvl2_table_pages[i])); + gnttab_set_map_op(&lvl2_map_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, + lvl3_table[i], domid); + gnttab_set_unmap_op(&lvl2_unmap_ops[i], + (unsigned long)lvl2_table, GNTMAP_host_map | + GNTMAP_readonly, -1); + } + + /* Unmap top level page, as it won't be needed any longer */ + if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1)) { + dev_err(hy_drv_priv->dev, + "xen: cannot unmap top level page\n"); + return NULL; + } + + /* Mark that page was unmapped */ + lvl3_unmap_ops.handle = -1; + + if (gnttab_map_refs(lvl2_map_ops, NULL, + lvl2_table_pages, n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed"); + return NULL; + } + + /* Checks if pages were mapped correctly */ + for (i = 0; i < n_lvl2_grefs; i++) { + if (lvl2_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d", + lvl2_map_ops[i].status); + goto error_cleanup_lvl2; + } else { + lvl2_unmap_ops[i].handle = lvl2_map_ops[i].handle; + } + } + + if (gnttab_alloc_pages(nents, data_pages)) { + dev_err(hy_drv_priv->dev, + "Cannot allocate pages\n"); + goto error_cleanup_lvl2; + } + + k = 0; + + for (i = 0; i < n_lvl2_grefs - 1; i++) { + lvl2_table = pfn_to_kaddr(page_to_pfn(lvl2_table_pages[i])); + for (j = 0; j < REFS_PER_PAGE; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr( + page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + } + + /* for grefs in the last lvl2 table page */ + lvl2_table = 
pfn_to_kaddr(page_to_pfn( + lvl2_table_pages[n_lvl2_grefs - 1])); + + for (j = 0; j < nents_last; j++) { + gnttab_set_map_op(&data_map_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, + lvl2_table[j], domid); + + gnttab_set_unmap_op(&data_unmap_ops[k], + (unsigned long)pfn_to_kaddr(page_to_pfn(data_pages[k])), + GNTMAP_host_map | GNTMAP_readonly, -1); + k++; + } + + if (gnttab_map_refs(data_map_ops, NULL, + data_pages, nents)) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed\n"); + return NULL; + } + + /* unmapping lvl2 table pages */ + if (gnttab_unmap_refs(lvl2_unmap_ops, + NULL, lvl2_table_pages, + n_lvl2_grefs)) { + dev_err(hy_drv_priv->dev, + "Cannot unmap 2nd level refs\n"); + return NULL; + } + + /* Mark that pages were unmapped */ + for (i = 0; i < n_lvl2_grefs; i++) + lvl2_unmap_ops[i].handle = -1; + + for (i = 0; i < nents; i++) { + if (data_map_ops[i].status) { + dev_err(hy_drv_priv->dev, + "HYPERVISOR map grant ref failed status = %d\n", + data_map_ops[i].status); + goto error_cleanup_data; + } else { + data_unmap_ops[i].handle = data_map_ops[i].handle; + } + } + + /* store these references for unmapping in the future */ + sh_pages_info->unmap_ops = data_unmap_ops; + sh_pages_info->data_pages = data_pages; + + gnttab_free_pages(1, &lvl3_table_page); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return data_pages; + +error_cleanup_data: + gnttab_unmap_refs(data_unmap_ops, NULL, data_pages, + nents); + + gnttab_free_pages(nents, data_pages); + +error_cleanup_lvl2: + if (lvl2_unmap_ops[0].handle != -1) + gnttab_unmap_refs(lvl2_unmap_ops, NULL, + lvl2_table_pages, n_lvl2_grefs); + gnttab_free_pages(n_lvl2_grefs, lvl2_table_pages); + +error_cleanup_lvl3: + if (lvl3_unmap_ops.handle != -1) + gnttab_unmap_refs(&lvl3_unmap_ops, NULL, + &lvl3_table_page, 1); + gnttab_free_pages(1, &lvl3_table_page); + + kfree(lvl2_table_pages); + kfree(lvl2_map_ops); + kfree(lvl2_unmap_ops); + kfree(data_map_ops); + + + return NULL; +} + +int xen_be_unmap_shared_pages(void **refs_info, int nents) +{ + struct xen_shared_pages_info *sh_pages_info; + + dev_dbg(hy_drv_priv->dev, "%s entry\n", __func__); + + sh_pages_info = (struct xen_shared_pages_info *)(*refs_info); + + if (sh_pages_info->unmap_ops == NULL || + sh_pages_info->data_pages == NULL) { + dev_warn(hy_drv_priv->dev, + "pages already cleaned up or buffer not imported yet\n"); + return 0; + } + + if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL, + sh_pages_info->data_pages, nents)) { + dev_err(hy_drv_priv->dev, "Cannot unmap data pages\n"); + return -EFAULT; + } + + gnttab_free_pages(nents, sh_pages_info->data_pages); + + kfree(sh_pages_info->data_pages); + kfree(sh_pages_info->unmap_ops); + sh_pages_info->unmap_ops = NULL; + sh_pages_info->data_pages = NULL; + kfree(sh_pages_info); + sh_pages_info = NULL; + + dev_dbg(hy_drv_priv->dev, "%s exit\n", __func__); + return 0; +} diff --git a/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h new file mode 100644 index 000000000000..f23deb394a00 --- /dev/null +++ b/drivers/dma-buf/hyper_dmabuf/xen/hyper_dmabuf_xen_shm.h @@ -0,0 +1,46 @@ +/* + * Copyright © 2017 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated 
documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_XEN_SHM_H__
+#define __HYPER_DMABUF_XEN_SHM_H__
+
+/* This collects all reference numbers for 2nd level shared pages,
+ * creates a table with those in the 1st level shared page, and then
+ * returns the reference number of this top-level table.
+ */
+long xen_be_share_pages(struct page **pages, int domid, int nents,
+			void **refs_info);
+
+int xen_be_unshare_pages(void **refs_info, int nents);
+
+/* Maps the provided top level ref id and then returns an array of pages
+ * containing data refs.
+ */
+struct page **xen_be_map_shared_pages(unsigned long lvl3_gref, int domid,
+				      int nents,
+				      void **refs_info);
+
+int xen_be_unmap_shared_pages(void **refs_info, int nents);
+
+#endif /* __HYPER_DMABUF_XEN_SHM_H__ */
diff --git a/drivers/energy_model/Kconfig b/drivers/energy_model/Kconfig
new file mode 100644
index 000000000000..3fbf968926d5
--- /dev/null
+++ b/drivers/energy_model/Kconfig
@@ -0,0 +1,16 @@
+config LEGACY_ENERGY_MODEL_DT
+	bool "Legacy DT-based Energy Model of CPUs"
+	default n
+	help
+	  The Energy Aware Scheduler (EAS) used to rely on Energy Models
+	  (EMs) statically defined in the Device Tree. More recent
+	  versions of EAS now rely on the EM framework to get the power
+	  costs of CPUs.
+
+	  This driver reads old-style static EMs in DT and feeds them into
+	  the EM framework, hence enabling the use of EAS on platforms with
+	  old DT files. Since EAS now uses only the active costs of CPUs,
+	  the cluster-related costs and idle-costs of the old EM are
+	  ignored.
+
+	  If in doubt, say N.
diff --git a/drivers/energy_model/Makefile b/drivers/energy_model/Makefile
new file mode 100644
index 000000000000..7bc0a7e502ea
--- /dev/null
+++ b/drivers/energy_model/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LEGACY_ENERGY_MODEL_DT) += legacy_em_dt.o
diff --git a/drivers/energy_model/legacy_em_dt.c b/drivers/energy_model/legacy_em_dt.c
new file mode 100644
index 000000000000..b608790fcc19
--- /dev/null
+++ b/drivers/energy_model/legacy_em_dt.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Legacy Energy Model loading driver
+ *
+ * Copyright (C) 2018, ARM Ltd.
+ * Written by: Quentin Perret, ARM Ltd.
+ */
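+
+/*
+ * Hypothetical example of the legacy DT layout this driver parses
+ * (shape inferred from the parsing code below; node names and values
+ * are illustrative only). Each CPU node points at a cost node whose
+ * busy-cost-data property holds <capacity power> pairs, one per OPP:
+ *
+ *	cpu0: cpu@0 {
+ *		sched-energy-costs = <&cpu_cost>;
+ *	};
+ *
+ *	cpu_cost: sched-energy-cost {
+ *		busy-cost-data = <
+ *			 512 120
+ *			 768 230
+ *			1024 480
+ *		>;
+ *	};
+ *
+ * With policy->cpuinfo.max_freq = 2000000 (kHz), the driver rebuilds
+ * frequencies as capacity * max_freq / scale_cpu, where scale_cpu is
+ * the last (highest) capacity: 512 -> 1000000 kHz, 768 -> 1500000 kHz,
+ * 1024 -> 2000000 kHz.
+ */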
+#define pr_fmt(fmt) "legacy-dt-em: " fmt
+
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/cpuset.h>
+#include <linux/energy_model.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+static cpumask_var_t cpus_to_visit;
+
+static DEFINE_PER_CPU(unsigned long, nr_states) = 0;
+
+struct em_state {
+	unsigned long frequency;
+	unsigned long power;
+	unsigned long capacity;
+};
+static DEFINE_PER_CPU(struct em_state*, cpu_em) = NULL;
+
+static void finish_em_loading_workfn(struct work_struct *work);
+static DECLARE_WORK(finish_em_loading_work, finish_em_loading_workfn);
+
+static DEFINE_MUTEX(em_loading_mutex);
+
+/*
+ * Callback given to the EM framework. All this does is browse the table
+ * created by init_em_dt_callback().
+ */
+static int get_power(unsigned long *mW, unsigned long *KHz, int cpu)
+{
+	unsigned long nstates = per_cpu(nr_states, cpu);
+	struct em_state *em = per_cpu(cpu_em, cpu);
+	int i;
+
+	if (!nstates || !em)
+		return -ENODEV;
+
+	/* pick the first state above *KHz, or fall back to the highest */
+	for (i = 0; i < nstates - 1; i++) {
+		if (em[i].frequency > *KHz)
+			break;
+	}
+
+	*KHz = em[i].frequency;
+	*mW = em[i].power;
+
+	return 0;
+}
+
+static int init_em_dt_callback(struct notifier_block *nb, unsigned long val,
+			       void *data)
+{
+	struct em_data_callback em_cb = EM_DATA_CB(get_power);
+	unsigned long nstates, scale_cpu, max_freq;
+	struct cpufreq_policy *policy = data;
+	const struct property *prop;
+	struct device_node *cn, *cp;
+	struct em_state *em;
+	int cpu, i, ret = 0;
+	const __be32 *tmp;
+
+	if (val != CPUFREQ_NOTIFY)
+		return 0;
+
+	mutex_lock(&em_loading_mutex);
+
+	/* Do not register an energy model twice */
+	for_each_cpu(cpu, policy->cpus) {
+		if (per_cpu(nr_states, cpu) || per_cpu(cpu_em, cpu)) {
+			pr_err("EM of CPU%d already loaded\n", cpu);
+			ret = -EEXIST;
+			goto unlock;
+		}
+	}
+
+	cpu = cpumask_first(policy->cpus);
+
+	max_freq = policy->cpuinfo.max_freq;
+	if (!max_freq) {
+		pr_err("No policy->max for CPU%d\n", cpu);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	cn = of_get_cpu_node(cpu, NULL);
+	if (!cn) {
+		pr_err("No device_node for CPU%d\n", cpu);
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	cp = of_parse_phandle(cn, "sched-energy-costs", 0);
+	if (!cp) {
+		pr_err("CPU%d node has no sched-energy-costs\n", cpu);
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	prop = of_find_property(cp, "busy-cost-data", NULL);
+	if (!prop || !prop->value) {
+		pr_err("No busy-cost-data for CPU%d\n", cpu);
+		ret = -ENODEV;
+		goto unlock;
+	}
+
+	nstates = (prop->length / sizeof(u32)) / 2;
+	em = kcalloc(nstates, sizeof(*em), GFP_KERNEL);
+	if (!em) {
+		ret = -ENOMEM;
+		goto unlock;
+	}
+
+	/* Copy the capacity and power cost to the table. */
+	for (i = 0, tmp = prop->value; i < nstates; i++) {
+		em[i].capacity = be32_to_cpup(tmp++);
+		em[i].power = be32_to_cpup(tmp++);
+	}
+
+	/* Get the CPU capacity (according to the EM) */
+	scale_cpu = em[nstates - 1].capacity;
+	if (!scale_cpu) {
+		pr_err("CPU%d: capacity cannot be 0\n", cpu);
+		kfree(em);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	/* Re-compute the intermediate frequencies based on the EM. */
+	for (i = 0; i < nstates; i++)
+		em[i].frequency = em[i].capacity * max_freq / scale_cpu;
+
+	/* Assign the table to all CPUs of this policy. */
+	for_each_cpu(i, policy->cpus) {
+		per_cpu(nr_states, i) = nstates;
+		per_cpu(cpu_em, i) = em;
+	}
+
+	pr_info("Registering EM of %*pbl\n", cpumask_pr_args(policy->cpus));
+	em_register_perf_domain(policy->cpus, nstates, &em_cb);
+
+	/* Finish the work when all possible CPUs have been registered.
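+	 * Each policy notification clears its CPUs from cpus_to_visit;
+	 * once the mask is empty, every possible CPU has an EM loaded
+	 * and the finish work below unregisters the notifier.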
*/ + cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->cpus); + if (cpumask_empty(cpus_to_visit)) + schedule_work(&finish_em_loading_work); + +unlock: + mutex_unlock(&em_loading_mutex); + + return ret; +} + +static struct notifier_block init_em_dt_notifier = { + .notifier_call = init_em_dt_callback, +}; + +static void finish_em_loading_workfn(struct work_struct *work) +{ + cpufreq_unregister_notifier(&init_em_dt_notifier, + CPUFREQ_POLICY_NOTIFIER); + free_cpumask_var(cpus_to_visit); + + /* Let the scheduler know the Energy Model is ready. */ + rebuild_sched_domains(); +} + +static int __init register_cpufreq_notifier(void) +{ + int ret; + + if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) + return -ENOMEM; + + cpumask_copy(cpus_to_visit, cpu_possible_mask); + + ret = cpufreq_register_notifier(&init_em_dt_notifier, + CPUFREQ_POLICY_NOTIFIER); + + if (ret) + free_cpumask_var(cpus_to_visit); + + return ret; +} +core_initcall(register_cpufreq_notifier); diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 64342944d917..c8024a39171b 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -427,6 +427,33 @@ static int scmi_dvfs_freq_get(const struct scmi_handle *handle, u32 domain, return ret; } +static int scmi_dvfs_est_power_get(const struct scmi_handle *handle, u32 domain, + unsigned long *freq, unsigned long *power) +{ + struct scmi_perf_info *pi = handle->perf_priv; + struct perf_dom_info *dom; + unsigned long opp_freq; + int idx, ret = -EINVAL; + struct scmi_opp *opp; + + dom = pi->dom_info + domain; + if (!dom) + return -EIO; + + for (opp = dom->opp, idx = 0; idx < dom->opp_count; idx++, opp++) { + opp_freq = opp->perf * dom->mult_factor; + if (opp_freq < *freq) + continue; + + *freq = opp_freq; + *power = opp->power; + ret = 0; + break; + } + + return ret; +} + static struct scmi_perf_ops perf_ops = { .limits_set = scmi_perf_limits_set, .limits_get = scmi_perf_limits_get, @@ -437,6 +464,7 @@ static struct scmi_perf_ops perf_ops = { .device_opps_add = scmi_dvfs_device_opps_add, .freq_set = scmi_dvfs_freq_set, .freq_get = scmi_dvfs_freq_get, + .est_power_get = scmi_dvfs_est_power_get, }; static int scmi_perf_protocol_init(struct scmi_handle *handle) diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c index 503bbe2a9d49..ff145f39e8ca 100644 --- a/drivers/firmware/efi/efibc.c +++ b/drivers/firmware/efi/efibc.c @@ -19,6 +19,15 @@ #include #include +#define REBOOT_REASON_CRASH "kernel_panic" +#define REBOOT_REASON_NORMAL "reboot" +#define REBOOT_REASON_SHUTDOWN "shutdown" +#define REBOOT_REASON_WATCHDOG "watchdog" + +#define WATCHDOG_KERNEL_H "Watchdog" +#define WATCHDOG_KERNEL_S "softlockup" +#define WATCHDOG_KERNEL_D "Software Watchdog" + static void efibc_str_to_str16(const char *str, efi_char16_t *str16) { size_t i; @@ -67,11 +76,11 @@ static int efibc_set_variable(const char *name, const char *value) static int efibc_reboot_notifier_call(struct notifier_block *notifier, unsigned long event, void *data) { - const char *reason = "shutdown"; + const char *reason = REBOOT_REASON_SHUTDOWN; int ret; if (event == SYS_RESTART) - reason = "reboot"; + reason = REBOOT_REASON_NORMAL; ret = efibc_set_variable("LoaderEntryRebootReason", reason); if (ret || !data) @@ -82,10 +91,41 @@ static int efibc_reboot_notifier_call(struct notifier_block *notifier, return NOTIFY_DONE; } +static int efibc_panic_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + int i; + char *str = data; + 
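+	/* The panic string is matched by prefix against the watchdog
+	 * markers defined above; e.g. a hard-lockup panic begins with
+	 * "Watchdog", a soft-lockup panic with "softlockup" (exact
+	 * panic messages vary by kernel version, so treat these as
+	 * representative examples).
+	 */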
const char *reason = REBOOT_REASON_CRASH; + const char *watchdogs[] = { + WATCHDOG_KERNEL_H, + WATCHDOG_KERNEL_S, + WATCHDOG_KERNEL_D + }; + + + if (str) { + for (i = 0; i < ARRAY_SIZE(watchdogs); i++) { + if (strncmp(str, watchdogs[i], strlen(watchdogs[i])) == 0) { + reason = REBOOT_REASON_WATCHDOG; + break; + } + } + } + + efibc_set_variable("LoaderEntryRebootReason", reason); + + return NOTIFY_DONE; +} + static struct notifier_block efibc_reboot_notifier = { .notifier_call = efibc_reboot_notifier_call, }; +static struct notifier_block paniced = { + .notifier_call = efibc_panic_notifier_call, +}; + static int __init efibc_init(void) { int ret; @@ -94,8 +134,12 @@ static int __init efibc_init(void) return -ENODEV; ret = register_reboot_notifier(&efibc_reboot_notifier); - if (ret) + if (ret) { pr_err("unable to register reboot notifier\n"); + return ret; + } + + atomic_notifier_chain_register(&panic_notifier_list, &paniced); return ret; } @@ -104,6 +148,7 @@ module_init(efibc_init); static void __exit efibc_exit(void) { unregister_reboot_notifier(&efibc_reboot_notifier); + atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); } module_exit(efibc_exit); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 018fcdb353d2..3fdb8ced9264 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1405,17 +1405,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector, return -EINVAL; } state->content_protection = val; - } else if (property == config->writeback_fb_id_property) { - struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val); - int ret = drm_atomic_set_writeback_fb_for_connector(state, fb); - if (fb) - drm_framebuffer_put(fb); - return ret; - } else if (property == config->writeback_out_fence_ptr_property) { - s32 __user *fence_ptr = u64_to_user_ptr(val); - - return set_out_fence_for_connector(state->state, connector, - fence_ptr); + } else if (property == connector->cp_srm_property) { + state->cp_srm_blob_id = val; } else if (connector->funcs->atomic_set_property) { return connector->funcs->atomic_set_property(connector, state, property, val); @@ -1506,11 +1497,11 @@ drm_atomic_connector_get_property(struct drm_connector *connector, *val = state->scaling_mode; } else if (property == connector->content_protection_property) { *val = state->content_protection; - } else if (property == config->writeback_fb_id_property) { - /* Writeback framebuffer is one-shot, write and forget */ - *val = 0; - } else if (property == config->writeback_out_fence_ptr_property) { - *val = 0; + } else if (property == connector->cp_srm_property) { + *val = state->cp_srm_blob_id; + } else if (property == connector->cp_downstream_property) { + *val = connector->cp_downstream_blob_ptr ? + connector->cp_downstream_blob_ptr->base.id : 0; } else if (connector->funcs->atomic_get_property) { return connector->funcs->atomic_get_property(connector, state, property, val); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 80be74df7ba6..bb7582f7613b 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1074,7 +1074,7 @@ drm_atomic_helper_update_legacy_modeset_state(struct drm_device *dev, crtc->enabled = new_crtc_state->enable; new_plane_state = - drm_atomic_get_new_plane_state(old_state, primary); + primary ? 
drm_atomic_get_new_plane_state(old_state, primary) : NULL;
 
 	if (new_plane_state && new_plane_state->crtc == crtc) {
 		crtc->x = new_plane_state->src_x >> 16;
@@ -2912,6 +2912,9 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
 	int hdisplay, vdisplay;
 	int ret;
 
+	if (!crtc->primary)
+		return -EINVAL;
+
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
 	if (IS_ERR(crtc_state))
 		return PTR_ERR(crtc_state);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 6011d769d50b..60858b76374c 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -241,6 +241,7 @@ int drm_connector_init(struct drm_device *dev,
 	INIT_LIST_HEAD(&connector->modes);
 	mutex_init(&connector->mutex);
 	connector->edid_blob_ptr = NULL;
+	connector->cp_downstream_blob_ptr = NULL;
 	connector->status = connector_status_unknown;
 	connector->display_info.panel_orientation =
 		DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -1346,6 +1347,77 @@ int drm_connector_attach_content_protection_property(
 }
 EXPORT_SYMBOL(drm_connector_attach_content_protection_property);
 
+/**
+ * drm_connector_attach_cp_downstream_property - attach cp downstream
+ * property
+ *
+ * @connector: connector to attach cp downstream property on.
+ *
+ * This is used to add support for content protection downstream info on
+ * select connectors. When an Intel platform is configured as a repeater,
+ * this downstream info is used by userspace to complete the repeater
+ * authentication of the HDCP specification with the upstream HDCP
+ * transmitter.
+ *
+ * The cp downstream will be set to &drm_connector_state.cp_downstream
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_cp_downstream_property(
+		struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct drm_property *prop;
+
+	prop = drm_property_create(dev, DRM_MODE_PROP_BLOB |
+				   DRM_MODE_PROP_IMMUTABLE,
+				   "CP_Downstream_Info", 0);
+	if (!prop)
+		return -ENOMEM;
+
+	drm_object_attach_property(&connector->base, prop, 0);
+
+	connector->cp_downstream_property = prop;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_cp_downstream_property);
+
+/**
+ * drm_connector_attach_cp_srm_property - attach cp srm
+ * property
+ *
+ * @connector: connector to attach cp srm property on.
+ *
+ * This is used to add support for sending the SRM table from userspace to
+ * the kernel on selected connectors. The protected content provider will
+ * provide the System Renewability Message (SRM) to userspace before
+ * requesting HDCP on a port. Hence, if a port supports content protection
+ * (mostly HDCP), this property will be attached to receive the SRM for a
+ * revocation check of the KSVs.
+ *
+ * The srm blob id will be set to &drm_connector_state.cp_srm_blob_id
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
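+ *
+ * Typical userspace flow (illustrative, not mandated by this patch):
+ * create a property blob holding the raw SRM bytes with
+ * DRM_IOCTL_MODE_CREATEPROPBLOB and set this property to the returned
+ * blob id as part of an atomic commit.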
+ */ +int drm_connector_attach_cp_srm_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct drm_property *prop; + + prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "CP_SRM", 0); + if (!prop) + return -ENOMEM; + + drm_object_attach_property(&connector->base, prop, 0); + connector->cp_srm_property = prop; + + return 0; +} +EXPORT_SYMBOL(drm_connector_attach_cp_srm_property); + + /** * drm_mode_create_aspect_ratio_property - create aspect ratio property * @dev: DRM device @@ -1578,6 +1650,38 @@ void drm_connector_set_link_status_property(struct drm_connector *connector, } EXPORT_SYMBOL(drm_connector_set_link_status_property); +/** + * drm_mode_connector_update_cp_downstream_property - update the cp_downstream + * property of a connector + * @connector: drm connector + * @cp_downstream_info: new value of the cp_downstream property + * + * This function creates a new blob modeset object and assigns its id to the + * connector's cp_downstream property. + * + * Returns: + * Zero on success, negative errno on failure. + */ +int drm_mode_connector_update_cp_downstream_property( + struct drm_connector *connector, + const struct cp_downstream_info *info) +{ + struct drm_device *dev = connector->dev; + int ret; + + if (!info) + return -EINVAL; + + ret = drm_property_replace_global_blob(dev, + &connector->cp_downstream_blob_ptr, + sizeof(struct cp_downstream_info), + info, + &connector->base, + connector->cp_downstream_property); + return ret; +} +EXPORT_SYMBOL(drm_mode_connector_update_cp_downstream_property); + /** * drm_connector_init_panel_orientation_property - * initialize the connecters panel_orientation property diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bae43938c8f6..8dd3c1123eba 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -410,6 +410,9 @@ int drm_mode_getcrtc(struct drm_device *dev, plane = crtc->primary; + if (!crtc->primary) + return -EINVAL; + crtc_resp->gamma_size = crtc->gamma_size; drm_modeset_lock(&plane->mutex, NULL); @@ -461,6 +464,9 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, struct drm_crtc *tmp; int ret; + if (!crtc->primary) + return -EINVAL; + WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev)); /* @@ -470,6 +476,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, */ drm_for_each_crtc(tmp, crtc->dev) { struct drm_plane *plane = tmp->primary; + if (!tmp->primary) + continue; plane->old_fb = plane->fb; } @@ -486,6 +494,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set, drm_for_each_crtc(tmp, crtc->dev) { struct drm_plane *plane = tmp->primary; + if (!tmp->primary) + continue; if (plane->fb) drm_framebuffer_get(plane->fb); @@ -602,6 +612,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, if (ret) goto out; + if (!crtc->primary) + return -EINVAL; + if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. 
 
 /**
  * drm_connector_init_panel_orientation_property -
  *	initialize the connecters panel_orientation property
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index bae43938c8f6..8dd3c1123eba 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -410,6 +410,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
 
 	plane = crtc->primary;
 
+	if (!crtc->primary)
+		return -EINVAL;
+
 	crtc_resp->gamma_size = crtc->gamma_size;
 
 	drm_modeset_lock(&plane->mutex, NULL);
@@ -461,6 +464,9 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
 	struct drm_crtc *tmp;
 	int ret;
 
+	if (!crtc->primary)
+		return -EINVAL;
+
 	WARN_ON(drm_drv_uses_atomic_modeset(crtc->dev));
 
 	/*
@@ -470,6 +476,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
 	 */
 	drm_for_each_crtc(tmp, crtc->dev) {
 		struct drm_plane *plane = tmp->primary;
+		if (!tmp->primary)
+			continue;
 
 		plane->old_fb = plane->fb;
 	}
@@ -486,6 +494,8 @@ static int __drm_mode_set_config_internal(struct drm_mode_set *set,
 	drm_for_each_crtc(tmp, crtc->dev) {
 		struct drm_plane *plane = tmp->primary;
+		if (!tmp->primary)
+			continue;
 
 		if (plane->fb)
 			drm_framebuffer_get(plane->fb);
@@ -602,6 +612,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
+	if (!crtc->primary) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (crtc_req->mode_valid) {
 		/* If we have a mode we need a framebuffer. */
 		/* If we pass -1, set the mode with the currently bound fb */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 5a84c3bc915d..aaa80d640203 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -185,6 +185,8 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
 				(*crtc_funcs->disable)(crtc);
 			else
 				(*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
+			if (!crtc->primary)
+				continue;
 			crtc->primary->fb = NULL;
 		}
 	}
@@ -539,6 +541,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set,
 
 	crtc_funcs = set->crtc->helper_private;
 
+	if (!set->crtc->primary)
+		return -EINVAL;
+
 	if (!set->mode)
 		set->fb = NULL;
 
@@ -950,6 +955,8 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
 
 		if (!crtc->enabled)
 			continue;
+		if (!crtc->primary)
+			continue;
 
 		ret = drm_crtc_helper_set_mode(crtc, &crtc->mode,
 					       crtc->x, crtc->y,
 					       crtc->primary->fb);
@@ -1072,6 +1079,9 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 	struct drm_plane_state *plane_state;
 	struct drm_plane *plane = crtc->primary;
 
+	if (!plane)
+		return -EINVAL;
+
 	if (plane->funcs->atomic_duplicate_state)
 		plane_state = plane->funcs->atomic_duplicate_state(plane);
 	else {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 515a7aec57ac..03f780be85a0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -549,6 +549,8 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
 	drm_for_each_crtc(crtc, dev) {
+		if (!crtc->primary)
+			continue;
 		drm_modeset_lock(&crtc->mutex, NULL);
 		if (crtc->primary->fb)
 			crtcs_bound++;
 		if (crtc->primary->fb == fb_helper->fb)
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ffa8dc35515f..d47119e045e8 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -46,6 +46,10 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+EXPORT_SYMBOL(drm_global_mutex);
+#endif
+
 /**
  * DOC: file operations
  *
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 781af1d42d76..d2d2c30bea97 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -891,7 +891,7 @@ static void legacy_remove_fb(struct drm_framebuffer *fb)
 	drm_modeset_lock_all(dev);
 	/* remove from any CRTC */
 	drm_for_each_crtc(crtc, dev) {
-		if (crtc->primary->fb == fb) {
+		if (crtc->primary && crtc->primary->fb == fb) {
 			/* should turn off the crtc */
 			if (drm_crtc_force_disable(crtc))
 				DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 40179c5fc6b8..d9848ae2219e 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -48,6 +48,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
 					struct dma_buf *dma_buf);
 
 /* drm_drv.c */
+
+#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK)
+#define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
+#endif
+
 struct drm_minor *drm_minor_acquire(unsigned int minor_id);
 void drm_minor_release(struct drm_minor *minor);
 
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 33a458b7f1fc..3d25ace132e7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -84,6 +84,16 @@ config DRM_I915_COMPRESS_ERROR
 
 	  If in doubt, say "Y".
 
+config DRM_I915_MEMTRACK
+	bool "Enable shmem usage status tracking"
+	depends on DRM_I915_CAPTURE_ERROR
+	default y
+	help
+	  This option enables tracking of shmem usage, both as a
+	  system-wide summary and per process.
+
+	  If in doubt, say "N".
+
 config DRM_I915_USERPTR
 	bool "Always enable userptr support"
 	depends on DRM_I915
@@ -127,6 +137,16 @@ config DRM_I915_GVT_KVMGT
 	help
 	  Choose this option if you want to enable KVMGT support for
 	  Intel GVT-g.
 
+config DRM_I915_GVT_ACRN_GVT
+	tristate "Enable ACRN support for Intel GVT-g"
+	depends on DRM_I915_GVT
+	depends on ACRN
+	depends on ACRN_VHM
+	default n
+	help
+	  Choose this option if you want to enable ACRN_GVT support for
+	  Intel GVT-g under the ACRN hypervisor environment.
+
 menu "drm/i915 Debugging"
 	depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 5794f102f9b8..a5198df1b1ca 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -79,6 +79,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_trace_points.o \
 	  i915_vma.o \
 	  intel_breadcrumbs.o \
+	  i915_gem_gvtbuffer.o \
 	  intel_engine_cs.o \
 	  intel_hangcheck.o \
 	  intel_lrc.o \
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index b016dc753db9..0acb4dabc00c 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,6 +5,7 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 	execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
 	fb_decoder.o dmabuf.o page_track.o
 
-ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y				+= -I$(src) -I$(src)/$(GVT_DIR) -Wall
 i915-y					+= $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
 obj-$(CONFIG_DRM_I915_GVT_KVMGT)	+= $(GVT_DIR)/kvmgt.o
+obj-$(CONFIG_DRM_I915_GVT_ACRN_GVT)	+= $(GVT_DIR)/acrngt.o
diff --git a/drivers/gpu/drm/i915/gvt/acrngt.c b/drivers/gpu/drm/i915/gvt/acrngt.c
new file mode 100644
index 000000000000..abde541b76f8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/acrngt.c
@@ -0,0 +1,1012 @@
+/*
+ * Interfaces coupled to ACRN
+ *
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of Version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ */
+
+/*
+ * NOTE:
+ * This file contains the hypervisor-specific interactions that
+ * implement the concept of a mediated pass-through framework.
+ * What this file provides is actually a general abstraction
+ * of an in-kernel device model, which is not GVT specific.
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include "acrngt.h" + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("ACRNGT mediated passthrough driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); + +#define ASSERT(x) \ +do { if (x) break; \ + printk(KERN_EMERG "### ASSERTION FAILED %s: %s: %d: %s\n", \ + __FILE__, __func__, __LINE__, #x); dump_stack(); BUG(); \ +} while (0) + + +struct kobject *acrn_gvt_ctrl_kobj; +static struct kset *acrn_gvt_kset; +static DEFINE_MUTEX(acrn_gvt_sysfs_lock); + +struct gvt_acrngt acrngt_priv; +const struct intel_gvt_ops *intel_gvt_ops; + +static void disable_domu_plane(int pipe, int plane) +{ + struct drm_i915_private *dev_priv = acrngt_priv.gvt->dev_priv; + + I915_WRITE(PLANE_CTL(pipe, plane), 0); + + I915_WRITE(PLANE_SURF(pipe, plane), 0); + POSTING_READ(PLANE_SURF(pipe, plane)); +} + +void acrngt_instance_destroy(struct intel_vgpu *vgpu) +{ + int pipe, plane; + struct acrngt_hvm_dev *info = NULL; + struct intel_gvt *gvt = acrngt_priv.gvt; + + if (vgpu) { + info = (struct acrngt_hvm_dev *)vgpu->handle; + + if (info && info->emulation_thread != NULL) + kthread_stop(info->emulation_thread); + + for_each_pipe(gvt->dev_priv, pipe) { + for_each_universal_plane(gvt->dev_priv, pipe, plane) { + if (gvt->pipe_info[pipe].plane_owner[plane] == + vgpu->id) { + disable_domu_plane(pipe, plane); + } + } + } + + intel_gvt_ops->vgpu_deactivate(vgpu); + intel_gvt_ops->vgpu_destroy(vgpu); + } + + if (info) { + gvt_dbg_core("destroy vgpu instance, vm id: %d, client %d", + info->vm_id, info->client); + + if (info->client != 0) + acrn_ioreq_destroy_client(info->client); + + if (info->vm) + put_vm(info->vm); + + kfree(info); + } +} + +static bool acrngt_write_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long val) +{ + if (intel_gvt_ops->emulate_cfg_write(vgpu, port, &val, bytes)) { + gvt_err("failed to write config space port 0x%x\n", port); + return false; + } + return true; +} + +static bool acrngt_read_cfg_space(struct intel_vgpu *vgpu, + unsigned int port, unsigned int bytes, unsigned long *val) +{ + unsigned long data; + + if (intel_gvt_ops->emulate_cfg_read(vgpu, port, &data, bytes)) { + gvt_err("failed to read config space port 0x%x\n", port); + return false; + } + memcpy(val, &data, bytes); + return true; +} + +static int acrngt_hvm_pio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.pci_request.direction == REQUEST_READ) { + /* PIO READ */ + gvt_dbg_core("handle pio read emulation at port 0x%x\n", + req->reqs.pci_request.reg); + if (!acrngt_read_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long *)&req->reqs.pci_request.value)) { + gvt_err("failed to read pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } else if (req->reqs.pci_request.direction == REQUEST_WRITE) { + /* PIO WRITE */ + gvt_dbg_core("handle pio write emulation at address 0x%x, " + "value 0x%x\n", + req->reqs.pci_request.reg, req->reqs.pci_request.value); + if (!acrngt_write_cfg_space(vgpu, + req->reqs.pci_request.reg, + req->reqs.pci_request.size, + (unsigned long)req->reqs.pci_request.value)) { + gvt_err("failed to write pio at addr 0x%x\n", + req->reqs.pci_request.reg); + return -EINVAL; + } + } + return 0; +} + +static int acrngt_hvm_write_handler(struct intel_vgpu *vgpu, uint64_t pa, + void *p_data, unsigned int bytes) +{ 
+ + /* Check whether pa is ppgtt */ + if (intel_gvt_ops->write_protect_handler(vgpu, pa, p_data, bytes) == 0) + return 0; + + /* pa is mmio reg or gtt */ + return intel_gvt_ops->emulate_mmio_write(vgpu, pa, p_data, bytes); +} + +static int acrngt_hvm_mmio_emulation(struct intel_vgpu *vgpu, + struct vhm_request *req) +{ + if (req->reqs.mmio_request.direction == REQUEST_READ) { + /* MMIO READ */ + gvt_dbg_core("handle mmio read emulation at address 0x%llx\n", + req->reqs.mmio_request.address); + if (intel_gvt_ops->emulate_mmio_read(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to read mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + } else if (req->reqs.mmio_request.direction == REQUEST_WRITE) { + /* MMIO Write */ + if (acrngt_hvm_write_handler(vgpu, + req->reqs.mmio_request.address, + &req->reqs.mmio_request.value, + req->reqs.mmio_request.size)) { + gvt_err("failed to write mmio at addr 0x%llx\n", + req->reqs.mmio_request.address); + return -EINVAL; + } + gvt_dbg_core("handle mmio write emulation at address 0x%llx, " + "value 0x%llx\n", + req->reqs.mmio_request.address, req->reqs.mmio_request.value); + } + + return 0; +} + +static void handle_request_error(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + if (vgpu->failsafe == false) { + vgpu->failsafe= true; + gvt_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); + } + mutex_unlock(&vgpu->gvt->lock); +} + +static int acrngt_emulation_thread(void *priv) +{ + struct intel_vgpu *vgpu = (struct intel_vgpu *)priv; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)vgpu->handle; + struct vhm_request *req; + + int vcpu, ret; + int nr_vcpus = info->nr_vcpu; + + gvt_dbg_core("start kthread for VM%d\n", info->vm_id); + ASSERT(info->nr_vcpu <= MAX_HVM_VCPUS_SUPPORTED); + + set_freezable(); + while (1) { + acrn_ioreq_attach_client(info->client, 1); + + if (kthread_should_stop()) + return 0; + + for (vcpu = 0; vcpu < nr_vcpus; vcpu++) { + req = &info->req_buf[vcpu]; + if (atomic_read(&req->processed) == + REQ_STATE_PROCESSING && + req->client == info->client) { + gvt_dbg_core("handle ioreq type %d\n", + req->type); + switch (req->type) { + case REQ_PCICFG: + ret = acrngt_hvm_pio_emulation(vgpu, req); + break; + case REQ_MMIO: + case REQ_WP: + ret = acrngt_hvm_mmio_emulation(vgpu, req); + break; + default: + gvt_err("Unknown ioreq type %x\n", + req->type); + ret = -EINVAL; + break; + } + /* error handling */ + if (ret) + handle_request_error(vgpu); + + smp_mb(); + atomic_set(&req->processed, REQ_STATE_COMPLETE); + /* complete request */ + if (acrn_ioreq_complete_request(info->client, + vcpu)) + gvt_err("failed complete request\n"); + } + } + } + + BUG(); /* It's actually impossible to reach here */ + return 0; +} + +struct intel_vgpu *acrngt_instance_create(domid_t vm_id, + struct intel_vgpu_type *vgpu_type) +{ + struct acrngt_hvm_dev *info; + struct intel_vgpu *vgpu; + int ret = 0; + struct task_struct *thread; + struct vm_info vm_info; + + gvt_dbg_core("acrngt_instance_create enter\n"); + if (!intel_gvt_ops || !acrngt_priv.gvt) + return NULL; + + vgpu = intel_gvt_ops->vgpu_create(acrngt_priv.gvt, vgpu_type); + if (IS_ERR(vgpu)) { + gvt_err("failed to create vgpu\n"); + return NULL; + } + + info = kzalloc(sizeof(struct acrngt_hvm_dev), GFP_KERNEL); + if (info == NULL) { + gvt_err("failed to alloc acrngt_hvm_dev\n"); + goto err; + } + + info->vm_id = vm_id; + info->vgpu = vgpu; + vgpu->handle = (unsigned long)info; 
+ + if ((info->vm = find_get_vm(vm_id)) == NULL) { + gvt_err("failed to get vm %d\n", vm_id); + acrngt_instance_destroy(vgpu); + return NULL; + } + if (info->vm->req_buf == NULL) { + gvt_err("failed to get req buf for vm %d\n", vm_id); + goto err; + } + gvt_dbg_core("get vm req_buf from vm_id %d\n", vm_id); + + /* create client: no handler -> handle request by itself */ + info->client = acrn_ioreq_create_client(vm_id, NULL, "ioreq gvt-g"); + if (info->client < 0) { + gvt_err("failed to create ioreq client for vm id %d\n", vm_id); + goto err; + } + + /* get vm info */ + ret = vhm_get_vm_info(vm_id, &vm_info); + if (ret < 0) { + gvt_err("failed to get vm info for vm id %d\n", vm_id); + goto err; + } + + info->nr_vcpu = vm_info.max_vcpu; + + /* get req buf */ + info->req_buf = acrn_ioreq_get_reqbuf(info->client); + if (info->req_buf == NULL) { + gvt_err("failed to get req_buf for client %d\n", info->client); + goto err; + } + + /* trap config space access */ + acrn_ioreq_intercept_bdf(info->client, 0, 2, 0); + + thread = kthread_run(acrngt_emulation_thread, vgpu, + "acrngt_emulation:%d", vm_id); + if (IS_ERR(thread)) { + gvt_err("failed to run emulation thread for vm %d\n", vm_id); + goto err; + } + info->emulation_thread = thread; + gvt_dbg_core("create vgpu instance success, vm_id %d, client %d," + " nr_vcpu %d\n", info->vm_id,info->client, info->nr_vcpu); + + intel_gvt_ops->vgpu_activate(vgpu); + + return vgpu; + +err: + acrngt_instance_destroy(vgpu); + return NULL; +} + +static ssize_t kobj_attr_show(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->show) + ret = kattr->show(kobj, kattr, buf); + return ret; +} + +static ssize_t kobj_attr_store(struct kobject *kobj, + struct attribute *attr, const char *buf, size_t count) +{ + struct kobj_attribute *kattr; + ssize_t ret = -EIO; + + kattr = container_of(attr, struct kobj_attribute, attr); + if (kattr->store) + ret = kattr->store(kobj, kattr, buf, count); + return ret; +} + +const struct sysfs_ops acrngt_kobj_sysfs_ops = { + .show = kobj_attr_show, + .store = kobj_attr_store, +}; + +static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i; + + for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++) { + if (acrngt_priv.vgpus[i] && + (kobj == &((struct acrngt_hvm_dev *) + (acrngt_priv.vgpus[i]->handle))->kobj)) { + return sprintf(buf, "%d\n", acrngt_priv.vgpus[i]->id); + } + } + return 0; +} + +static struct kobj_attribute acrngt_vm_attr = +__ATTR(vgpu_id, 0440, acrngt_sysfs_vgpu_id, NULL); + + +static struct attribute *acrngt_vm_attrs[] = { + &acrngt_vm_attr.attr, + NULL, /* need to NULL terminate the list of attributes */ +}; + +static struct kobj_type acrngt_instance_ktype = { + .sysfs_ops = &acrngt_kobj_sysfs_ops, + .default_attrs = acrngt_vm_attrs, +}; + +static int acrngt_sysfs_add_instance(struct acrngt_hvm_params *vp) +{ + int ret = 0; + struct intel_vgpu *vgpu; + struct acrngt_hvm_dev *info; + + struct intel_vgpu_type type = acrngt_priv.gvt->types[0]; + + /* todo: wa patch due to plane restriction patches are not porting */ + acrngt_priv.gvt->pipe_info[1].plane_owner[0] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[1] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[2] = 1; + acrngt_priv.gvt->pipe_info[1].plane_owner[3] = 1; + + type.low_gm_size = vp->aperture_sz * VMEM_1MB; + type.high_gm_size = (vp->gm_sz - vp->aperture_sz) * VMEM_1MB; + type.fence = 
vp->fence_sz;
+	mutex_lock(&acrn_gvt_sysfs_lock);
+	vgpu = acrngt_instance_create(vp->vm_id, &type);
+	mutex_unlock(&acrn_gvt_sysfs_lock);
+	if (vgpu == NULL) {
+		gvt_err("acrngt_sysfs_add_instance failed.\n");
+		ret = -EINVAL;
+	} else {
+		info = (struct acrngt_hvm_dev *) vgpu->handle;
+		info->vm_id = vp->vm_id;
+		acrngt_priv.vgpus[vgpu->id - 1] = vgpu;
+		gvt_dbg_core("add acrngt instance for vm-%d with vgpu-%d.\n",
+			vp->vm_id, vgpu->id);
+
+		kobject_init(&info->kobj, &acrngt_instance_ktype);
+		info->kobj.kset = acrn_gvt_kset;
+		/* add kobject, NULL parent indicates using kset as parent */
+		ret = kobject_add(&info->kobj, NULL, "vm%u", info->vm_id);
+		if (ret) {
+			gvt_err("%s: kobject add error: %d\n", __func__, ret);
+			kobject_put(&info->kobj);
+		}
+	}
+
+	return ret;
+}
+
+static struct intel_vgpu *vgpu_from_id(int vm_id)
+{
+	int i;
+	struct acrngt_hvm_dev *hvm_dev = NULL;
+
+	/* vm_id is negative in the del_instance call */
+	if (vm_id < 0)
+		vm_id = -vm_id;
+	for (i = 0; i < GVT_MAX_VGPU_INSTANCE; i++)
+		if (acrngt_priv.vgpus[i]) {
+			hvm_dev = (struct acrngt_hvm_dev *)
+					acrngt_priv.vgpus[i]->handle;
+			if (hvm_dev && (vm_id == hvm_dev->vm_id))
+				return acrngt_priv.vgpus[i];
+		}
+	return NULL;
+}
+
+static int acrngt_sysfs_del_instance(struct acrngt_hvm_params *vp)
+{
+	int ret = 0;
+	struct intel_vgpu *vgpu = vgpu_from_id(vp->vm_id);
+	struct acrngt_hvm_dev *info = NULL;
+
+	if (vgpu) {
+		info = (struct acrngt_hvm_dev *) vgpu->handle;
+		gvt_dbg_core("remove vm-%d sysfs node.\n", vp->vm_id);
+		kobject_put(&info->kobj);
+
+		mutex_lock(&acrn_gvt_sysfs_lock);
+		acrngt_priv.vgpus[vgpu->id - 1] = NULL;
+		acrngt_instance_destroy(vgpu);
+		mutex_unlock(&acrn_gvt_sysfs_lock);
+	}
+
+	return ret;
+}
+
+static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	struct acrngt_hvm_params vp;
+	int param_cnt;
+	char param_str[64];
+	int rc;
+	int high_gm_sz;
+	int low_gm_sz;
+
+	/* We expect param_str to be "vmid,a,b,c", where the guest wants an
+	 * aperture of a MB and b MB of gm, plus c fence registers; or
+	 * "-vmid", when we want to release the gvt instance.
+	 */
+	(void)sscanf(buf, "%63s", param_str);
+	param_cnt = sscanf(param_str, "%d,%d,%d,%d", &vp.vm_id,
+			&low_gm_sz, &high_gm_sz, &vp.fence_sz);
+	gvt_dbg_core("create vm-%d sysfs node, low gm size %d,"
+		" high gm size %d, fence size %d\n",
+		vp.vm_id, low_gm_sz, high_gm_sz, vp.fence_sz);
+	vp.aperture_sz = low_gm_sz;
+	vp.gm_sz = high_gm_sz + low_gm_sz;
+	if (param_cnt == 1) {
+		if (vp.vm_id >= 0)
+			return -EINVAL;
+	} else if (param_cnt == 4) {
+		if (!(vp.vm_id > 0 && vp.aperture_sz > 0 &&
+			vp.aperture_sz <= vp.gm_sz && vp.fence_sz > 0))
+			return -EINVAL;
+	} else {
+		gvt_err("%s: parameter count incorrect\n", __func__);
+		return -EINVAL;
+	}
+
+	rc = (vp.vm_id > 0) ? acrngt_sysfs_add_instance(&vp) :
+		acrngt_sysfs_del_instance(&vp);
+
+	return rc < 0 ? rc : count;
+}
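Putting the pieces together, the control node accepts "vmid,low_gm,high_gm,fences"
to create an instance and "-vmid" to release it again. A hypothetical user-space
sketch of that flow; the sysfs path follows from the "gvt" kset and the "control"
kobject registered in acrngt_sysfs_init(), and the sizes are examples only:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/sys/kernel/gvt/control/create_gvt_instance",
			      O_WRONLY);

		if (fd < 0)
			return 1;
		/* VM 1: 64 MB aperture (low GM), 448 MB high GM, 4 fences */
		write(fd, "1,64,448,4", 10);
		/* ...later, a negative vm_id releases the instance again */
		write(fd, "-1", 2);
		close(fd);
		return 0;
	}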
+
+static ssize_t show_plane_owner(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Planes:\nPipe A: %d %d %d %d\n"
+				"Pipe B: %d %d %d %d\nPipe C: %d %d %d\n",
+		acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_PRIMARY],
+		acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE0],
+		acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE1],
+		acrngt_priv.gvt->pipe_info[PIPE_A].plane_owner[PLANE_SPRITE2],
+		acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_PRIMARY],
+		acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE0],
+		acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE1],
+		acrngt_priv.gvt->pipe_info[PIPE_B].plane_owner[PLANE_SPRITE2],
+		acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_PRIMARY],
+		acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE0],
+		acrngt_priv.gvt->pipe_info[PIPE_C].plane_owner[PLANE_SPRITE1]);
+}
+
+static struct kobj_attribute acrngt_instance_attr =
+__ATTR(create_gvt_instance, 0220, NULL, acrngt_sysfs_instance_manage);
+
+static struct kobj_attribute plane_owner_attr =
+__ATTR(plane_owner_show, 0440, show_plane_owner, NULL);
+
+static struct attribute *acrngt_ctrl_attrs[] = {
+	&acrngt_instance_attr.attr,
+	&plane_owner_attr.attr,
+	NULL,	/* need to NULL terminate the list of attributes */
+};
+
+static struct kobj_type acrngt_ctrl_ktype = {
+	.sysfs_ops = &acrngt_kobj_sysfs_ops,
+	.default_attrs = acrngt_ctrl_attrs,
+};
+
+int acrngt_sysfs_init(struct intel_gvt *gvt)
+{
+	int ret;
+
+	acrn_gvt_kset = kset_create_and_add("gvt", NULL, kernel_kobj);
+	if (!acrn_gvt_kset) {
+		ret = -ENOMEM;
+		goto kset_fail;
+	}
+
+	acrn_gvt_ctrl_kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
+	if (!acrn_gvt_ctrl_kobj) {
+		ret = -ENOMEM;
+		goto ctrl_fail;
+	}
+
+	acrn_gvt_ctrl_kobj->kset = acrn_gvt_kset;
+	ret = kobject_init_and_add(acrn_gvt_ctrl_kobj, &acrngt_ctrl_ktype,
+			NULL, "control");
+	if (ret) {
+		ret = -EINVAL;
+		goto kobj_fail;
+	}
+
+	return 0;
+
+kobj_fail:
+	kobject_put(acrn_gvt_ctrl_kobj);
+ctrl_fail:
+	kset_unregister(acrn_gvt_kset);
+kset_fail:
+	return ret;
+}
+
+void acrngt_sysfs_del(void)
+{
+	kobject_put(acrn_gvt_ctrl_kobj);
+	kset_unregister(acrn_gvt_kset);
+}
+
+static int acrngt_host_init(struct device *dev, void *gvt, const void *ops)
+{
+	int ret = -EFAULT;
+
+	if (!gvt || !ops)
+		return -EINVAL;
+
+	acrngt_priv.gvt = (struct intel_gvt *)gvt;
+	intel_gvt_ops = (const struct intel_gvt_ops *)ops;
+
+	ret = acrngt_sysfs_init(acrngt_priv.gvt);
+	if (ret) {
+		gvt_err("acrngt_sysfs_init failed, error: %d\n", ret);
+		acrngt_priv.gvt = NULL;
+		intel_gvt_ops = NULL;
+	}
+
+	return ret;
+}
+
+static void acrngt_host_exit(struct device *dev, void *gvt)
+{
+	acrngt_sysfs_del();
+	acrngt_priv.gvt = NULL;
+	intel_gvt_ops = NULL;
+}
+
+static int acrngt_attach_vgpu(void *vgpu, unsigned long *handle)
+{
+	return 0;
+}
+
+static void acrngt_detach_vgpu(unsigned long handle)
+{
+	return;
+}
+
+static int acrngt_inject_msi(unsigned long handle, u32 addr_lo, u16 data)
+{
+	int ret;
+	struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle;
+
+	gvt_dbg_core("inject msi irq, addr 0x%x, data 0x%hx\n", addr_lo, data);
+	ret = vhm_inject_msi(info->vm_id, addr_lo, data);
+	if (ret)
+		gvt_err("failed to inject msi for vm %d\n", info->vm_id);
+	return ret;
+}
+
+static unsigned long acrngt_virt_to_mfn(void *addr)
+{
+	uint64_t gpa;
+	uint64_t hpa;
+
+	gvt_dbg_core("virt 0x%lx to mfn\n", (unsigned long)addr);
+	gpa = virt_to_phys(addr);
+	hpa = vhm_vm_gpa2hpa(0, gpa);
+ + return (unsigned long) (hpa >> PAGE_SHIFT); +} + +static int acrngt_page_track_add(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set wp page for gfn 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = acrn_ioreq_add_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) { + gvt_err("failed acrn_ioreq_add_iorange for gfn 0x%llx\n", gfn); + return ret; + } + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, true); + if (ret) + gvt_err("failed set write protect for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_page_track_remove(unsigned long handle, u64 gfn) +{ + int ret; + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("unset wp page for gfx 0x%llx\n", gfn); + + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + ret = write_protect_page(info->vm_id, gfn << PAGE_SHIFT, false); + if (ret) { + gvt_err("failed update_memmap_attr unset for gfn 0x%llx\n", gfn); + return ret; + } + ret = acrn_ioreq_del_iorange(info->client, REQ_WP, gfn << PAGE_SHIFT, + ((gfn + 1) << PAGE_SHIFT) - 1); + if (ret) + gvt_err("failed acrn_ioreq_del_iorange for gfn 0x%llx\n", gfn); + return ret; +} + +static int acrngt_read_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("read gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not read gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) buf) = *((uint8_t *) va); + break; + case 2: + *((uint16_t *) buf) = *((uint16_t *) va); + break; + case 4: + *((uint32_t *) buf) = *((uint32_t *) va); + break; + case 8: + *((uint64_t *) buf) = *((uint64_t *) va); + break; + default: + memcpy(buf, va, len); + } + return 0; +} + +static int acrngt_write_gpa(unsigned long handle, unsigned long gpa, + void *buf, unsigned long len) +{ + void *va = NULL; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("write gpa 0x%lx with len 0x%lx\n", gpa, len); + + va = map_guest_phys(info->vm_id, gpa, len); + if (!va) { + gvt_err("GVT: can not write gpa = 0x%lx!!!\n", gpa); + return -EFAULT; + } + + switch (len) + { + case 1: + *((uint8_t *) va) = *((uint8_t *) buf); + break; + case 2: + *((uint16_t *) va) = *((uint16_t *) buf); + break; + case 4: + *((uint32_t *) va) = *((uint32_t *) buf); + break; + case 8: + *((uint64_t *) va) = *((uint64_t *) buf); + break; + default: + memcpy(va, buf, len); + } + return 0; +} + +static bool is_identical_mmap(void) +{ + /* todo: need add hypercall to get such info from hypervisor */ + return true; +} + +static unsigned long acrngt_gfn_to_pfn(unsigned long handle, unsigned long gfn) +{ + unsigned long hpa; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + gvt_dbg_core("convert gfn 0x%lx to pfn\n", gfn); + if (is_identical_mmap()) { + void *va = NULL; + + va = map_guest_phys(info->vm_id, gfn << PAGE_SHIFT, + 1 << PAGE_SHIFT); + if (!va) { + gvt_err("GVT: can not map gfn = 0x%lx!!!\n", gfn); + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + } else { + hpa = virt_to_phys(va); + } + } else { + hpa = vhm_vm_gpa2hpa(info->vm_id, gfn << PAGE_SHIFT); + } + + return hpa >> PAGE_SHIFT; +} + +static int acrngt_map_gfn_to_mfn(unsigned 
long handle, unsigned long gfn, + unsigned long mfn, unsigned int nr, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("map/unmap gfn 0x%lx to mfn 0x%lx with %u pages, map %d\n", + gfn, mfn, nr, map); + + if (map) + ret = add_memory_region(info->vm_id, gfn << PAGE_SHIFT, + mfn << PAGE_SHIFT, nr << PAGE_SHIFT, + MEM_TYPE_UC, MEM_ACCESS_RWX); + else + ret = del_memory_region(info->vm_id, gfn << PAGE_SHIFT, + nr << PAGE_SHIFT); + if (ret) + gvt_err("failed map/unmap gfn 0x%lx to mfn 0x%lx with %u pages," + " map %d\n", gfn, mfn, nr, map); + return ret; +} + +static int acrngt_set_trap_area(unsigned long handle, u64 start, + u64 end, bool map) +{ + int ret; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + gvt_dbg_core("set trap area, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + + if (map) + ret = acrn_ioreq_add_iorange(info->client, REQ_MMIO, + start, end); + else + ret = acrn_ioreq_del_iorange(info->client, REQ_MMIO, + start, end); + if (ret) + gvt_err("failed set trap, start 0x%llx, end 0x%llx, map %d\n", + start, end, map); + return ret; +} + +static int acrngt_set_pvmmio(unsigned long handle, u64 start, u64 end, bool map) +{ + int rc, i; + unsigned long mfn, shared_mfn; + unsigned long pfn = start >> PAGE_SHIFT; + u32 mmio_size_fn = acrngt_priv.gvt->device_info.mmio_size >> PAGE_SHIFT; + struct acrngt_hvm_dev *info = (struct acrngt_hvm_dev *)handle; + + if (map) { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + + /* map the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc); + return rc; + } + + /* mmio access is trapped like memory write protection */ + rc = acrn_ioreq_add_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + + for (i = 0; i < mmio_size_fn; i++) { + rc = write_protect_page(info->vm_id, + (pfn + i) << PAGE_SHIFT, true); + if (rc) { + gvt_err("failed set wp for pfn 0x%lx\n", pfn + i); + return rc; + } + } + + /* scratch reg access is trapped like mmio access, 1 page */ + rc = acrngt_map_gfn_to_mfn(handle, pfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), + mfn + (VGT_PVINFO_PAGE >> PAGE_SHIFT), 1, 0); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO, + (pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE, + ((pfn + 1) << PAGE_SHIFT) + VGT_PVINFO_PAGE - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", + (pfn << PAGE_SHIFT) + VGT_PVINFO_PAGE); + return rc; + } + + } else { + mfn = acrngt_virt_to_mfn(info->vgpu->mmio.vreg); + rc = acrngt_map_gfn_to_mfn(handle, pfn, mfn, mmio_size_fn, map); + if (rc) { + gvt_err("acrn-gvt: map pfn %lx to mfn %lx fail with ret %d\n", + pfn, mfn, rc); + return rc; + } + rc = acrn_ioreq_del_iorange(info->client, REQ_WP, pfn << PAGE_SHIFT, + ((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_add_iorange for pfn 0x%lx\n", pfn); + return rc; + } + rc = acrn_ioreq_add_iorange(info->client, REQ_MMIO, pfn << PAGE_SHIFT, + 
((pfn + mmio_size_fn) << PAGE_SHIFT) - 1); + if (rc) { + gvt_err("failed acrn_ioreq_del_iorange for pfn 0x%lx\n", pfn); + return rc; + } + + /* unmap the shared page to guest */ + shared_mfn = acrngt_virt_to_mfn(info->vgpu->mmio.shared_page); + rc = acrngt_map_gfn_to_mfn(handle, pfn + mmio_size_fn, shared_mfn, 1, map); + if (rc) { + gvt_err("acrn-gvt: map shared page fail with ret %d\n", rc); + return rc; + } + } + return rc; +} + +static int acrngt_dom0_ready(void) +{ + char *env[] = {"GVT_DOM0_READY=1", NULL}; + if(!acrn_gvt_ctrl_kobj) + return 0; + gvt_dbg_core("acrngt: Dom 0 ready to accept Dom U guests\n"); + return kobject_uevent_env(acrn_gvt_ctrl_kobj, KOBJ_ADD, env); +} + +static int acrngt_dma_map_guest_page(unsigned long handle, unsigned long gfn, + unsigned long size, dma_addr_t *dma_addr) +{ + unsigned long pfn; + + pfn = acrngt_gfn_to_pfn(handle, gfn); + *dma_addr = pfn << PAGE_SHIFT; + + return 0; +} + +static void acrngt_dma_unmap_guest_page(unsigned long handle, + dma_addr_t dma_addr) +{ +} + +struct intel_gvt_mpt acrn_gvt_mpt = { + //.detect_host = acrngt_detect_host, + .host_init = acrngt_host_init, + .host_exit = acrngt_host_exit, + .attach_vgpu = acrngt_attach_vgpu, + .detach_vgpu = acrngt_detach_vgpu, + .inject_msi = acrngt_inject_msi, + .from_virt_to_mfn = acrngt_virt_to_mfn, + .enable_page_track = acrngt_page_track_add, + .disable_page_track = acrngt_page_track_remove, + .read_gpa = acrngt_read_gpa, + .write_gpa = acrngt_write_gpa, + .gfn_to_mfn = acrngt_gfn_to_pfn, + .map_gfn_to_mfn = acrngt_map_gfn_to_mfn, + .dma_map_guest_page = acrngt_dma_map_guest_page, + .dma_unmap_guest_page = acrngt_dma_unmap_guest_page, + .set_trap_area = acrngt_set_trap_area, + .set_pvmmio = acrngt_set_pvmmio, + .dom0_ready = acrngt_dom0_ready, +}; +EXPORT_SYMBOL_GPL(acrn_gvt_mpt); + +static int __init acrngt_init(void) +{ + /* todo: to support need implment check_gfx_iommu_enabled func */ + gvt_dbg_core("acrngt loaded\n"); + return 0; +} + +static void __exit acrngt_exit(void) +{ + gvt_dbg_core("acrngt: unloaded\n"); +} + +module_init(acrngt_init); +module_exit(acrngt_exit); diff --git a/drivers/gpu/drm/i915/gvt/acrngt.h b/drivers/gpu/drm/i915/gvt/acrngt.h new file mode 100644 index 000000000000..0799df2ec557 --- /dev/null +++ b/drivers/gpu/drm/i915/gvt/acrngt.h @@ -0,0 +1,81 @@ +/* + * Copyright(c) 2011-2016 Intel Corporation. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ */
+
+#ifndef INTEL_GVT_ACRNGT_H
+#define INTEL_GVT_ACRNGT_H
+
+extern struct intel_gvt *gvt_instance;
+extern const struct intel_gvt_ops *acrn_intel_gvt_ops;
+
+#define MAX_HVM_VCPUS_SUPPORTED 127
+
+#define VMEM_1MB		(1ULL << 20)	/* the size of the first 1MB */
+
+typedef uint16_t domid_t;
+
+/*
+ * acrngt_hvm_dev is a wrapper of a vGPU instance, which is represented by
+ * the intel_vgpu structure. Under the ACRN hypervisor, the acrngt_instance
+ * stands for an HVM device, which holds the related resources.
+ */
+struct acrngt_hvm_dev {
+	domid_t vm_id;
+	struct kobject kobj;
+	struct intel_vgpu *vgpu;
+
+	int nr_vcpu;
+	struct task_struct *emulation_thread;
+
+	int client;
+	struct vhm_request *req_buf;
+	struct vhm_vm *vm;
+};
+
+struct acrngt_hvm_params {
+	int vm_id;
+	int aperture_sz;	/* in MB */
+	int gm_sz;		/* in MB */
+	int fence_sz;
+};
+
+/*
+ * struct gvt_acrngt should be a single instance to share global
+ * information for the ACRNGT module.
+ */
+#define GVT_MAX_VGPU_INSTANCE 15
+struct gvt_acrngt {
+	struct intel_gvt *gvt;
+	struct intel_vgpu *vgpus[GVT_MAX_VGPU_INSTANCE];
+};
+
+static ssize_t acrngt_sysfs_instance_manage(struct kobject *kobj,
+	struct kobj_attribute *attr, const char *buf, size_t count);
+static ssize_t acrngt_sysfs_vgpu_id(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf);
+
+struct intel_vgpu *acrngt_instance_create(domid_t vm_id,
+	struct intel_vgpu_type *type);
+void acrngt_instance_destroy(struct intel_vgpu *vgpu);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index c62346fdc05d..8606925d339d 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -138,6 +138,54 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
 	return 0;
 }
 
+int map_gttmmio(struct intel_vgpu *vgpu, bool map)
+{
+	struct intel_vgpu_gm *gm = &vgpu->gm;
+	unsigned long mfn;
+	struct scatterlist *sg;
+	struct sg_table *st = gm->st;
+	u64 start, end;
+	int ret = 0;
+
+	if (!st) {
+		DRM_INFO("no scatter list, fall back to disabled ggtt pv\n");
+		return -EINVAL;
+	}
+
+	if (vgpu->gtt.ggtt_pv_mapped == map) {
+		/* If it is already in the target state, skip it */
+		return ret;
+	}
+
+	start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0);
+	start &= ~GENMASK(3, 0);
+	start += vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1;
+
+	end = start +
+		(vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size >> 1);
+
+	WARN_ON((end - start) != gvt_ggtt_sz(vgpu->gvt));
+
+	gvt_dbg_mmio("%s start=%llx end=%llx map=%d\n",
+			__func__, start, end, map);
+
+	start >>= PAGE_SHIFT;
+	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+		mfn = page_to_pfn(sg_page(sg));
+		gvt_dbg_mmio("page=%p mfn=%lx size=%x start=%llx\n",
+				sg_page(sg), mfn, sg->length, start);
+		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, start,
+				mfn, sg->length >> PAGE_SHIFT, map);
+		if (ret)
+			return ret;
+		start += sg->length >> PAGE_SHIFT;
+	}
+
+	vgpu->gtt.ggtt_pv_mapped = map;
+
+	return ret;
+}
+
 static int trap_gttmmio(struct intel_vgpu *vgpu, bool trap)
 {
 	u64 start, end;
@@ -295,9 +343,22 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 
 	/* First check if it's PCI_COMMAND */
 	if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
-		if (WARN_ON(bytes > 2))
+		if (WARN_ON(bytes != 2 && bytes != 4))
 			return -EINVAL;
-		return emulate_pci_command_write(vgpu, offset, p_data, bytes);
+
+		ret = -EINVAL;
+		if (bytes == 2)
+			ret = emulate_pci_command_write(vgpu, offset,
+					p_data, bytes);
+		if
(bytes == 4) { + ret = emulate_pci_command_write(vgpu, offset, + p_data, 2); + if (ret) + return ret; + vgpu_pci_cfg_mem_write(vgpu, offset + 2, + (u8 *)p_data + 2, 2); + } + return ret; } switch (rounddown(offset, 4)) { @@ -322,6 +383,15 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, case INTEL_GVT_PCI_OPREGION: if (WARN_ON(!IS_ALIGNED(offset, 4))) return -EINVAL; + + /* + * To support virtual display, we need to override the real VBT in the + * OpRegion. So here we don't report OpRegion to guest. + */ + if (IS_BROXTON(vgpu->gvt->dev_priv) || + IS_KABYLAKE(vgpu->gvt->dev_priv)) + return 0; + ret = intel_vgpu_opregion_base_write_handler(vgpu, *(u32 *)p_data); if (ret) @@ -399,6 +469,8 @@ void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) INTEL_GVT_PCI_CLASS_VGA_OTHER; if (cmd & PCI_COMMAND_MEMORY) { + if (VGPU_PVMMIO(vgpu)) + set_pvmmio(vgpu, false); trap_gttmmio(vgpu, false); map_aperture(vgpu, false); } diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index a614db310ea2..8d130d4d58b7 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1840,6 +1840,8 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) return ret; } +static int mi_noop_index; + static struct cmd_info cmd_info[] = { {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL}, @@ -2525,7 +2527,12 @@ static int cmd_parser_exec(struct parser_exec_state *s) cmd = cmd_val(s, 0); - info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + /* fastpath for MI_NOOP */ + if (cmd == MI_NOOP) + info = &cmd_info[mi_noop_index]; + else + info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); + if (info == NULL) { gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n", cmd, get_opcode(cmd, s->ring_id), @@ -2710,6 +2717,33 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) return ret; } +#define GEN8_PDPES 4 +int gvt_emit_pdps(struct intel_vgpu_workload *workload) +{ + const int num_cmds = GEN8_PDPES * 2; + struct i915_request *req = workload->req; + struct intel_engine_cs *engine = req->engine; + u32 *cs; + u32 *pdps = (u32 *)(workload->shadow_mm->ppgtt_mm.shadow_pdps); + int i; + + cs = intel_ring_begin(req, num_cmds * 2 + 2); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_LOAD_REGISTER_IMM(num_cmds); + for (i = 0; i < GEN8_PDPES; i++) { + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i)); + *cs++ = pdps[i * 2]; + *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i)); + *cs++ = pdps[i * 2 + 1]; + } + *cs++ = MI_NOOP; + intel_ring_advance(req, cs); + + return 0; +} + static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -2928,6 +2962,8 @@ static int init_cmd_table(struct intel_gvt *gvt) kfree(e); return -EEXIST; } + if (cmd_info[i].opcode == OP_MI_NOOP) + mi_noop_index = i; INIT_HLIST_NODE(&e->hlist); add_cmd_entry(gvt, e); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.h b/drivers/gpu/drm/i915/gvt/cmd_parser.h index 286703643002..1356803a0586 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.h +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.h @@ -46,4 +46,5 @@ int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload); int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); +int gvt_emit_pdps(struct intel_vgpu_workload *workload); #endif diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 
3019dbc39aef..b013d1be62db 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -228,7 +228,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } @@ -248,7 +248,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } @@ -268,7 +268,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |= PORT_CLK_SEL_LCPLL_810; } - vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_CTL_ENABLE; vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } @@ -314,15 +314,20 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) port->dpcd = NULL; } -static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, - int type, unsigned int resolution) +static int setup_virtual_monitor(struct intel_vgpu *vgpu, int port_num, + int type, unsigned int resolution, void *edid, bool is_dp) { struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); + int valid_extensions = 1; + struct edid *tmp_edid = NULL; if (WARN_ON(resolution >= GVT_EDID_NUM)) return -EINVAL; - port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); + if (edid) + valid_extensions += ((struct edid *)edid)->extensions; + port->edid = kzalloc(sizeof(*(port->edid)) + + valid_extensions * EDID_SIZE, GFP_KERNEL); if (!port->edid) return -ENOMEM; @@ -332,13 +337,30 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, return -ENOMEM; } - memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], - EDID_SIZE); + if (edid) + memcpy(port->edid->edid_block, edid, EDID_SIZE * valid_extensions); + else + memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], + EDID_SIZE); + + /* Sometimes the physical display will report the EDID with no + * digital bit set, which will cause the guest fail to enumerate + * the virtual HDMI monitor. So here we will set the digital + * bit and re-calculate the checksum. 
+ */ + tmp_edid = ((struct edid *)port->edid->edid_block); + if (!(tmp_edid->input & DRM_EDID_INPUT_DIGITAL)) { + tmp_edid->input += DRM_EDID_INPUT_DIGITAL; + tmp_edid->checksum -= DRM_EDID_INPUT_DIGITAL; + } + port->edid->data_valid = true; - memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); - port->dpcd->data_valid = true; - port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + if (is_dp) { + memcpy(port->dpcd->data, dpcd_fix_data, DPCD_HEADER_SIZE); + port->dpcd->data_valid = true; + port->dpcd->data[DPCD_SINK_COUNT] = 0x1; + } port->type = type; emulate_monitor_status_change(vgpu); @@ -442,6 +464,118 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt) mutex_unlock(&gvt->lock); } +static void intel_gvt_vblank_work(struct work_struct *w) +{ + struct intel_gvt_pipe_info *pipe_info = container_of(w, + struct intel_gvt_pipe_info, vblank_work); + struct intel_gvt *gvt = pipe_info->gvt; + struct intel_vgpu *vgpu; + int id; + + mutex_lock(&gvt->lock); + for_each_active_vgpu(gvt, vgpu, id) + emulate_vblank_on_pipe(vgpu, pipe_info->pipe_num); + mutex_unlock(&gvt->lock); +} + +#define BITS_PER_DOMAIN 4 +#define MAX_SCALERS_PER_DOMAIN 2 + +#define DOMAIN_SCALER_OWNER(owner, pipe, scaler) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_SCALERS_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (scaler)) & 0xf) + +int bxt_check_planes(struct intel_vgpu *vgpu, int pipe) +{ + int plane = 0; + bool ret = false; + + for (plane = 0; + plane < ((INTEL_INFO(vgpu->gvt->dev_priv)->num_sprites[pipe]) + 1); + plane++) { + if (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id) { + ret = true; + break; + } + } + return ret; +} + +void intel_gvt_init_pipe_info(struct intel_gvt *gvt) +{ + enum pipe pipe; + unsigned int scaler; + unsigned int domain_scaler_owner = i915_modparams.domain_scaler_owner; + struct drm_i915_private *dev_priv = gvt->dev_priv; + + for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) { + gvt->pipe_info[pipe].pipe_num = pipe; + gvt->pipe_info[pipe].gvt = gvt; + INIT_WORK(&gvt->pipe_info[pipe].vblank_work, + intel_gvt_vblank_work); + /* Each nibble represents domain id + * ids can be from 0-F. 0 for Dom0, 1,2,3...0xF for DomUs + * scaler_owner[i] holds the id of the domain that owns it, + * eg:0,1,2 etc + */ + for_each_universal_scaler(dev_priv, pipe, scaler) + gvt->pipe_info[pipe].scaler_owner[scaler] = + DOMAIN_SCALER_OWNER(domain_scaler_owner, pipe, scaler); + } +} + +bool gvt_emulate_hdmi = true; + +int setup_virtual_monitors(struct intel_vgpu *vgpu) +{ + struct intel_connector *connector = NULL; + struct drm_connector_list_iter conn_iter; + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe = 0; + int ret = 0; + int type = gvt_emulate_hdmi ? GVT_HDMI_A : GVT_DP_A; + int port = PORT_B; + + /* BXT have to use port A for HDMI to support 3 HDMI monitors */ + if (IS_BROXTON(dev_priv)) + port = PORT_A; + + drm_connector_list_iter_begin(&vgpu->gvt->dev_priv->drm, &conn_iter); + for_each_intel_connector_iter(connector, &conn_iter) { + if (connector->encoder->get_hw_state(connector->encoder, &pipe) + && connector->detect_edid) { + /* if no planes are allocated for this pipe, skip it */ + if (i915_modparams.avail_planes_per_pipe && + !bxt_check_planes(vgpu, pipe)) + continue; + /* Get (Dom0) port associated with current pipe. 
*/ + port = enc_to_dig_port( + &(connector->encoder->base))->base.port; + ret = setup_virtual_monitor(vgpu, port, + type, 0, connector->detect_edid, + !gvt_emulate_hdmi); + if (ret) + return ret; + type++; + port++; + } + } + drm_connector_list_iter_end(&conn_iter); + return 0; +} + +void clean_virtual_monitors(struct intel_vgpu *vgpu) +{ + int port = 0; + + for (port = PORT_A; port < I915_MAX_PORTS; port++) { + struct intel_vgpu_port *p = intel_vgpu_port(vgpu, port); + + if (p->edid) + clean_virtual_dp_monitor(vgpu, port); + } +} + /** * intel_vgpu_clean_display - clean vGPU virtual display emulation * @vgpu: a vGPU @@ -453,7 +587,9 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + clean_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -475,12 +611,14 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, - resolution); + if (IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) + return setup_virtual_monitors(vgpu); + else if (IS_SKYLAKE(dev_priv)) + return setup_virtual_monitor(vgpu, PORT_D, GVT_DP_D, + resolution, NULL, true); else - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, - resolution); + return setup_virtual_monitor(vgpu, PORT_B, GVT_DP_B, + resolution, NULL, true); } /** diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h index ea7c1c525b8c..e6d3912bc730 100644 --- a/drivers/gpu/drm/i915/gvt/display.h +++ b/drivers/gpu/drm/i915/gvt/display.h @@ -140,6 +140,7 @@ enum intel_vgpu_port_type { GVT_DP_B, GVT_DP_C, GVT_DP_D, + GVT_HDMI_A, GVT_HDMI_B, GVT_HDMI_C, GVT_HDMI_D, diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index 4b98539025c5..fb690a4f55a0 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c @@ -55,10 +55,6 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); return 0; } - if (edid->current_edid_read >= EDID_SIZE) { - gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); - return 0; - } if (!edid->edid_available) { gvt_vgpu_err("Reading EDID but EDID is not available!\n"); @@ -87,7 +83,7 @@ static inline int bxt_get_port_from_gmbus0(u32 gmbus0) else if (port_select == 2) port = PORT_C; else if (port_select == 3) - port = PORT_D; + port = PORT_A; return port; } @@ -452,6 +448,8 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, u32 value = *(u32 *)p_data; int aux_data_for_write = 0; int reg = get_aux_ch_reg(offset); + uint8_t rxbuf[20]; + size_t rxsize; if (reg != AUX_CH_CTL) { vgpu_vreg(vgpu, offset) = value; @@ -459,6 +457,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu, } msg_length = AUX_CTL_MSG_LENGTH(value); + for (rxsize = 0; rxsize < msg_length; rxsize += 4) + intel_dp_unpack_aux(vgpu_vreg(vgpu, offset + 4 + rxsize), + rxbuf + rxsize, msg_length - rxsize); // check the msg in DATA register. 
 	msg = vgpu_vreg(vgpu, offset + 4);
 	addr = (msg >> 8) & 0xffff;
@@ -498,12 +499,13 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
 			}
 		}
 	} else if ((op & 0x1) == GVT_AUX_I2C_WRITE) {
-		/* TODO
-		 * We only support EDID reading from I2C_over_AUX. And
-		 * we do not expect the index mode to be used. Right now
-		 * the WRITE operation is ignored. It is good enough to
-		 * support the gfx driver to do EDID access.
+		/* We only support EDID reading from I2C_over_AUX.
+		 * But if the EDID has extension blocks, we use this write
+		 * operation to set the block starting address.
 		 */
+		if (addr == EDID_ADDR) {
+			i2c_edid->current_edid_read = rxbuf[4];
+		}
 	} else {
 		if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
 			return;
diff --git a/drivers/gpu/drm/i915/gvt/edid.h b/drivers/gpu/drm/i915/gvt/edid.h
index f6dfc8b795ec..11a75d69062d 100644
--- a/drivers/gpu/drm/i915/gvt/edid.h
+++ b/drivers/gpu/drm/i915/gvt/edid.h
@@ -48,7 +48,7 @@ struct intel_vgpu_edid_data {
 	bool data_valid;
-	unsigned char edid_block[EDID_SIZE];
+	unsigned char edid_block[0];
 };
 
 enum gmbus_cycle_type {
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 481896fb712a..7c43c916b3d8 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -37,6 +37,7 @@
 #include "i915_drv.h"
 #include "gvt.h"
 #include "i915_pvinfo.h"
+#include "fb_decoder.h"
 
 #define PRIMARY_FORMAT_NUM	16
 struct pixel_format {
@@ -266,11 +267,12 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		(_PRI_PLANE_STRIDE_MASK >> 6) :
 			_PRI_PLANE_STRIDE_MASK, plane->bpp);
 
-	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
-		_PIPE_H_SRCSZ_SHIFT;
+	plane->width = vgpu_vreg_t(vgpu, PLANE_SIZE(pipe, PLANE_PRIMARY)) &
+		_PLANE_SIZE_WIDTH_MASK;
+	plane->width += 1;
-	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
-			 _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
+	plane->height = (vgpu_vreg_t(vgpu, PLANE_SIZE(pipe, PLANE_PRIMARY)) &
+			_PLANE_SIZE_HEIGHT_MASK) >> _PLANE_SIZE_HEIGHT_SHIFT;
 	plane->height += 1;	/* raw height is one minus the real value */
 
 	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
@@ -511,3 +513,70 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 
 	return 0;
 }
+
+/**
+ * intel_vgpu_decode_fb_format - decode framebuffer information from raw vMMIO
+ * @gvt: GVT device
+ * @id: guest domain ID
+ * @fb: frame buffer information of the guest
+ *
+ * This function is called to query the frame buffer format, so that GL can
+ * display the guest fb in Dom0.
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_decode_fb_format(struct intel_gvt *gvt, int id,
+	struct intel_vgpu_fb_format *fb)
+{
+	int i;
+	struct intel_vgpu *vgpu = NULL;
+	int ret = 0;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+
+	if (!fb)
+		return -EINVAL;
+
+	/* TODO: use fine-grained refcnt later */
+	mutex_lock(&gvt->lock);
+
+	for_each_active_vgpu(gvt, vgpu, i)
+		if (vgpu->id == id)
+			break;
+
+	if (!vgpu) {
+		gvt_err("Invalid vgpu ID (%d)\n", id);
+		mutex_unlock(&gvt->lock);
+		return -ENODEV;
+	}
+
+	for (i = 0; i < I915_MAX_PIPES; i++) {
+		struct intel_vgpu_pipe_format *pipe = &fb->pipes[i];
+		u32 ddi_func_ctl = vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(i));
+
+		if (!(ddi_func_ctl & TRANS_DDI_FUNC_ENABLE)) {
+			pipe->ddi_port = DDI_PORT_NONE;
+		} else {
+			u32 port = (ddi_func_ctl & TRANS_DDI_PORT_MASK) >>
+						TRANS_DDI_PORT_SHIFT;
+			if (port <= DDI_PORT_E)
+				pipe->ddi_port = port;
+			else
+				pipe->ddi_port = DDI_PORT_NONE;
+		}
+
+		ret |= intel_vgpu_decode_primary_plane(vgpu, &pipe->primary);
+		ret |= intel_vgpu_decode_sprite_plane(vgpu, &pipe->sprite);
+		ret |= intel_vgpu_decode_cursor_plane(vgpu, &pipe->cursor);
+
+		if (ret) {
+			gvt_err("Decode format error for pipe(%d)\n", i);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	mutex_unlock(&gvt->lock);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 60c155085029..51626759534b 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -50,6 +50,10 @@
 #define	_PRI_PLANE_Y_OFF_SHIFT	16
 #define	_PRI_PLANE_Y_OFF_MASK	(0xfff << _PRI_PLANE_Y_OFF_SHIFT)
 
+#define	_PLANE_SIZE_HEIGHT_SHIFT	16
+#define	_PLANE_SIZE_HEIGHT_MASK	(0xfff << _PLANE_SIZE_HEIGHT_SHIFT)
+#define	_PLANE_SIZE_WIDTH_MASK	0x1fff
+
 #define	_CURSOR_MODE	0x3f
 #define	_CURSOR_ALPHA_FORCE_SHIFT	8
 #define	_CURSOR_ALPHA_FORCE_MASK	(0x3 << _CURSOR_ALPHA_FORCE_SHIFT)
@@ -166,4 +170,8 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	struct intel_vgpu_sprite_plane_format *plane);
 
+extern
+int intel_vgpu_decode_fb_format(struct intel_gvt *pdev, int vmid,
+	struct intel_vgpu_fb_format *fb);
+
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 4ac18b447247..f0d30237c988 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -199,6 +199,7 @@ static int verify_firmware(struct intel_gvt *gvt,
 
 #define GVT_FIRMWARE_PATH "i915/gvt"
 
+bool disable_gvt_fw_loading = true;
 /**
  * intel_gvt_load_firmware - load GVT firmware
  * @gvt: intel gvt device
@@ -216,27 +217,30 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
 	void *mem;
 	int ret;
 
-	path = kmalloc(PATH_MAX, GFP_KERNEL);
-	if (!path)
-		return -ENOMEM;
-
 	mem = kmalloc(info->cfg_space_size, GFP_KERNEL);
-	if (!mem) {
-		kfree(path);
+	if (!mem)
 		return -ENOMEM;
-	}
 	firmware->cfg_space = mem;
 
 	mem = kmalloc(info->mmio_size, GFP_KERNEL);
 	if (!mem) {
-		kfree(path);
 		kfree(firmware->cfg_space);
 		return -ENOMEM;
 	}
 	firmware->mmio = mem;
 
+	if (disable_gvt_fw_loading)
+		goto expose_firmware;
+
+	path = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!path) {
+		kfree(firmware->mmio);
+		kfree(firmware->cfg_space);
+		return -ENOMEM;
+	}
+
 	sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
 		 GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
 		 pdev->revision);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 00aad8164dec..22c79df59d65 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -44,7 +44,7 @@ #define
gvt_vdbg_mm(fmt, args...) #endif -static bool enable_out_of_sync = false; +static bool enable_out_of_sync = true; static int preallocated_oos_pages = 8192; /* @@ -303,6 +303,18 @@ static inline int gtt_get_entry64(void *pt, return -EINVAL; if (hypervisor_access) { + if (vgpu->ge_cache_enable && vgpu->cached_guest_entry) { + if (index == 0) { + ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa, + vgpu->cached_guest_entry, + I915_GTT_PAGE_SIZE); + if (WARN_ON(ret)) + return ret; + } + e->val64 = *(vgpu->cached_guest_entry + index); + return 0; + } + ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa + (index << info->gtt_entry_size_shift), &e->val64, 8); @@ -1277,8 +1289,10 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) trace_spt_change(spt->vgpu->id, "born", spt, spt->guest_page.gfn, spt->shadow_page.type); + vgpu->ge_cache_enable = true; for_each_present_guest_entry(spt, &ge, i) { if (gtt_type_is_pt(get_next_pt_type(ge.type))) { + vgpu->ge_cache_enable = false; s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge); if (IS_ERR(s)) { ret = PTR_ERR(s); @@ -1300,6 +1314,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) goto fail; } } + vgpu->ge_cache_enable = false; return 0; fail: gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", @@ -1554,6 +1569,106 @@ int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu) return 0; } +static void free_ggtt_virtual_page_table(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu_gm *gm = &mm->vgpu->gm; + struct sg_table *st = gm->st; + struct scatterlist *sg; + + for (sg = st->sgl; sg; sg = __sg_next(sg)) { + if (sg_page(sg)) + __free_pages(sg_page(sg), get_order(sg->length)); + } + + sg_free_table(st); + kfree(st); + vunmap(mm->ggtt_mm.virtual_ggtt); + gm->st = NULL; +} + +/* + * Alloc virtual page table for guest ggtt. If ggtt pv enabled, the + * physical pages behind virtual page table is also mapped to guest, + * guest can update its pte entries directly to avoid trap. + */ +static void *alloc_ggtt_virtual_page_table(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + unsigned int page_count = gvt_ggtt_sz(vgpu->gvt) >> PAGE_SHIFT; + struct intel_vgpu_gm *gm = &vgpu->gm; + struct page **pages = NULL; + struct page *p; + unsigned int i; + void *vaddr = NULL; + int order; + struct sg_table *st; + struct scatterlist *sg; + struct sgt_iter sgt_iter; + unsigned int npages = page_count; + + /* + * page_table_entry_size is bigger than the size alloc_pages can + * allocate, We have to split it according to the PMD size (2M). + * Head page is kept in scatter list so that we can free them later. 
+ */ + order = get_order(1 << PMD_SHIFT); + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return ERR_PTR(-ENOMEM); + + if (sg_alloc_table(st, page_count, GFP_KERNEL)) { + kfree(st); + return ERR_PTR(-ENOMEM); + } + + sg = st->sgl; + st->nents = 0; + gm->st = st; + do { + p = alloc_pages(GFP_KERNEL, order); + if (!p) + goto fail; + gvt_dbg_mm("page=%p size=%ld\n", p, PAGE_SIZE << order); + sg_set_page(sg, p, PAGE_SIZE << order, 0); + st->nents++; + npages -= 1 << order; + if (!npages) { + sg_mark_end(sg); + break; + } + sg = __sg_next(sg); + } while (1); + + + /* keep all the pages for vmap */ + pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto fail; + + i = 0; + for_each_sgt_page(p, sgt_iter, st) + pages[i++] = p; + + WARN_ON(i != page_count); + + vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL); + if (!vaddr) { + gvt_vgpu_err("fail to vmap pages"); + goto fail; + } + kfree(pages); + return vaddr; + +fail: + sg_set_page(sg, NULL, 0, 0); + sg_mark_end(sg); + free_ggtt_virtual_page_table(mm); + kfree(pages); + gm->st = NULL; + return NULL; +} + /* * The heart of PPGTT shadow page table. */ @@ -1688,6 +1803,8 @@ static int ppgtt_handle_guest_write_page_table_bytes( index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; + /* Set guest ppgtt entry. Optional for KVMGT, but MUST for XENGT. */ + intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes); ppgtt_get_guest_entry(spt, &we, index); /* @@ -1740,6 +1857,32 @@ static int ppgtt_handle_guest_write_page_table_bytes( return 0; } +static void invalidate_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt *gtt = &gvt->gtt; + struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->ppgtt_mm.root_entry_type != + GTT_TYPE_PPGTT_ROOT_L4_ENTRY)) + return; + + i915_ppgtt_close(&mm->ppgtt_mm.ppgtt->vm); + i915_ppgtt_put(mm->ppgtt_mm.ppgtt); + + ppgtt_get_shadow_root_entry(mm, &se, 0); + if (!ops->test_present(&se)) + return; + trace_spt_guest_change(vgpu->id, "destroy root pointer", + NULL, se.type, se.val64, 0); + se.val64 = 0; + ppgtt_set_shadow_root_entry(mm, &se, 0); + + mm->ppgtt_mm.shadowed = false; +} + static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) { struct intel_vgpu *vgpu = mm->vgpu; @@ -1752,6 +1895,11 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) if (!mm->ppgtt_mm.shadowed) return; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) { + invalidate_mm_pv(mm); + return; + } + for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { ppgtt_get_shadow_root_entry(mm, &se, index); @@ -1769,6 +1917,33 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) mm->ppgtt_mm.shadowed = false; } +static int shadow_mm_pv(struct intel_vgpu_mm *mm) +{ + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_gvt *gvt = vgpu->gvt; + struct intel_gvt_gtt_entry se; + + if (WARN_ON(mm->ppgtt_mm.root_entry_type != + GTT_TYPE_PPGTT_ROOT_L4_ENTRY)) + return -EINVAL; + + mm->ppgtt_mm.ppgtt = i915_ppgtt_create(gvt->dev_priv, NULL); + if (IS_ERR(mm->ppgtt_mm.ppgtt)) { + gvt_vgpu_err("fail to create ppgtt for pdp 0x%llx\n", + px_dma(&mm->ppgtt_mm.ppgtt->pml4)); + return PTR_ERR(mm->ppgtt_mm.ppgtt); + } + + se.type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY; + se.val64 = px_dma(&mm->ppgtt_mm.ppgtt->pml4); + ppgtt_set_shadow_root_entry(mm, &se, 0); + + trace_spt_guest_change(vgpu->id, "populate root pointer", + NULL, se.type, se.val64, 0); + 
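+	/*
+	 * With PVMMIO_PPGTT_UPDATE the guest table is not shadowed entry by
+	 * entry: the host i915_hw_ppgtt created above serves as the shadow
+	 * root, and later updates arrive through the g2v pv_ppgtt
+	 * notifications (alloc/insert/clear) instead of per-write traps.
+	 */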
mm->ppgtt_mm.shadowed = true; + + return 0; +} static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) { @@ -1783,6 +1958,9 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) if (mm->ppgtt_mm.shadowed) return 0; + if (VGPU_PVMMIO(mm->vgpu) & PVMMIO_PPGTT_UPDATE) + return shadow_mm_pv(mm); + mm->ppgtt_mm.shadowed = true; for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { @@ -1885,7 +2063,6 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu, static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) { struct intel_vgpu_mm *mm; - unsigned long nr_entries; mm = vgpu_alloc_mm(vgpu); if (!mm) @@ -1893,10 +2070,17 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu) mm->type = INTEL_GVT_MM_GGTT; - nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; - mm->ggtt_mm.virtual_ggtt = - vzalloc(array_size(nr_entries, + mm->ggtt_mm.virtual_ggtt = alloc_ggtt_virtual_page_table(mm); + if (!mm->ggtt_mm.virtual_ggtt) { + unsigned long nr_entries; + + DRM_INFO("fail to alloc contiguous pages, fallback\n"); + nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; + mm->ggtt_mm.virtual_ggtt = + vzalloc(array_size(nr_entries, vgpu->gvt->device_info.gtt_entry_size)); + } + if (!mm->ggtt_mm.virtual_ggtt) { vgpu_free_mm(mm); return ERR_PTR(-ENOMEM); @@ -1925,7 +2109,17 @@ void _intel_vgpu_mm_release(struct kref *mm_ref) list_del(&mm->ppgtt_mm.lru_list); invalidate_ppgtt_mm(mm); } else { - vfree(mm->ggtt_mm.virtual_ggtt); + if (mm->ggtt_mm.virtual_ggtt) { + struct intel_vgpu *vgpu = mm->vgpu; + struct intel_vgpu_gm *gm = &vgpu->gm; + + if (gm->st) { + map_gttmmio(mm->vgpu, false); + free_ggtt_virtual_page_table(mm); + } else + vfree(mm->ggtt_mm.virtual_ggtt); + mm->ggtt_mm.virtual_ggtt = NULL; + } mm->ggtt_mm.last_partial_off = -1UL; } @@ -2426,6 +2620,13 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) intel_vgpu_reset_ggtt(vgpu, false); + vgpu->cached_guest_entry = kzalloc(I915_GTT_PAGE_SIZE, GFP_KERNEL); + if (!vgpu->cached_guest_entry) { + gvt_vgpu_err("fail to allocate cached_guest_entry page\n"); + return -ENOMEM; + } + vgpu->ge_cache_enable = false; + return create_scratch_page_tree(vgpu); } @@ -2468,6 +2669,7 @@ void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) { intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_destroy_ggtt_mm(vgpu); + kfree(vgpu->cached_guest_entry); release_scratch_page_tree(vgpu); } @@ -2763,3 +2965,431 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) intel_vgpu_destroy_all_ppgtt_mm(vgpu); intel_vgpu_reset_ggtt(vgpu, true); } + +int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("alloc_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]); + ret = -EINVAL; + } else { + ret = mm->ppgtt_mm.ppgtt->vm.allocate_va_range( + &mm->ppgtt_mm.ppgtt->vm, + pv_ppgtt->start, pv_ppgtt->length); + if (ret) + gvt_vgpu_err("failed to alloc for pdp %llx\n", pdps[0]); + } + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + 
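+	/*
+	 * pv_ppgtt lives in the page shared with the guest: pdp selects the
+	 * target mm, start/length give the GVA range to operate on.  A
+	 * guest-side sketch of the protocol (illustrative, not verbatim):
+	 *
+	 *	shared_page->pv_ppgtt.pdp    = pml4_pdp;
+	 *	shared_page->pv_ppgtt.start  = va_start;
+	 *	shared_page->pv_ppgtt.length = va_length;
+	 *	I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_CLEAR);
+	 */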
struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("clear_4lvl pdp=%llx start=%llx length=%llx\n", + pv_ppgtt->pdp, pv_ppgtt->start, + pv_ppgtt->length); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("failed to find mm for pdp 0x%llx\n", pdps[0]); + ret = -EINVAL; + } else { + mm->ppgtt_mm.ppgtt->vm.clear_range( + &mm->ppgtt_mm.ppgtt->vm, + pv_ppgtt->start, pv_ppgtt->length); + } + + return ret; +} + +#define GEN8_PML4E_SIZE (1UL << GEN8_PML4E_SHIFT) +#define GEN8_PML4E_SIZE_MASK (~(GEN8_PML4E_SIZE - 1)) +#define GEN8_PDPE_SIZE (1UL << GEN8_PDPE_SHIFT) +#define GEN8_PDPE_SIZE_MASK (~(GEN8_PDPE_SIZE - 1)) +#define GEN8_PDE_SIZE (1UL << GEN8_PDE_SHIFT) +#define GEN8_PDE_SIZE_MASK (~(GEN8_PDE_SIZE - 1)) + +#define pml4_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PML4E_SIZE) & GEN8_PML4E_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pdp_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDPE_SIZE) & GEN8_PDPE_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +#define pd_addr_end(addr, end) \ +({ unsigned long __boundary = \ + ((addr) + GEN8_PDE_SIZE) & GEN8_PDE_SIZE_MASK; \ + (__boundary < (end)) ? __boundary : (end); \ +}) + +struct ppgtt_walk { + unsigned long *mfns; + int mfn_index; + unsigned long *pt; +}; + +static int walk_pt_range(struct intel_vgpu *vgpu, u64 pt, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long start_index, end_index; + int ret; + int i; + unsigned long mfn, gfn; + + start_index = gma_ops->gma_to_pte_index(start); + end_index = ((end - start) >> PAGE_SHIFT) + start_index; + + gvt_dbg_mm("%s: %llx start=%llx end=%llx start_index=%lx end_index=%lx mfn_index=%x\n", + __func__, pt, start, end, + start_index, end_index, walk->mfn_index); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pt & PAGE_MASK) + (start_index << info->gtt_entry_size_shift), + walk->pt + start_index, + (end_index - start_index) << info->gtt_entry_size_shift); + if (ret) { + gvt_vgpu_err("fail to read gpa %llx\n", pt); + return ret; + } + + for (i = start_index; i < end_index; i++) { + gfn = walk->pt[i] >> PAGE_SHIFT; + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); + return -ENXIO; + } + walk->mfns[walk->mfn_index++] = mfn << PAGE_SHIFT; + } + + return 0; +} + + +static int walk_pd_range(struct intel_vgpu *vgpu, u64 pd, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pt, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pde_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pd & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pt, 8); + if (ret) + return ret; + next = pd_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pd, start, end, next); + walk_pt_range(vgpu, pt, start, next, walk); + + start = next; + } while (start != end); + + return ret; +} + + +static int walk_pdp_range(struct intel_vgpu *vgpu, u64 pdp, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = 
&vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pd, next; + int ret = 0; + + do { + index = gma_ops->gma_to_l4_pdp_index(start); + + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pdp & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pd, 8); + if (ret) + return ret; + next = pdp_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pdp, start, end, next); + + walk_pd_range(vgpu, pd, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + + +static int walk_pml4_range(struct intel_vgpu *vgpu, u64 pml4, + u64 start, u64 end, struct ppgtt_walk *walk) +{ + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + struct intel_gvt_gtt_gma_ops *gma_ops = vgpu->gvt->gtt.gma_ops; + unsigned long index; + u64 pdp, next; + int ret = 0; + + do { + index = gma_ops->gma_to_pml4_index(start); + ret = intel_gvt_hypervisor_read_gpa(vgpu, + (pml4 & PAGE_MASK) + (index << + info->gtt_entry_size_shift), &pdp, 8); + if (ret) + return ret; + next = pml4_addr_end(start, end); + gvt_dbg_mm("%s: %llx start=%llx end=%llx next=%llx\n", + __func__, pml4, start, end, next); + + walk_pdp_range(vgpu, pdp, start, next, walk); + start = next; + } while (start != end); + + return ret; +} + +int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu, + int page_table_level) +{ + struct pv_ppgtt_update *pv_ppgtt = &vgpu->mmio.shared_page->pv_ppgtt; + struct intel_vgpu_mm *mm; + u64 pdps[4] = {pv_ppgtt->pdp, 0, 0, 0}; + int ret = 0; + u64 start = pv_ppgtt->start; + u64 length = pv_ppgtt->length; + struct sg_table st; + struct scatterlist *sg = NULL; + int num_pages = length >> PAGE_SHIFT; + struct i915_vma vma; + struct ppgtt_walk walk; + int i; + + if (WARN_ON(page_table_level != 4)) + return -EINVAL; + + gvt_dbg_mm("insert_4lvl pml4=%llx start=%llx length=%llx cache=%x\n", + pv_ppgtt->pdp, start, length, pv_ppgtt->cache_level); + + mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps); + if (!mm) { + gvt_vgpu_err("fail to find mm for pml4 0x%llx\n", pdps[0]); + return -EINVAL; + } + + walk.mfn_index = 0; + walk.mfns = NULL; + walk.pt = NULL; + + walk.mfns = kmalloc_array(num_pages, + sizeof(unsigned long), GFP_KERNEL); + if (!walk.mfns) { + ret = -ENOMEM; + goto fail; + } + + walk.pt = (unsigned long *)__get_free_pages(GFP_KERNEL, 0); + if (!walk.pt) { + ret = -ENOMEM; + goto fail; + } + + if (sg_alloc_table(&st, num_pages, GFP_KERNEL)) { + ret = -ENOMEM; + goto fail; + } + + ret = walk_pml4_range(vgpu, pdps[0], start, start + length, &walk); + if (ret) + goto fail_free_sg; + + WARN_ON(num_pages != walk.mfn_index); + + for_each_sg(st.sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = walk.mfns[i]; + sg_dma_len(sg) = PAGE_SIZE; + } + + /* fake vma for insert call*/ + memset(&vma, 0, sizeof(vma)); + vma.node.start = start; + vma.pages = &st; + mm->ppgtt_mm.ppgtt->vm.insert_entries( + &mm->ppgtt_mm.ppgtt->vm, &vma, + pv_ppgtt->cache_level, 0); + +fail_free_sg: + sg_free_table(&st); +fail: + kfree(walk.mfns); + free_page((unsigned long)walk.pt); + + return ret; +} + +static void validate_ggtt_range(struct intel_vgpu *vgpu, + u64 *start, u64 *length) +{ + u64 end; + + if (WARN_ON(*start > vgpu->gvt->dev_priv->ggtt.vm.total || + *length > vgpu->gvt->dev_priv->ggtt.vm.total)) { + *length = 0; + return; + } + + end = *start + *length - 1; + + if (*start >= vgpu_aperture_gmadr_base(vgpu) && + end <= vgpu_aperture_gmadr_end(vgpu)) + return; + + if 
(*start >= vgpu_hidden_gmadr_base(vgpu) &&
+	    end <= vgpu_hidden_gmadr_end(vgpu))
+		return;
+
+	/* handle the cases with invalid ranges */
+	WARN_ON(1);
+
+	/* start is in aperture range, end is after aperture range */
+	if (*start >= vgpu_aperture_gmadr_base(vgpu) &&
+	    *start <= vgpu_aperture_gmadr_end(vgpu)) {
+		*length = vgpu_aperture_gmadr_end(vgpu) - *start + 1;
+		return;
+	}
+
+	/* start is before aperture range, end is in aperture range */
+	if (end >= vgpu_aperture_gmadr_base(vgpu) &&
+	    end <= vgpu_aperture_gmadr_end(vgpu)) {
+		*start = vgpu_aperture_gmadr_base(vgpu);
+		return;
+	}
+
+	/* start is in hidden range, end is after hidden range */
+	if (*start >= vgpu_hidden_gmadr_base(vgpu) &&
+	    *start <= vgpu_hidden_gmadr_end(vgpu)) {
+		*length = vgpu_hidden_gmadr_end(vgpu) - *start + 1;
+		return;
+	}
+
+	/* start is before hidden range, end is in hidden range */
+	if (end >= vgpu_hidden_gmadr_base(vgpu) &&
+	    end <= vgpu_hidden_gmadr_end(vgpu)) {
+		*start = vgpu_hidden_gmadr_base(vgpu);
+		return;
+	}
+
+	/* neither start nor end is in a valid range */
+	*length = 0;
+}
+
+int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu)
+{
+	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
+	struct gvt_shared_page *shared_page = vgpu->mmio.shared_page;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	u64 start = shared_page->pv_ggtt.start;
+	u64 num_entries = shared_page->pv_ggtt.length;
+	u32 cache_level = shared_page->pv_ggtt.cache_level;
+	u64 length = num_entries << PAGE_SHIFT;
+	u64 *vaddr = gtt->ggtt_mm->ggtt_mm.virtual_ggtt;
+	u64 gtt_entry_index;
+	u64 gtt_entry;
+	unsigned long mfn;
+	struct i915_vma vma;
+	struct sg_table st;
+	struct scatterlist *sg = NULL;
+	int ret = 0;
+	int i;
+
+	gvt_dbg_mm("ggtt_insert: start=%llx length=%llx cache=%x\n",
+			start, length, cache_level);
+	validate_ggtt_range(vgpu, &start, &length);
+	if (length == 0)
+		return 0;
+
+	num_entries = length >> PAGE_SHIFT;
+
+	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
+		return -ENOMEM;
+
+	for_each_sg(st.sgl, sg, num_entries, i) {
+		gtt_entry_index = (start >> PAGE_SHIFT) + i;
+		gtt_entry = vaddr[gtt_entry_index];
+		mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu,
+				gtt_entry >> PAGE_SHIFT);
+		if (mfn == INTEL_GVT_INVALID_ADDR) {
+			gvt_vgpu_err("fail to translate gfn: 0x%llx\n",
+					gtt_entry >> PAGE_SHIFT);
+			ret = -ENXIO;
+			goto fail;
+		}
+		sg->offset = 0;
+		sg->length = PAGE_SIZE;
+		sg_dma_address(sg) = mfn << PAGE_SHIFT;
+		sg_dma_len(sg) = PAGE_SIZE;
+	}
+
+	/* fake vma for the insert call */
+	memset(&vma, 0, sizeof(vma));
+	vma.node.start = start;
+	vma.pages = &st;
+	ggtt->vm.insert_entries(&ggtt->vm, &vma, cache_level, 0);
+
+fail:
+	sg_free_table(&st);
+	return ret;
+}
+
+int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu)
+{
+	struct gvt_shared_page *shared_page = vgpu->mmio.shared_page;
+	u64 start = shared_page->pv_ggtt.start;
+	u64 length = shared_page->pv_ggtt.length;
+	struct i915_ggtt *ggtt = &vgpu->gvt->dev_priv->ggtt;
+
+	gvt_dbg_mm("ggtt_clear: start=%llx length=%llx\n",
+			start, length);
+	validate_ggtt_range(vgpu, &start, &length);
+	if (length == 0)
+		return 0;
+
+	ggtt->vm.clear_range(&ggtt->vm, start, length);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 7a9b36176efb..6c0d3bdcaee4 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -131,7 +131,7 @@ enum intel_gvt_mm_type {
 	INTEL_GVT_MM_PPGTT,
 };
 
-#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES
+#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES struct intel_vgpu_mm { enum intel_gvt_mm_type type; @@ -154,6 +154,7 @@ struct intel_vgpu_mm { struct list_head list; struct list_head lru_list; + struct i915_hw_ppgtt *ppgtt; } ppgtt_mm; struct { void *virtual_ggtt; @@ -198,6 +199,9 @@ struct intel_vgpu_gtt { struct list_head oos_page_list_head; struct list_head post_shadow_list_head; struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX]; + + /* indicate whether the PV mapped is enabled for ggtt */ + bool ggtt_pv_mapped; }; extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); @@ -272,4 +276,17 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); +int intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ppgtt_clear_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ppgtt_insert_4lvl(struct intel_vgpu *vgpu, + int page_table_level); + +int intel_vgpu_g2v_pv_ggtt_insert(struct intel_vgpu *vgpu); + +int intel_vgpu_g2v_pv_ggtt_clear(struct intel_vgpu *vgpu); + #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 46c8b720e336..976bb6e223d2 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -44,6 +44,7 @@ struct intel_gvt_host intel_gvt_host; static const char * const supported_hypervisors[] = { [INTEL_GVT_HYPERVISOR_XEN] = "XEN", [INTEL_GVT_HYPERVISOR_KVM] = "KVM", + [INTEL_GVT_HYPERVISOR_ACRN] = "ACRN", }; static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, @@ -221,6 +222,11 @@ int intel_gvt_init_host(void) symbol_get(kvmgt_mpt), "kvmgt"); intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM; #endif + /* not in Xen. Try ACRN */ + intel_gvt_host.mpt = try_then_request_module( + symbol_get(acrn_gvt_mpt), "acrn_gvt"); + intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_ACRN; + printk("acrngt %s\n", intel_gvt_host.mpt?"found":"not found"); } /* Fail to load MPT modules - bail out */ @@ -242,6 +248,8 @@ static void init_device_info(struct intel_gvt *gvt) info->max_support_vgpus = 8; info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE; info->mmio_size = 2 * 1024 * 1024; + /* order of mmio size. assert(2^order == mmio_size) */ + info->mmio_size_order = 9; info->mmio_bar = 0; info->gtt_start_offset = 8 * 1024 * 1024; info->gtt_entry_size = 8; @@ -301,6 +309,51 @@ static int init_service_thread(struct intel_gvt *gvt) return 0; } +void intel_gvt_init_pipe_info(struct intel_gvt *gvt); + +/* + * When enabling multi-plane in DomU, an issue is that the PLANE_BUF_CFG + * register cannot be updated dynamically, since Dom0 has no idea of the + * plane information of DomU's planes, so here we statically allocate the + * ddb entries for all the possible enabled planes. 
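+ *
+ * A rough example with assumed numbers: on a part with an 896-block DDB,
+ * 4 blocks are reserved for the bypass path; with two active CRTCs each
+ * pipe then receives 446 blocks, of which the last 8 go to the cursor
+ * and the remaining 438 are split evenly among that pipe's universal
+ * planes.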
+ */ +void intel_gvt_allocate_ddb(struct intel_gvt *gvt, + struct skl_ddb_allocation *ddb, unsigned int active_crtcs) +{ + struct drm_i915_private *dev_priv = gvt->dev_priv; + unsigned int pipe_size, ddb_size, plane_size, plane_cnt; + u16 start, end; + enum pipe pipe; + enum plane_id plane; + int i = 0; + int num_active = hweight32(active_crtcs); + + if (WARN_ON(!num_active)) + return; + + ddb_size = INTEL_INFO(dev_priv)->ddb_size; + ddb_size -= 4; /* 4 blocks for bypass path allocation */ + pipe_size = ddb_size / num_active; + + memset(ddb, 0, sizeof(*ddb)); + for_each_pipe_masked(dev_priv, pipe, active_crtcs) { + start = pipe_size * (i++); + end = start + pipe_size; + ddb->plane[pipe][PLANE_CURSOR].start = end - 8; + ddb->plane[pipe][PLANE_CURSOR].end = end; + + plane_cnt = (INTEL_INFO(dev_priv)->num_sprites[pipe] + 1); + plane_size = (pipe_size - 8) / plane_cnt; + + for_each_universal_plane(dev_priv, pipe, plane) { + ddb->plane[pipe][plane].start = start + + (plane * (pipe_size - 8) / plane_cnt); + ddb->plane[pipe][plane].end = + ddb->plane[pipe][plane].start + plane_size; + } + } +} + /** * intel_gvt_clean_device - clean a GVT device * @gvt: intel gvt device @@ -336,6 +389,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) dev_priv->gvt = NULL; } +#define BITS_PER_DOMAIN 4 +#define MAX_PLANES_PER_DOMAIN 4 +#define DOMAIN_PLANE_OWNER(owner, pipe, plane) \ + ((((owner) >> (pipe) * BITS_PER_DOMAIN * MAX_PLANES_PER_DOMAIN) >> \ + BITS_PER_DOMAIN * (plane)) & 0xf) + /** * intel_gvt_init_device - initialize a GVT device * @dev_priv: drm i915 private data @@ -421,6 +480,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_types; } + intel_gvt_init_pipe_info(gvt); + ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt, &intel_gvt_ops); if (ret) { @@ -440,8 +501,28 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) if (ret) gvt_err("debugfs registeration failed, go on.\n"); - gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; + + if (i915_modparams.avail_planes_per_pipe) { + unsigned long long domain_plane_owners; + int plane; + enum pipe pipe; + + /* + * Each nibble represents domain id + * ids can be from 0-F. 
0 for Dom0, 1,2,3...0xF for DomUs + * plane_owner[i] holds the id of the domain that owns it,eg:0,1,2 etc + */ + domain_plane_owners = i915_modparams.domain_plane_owners; + for_each_pipe(dev_priv, pipe) { + for_each_universal_plane(dev_priv, pipe, plane) { + gvt->pipe_info[pipe].plane_owner[plane] = + DOMAIN_PLANE_OWNER(domain_plane_owners, pipe, plane); + } + } + } + + gvt_dbg_core("gvt device initialization is done\n"); return 0; out_clean_types: @@ -468,6 +549,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) return ret; } +int gvt_dom0_ready(struct drm_i915_private *dev_priv) +{ + if (!intel_gvt_active(dev_priv)) + return 0; + + return intel_gvt_hypervisor_dom0_ready(); +} + #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) MODULE_SOFTDEP("pre: kvmgt"); #endif diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 31f6cdbe5c42..bbf2489f251d 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -55,6 +55,7 @@ enum { INTEL_GVT_HYPERVISOR_XEN = 0, INTEL_GVT_HYPERVISOR_KVM, + INTEL_GVT_HYPERVISOR_ACRN, }; struct intel_gvt_host { @@ -70,6 +71,7 @@ struct intel_gvt_device_info { u32 max_support_vgpus; u32 cfg_space_size; u32 mmio_size; + u32 mmio_size_order; u32 mmio_bar; unsigned long msi_cap_offset; u32 gtt_start_offset; @@ -83,6 +85,7 @@ struct intel_gvt_device_info { struct intel_vgpu_gm { u64 aperture_sz; u64 hidden_sz; + struct sg_table *st; struct drm_mm_node low_gm_node; struct drm_mm_node high_gm_node; }; @@ -99,6 +102,7 @@ struct intel_vgpu_fence { struct intel_vgpu_mmio { void *vreg; void *sreg; + struct gvt_shared_page *shared_page; }; #define INTEL_GVT_MAX_BAR_NUM 4 @@ -182,7 +186,7 @@ struct intel_vgpu { * scheduler structure. So below 2 vgpu data are protected * by sched_lock, not vgpu_lock. */ - void *sched_data; + void *sched_data[I915_NUM_ENGINES]; struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; @@ -232,6 +236,9 @@ struct intel_vgpu { struct completion vblank_done; u32 scan_nonprivbb; + + unsigned long long *cached_guest_entry; + bool ge_cache_enable; }; /* validating GM healthy status*/ @@ -291,6 +298,7 @@ struct intel_gvt_firmware { }; #define NR_MAX_INTEL_VGPU_TYPES 20 + struct intel_vgpu_type { char name[16]; unsigned int avail_instance; @@ -301,6 +309,15 @@ struct intel_vgpu_type { enum intel_vgpu_edid resolution; }; +struct intel_gvt_pipe_info { + enum pipe pipe_num; + int owner; + struct intel_gvt *gvt; + struct work_struct vblank_work; + int plane_owner[I915_MAX_PLANES]; + int scaler_owner[SKL_NUM_SCALERS]; +}; + struct intel_gvt { /* GVT scope lock, protect GVT itself, and all resource currently * not yet protected by special locks(vgpu and scheduler lock). 
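The domain_plane_owners nibble layout described above can be decoded with a
couple of shifts; the stand-alone sketch below mirrors DOMAIN_PLANE_OWNER
from gvt.c (the helper name and demo encoding are illustrative, not part of
the patch):

#include <stdio.h>

/* Mirrors DOMAIN_PLANE_OWNER in gvt.c: 4 bits per plane, 4 planes per pipe. */
#define BITS_PER_DOMAIN		4
#define MAX_PLANES_PER_DOMAIN	4

static unsigned int plane_owner(unsigned long long owners,
				unsigned int pipe, unsigned int plane)
{
	return (owners >> (pipe * BITS_PER_DOMAIN * MAX_PLANES_PER_DOMAIN)
		       >> (plane * BITS_PER_DOMAIN)) & 0xf;
}

int main(void)
{
	/* Demo encoding: pipe A planes 1 and 2 go to DomU 1, the rest stay with Dom0. */
	unsigned long long owners = 0x110ULL;
	unsigned int pipe, plane;

	for (pipe = 0; pipe < 3; pipe++)
		for (plane = 0; plane < MAX_PLANES_PER_DOMAIN; plane++)
			printf("pipe %u plane %u -> domain %u\n", pipe, plane,
			       plane_owner(owners, pipe, plane));
	return 0;
}

Printing the full table this way is a quick sanity check before handing a
packed value to the module parameter.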
@@ -334,6 +351,10 @@ struct intel_gvt { */ unsigned long service_request; + struct intel_gvt_pipe_info pipe_info[I915_MAX_PIPES]; + + struct skl_ddb_allocation ddb; + struct { struct engine_mmio *mmio; int ctx_mmio_count[I915_NUM_ENGINES]; @@ -458,6 +479,11 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu, idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \ for_each_if(vgpu->active) +#define for_each_universal_scaler(__dev_priv, __pipe, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_scalers[(__pipe)] + 1; \ + (__s)++) + static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, u32 offset, u32 val, bool low) { @@ -530,6 +556,8 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, bool primary); void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); +int set_pvmmio(struct intel_vgpu *vgpu, bool map); + int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes); @@ -543,6 +571,8 @@ static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar) PCI_BASE_ADDRESS_MEM_MASK; } +int map_gttmmio(struct intel_vgpu *vgpu, bool map); + void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); int intel_vgpu_init_opregion(struct intel_vgpu *vgpu); int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa); @@ -579,6 +609,9 @@ struct intel_gvt_ops { unsigned int); }; +int gvt_dom0_ready(struct drm_i915_private *dev_priv); +void intel_gvt_allocate_ddb(struct intel_gvt *gvt, + struct skl_ddb_allocation *ddb, unsigned int active_crtcs); enum { GVT_FAILSAFE_UNSUPPORTED_GUEST, diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 94c1089ecf59..0fc1fb37e1ef 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -413,27 +413,9 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, +static int mmio_write_empty(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - switch (offset) { - case 0xe651c: - case 0xe661c: - case 0xe671c: - case 0xe681c: - vgpu_vreg(vgpu, offset) = 1 << 17; - break; - case 0xe6c04: - vgpu_vreg(vgpu, offset) = 0x3; - break; - case 0xe6e1c: - vgpu_vreg(vgpu, offset) = 0x2f << 16; - break; - default: - return -EINVAL; - } - - read_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -441,18 +423,21 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { u32 data; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + struct intel_crtc *crtc = intel_get_crtc_for_pipe( + vgpu->gvt->dev_priv, pipe); write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (data & PIPECONF_ENABLE) + if (data & PIPECONF_ENABLE) { vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; - else + if (crtc) + drm_crtc_vblank_get(&crtc->base); + } else { vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; - /* vgpu_lock already hold by emulate mmio r/w */ - mutex_unlock(&vgpu->vgpu_lock); - intel_gvt_check_vblank_emulation(vgpu->gvt); - mutex_lock(&vgpu->vgpu_lock); + } + return 0; } @@ -530,6 +515,14 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu, return 0; } +static int pipe_dsl_mmio_read(struct intel_vgpu *vgpu, + unsigned int offset, void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); + return 
intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); +} + static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -785,6 +778,66 @@ static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); +static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes); + +static void pvmmio_update_plane_register(struct intel_vgpu *vgpu, + unsigned int pipe, unsigned int plane) +{ + struct pv_plane_update *pv_plane = &vgpu->mmio.shared_page->pv_plane; + + /* null function for PLANE_COLOR_CTL, PLANE_AUX_DIST, PLANE_AUX_OFFSET, + * and SKL_PS_PWR_GATE register trap + */ + + if (pv_plane->flags & PLANE_KEY_BIT) { + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYVAL(pipe, plane)), + &pv_plane->plane_key_val, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMAX(pipe, plane)), + &pv_plane->plane_key_max, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_KEYMSK(pipe, plane)), + &pv_plane->plane_key_msk, 4); + } + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_OFFSET(pipe, plane)), + &pv_plane->plane_offset, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_STRIDE(pipe, plane)), + &pv_plane->plane_stride, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_SIZE(pipe, plane)), + &pv_plane->plane_size, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_AUX_DIST(pipe, plane)), + &pv_plane->plane_aux_dist, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_AUX_OFFSET(pipe, plane)), + &pv_plane->plane_aux_offset, 4); + + if (pv_plane->flags & PLANE_SCALER_BIT) { + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_CTRL(pipe, plane)), + &pv_plane->ps_ctrl, 4); + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_WIN_POS(pipe, plane)), + &pv_plane->ps_win_ps, 4); + skl_ps_mmio_write(vgpu, + i915_mmio_reg_offset(SKL_PS_WIN_SZ(pipe, plane)), + &pv_plane->ps_win_sz, 4); + } + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_POS(pipe, plane)), + &pv_plane->plane_pos, 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_CTL(pipe, plane)), + &pv_plane->plane_ctl, 4); +} + static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, unsigned int reg) { @@ -1152,6 +1205,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { bool invalid_read = false; + int ret = 0; read_vreg(vgpu, offset, p_data, bytes); @@ -1166,8 +1220,27 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, _vgtif_reg(avail_rs.fence_num) + 4) invalid_read = true; break; + case _vgtif_reg(pv_mmio): + /* a remap happens from guest mmio read operation, the target reg offset + * is in the first DWORD of shared_page. 
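+	 *
+	 * Guest-side sketch: store the wanted offset in
+	 * shared_page->reg_addr (plain shared memory, so no trap), then
+	 * read pv_mmio; the dispatch below routes the access to the target
+	 * register's usual read handler, or to the default handler when
+	 * none is registered.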
+ */ + { + u32 reg = vgpu->mmio.shared_page->reg_addr; + struct intel_gvt_mmio_info *mmio; + + mmio = find_mmio_info(vgpu->gvt, rounddown(reg, 4)); + if (mmio) + ret = mmio->read(vgpu, reg, p_data, bytes); + else + ret = intel_vgpu_default_mmio_read(vgpu, reg, p_data, + bytes); + break; + } + case 0x78010: /* vgt_caps */ case 0x7881c: + case _vgtif_reg(scaler_owned): + case _vgtif_reg(enable_pvmmio): break; default: invalid_read = true; @@ -1177,7 +1250,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", offset, bytes, *(u32 *)p_data); vgpu->pv_notified = true; - return 0; + return ret; } static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) @@ -1198,6 +1271,18 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY: case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY: return intel_vgpu_put_ppgtt_mm(vgpu, pdps); + case VGT_G2V_PPGTT_L4_ALLOC: + return intel_vgpu_g2v_pv_ppgtt_alloc_4lvl(vgpu, 4); + case VGT_G2V_PPGTT_L4_INSERT: + return intel_vgpu_g2v_pv_ppgtt_insert_4lvl(vgpu, 4); + case VGT_G2V_PPGTT_L4_CLEAR: + return intel_vgpu_g2v_pv_ppgtt_clear_4lvl(vgpu, 4); + case VGT_G2V_GGTT_INSERT: + return intel_vgpu_g2v_pv_ggtt_insert(vgpu); + break; + case VGT_G2V_GGTT_CLEAR: + return intel_vgpu_g2v_pv_ggtt_clear(vgpu); + break; case VGT_G2V_EXECLIST_CONTEXT_CREATE: case VGT_G2V_EXECLIST_CONTEXT_DESTROY: case 1: /* Remove this in guest driver. */ @@ -1225,6 +1310,26 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready) return kobject_uevent_env(kobj, KOBJ_ADD, env); } +#define INTEL_GVT_PCI_BAR_GTTMMIO 0 +int set_pvmmio(struct intel_vgpu *vgpu, bool map) +{ + u64 start, end; + u64 val; + int ret; + + val = vgpu_cfg_space(vgpu)[PCI_BASE_ADDRESS_0]; + if (val & PCI_BASE_ADDRESS_MEM_TYPE_64) + start = *(u64 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + else + start = *(u32 *)(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_0); + + start &= ~GENMASK(3, 0); + end = start + vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size - 1; + + ret = intel_gvt_hypervisor_set_pvmmio(vgpu, start, end, map); + return ret; +} + static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1241,6 +1346,27 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, case _vgtif_reg(g2v_notify): ret = handle_g2v_notification(vgpu, data); break; + case _vgtif_reg(enable_pvmmio): + if (i915_modparams.enable_pvmmio) { + vgpu_vreg(vgpu, offset) = data & + i915_modparams.enable_pvmmio; + if (set_pvmmio(vgpu, !!vgpu_vreg(vgpu, offset))) { + vgpu_vreg(vgpu, offset) = 0; + break; + } + if (vgpu_vreg(vgpu, offset) & PVMMIO_GGTT_UPDATE) { + ret = map_gttmmio(vgpu, true); + if (ret) { + DRM_INFO("ggtt pv mode is off\n"); + vgpu_vreg(vgpu, offset) &= + ~PVMMIO_GGTT_UPDATE; + } + } + + } else { + vgpu_vreg(vgpu, offset) = 0; + } + break; /* add xhot and yhot to handled list to avoid error log */ case _vgtif_reg(cursor_x_hot): case _vgtif_reg(cursor_y_hot): @@ -1266,22 +1392,6 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -static int pf_write(struct intel_vgpu *vgpu, - unsigned int offset, void *p_data, unsigned int bytes) -{ - u32 val = *(u32 *)p_data; - - if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || - offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || - offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) { - WARN_ONCE(true, 
"VM(%d): guest is trying to scaling a plane\n", - vgpu->id); - return 0; - } - - return intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes); -} - static int power_well_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { @@ -1650,6 +1760,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset); struct intel_vgpu_execlist *execlist; u32 data = *(u32 *)p_data; + u32 *elsp_data = vgpu->mmio.shared_page->elsp_data; int ret = 0; if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) @@ -1657,16 +1768,23 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist = &vgpu->submission.execlist[ring_id]; - execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; - if (execlist->elsp_dwords.index == 3) { + if (VGPU_PVMMIO(vgpu) & PVMMIO_ELSP_SUBMIT) { + execlist->elsp_dwords.data[3] = elsp_data[0]; + execlist->elsp_dwords.data[2] = elsp_data[1]; + execlist->elsp_dwords.data[1] = elsp_data[2]; + execlist->elsp_dwords.data[0] = data; ret = intel_vgpu_submit_execlist(vgpu, ring_id); - if(ret) - gvt_vgpu_err("fail submit workload on ring %d\n", - ring_id); + } else { + execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data; + if (execlist->elsp_dwords.index == 3) + ret = intel_vgpu_submit_execlist(vgpu, ring_id); + ++execlist->elsp_dwords.index; + execlist->elsp_dwords.index &= 0x3; } - ++execlist->elsp_dwords.index; - execlist->elsp_dwords.index &= 0x3; + if (ret) + gvt_vgpu_err("fail submit workload on ring %d\n", ring_id); + return ret; } @@ -1909,9 +2027,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0xc4040), D_ALL); MMIO_D(DERRMR, D_ALL); - MMIO_D(PIPEDSL(PIPE_A), D_ALL); - MMIO_D(PIPEDSL(PIPE_B), D_ALL); - MMIO_D(PIPEDSL(PIPE_C), D_ALL); + MMIO_DH(PIPEDSL(PIPE_A), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_B), D_ALL, pipe_dsl_mmio_read, NULL); + MMIO_DH(PIPEDSL(PIPE_C), D_ALL, pipe_dsl_mmio_read, NULL); MMIO_D(PIPEDSL(_PIPE_EDP), D_ALL); MMIO_DH(PIPECONF(PIPE_A), D_ALL, NULL, pipeconf_mmio_write); @@ -1959,71 +2077,71 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x70098), D_ALL); MMIO_D(_MMIO(0x7009c), D_ALL); - MMIO_D(DSPCNTR(PIPE_A), D_ALL); - MMIO_D(DSPADDR(PIPE_A), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_A), D_ALL); - MMIO_D(DSPPOS(PIPE_A), D_ALL); - MMIO_D(DSPSIZE(PIPE_A), D_ALL); - MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_A), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(DSPCNTR(PIPE_B), D_ALL); - MMIO_D(DSPADDR(PIPE_B), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_B), D_ALL); - MMIO_D(DSPPOS(PIPE_B), D_ALL); - MMIO_D(DSPSIZE(PIPE_B), D_ALL); - MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_B), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(DSPCNTR(PIPE_C), D_ALL); - MMIO_D(DSPADDR(PIPE_C), D_ALL); - MMIO_D(DSPSTRIDE(PIPE_C), D_ALL); - MMIO_D(DSPPOS(PIPE_C), D_ALL); - MMIO_D(DSPSIZE(PIPE_C), D_ALL); - MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write); - MMIO_D(DSPOFFSET(PIPE_C), D_ALL); - MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL); - - MMIO_D(SPRCTL(PIPE_A), D_ALL); - MMIO_D(SPRLINOFF(PIPE_A), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_A), D_ALL); - MMIO_D(SPRPOS(PIPE_A), D_ALL); - MMIO_D(SPRSIZE(PIPE_A), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_A), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_A), D_ALL); - MMIO_DH(SPRSURF(PIPE_A), D_ALL, NULL, spr_surf_mmio_write); - 
MMIO_D(SPRKEYMAX(PIPE_A), D_ALL); - MMIO_D(SPROFFSET(PIPE_A), D_ALL); - MMIO_D(SPRSCALE(PIPE_A), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL); - - MMIO_D(SPRCTL(PIPE_B), D_ALL); - MMIO_D(SPRLINOFF(PIPE_B), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_B), D_ALL); - MMIO_D(SPRPOS(PIPE_B), D_ALL); - MMIO_D(SPRSIZE(PIPE_B), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_B), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_B), D_ALL); - MMIO_DH(SPRSURF(PIPE_B), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_B), D_ALL); - MMIO_D(SPROFFSET(PIPE_B), D_ALL); - MMIO_D(SPRSCALE(PIPE_B), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL); - - MMIO_D(SPRCTL(PIPE_C), D_ALL); - MMIO_D(SPRLINOFF(PIPE_C), D_ALL); - MMIO_D(SPRSTRIDE(PIPE_C), D_ALL); - MMIO_D(SPRPOS(PIPE_C), D_ALL); - MMIO_D(SPRSIZE(PIPE_C), D_ALL); - MMIO_D(SPRKEYVAL(PIPE_C), D_ALL); - MMIO_D(SPRKEYMSK(PIPE_C), D_ALL); - MMIO_DH(SPRSURF(PIPE_C), D_ALL, NULL, spr_surf_mmio_write); - MMIO_D(SPRKEYMAX(PIPE_C), D_ALL); - MMIO_D(SPROFFSET(PIPE_C), D_ALL); - MMIO_D(SPRSCALE(PIPE_C), D_ALL); - MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL); + MMIO_D(DSPCNTR(PIPE_A), D_BDW); + MMIO_D(DSPADDR(PIPE_A), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_A), D_BDW); + MMIO_D(DSPPOS(PIPE_A), D_BDW); + MMIO_D(DSPSIZE(PIPE_A), D_BDW); + MMIO_DH(DSPSURF(PIPE_A), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_A), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(DSPCNTR(PIPE_B), D_BDW); + MMIO_D(DSPADDR(PIPE_B), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_B), D_BDW); + MMIO_D(DSPPOS(PIPE_B), D_BDW); + MMIO_D(DSPSIZE(PIPE_B), D_BDW); + MMIO_DH(DSPSURF(PIPE_B), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_B), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(DSPCNTR(PIPE_C), D_BDW); + MMIO_D(DSPADDR(PIPE_C), D_BDW); + MMIO_D(DSPSTRIDE(PIPE_C), D_BDW); + MMIO_D(DSPPOS(PIPE_C), D_BDW); + MMIO_D(DSPSIZE(PIPE_C), D_BDW); + MMIO_DH(DSPSURF(PIPE_C), D_BDW, NULL, pri_surf_mmio_write); + MMIO_D(DSPOFFSET(PIPE_C), D_BDW); + MMIO_D(DSPSURFLIVE(PIPE_C), D_BDW); + + MMIO_D(SPRCTL(PIPE_A), D_BDW); + MMIO_D(SPRLINOFF(PIPE_A), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_A), D_BDW); + MMIO_D(SPRPOS(PIPE_A), D_BDW); + MMIO_D(SPRSIZE(PIPE_A), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_A), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_A), D_BDW); + MMIO_DH(SPRSURF(PIPE_A), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_A), D_BDW); + MMIO_D(SPROFFSET(PIPE_A), D_BDW); + MMIO_D(SPRSCALE(PIPE_A), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_A), D_BDW); + + MMIO_D(SPRCTL(PIPE_B), D_BDW); + MMIO_D(SPRLINOFF(PIPE_B), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_B), D_BDW); + MMIO_D(SPRPOS(PIPE_B), D_BDW); + MMIO_D(SPRSIZE(PIPE_B), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_B), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_B), D_BDW); + MMIO_DH(SPRSURF(PIPE_B), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_B), D_BDW); + MMIO_D(SPROFFSET(PIPE_B), D_BDW); + MMIO_D(SPRSCALE(PIPE_B), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_B), D_BDW); + + MMIO_D(SPRCTL(PIPE_C), D_BDW); + MMIO_D(SPRLINOFF(PIPE_C), D_BDW); + MMIO_D(SPRSTRIDE(PIPE_C), D_BDW); + MMIO_D(SPRPOS(PIPE_C), D_BDW); + MMIO_D(SPRSIZE(PIPE_C), D_BDW); + MMIO_D(SPRKEYVAL(PIPE_C), D_BDW); + MMIO_D(SPRKEYMSK(PIPE_C), D_BDW); + MMIO_DH(SPRSURF(PIPE_C), D_BDW, NULL, spr_surf_mmio_write); + MMIO_D(SPRKEYMAX(PIPE_C), D_BDW); + MMIO_D(SPROFFSET(PIPE_C), D_BDW); + MMIO_D(SPRSCALE(PIPE_C), D_BDW); + MMIO_D(SPRSURFLIVE(PIPE_C), D_BDW); MMIO_D(HTOTAL(TRANSCODER_A), D_ALL); MMIO_D(HBLANK(TRANSCODER_A), D_ALL); @@ -2229,12 +2347,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(PCH_PP_ON_DELAYS, D_ALL); MMIO_D(PCH_PP_OFF_DELAYS, D_ALL); 
- MMIO_DH(_MMIO(0xe651c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe661c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe671c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe681c), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe6c04), D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(_MMIO(0xe6e1c), D_ALL, dpy_reg_mmio_read, NULL); + MMIO_DH(_MMIO(0xe651c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe661c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe671c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe681c), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe6c04), D_ALL, NULL, mmio_write_empty); + MMIO_DH(_MMIO(0xe6e1c), D_ALL, NULL, mmio_write_empty); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK @@ -2804,6 +2922,126 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) return 0; } +static int skl_plane_surf_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + i915_reg_t reg_1ac = _MMIO(_REG_701AC(pipe, plane)); + int flip_event = SKL_FLIP_EVENT(pipe, plane); + + /* plane disable is not pv and it is indicated by value 0 */ + if (*(u32 *)p_data != 0 && VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_UPDATE) + pvmmio_update_plane_register(vgpu, pipe, plane); + + write_vreg(vgpu, offset, p_data, bytes); + vgpu_vreg_t(vgpu, reg_1ac) = vgpu_vreg(vgpu, offset); + + if ((vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + + set_bit(flip_event, vgpu->irq.flip_done_event[pipe]); + return 0; +} + +static int skl_plane_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + + write_vreg(vgpu, offset, p_data, bytes); + if ((vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE) && + (vgpu->gvt->pipe_info[pipe].plane_owner[plane] == vgpu->id)) { + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + } + return 0; +} + +static int pv_plane_wm_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + unsigned int pipe = SKL_PLANE_REG_TO_PIPE(offset); + unsigned int plane = SKL_PLANE_REG_TO_PLANE(offset); + struct pv_plane_wm_update *pv_plane_wm = + &vgpu->mmio.shared_page->pv_plane_wm; + int level; + + if (VGPU_PVMMIO(vgpu) & PVMMIO_PLANE_WM_UPDATE) { + for (level = 0; level <= pv_plane_wm->max_wm_level; level++) + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset( + PLANE_WM(pipe, plane, level)), + &pv_plane_wm->plane_wm_level[level], 4); + skl_plane_mmio_write(vgpu, + i915_mmio_reg_offset(PLANE_WM_TRANS(pipe, plane)), + &pv_plane_wm->plane_trans_wm_level, 4); + /* null function for PLANE_BUF_CFG and PLANE_NV12_BUF_CFG */ + } + return 0; +} + +#define MMIO_PIPES_SDH(prefix, plane, s, d, r, w) do { \ + int pipe; \ + for_each_pipe(dev_priv, pipe) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_SDH(prefix, s, d, r, w) do { \ + int pipe, plane; \ + for_each_pipe(dev_priv, pipe) \ + for_each_universal_plane(dev_priv, pipe, plane) \ + MMIO_F(prefix(pipe, plane), s, 0, 0, 0, d, r, w); \ +} while (0) + +#define MMIO_PLANES_DH(prefix, 
d, r, w) \ + MMIO_PLANES_SDH(prefix, 4, d, r, w) + +#define PLANE_WM_BASE(pipe, plane) _MMIO(_PLANE_WM_BASE(pipe, plane)) + +static int skl_ps_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + unsigned int pipe = SKL_PS_REG_TO_PIPE(offset); + unsigned int scaler = SKL_PS_REG_TO_SCALER(offset) - 1; + + if (pipe >= I915_MAX_PIPES || scaler >= SKL_NUM_SCALERS || + vgpu->gvt->pipe_info[pipe].scaler_owner[scaler] != vgpu->id) { + gvt_vgpu_err("Unsupport pipe %d, scaler %d scaling\n", + pipe, scaler); + return 0; + } + + if (!(vgpu_vreg_t(vgpu, PIPECONF(pipe)) & I965_PIPECONF_ACTIVE)) + return 0; + + if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL || + offset == _PS_1B_CTRL || offset == _PS_2B_CTRL || + offset == _PS_1C_CTRL) && ((*(u32 *)p_data) & PS_SCALER_EN)) { + unsigned int plane; + + if (SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) == 0) { + gvt_vgpu_err("Unsupport crtc scaling for UOS\n"); + return 0; + } + plane = SKL_PS_REG_VALUE_TO_PLANE(*(u32 *)p_data) - 1; + if (plane >= I915_MAX_PLANES || + vgpu->gvt->pipe_info[pipe].plane_owner[plane] != vgpu->id) { + gvt_vgpu_err("Unsupport plane %d scaling\n", plane); + return 0; + } + } + + write_vreg(vgpu, offset, p_data, bytes); + I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + return 0; +} + static int init_skl_mmio_info(struct intel_gvt *gvt) { struct drm_i915_private *dev_priv = gvt->dev_priv; @@ -2854,129 +3092,65 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS); MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); - - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, 
NULL); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, skl_ps_mmio_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, skl_ps_mmio_write); + + MMIO_PLANES_DH(PLANE_CTL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_STRIDE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_POS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_SIZE, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYVAL, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_KEYMSK, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + MMIO_PLANES_DH(PLANE_SURF, D_SKL_PLUS, NULL, skl_plane_surf_write); + + MMIO_PLANES_DH(PLANE_KEYMAX, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_AUX_DIST, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_AUX_OFFSET, D_SKL_PLUS, NULL, skl_plane_mmio_write); + + if (i915_modparams.avail_planes_per_pipe) { + MMIO_PLANES_SDH(PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, NULL); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, NULL); + } else { + MMIO_PLANES_SDH(PLANE_WM_BASE, 4 * 8, D_SKL_PLUS, NULL, skl_plane_mmio_write); + MMIO_PLANES_DH(PLANE_WM_TRANS, D_SKL_PLUS, NULL, skl_plane_mmio_write); + } + + MMIO_PLANES_DH(PLANE_NV12_BUF_CFG, D_SKL_PLUS, NULL, + pv_plane_wm_mmio_write); + MMIO_PLANES_DH(PLANE_BUF_CFG, D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 
0, D_SKL_PLUS, NULL, NULL); MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C0(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_A, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_B, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 1)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 2)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL); - MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL); - - MMIO_D(_MMIO(0x70380), D_SKL_PLUS); - MMIO_D(_MMIO(0x71380), D_SKL_PLUS); - MMIO_D(_MMIO(0x72380), D_SKL_PLUS); - MMIO_D(_MMIO(0x7239c), D_SKL_PLUS); - MMIO_D(_MMIO(0x7039c), D_SKL_PLUS); - MMIO_D(_MMIO(0x8f074), D_SKL_PLUS); MMIO_D(_MMIO(0x8f004), D_SKL_PLUS); MMIO_D(_MMIO(0x8f034), D_SKL_PLUS); @@ -3031,16 +3205,6 @@ static int init_skl_mmio_info(struct 
intel_gvt *gvt) MMIO_D(_MMIO(0x71034), D_SKL_PLUS); MMIO_D(_MMIO(0x72034), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYVAL_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMAX_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_A)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_B)), D_SKL_PLUS); - MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS); - MMIO_D(_MMIO(0x44500), D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, @@ -3051,6 +3215,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x4ab8), D_KBL); MMIO_D(_MMIO(0x2248), D_KBL | D_SKL); + MMIO_D(HUC_STATUS2, D_SKL_PLUS); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h index 5af11cf1b482..4c550627e78e 100644 --- a/drivers/gpu/drm/i915/gvt/hypercall.h +++ b/drivers/gpu/drm/i915/gvt/hypercall.h @@ -60,13 +60,17 @@ struct intel_gvt_mpt { unsigned long mfn, unsigned int nr, bool map); int (*set_trap_area)(unsigned long handle, u64 start, u64 end, bool map); + int (*set_pvmmio)(unsigned long handle, u64 start, u64 end, + bool map); int (*set_opregion)(void *vgpu); int (*get_vfio_device)(void *vgpu); void (*put_vfio_device)(void *vgpu); bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn); + int (*dom0_ready)(void); }; extern struct intel_gvt_mpt xengt_mpt; extern struct intel_gvt_mpt kvmgt_mpt; +extern struct intel_gvt_mpt acrn_gvt_mpt; #endif /* _GVT_HYPERCALL_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 5daa23ae566b..06ce906b6673 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -69,6 +69,7 @@ static const char * const irq_name[INTEL_GVT_EVENT_MAX] = { [VCS_PAGE_DIRECTORY_FAULT] = "Video page directory faults", [VCS_AS_CONTEXT_SWITCH] = "Video AS Context Switch Interrupt", [VCS2_MI_USER_INTERRUPT] = "VCS2 Video CS MI USER INTERRUPT", + [VCS2_CMD_STREAMER_ERR] = "VCS2 Video CS error interrupt", [VCS2_MI_FLUSH_DW] = "VCS2 Video MI FLUSH DW notify", [VCS2_AS_CONTEXT_SWITCH] = "VCS2 Context Switch Interrupt", @@ -524,21 +525,26 @@ static void gen8_init_irq( /* GEN8 interrupt GT0 events */ SET_BIT_INFO(irq, 0, RCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 3, RCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 4, RCS_PIPE_CONTROL, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 8, RCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 16, BCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT0); + SET_BIT_INFO(irq, 19, BCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 20, BCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT0); SET_BIT_INFO(irq, 24, BCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT0); /* GEN8 interrupt GT1 events */ SET_BIT_INFO(irq, 0, VCS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 3, VCS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1); if (HAS_BSD2(gvt->dev_priv)) { SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT1); + SET_BIT_INFO(irq, 19, VCS2_CMD_STREAMER_ERR, + INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW, 
INTEL_GVT_IRQ_INFO_GT1); SET_BIT_INFO(irq, 24, VCS2_AS_CONTEXT_SWITCH, @@ -547,6 +553,7 @@ static void gen8_init_irq( /* GEN8 interrupt GT3 events */ SET_BIT_INFO(irq, 0, VECS_MI_USER_INTERRUPT, INTEL_GVT_IRQ_INFO_GT3); + SET_BIT_INFO(irq, 3, VECS_CMD_STREAMER_ERR, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 4, VECS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT3); SET_BIT_INFO(irq, 8, VECS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT3); @@ -595,6 +602,10 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, SPRITE_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); SET_BIT_INFO(irq, 4, SPRITE_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); SET_BIT_INFO(irq, 4, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); + + SET_BIT_INFO(irq, 5, PLANE_3_A_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_A); + SET_BIT_INFO(irq, 5, PLANE_3_B_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_B); + SET_BIT_INFO(irq, 5, PLANE_3_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); } /* GEN8 interrupt PCU events */ diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 5313fb1b33e1..6ec761a84557 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -53,6 +53,7 @@ enum intel_gvt_event_type { VCS_AS_CONTEXT_SWITCH, VCS2_MI_USER_INTERRUPT, + VCS2_CMD_STREAMER_ERR, VCS2_MI_FLUSH_DW, VCS2_AS_CONTEXT_SWITCH, @@ -64,6 +65,7 @@ enum intel_gvt_event_type { BCS_AS_CONTEXT_SWITCH, VECS_MI_USER_INTERRUPT, + VECS_CMD_STREAMER_ERR, VECS_MI_FLUSH_DW, VECS_AS_CONTEXT_SWITCH, @@ -92,6 +94,9 @@ enum intel_gvt_event_type { SPRITE_A_FLIP_DONE, SPRITE_B_FLIP_DONE, SPRITE_C_FLIP_DONE, + PLANE_3_A_FLIP_DONE, + PLANE_3_B_FLIP_DONE, + PLANE_3_C_FLIP_DONE, PCU_THERMAL, PCU_PCODE2DRIVER_MAILBOX, diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 9bb9a85c992c..4cb3f72ab56a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -235,6 +235,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; void *mmio = gvt->firmware.mmio; + struct drm_i915_private *dev_priv = gvt->dev_priv; if (dmlr) { memcpy(vgpu->mmio.vreg, mmio, info->mmio_size); @@ -282,6 +283,21 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr) memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET); } + /* the vreg init values below are taken from handler.c + * and won't change during the vgpu life cycle + */ + vgpu_vreg(vgpu, 0xe651c) = 1 << 17; + vgpu_vreg(vgpu, 0xe661c) = 1 << 17; + vgpu_vreg(vgpu, 0xe671c) = 1 << 17; + vgpu_vreg(vgpu, 0xe681c) = 1 << 17; + vgpu_vreg(vgpu, 0xe6c04) = 3; + vgpu_vreg(vgpu, 0xe6e1c) = 0x2f << 16; + + if (HAS_HUC_UCODE(dev_priv)) { + mmio_hw_access_pre(dev_priv); + vgpu_vreg_t(vgpu, HUC_STATUS2) = I915_READ(HUC_STATUS2); + mmio_hw_access_post(dev_priv); + } } /** @@ -295,11 +311,21 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; - vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2)); + BUILD_BUG_ON(sizeof(struct gvt_shared_page) != PAGE_SIZE); + + vgpu->mmio.sreg = vzalloc(info->mmio_size); + vgpu->mmio.vreg = (void *)__get_free_pages(GFP_KERNEL, + info->mmio_size_order); if (!vgpu->mmio.vreg) return -ENOMEM; - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; + vgpu->mmio.shared_page = (struct gvt_shared_page *) __get_free_pages( + GFP_KERNEL, 0); + if (!vgpu->mmio.shared_page) { + vfree(vgpu->mmio.vreg); + vgpu->mmio.vreg = NULL; + return -ENOMEM; + }
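	/*
	 * A sketch (not part of this patch): the page allocated above is the
	 * host-side backing store; the guest maps the same gvt_shared_page
	 * through the tail of BAR0 (see the i915_drv.c/i915_drv.h hunks later
	 * in this patch) and funnels trapped register reads through it. A
	 * minimal open-coded version of the guest read path that the
	 * __raw_read() macro change implements (locking of reg_addr is an
	 * assumption here, not shown in that macro):
	 *
	 *	static u32 pv_mmio_read32(struct drm_i915_private *dev_priv,
	 *				  u32 offset)
	 *	{
	 *		u32 val;
	 *
	 *		spin_lock(&dev_priv->shared_page_lock);
	 *		// publish the wanted register offset in the shared page
	 *		dev_priv->shared_page->reg_addr = offset;
	 *		// a single trapped read of pv_mmio follows; the host
	 *		// consults reg_addr to know which register to emulate
	 *		val = readl(dev_priv->regs +
	 *			    i915_mmio_reg_offset(vgtif_reg(pv_mmio)));
	 *		spin_unlock(&dev_priv->shared_page_lock);
	 *		return val;
	 *	}
	 */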
intel_vgpu_reset_mmio(vgpu, true); @@ -313,6 +339,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) */ void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) { - vfree(vgpu->mmio.vreg); - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; + + vfree(vgpu->mmio.sreg); + free_pages((unsigned long) vgpu->mmio.vreg, info->mmio_size_order); + free_pages((unsigned long) vgpu->mmio.shared_page, 0); + vgpu->mmio.vreg = vgpu->mmio.sreg = vgpu->mmio.shared_page = NULL; } diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 67f19992b226..6eef2e01e46a 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -300,6 +300,27 @@ static inline int intel_gvt_hypervisor_set_trap_area( return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); } +/** + * intel_gvt_hypervisor_set_pvmmio - Set the pvmmio area + * @vgpu: a vGPU + * @start: the beginning of the guest physical address region + * @end: the end of the guest physical address region + * @map: map or unmap + * + * Returns: + * Zero on success, negative error code if failed. + */ +static inline int intel_gvt_hypervisor_set_pvmmio( + struct intel_vgpu *vgpu, u64 start, u64 end, bool map) +{ + /* a MPT implementation could have MMIO trapped elsewhere */ + if (!intel_gvt_host.mpt->set_pvmmio) + return -ENOENT; + + return intel_gvt_host.mpt->set_pvmmio(vgpu->handle, start, end, map); +} + + /** * intel_gvt_hypervisor_set_opregion - Set opregion for guest * @vgpu: a vGPU @@ -362,4 +383,12 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn( return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn); } +static inline int intel_gvt_hypervisor_dom0_ready(void) +{ + if (!intel_gvt_host.mpt->dom0_ready) + return 0; + + return intel_gvt_host.mpt->dom0_ready(); +} + #endif /* _GVT_MPT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index d4f7ce6dc1d7..b55fc82027e0 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -57,8 +57,15 @@ #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) -#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) -#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) +#define _REG_701AC(pipe, plane) (0x701ac + pipe * 0x1000 + plane * 0x100) + +#define SKL_PS_REG_TO_PIPE(reg) (((reg) >> 11) & 0x3) +#define SKL_PS_REG_TO_SCALER(reg) (((reg) >> 8) & 0x3) +#define SKL_PS_REG_VALUE_TO_PLANE(val) (((val) >> 25) & 0x7) + +#define SKL_PLANE_REG_TO_PIPE(reg) (((reg) >> 12) & 0x3) +#define SKL_PLANE_REG_TO_PLANE(reg) ((((reg) & 0xFFF) - 0x180) >> 8) +#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane)*3 + pipe) #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \ ((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16)))) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index c32e7d5e8629..f5127e07570b 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -34,15 +34,11 @@ #include "i915_drv.h" #include "gvt.h" -static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) +static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu, + enum intel_engine_id ring_id) { - enum intel_engine_id i; - struct intel_engine_cs *engine; - - for_each_engine(engine, vgpu->gvt->dev_priv, i) { - if (!list_empty(workload_q_head(vgpu, i))) - return true; - } + if 
(!list_empty(workload_q_head(vgpu, ring_id))) + return true; return false; } @@ -68,11 +64,12 @@ struct gvt_sched_data { struct intel_gvt *gvt; struct hrtimer timer; unsigned long period; - struct list_head lru_runq_head; + struct list_head lru_runq_head[I915_NUM_ENGINES]; ktime_t expire_time; }; -static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) +static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time, + enum intel_engine_id ring_id) { ktime_t delta_ts; struct vgpu_sched_data *vgpu_data; @@ -80,7 +77,7 @@ static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) if (!vgpu || vgpu == vgpu->gvt->idle_vgpu) return; - vgpu_data = vgpu->sched_data; + vgpu_data = vgpu->sched_data[ring_id]; delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time); vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts); vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts); @@ -90,12 +87,13 @@ static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time) #define GVT_TS_BALANCE_PERIOD_MS 100 #define GVT_TS_BALANCE_STAGE_NUM 10 -static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct list_head *pos; - static uint64_t stage_check; - int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + static uint64_t stage_check[I915_NUM_ENGINES]; + int stage = stage_check[ring_id]++ % GVT_TS_BALANCE_STAGE_NUM; /* The timeslice accumulation reset at stage 0, which is * allocated again without adding previous debt. @@ -104,12 +102,12 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) int total_weight = 0; ktime_t fair_timeslice; - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); total_weight += vgpu_data->sched_ctl.weight; } - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS), total_weight) * vgpu_data->sched_ctl.weight; @@ -118,7 +116,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) vgpu_data->left_ts = vgpu_data->allocated_ts; } } else { - list_for_each(pos, &sched_data->lru_runq_head) { + list_for_each(pos, &sched_data->lru_runq_head[ring_id]) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); /* timeslice for next 100ms should add the left/debt @@ -129,62 +127,63 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) } } -static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) +static void try_to_schedule_next_vgpu(struct intel_gvt *gvt, + enum intel_engine_id ring_id) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - enum intel_engine_id i; - struct intel_engine_cs *engine; struct vgpu_sched_data *vgpu_data; ktime_t cur_time; /* no need to schedule if next_vgpu is the same with current_vgpu, * let scheduler chose next_vgpu again by setting it to NULL. 
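 *
 * A hypothetical two-vGPU snapshot illustrates the per-ring bookkeeping
 * introduced by this change:
 *
 *   current_vgpu[RCS] = vgpu1, next_vgpu[RCS] = vgpu2  -> switch RCS
 *   current_vgpu[VCS] = vgpu2, next_vgpu[VCS] = vgpu2  -> clear next_vgpu[VCS]
 *
 * so each ring is scheduled independently of the others.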
*/ - if (scheduler->next_vgpu == scheduler->current_vgpu) { - scheduler->next_vgpu = NULL; + if (scheduler->next_vgpu[ring_id] == + scheduler->current_vgpu[ring_id]) { + scheduler->next_vgpu[ring_id] = NULL; return; } + /* no target to schedule */ + if (!scheduler->next_vgpu[ring_id]) + return; /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu */ - scheduler->need_reschedule = true; + scheduler->need_reschedule[ring_id] = true; /* still have uncompleted workload? */ - for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) - return; - } + if (scheduler->current_workload[ring_id]) + return; cur_time = ktime_get(); - vgpu_update_timeslice(scheduler->current_vgpu, cur_time); - vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_update_timeslice(scheduler->current_vgpu[ring_id], cur_time, ring_id); + vgpu_data = scheduler->next_vgpu[ring_id]->sched_data[ring_id]; vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ - scheduler->current_vgpu = scheduler->next_vgpu; - scheduler->next_vgpu = NULL; + scheduler->current_vgpu[ring_id] = scheduler->next_vgpu[ring_id]; + scheduler->next_vgpu[ring_id] = NULL; - scheduler->need_reschedule = false; + scheduler->need_reschedule[ring_id] = false; /* wake up workload dispatch thread */ - for_each_engine(engine, gvt->dev_priv, i) - wake_up(&scheduler->waitq[i]); + wake_up(&scheduler->waitq[ring_id]); } -static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *head = &sched_data->lru_runq_head; + struct list_head *head = &sched_data->lru_runq_head[ring_id]; struct list_head *pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); - if (!vgpu_has_pending_workload(vgpu_data->vgpu)) + if (!vgpu_has_pending_workload(vgpu_data->vgpu, ring_id)) continue; if (vgpu_data->pri_sched) { @@ -208,7 +207,8 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) /* in nanosecond */ #define GVT_DEFAULT_TIME_SLICE 1000000 -static void tbs_sched_func(struct gvt_sched_data *sched_data) +static void tbs_sched_func(struct gvt_sched_data *sched_data, + enum intel_engine_id ring_id) { struct intel_gvt *gvt = sched_data->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -216,31 +216,34 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data) struct intel_vgpu *vgpu = NULL; /* no active vgpu or has already had a target */ - if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + if (list_empty(&sched_data->lru_runq_head[ring_id]) + || scheduler->next_vgpu[ring_id]) goto out; - vgpu = find_busy_vgpu(sched_data); + vgpu = find_busy_vgpu(sched_data, ring_id); if (vgpu) { - scheduler->next_vgpu = vgpu; - vgpu_data = vgpu->sched_data; + scheduler->next_vgpu[ring_id] = vgpu; + vgpu_data = vgpu->sched_data[ring_id]; if (!vgpu_data->pri_sched) { /* Move the last used vGPU to the tail of lru_list */ list_del_init(&vgpu_data->lru_list); list_add_tail(&vgpu_data->lru_list, - &sched_data->lru_runq_head); + &sched_data->lru_runq_head[ring_id]); } } else { - scheduler->next_vgpu = gvt->idle_vgpu; + scheduler->next_vgpu[ring_id] = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) - try_to_schedule_next_vgpu(gvt); + if 
(scheduler->next_vgpu[ring_id]) + try_to_schedule_next_vgpu(gvt, ring_id); } void intel_gvt_schedule(struct intel_gvt *gvt) { struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; ktime_t cur_time; + enum intel_engine_id i; + struct intel_engine_cs *engine; mutex_lock(&gvt->sched_lock); cur_time = ktime_get(); @@ -248,15 +251,19 @@ void intel_gvt_schedule(struct intel_gvt *gvt) if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, (void *)&gvt->service_request)) { if (cur_time >= sched_data->expire_time) { - gvt_balance_timeslice(sched_data); + for_each_engine(engine, gvt->dev_priv, i) + gvt_balance_timeslice(sched_data, i); sched_data->expire_time = ktime_add_ms( cur_time, GVT_TS_BALANCE_PERIOD_MS); } } clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request); - vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time); - tbs_sched_func(sched_data); + for_each_engine(engine, gvt->dev_priv, i) { + vgpu_update_timeslice(gvt->scheduler.current_vgpu[i], + cur_time, i); + tbs_sched_func(sched_data, i); + } mutex_unlock(&gvt->sched_lock); } @@ -276,6 +283,9 @@ static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) static int tbs_sched_init(struct intel_gvt *gvt) { + enum intel_engine_id i; + struct intel_engine_cs *engine; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; @@ -285,7 +295,9 @@ static int tbs_sched_init(struct intel_gvt *gvt) if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->lru_runq_head); + for_each_engine(engine, gvt->dev_priv, i) + INIT_LIST_HEAD(&data->lru_runq_head[i]); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; @@ -311,18 +323,29 @@ static void tbs_sched_clean(struct intel_gvt *gvt) static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { struct vgpu_sched_data *data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - data = kzalloc(sizeof(*data), GFP_KERNEL); - if (!data) - return -ENOMEM; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto err; - data->sched_ctl.weight = vgpu->sched_ctl.weight; - data->vgpu = vgpu; - INIT_LIST_HEAD(&data->lru_list); + data->sched_ctl.weight = vgpu->sched_ctl.weight; + data->vgpu = vgpu; + INIT_LIST_HEAD(&data->lru_list); - vgpu->sched_data = data; + vgpu->sched_data[i] = data; + } return 0; + +err: + for (; i >= 0; i--) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } + return -ENOMEM; } static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) @@ -330,8 +353,13 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) struct intel_gvt *gvt = vgpu->gvt; struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; - kfree(vgpu->sched_data); - vgpu->sched_data = NULL; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + kfree(vgpu->sched_data[i]); + vgpu->sched_data[i] = NULL; + } /* this vgpu id has been removed */ if (idr_is_empty(&gvt->vgpu_idr)) @@ -341,31 +369,42 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; ktime_t now; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; - if (!list_empty(&vgpu_data->lru_list)) - return; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + 
vgpu_data = vgpu->sched_data[i]; + if (!list_empty(&vgpu_data->lru_list)) + continue; - now = ktime_get(); - vgpu_data->pri_time = ktime_add(now, + now = ktime_get(); + vgpu_data->pri_time = ktime_add(now, ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0)); - vgpu_data->pri_sched = true; + vgpu_data->pri_sched = true; - list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head); + list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head[i]); + vgpu_data->active = true; + } if (!hrtimer_active(&sched_data->timer)) hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), sched_data->period), HRTIMER_MODE_ABS); - vgpu_data->active = true; } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; + + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; - list_del_init(&vgpu_data->lru_list); - vgpu_data->active = false; + list_del_init(&vgpu_data->lru_list); + vgpu_data->active = false; + } } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { @@ -423,10 +462,16 @@ void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu) void intel_vgpu_start_schedule(struct intel_vgpu *vgpu) { - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data; + struct intel_engine_cs *engine; + enum intel_engine_id i; mutex_lock(&vgpu->gvt->sched_lock); - if (!vgpu_data->active) { + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; + if (vgpu_data->active) + continue; + gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id); vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu); } @@ -444,36 +489,27 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) { struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; - int ring_id; - struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - - if (!vgpu_data->active) - return; + struct vgpu_sched_data *vgpu_data; + enum intel_engine_id i; + struct intel_engine_cs *engine; gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); mutex_lock(&vgpu->gvt->sched_lock); scheduler->sched_ops->stop_schedule(vgpu); - if (scheduler->next_vgpu == vgpu) - scheduler->next_vgpu = NULL; + for_each_engine(engine, vgpu->gvt->dev_priv, i) { + vgpu_data = vgpu->sched_data[i]; - if (scheduler->current_vgpu == vgpu) { - /* stop workload dispatching */ - scheduler->need_reschedule = true; - scheduler->current_vgpu = NULL; - } + if (scheduler->next_vgpu[i] == vgpu) + scheduler->next_vgpu[i] = NULL; - intel_runtime_pm_get(dev_priv); - spin_lock_bh(&scheduler->mmio_context_lock); - for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { - if (scheduler->engine_owner[ring_id] == vgpu) { - intel_gvt_switch_mmio(vgpu, NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; + if (scheduler->current_vgpu[i] == vgpu) { + /* stop workload dispatching */ + scheduler->need_reschedule[i] = true; + scheduler->current_vgpu[i] = NULL; } } - spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put(dev_priv); + mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 43aa058e29fc..ebba07a402d7 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -41,6 +41,8 @@ #define RING_CTX_OFF(x) \ offsetof(struct execlist_ring_context, x) +bool gvt_shadow_wa_ctx = 
false; + static void set_context_pdp_root_pointer( struct execlist_ring_context *ring_context, u32 pdp[8]) @@ -119,6 +121,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload, } } +static bool enable_lazy_shadow_ctx = true; static int populate_shadow_context(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; @@ -130,6 +133,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) struct page *page; void *dst; unsigned long context_gpa, context_page_num; + struct drm_i915_private *dev_priv = gvt->dev_priv; + struct i915_ggtt *ggtt = &gvt->dev_priv->ggtt; + dma_addr_t addr; + gen8_pte_t __iomem *pte; int i; gvt_dbg_sched("ring id %d workload lrca %x", ring_id, @@ -143,6 +150,18 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) context_page_num = 19; i = 2; +#ifdef CONFIG_INTEL_IOMMU + /* + * In case IOMMU for graphics is turned on, we don't want to + * turn on lazy shadow context feature because it will touch + * GGTT entries which require a BKL and since this is a + * performance enhancement feature, we will end up negating + * the performance. + */ + if(intel_iommu_gfx_mapped) { + enable_lazy_shadow_ctx = false; + } +#endif while (i < context_page_num) { context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, @@ -153,14 +172,41 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - dst = kmap(page); - intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, + if (!enable_lazy_shadow_ctx) { + page = i915_gem_object_get_page(ctx_obj, + LRC_PPHWSP_PN + i); + dst = kmap(page); + intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, I915_GTT_PAGE_SIZE); - kunmap(page); + kunmap(page); + } else { + unsigned long mfn; + struct i915_gem_context *shadow_ctx = + workload->vgpu->submission.shadow_ctx; + + addr = i915_ggtt_offset( + shadow_ctx->__engine[ring_id].state) + + (LRC_PPHWSP_PN + i) * PAGE_SIZE; + pte = (gen8_pte_t __iomem *)ggtt->gsm + + (addr >> PAGE_SHIFT); + + mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, + context_gpa >> 12); + if (mfn == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("fail to translate gfn during context shadow\n"); + return -ENXIO; + } + + mfn <<= 12; + mfn |= _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED; + writeq(mfn, pte); + } i++; } + I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); + POSTING_READ(GFX_FLSH_CNTL_GEN6); + page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); shadow_ring_context = kmap(page); @@ -203,6 +249,7 @@ static inline bool is_gvt_request(struct i915_request *req) return i915_gem_context_force_single_submission(req->gem_context); } +/* static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; @@ -216,6 +263,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id) reg = RING_ACTHD_UDW(ring_base); vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg); } +*/ static int shadow_context_status_change(struct notifier_block *nb, unsigned long action, void *data) @@ -226,21 +274,9 @@ static int shadow_context_status_change(struct notifier_block *nb, struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id ring_id = req->engine->id; struct intel_vgpu_workload *workload; - unsigned long flags; - - if (!is_gvt_request(req)) { - spin_lock_irqsave(&scheduler->mmio_context_lock, flags); - if (action == INTEL_CONTEXT_SCHEDULE_IN && - 
scheduler->engine_owner[ring_id]) { - /* Switch ring from vGPU to host. */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - NULL, ring_id); - scheduler->engine_owner[ring_id] = NULL; - } - spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); + if (!is_gvt_request(req)) return NOTIFY_OK; - } workload = scheduler->current_workload[ring_id]; if (unlikely(!workload)) @@ -248,25 +284,13 @@ static int shadow_context_status_change(struct notifier_block *nb, switch (action) { case INTEL_CONTEXT_SCHEDULE_IN: - spin_lock_irqsave(&scheduler->mmio_context_lock, flags); - if (workload->vgpu != scheduler->engine_owner[ring_id]) { - /* Switch ring from host to vGPU or vGPU to vGPU. */ - intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], - workload->vgpu, ring_id); - scheduler->engine_owner[ring_id] = workload->vgpu; - } else - gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", - ring_id, workload->vgpu->id); - spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: - save_ring_hw_state(workload->vgpu, ring_id); atomic_set(&workload->shadow_ctx_active, 0); break; case INTEL_CONTEXT_SCHEDULE_PREEMPTED: - save_ring_hw_state(workload->vgpu, ring_id); - break; + return NOTIFY_OK; default: WARN_ON(1); return NOTIFY_OK; @@ -381,7 +405,8 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) goto err_unpin; if ((workload->ring_id == RCS) && - (workload->wa_ctx.indirect_ctx.size != 0)) { + (workload->wa_ctx.indirect_ctx.size != 0) + && gvt_shadow_wa_ctx) { ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); if (ret) goto err_shadow; @@ -410,6 +435,38 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) return ret; } +static void gen8_shadow_pid_cid(struct intel_vgpu_workload *workload) +{ + int ring_id = workload->ring_id; + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; + u32 *cs; + + /* Copy the PID and CID from the guest's HWS page to the host's one */ + cs = intel_ring_begin(workload->req, 16); + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) + + I915_GEM_HWS_PID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_PID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = (workload->ctx_desc.lrca << I915_GTT_PAGE_SHIFT) + + I915_GEM_HWS_CID_ADDR; + *cs++ = 0; + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT; + *cs++ = i915_mmio_reg_offset(NOPID); + *cs++ = engine->status_page.ggtt_offset + I915_GEM_HWS_CID_ADDR + + (workload->vgpu->id << MI_STORE_DWORD_INDEX_SHIFT); + *cs++ = 0; + intel_ring_advance(workload->req, cs); +} + static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload); static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) @@ -594,22 +651,34 @@ static int prepare_workload(struct intel_vgpu_workload *workload) goto err_unpin_mm; } + /* we consider this a workaround to avoid the situation where + * the PDPs are not updated; right now we only limit it to the BXT + * platform since the issue is not reported on the other platforms + */ + if
(IS_BROXTON(vgpu->gvt->dev_priv)) { + gvt_emit_pdps(workload); + } + ret = copy_workload_to_ring_buffer(workload); if (ret) { gvt_vgpu_err("fail to generate request\n"); goto err_unpin_mm; } + gen8_shadow_pid_cid(workload); + ret = prepare_shadow_batch_buffer(workload); if (ret) { gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n"); goto err_unpin_mm; } - ret = prepare_shadow_wa_ctx(&workload->wa_ctx); - if (ret) { - gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); - goto err_shadow_batch; + if (gvt_shadow_wa_ctx) { + ret = prepare_shadow_wa_ctx(&workload->wa_ctx); + if (ret) { + gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n"); + goto err_shadow_batch; + } } if (workload->prepare) { @@ -647,6 +716,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ret = prepare_workload(workload); + workload->guilty_count = atomic_read(&workload->req->gem_context->guilty_count); out: if (ret) workload->status = ret; @@ -675,17 +745,18 @@ static struct intel_vgpu_workload *pick_next_workload( * no current vgpu / will be scheduled out / no workload * bail out */ - if (!scheduler->current_vgpu) { + if (!scheduler->current_vgpu[ring_id]) { gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id); goto out; } - if (scheduler->need_reschedule) { + if (scheduler->need_reschedule[ring_id]) { gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id); goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) + if (list_empty(workload_q_head(scheduler->current_vgpu[ring_id], + ring_id))) goto out; /* @@ -706,7 +777,8 @@ static struct intel_vgpu_workload *pick_next_workload( * schedule out a vgpu. */ scheduler->current_workload[ring_id] = container_of( - workload_q_head(scheduler->current_vgpu, ring_id)->next, + workload_q_head(scheduler->current_vgpu[ring_id], + ring_id)->next, struct intel_vgpu_workload, list); workload = scheduler->current_workload[ring_id]; @@ -734,29 +806,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload) gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id, workload->ctx_desc.lrca); - context_page_num = rq->engine->context_size; - context_page_num = context_page_num >> PAGE_SHIFT; + if (!enable_lazy_shadow_ctx) { + context_page_num = rq->engine->context_size; + context_page_num = context_page_num >> PAGE_SHIFT; - if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) - context_page_num = 19; + if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS) + context_page_num = 19; - i = 2; + i = 2; - while (i < context_page_num) { - context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, - (u32)((workload->ctx_desc.lrca + i) << - I915_GTT_PAGE_SHIFT)); - if (context_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid guest context descriptor\n"); - return; - } + while (i < context_page_num) { + context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, + (u32)((workload->ctx_desc.lrca + i) << + I915_GTT_PAGE_SHIFT)); + if (context_gpa == INTEL_GVT_INVALID_ADDR) { + gvt_vgpu_err("invalid guest context descriptor\n"); + return; + } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); - src = kmap(page); - intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, - I915_GTT_PAGE_SIZE); - kunmap(page); - i++; + page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + src = kmap(page); + intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, + I915_GTT_PAGE_SIZE); + kunmap(page); + i++; + } } intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + @@ -861,9 +935,13 @@ static void 
complete_current_workload(struct intel_gvt *gvt, int ring_id) list_del_init(&workload->list); + if (workload->status == -EIO) + intel_vgpu_reset_submission(vgpu, 1 << ring_id); + if (!workload->status) { release_shadow_batch_buffer(workload); - release_shadow_wa_ctx(&workload->wa_ctx); + if (gvt_shadow_wa_ctx) + release_shadow_wa_ctx(&workload->wa_ctx); } if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) { @@ -888,13 +966,25 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) atomic_dec(&s->running_workload_num); wake_up(&scheduler->workload_complete_wq); - if (gvt->scheduler.need_reschedule) + if (gvt->scheduler.need_reschedule[ring_id]) intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED); mutex_unlock(&gvt->sched_lock); mutex_unlock(&vgpu->vgpu_lock); } +static void inject_error_cs_irq(struct intel_vgpu *vgpu, int ring_id) +{ + enum intel_gvt_event_type events[] = { + RCS_CMD_STREAMER_ERR, + BCS_CMD_STREAMER_ERR, + VCS_CMD_STREAMER_ERR, + VCS2_CMD_STREAMER_ERR, + VECS_CMD_STREAMER_ERR, + }; + intel_vgpu_trigger_virtual_event(vgpu, events[ring_id]); +} + struct workload_thread_param { struct intel_gvt *gvt; int ring_id; @@ -909,6 +999,7 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; + long lret; bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv) || IS_BROXTON(gvt->dev_priv); @@ -955,7 +1046,24 @@ static int workload_thread(void *priv) gvt_dbg_sched("ring id %d wait workload %p\n", workload->ring_id, workload); - i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT); + lret = i915_request_wait(workload->req, 0, + MAX_SCHEDULE_TIMEOUT); + + gvt_dbg_sched("i915_request_wait %p returns %ld\n", + workload, lret); + if (lret >= 0 && workload->status == -EINPROGRESS) + workload->status = 0; + + /* + * an increased guilty_count means that this request triggered + * a GPU reset, so we need to notify the guest about the + * hang. + */ + if (workload->guilty_count < + atomic_read(&workload->req->gem_context->guilty_count)) { + workload->status = -EIO; + inject_error_cs_irq(workload->vgpu, ring_id); + } complete: gvt_dbg_sched("will complete workload %p, status: %d\n", @@ -1108,6 +1216,10 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) if (IS_ERR(s->shadow_ctx)) return PTR_ERR(s->shadow_ctx); + if (!s->shadow_ctx->name) { + s->shadow_ctx->name = kasprintf(GFP_KERNEL, "Shadow Context %d", vgpu->id); + } + bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload", diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index ca5529d0e48e..3cec02d2ac1a 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -37,10 +37,10 @@ #define _GVT_SCHEDULER_H_ struct intel_gvt_workload_scheduler { - struct intel_vgpu *current_vgpu; - struct intel_vgpu *next_vgpu; + struct intel_vgpu *current_vgpu[I915_NUM_ENGINES]; + struct intel_vgpu *next_vgpu[I915_NUM_ENGINES]; struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES]; - bool need_reschedule; + bool need_reschedule[I915_NUM_ENGINES]; spinlock_t mmio_context_lock; /* can be null when owner is host */ @@ -84,6 +84,7 @@ struct intel_vgpu_workload { /* has this workload been dispatched to i915?
*/ bool dispatched; int status; + unsigned int guilty_count; struct intel_vgpu_mm *shadow_mm; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index c628be05fbfe..511681b24c59 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -37,6 +37,11 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) { + enum pipe pipe; + int scaler; + struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; + /* setup the ballooning information */ vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC; vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1; @@ -62,6 +67,16 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) vgpu_vreg_t(vgpu, vgtif_reg(cursor_x_hot)) = UINT_MAX; vgpu_vreg_t(vgpu, vgtif_reg(cursor_y_hot)) = UINT_MAX; + vgpu_vreg_t(vgpu, vgtif_reg(scaler_owned)) = 0; + for_each_pipe(dev_priv, pipe) + for_each_universal_scaler(dev_priv, pipe, scaler) + if (gvt->pipe_info[pipe].scaler_owner[scaler] == + vgpu->id) + vgpu_vreg_t(vgpu, vgtif_reg(scaler_owned)) |= + 1 << (pipe * SKL_NUM_SCALERS + scaler); + + vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) = 0; + gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id); gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n", vgpu_aperture_gmadr_base(vgpu), vgpu_aperture_sz(vgpu)); @@ -285,6 +300,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) intel_vgpu_clean_gtt(vgpu); intel_gvt_hypervisor_detach_vgpu(vgpu); intel_vgpu_free_resource(vgpu); + intel_vgpu_reset_cfg_space(vgpu); intel_vgpu_clean_mmio(vgpu); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); @@ -525,6 +541,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask; + enum intel_engine_id i; + struct intel_engine_cs *engine; + bool enable_pvmmio = vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)); gvt_dbg_core("------------------------------------------\n"); gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", @@ -537,7 +556,10 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, * The current_vgpu will set to NULL after stopping the * scheduler when the reset is triggered by current vgpu. 
*/ - if (scheduler->current_vgpu == NULL) { + for_each_engine_masked(engine, gvt->dev_priv, resetting_eng, i) { + if (scheduler->current_vgpu[i] != NULL) + continue; + mutex_unlock(&vgpu->vgpu_lock); intel_gvt_wait_vgpu_idle(vgpu); mutex_lock(&vgpu->vgpu_lock); @@ -556,6 +578,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_mmio(vgpu, dmlr); populate_pvinfo_page(vgpu); + vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) = enable_pvmmio; intel_vgpu_reset_display(vgpu); if (dmlr) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index f9ce35da4123..89b2863845d5 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1939,6 +1939,49 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) ring->space, ring->head, ring->tail, ring->emit); } +bool is_shadow_context(struct i915_gem_context *ctx) +{ + if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) + return true; + + return false; +} + +int get_vgt_id(struct i915_gem_context *ctx) +{ + int vgt_id; + + vgt_id = 0; + + if (is_shadow_context(ctx)) + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + + return vgt_id; +} + +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + int pid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + return pid; +} + +static void describe_ctx_ring_shadowed(struct seq_file *m, + struct i915_gem_context *ctx, struct intel_ring *ring, + struct intel_engine_cs *engine) +{ + int pid, cid, vgt_id; + + sscanf(ctx->name, "Shadow Context %d", &vgt_id); + pid = intel_read_status_page(engine, I915_GEM_HWS_PID_INDEX + vgt_id); + cid = intel_read_status_page(engine, I915_GEM_HWS_CID_INDEX + vgt_id); + seq_printf(m, " (Current DomU Process PID: %d, CID: %d)", + pid, cid); +} + static int i915_context_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1953,6 +1996,7 @@ static int i915_context_status(struct seq_file *m, void *unused) return ret; list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + bool is_shadow_context = false; seq_printf(m, "HW context %u ", ctx->hw_id); if (ctx->pid) { struct task_struct *task; @@ -1963,6 +2007,9 @@ static int i915_context_status(struct seq_file *m, void *unused) task->comm, task->pid); put_task_struct(task); } + } else if (ctx->name && !strncmp(ctx->name, "Shadow Context", 14)) { + seq_puts(m, "DomU Shadow Context "); + is_shadow_context = true; } else if (IS_ERR(ctx->file_priv)) { seq_puts(m, "(deleted) "); } else { @@ -1975,12 +2022,19 @@ static int i915_context_status(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) { struct intel_context *ce = to_intel_context(ctx, engine); + u64 lrc_desc = ce->lrc_desc; + seq_printf(m, "ctx id 0x%x ", (uint32_t)((lrc_desc >> 12) & + 0xFFFFF)); seq_printf(m, "%s: ", engine->name); if (ce->state) describe_obj(m, ce->state->obj); - if (ce->ring) + if (ce->ring) { describe_ctx_ring(m, ce->ring); + if(is_shadow_context) + describe_ctx_ring_shadowed(m, ctx, + ce->ring, engine); + } seq_putc(m, '\n'); } @@ -2954,15 +3008,23 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *intel_crtc) struct drm_device *dev = &dev_priv->drm; struct drm_crtc *crtc = &intel_crtc->base; struct intel_encoder *intel_encoder; - struct drm_plane_state *plane_state = crtc->primary->state; - struct drm_framebuffer *fb 
= plane_state->fb; + struct drm_plane_state *plane_state; + struct drm_framebuffer *fb; - if (fb) - seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", + if (!crtc->primary) { + seq_puts(m, "\tno primary plane\n"); + } else { + plane_state = crtc->primary->state; + fb = plane_state->fb; + + if (fb) + seq_printf(m, "\tfb: %d, pos: %dx%d, size: %dx%d\n", fb->base.id, plane_state->src_x >> 16, plane_state->src_y >> 16, fb->width, fb->height); - else - seq_puts(m, "\tprimary plane disabled\n"); + else + seq_puts(m, "\tprimary plane disabled\n"); + } + for_each_encoder_on_crtc(dev, crtc, intel_encoder) intel_encoder_info(m, intel_crtc, intel_encoder); } @@ -3207,13 +3269,18 @@ static int i915_display_info(struct seq_file *m, void *unused) intel_crtc_info(m, crtc); - seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", - yesno(cursor->base.state->visible), - cursor->base.state->crtc_x, - cursor->base.state->crtc_y, - cursor->base.state->crtc_w, - cursor->base.state->crtc_h, - cursor->cursor.base); + if (cursor) { + seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n", + yesno(cursor->base.state->visible), + cursor->base.state->crtc_x, + cursor->base.state->crtc_y, + cursor->base.state->crtc_w, + cursor->base.state->crtc_h, + cursor->cursor.base); + } else { + seq_puts(m, "\tNo cursor plane available on this platform\n"); + } + intel_scaler_info(m, crtc); intel_plane_info(m, crtc); } @@ -4000,6 +4067,9 @@ i915_wedged_set(void *data, u64 val) struct intel_engine_cs *engine; unsigned int tmp; + if (intel_vgpu_active(i915)) + return -EINVAL; + /* * There is no safeguard against this debugfs entry colliding * with the hangcheck calling same i915_handle_error() in diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index f8cfd16be534..5c25a82265b1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -51,6 +51,7 @@ #include "i915_pmu.h" #include "i915_query.h" #include "i915_vgpu.h" +#include "intel_uc.h" #include "intel_drv.h" #include "intel_uc.h" @@ -895,6 +896,7 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv, sizeof(device_info->platform_mask) * BITS_PER_BYTE); BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->shared_page_lock); spin_lock_init(&dev_priv->gpu_error.lock); mutex_init(&dev_priv->backlight_lock); spin_lock_init(&dev_priv->uncore.lock); @@ -991,6 +993,9 @@ static void i915_mmio_cleanup(struct drm_i915_private *dev_priv) intel_teardown_mchbar(dev_priv); pci_iounmap(pdev, dev_priv->regs); + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(pdev, dev_priv->shared_page); + } /** @@ -1024,6 +1029,21 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) intel_uc_init_mmio(dev_priv); + if (intel_vgpu_active(dev_priv) && i915_modparams.enable_pvmmio) { + u32 bar = 0; + u32 mmio_size = 2 * 1024 * 1024; + + /* Map a shared page from the end of the 2M mmio region in bar0.
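+ * The BAR0 layout assumed by this mapping (sizes taken from this
+ * hunk; a sketch, not authoritative) is:
+ *
+ *   0x000000 +--------------------+
+ *            |  MMIO registers    |  2 MB, mapped at dev_priv->regs
+ *   0x200000 +--------------------+
+ *            |  gvt_shared_page   |  PAGE_SIZE, pci_iomap_range() below
+ *   0x201000 +--------------------+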
*/ + dev_priv->shared_page = (struct gvt_shared_page *) + pci_iomap_range(dev_priv->drm.pdev, bar, + mmio_size, PAGE_SIZE); + if (dev_priv->shared_page == NULL) { + ret = -EIO; + DRM_ERROR("ivi: failed to map shared page.\n"); + goto err_uncore; + } + } + ret = intel_engines_init_mmio(dev_priv); if (ret) goto err_uncore; @@ -1033,6 +1053,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) return 0; err_uncore: + if (intel_vgpu_active(dev_priv) && dev_priv->shared_page) + pci_iounmap(dev_priv->drm.pdev, dev_priv->shared_page); intel_uncore_fini(dev_priv); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -1318,6 +1340,24 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) DRM_INFO("DRM_I915_DEBUG_GEM enabled\n"); } +static inline int get_max_avail_pipes(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + int index = 0; + + if (!intel_vgpu_active(dev_priv) || + !i915_modparams.avail_planes_per_pipe) + return INTEL_INFO(dev_priv)->num_pipes; + + for_each_pipe(dev_priv, pipe) { + if (AVAIL_PLANE_PER_PIPE(dev_priv, i915_modparams.avail_planes_per_pipe, + pipe)) + index++; + } + + return index; +} + /** * i915_driver_load - setup chip and create an initial config * @pdev: PCI device @@ -1335,6 +1375,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) (struct intel_device_info *)ent->driver_data; struct drm_i915_private *dev_priv; int ret; + int num_crtcs = 0; /* Enable nuclear pageflip on ILK+ */ if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) @@ -1386,9 +1427,9 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) * of the i915_driver_init_/i915_driver_register functions according * to the role/effect of the given init step. */ - if (INTEL_INFO(dev_priv)->num_pipes) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); + num_crtcs = get_max_avail_pipes(dev_priv); + if (num_crtcs) { + ret = drm_vblank_init(&dev_priv->drm, num_crtcs); if (ret) goto out_cleanup_hw; } @@ -1516,6 +1557,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) i915_gem_release(dev, file); mutex_unlock(&dev->struct_mutex); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + kfree(file_priv->process_name); +#endif + kfree(file_priv); } @@ -2860,6 +2905,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(I915_GEM_GVTBUFFER, i915_gem_gvtbuffer_ioctl, DRM_RENDER_ALLOW), }; static struct drm_driver driver = { @@ -2874,6 +2920,9 @@ static struct drm_driver driver = { .lastclose = i915_driver_lastclose, .postclose = i915_driver_postclose, +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + .gem_open_object = i915_gem_open_object, +#endif .gem_close_object = i915_gem_close_object, .gem_free_object_unlocked = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4aca5344863d..88f894c73a3c 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -55,6 +55,7 @@ #include "i915_params.h" #include "i915_reg.h" +#include "i915_pvinfo.h" #include "i915_utils.h" #include "intel_bios.h" @@ -78,6 +79,9 @@ #include "i915_scheduler.h" #include "i915_timeline.h" #include
"i915_vma.h" +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include "i915_gpu_error.h" +#endif #include "intel_gvt.h" @@ -333,6 +337,11 @@ struct drm_i915_file_private { struct drm_i915_private *dev_priv; struct drm_file *file; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + char *process_name; + struct pid *tgid; +#endif + struct { spinlock_t lock; struct list_head request_list; @@ -351,6 +360,10 @@ struct drm_i915_file_private { unsigned int bsd_engine; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct bin_attribute *obj_attr; +#endif + /* * Every context ban increments per client ban score. Also * hangs in short succession increments ban score. If ban threshold @@ -996,6 +1009,10 @@ struct i915_gem_mm { spinlock_t object_stat_lock; u64 object_memory; u32 object_count; + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + size_t phys_mem_total; +#endif }; #define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */ @@ -1306,6 +1323,7 @@ struct i915_workarounds { struct i915_virtual_gpu { bool active; u32 caps; + u32 scaler_owned; }; /* used in computing the new watermarks state */ @@ -1589,6 +1607,8 @@ struct drm_i915_private { resource_size_t stolen_usable_size; /* Total size minus reserved ranges */ void __iomem *regs; + struct gvt_shared_page *shared_page; + spinlock_t shared_page_lock; struct intel_uncore uncore; @@ -1666,6 +1686,10 @@ struct drm_i915_private { bool preserve_bios_swizzle; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct kobject memtrack_kobj; +#endif + /* overlay */ struct intel_overlay *overlay; @@ -1837,6 +1861,10 @@ struct drm_i915_private { * This is limited in execlists to 21 bits. */ struct ida hw_ida; + + /* In case of virtualization, 3-bits of vgt-id will be added to hw_id */ +#define SIZE_CONTEXT_HW_ID_GVT (18) +#define MAX_CONTEXT_HW_ID_GVT (1<> (pipe) * BITS_PER_PIPE) & \ + ((1 << ((INTEL_INFO(dev_priv)->num_sprites[pipe]) + 1)) - 1)) + #include "i915_trace.h" static inline bool intel_vtd_active(void) @@ -2780,7 +2815,7 @@ static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) return dev_priv->gvt; } -static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv) +static inline bool intel_vgpu_active(const struct drm_i915_private *dev_priv) { return dev_priv->vgpu.active; } @@ -2814,18 +2849,18 @@ ilk_disable_display_irq(struct drm_i915_private *dev_priv, uint32_t bits) ilk_update_display_irq(dev_priv, bits, 0); } void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, + unsigned int crtc_index, uint32_t interrupt_mask, uint32_t enabled_irq_mask); static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, bits); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, bits); } static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, uint32_t bits) + unsigned int crtc_index, uint32_t bits) { - bdw_update_pipe_irq(dev_priv, pipe, bits, 0); + bdw_update_pipe_irq(dev_priv, crtc_index, bits, 0); } void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, uint32_t interrupt_mask, @@ -2898,6 +2933,11 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size); struct drm_i915_gem_object * i915_gem_object_create_from_data(struct drm_i915_private *dev_priv, const void *data, size_t size); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_gem_open_object(struct drm_gem_object *gem, struct drm_file *file); +#endif + void i915_gem_close_object(struct 
drm_gem_object *gem, struct drm_file *file); void i915_gem_free_object(struct drm_gem_object *obj); @@ -3149,6 +3189,7 @@ void i915_gem_reset_engine(struct intel_engine_cs *engine, void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); +int __must_check i915_gem_init_hw_late(struct drm_i915_private *dev_priv); void i915_gem_init_swizzling(struct drm_i915_private *dev_priv); void i915_gem_fini(struct drm_i915_private *dev_priv); void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv); @@ -3242,6 +3283,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, void i915_oa_init_reg_state(struct intel_engine_cs *engine, struct i915_gem_context *ctx, uint32_t *reg_state); +int i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, @@ -3319,6 +3362,19 @@ u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, unsigned int tiling, unsigned int stride); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_get_pid_cmdline(struct task_struct *task, char *buffer); +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj); +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj); +int i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, bool is_map_gtt, + bool is_mutex_locked); +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev); +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid); +#endif + /* i915_debugfs.c */ #ifdef CONFIG_DEBUG_FS int i915_debugfs_register(struct drm_i915_private *dev_priv); @@ -3358,6 +3414,13 @@ extern int i915_restore_state(struct drm_i915_private *dev_priv); void i915_setup_sysfs(struct drm_i915_private *dev_priv); void i915_teardown_sysfs(struct drm_i915_private *dev_priv); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file); +#endif + /* intel_lpe_audio.c */ int intel_lpe_audio_init(struct drm_i915_private *dev_priv); void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv); @@ -3578,7 +3641,11 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \ i915_reg_t reg) \ { \ - return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + if (!intel_vgpu_active(dev_priv) || !i915_modparams.enable_pvmmio || \ + likely(!in_mmio_read_trap_list((reg).reg))) \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \ + dev_priv->shared_page->reg_addr = i915_mmio_reg_offset(reg); \ + return read##s(dev_priv->regs + i915_mmio_reg_offset(vgtif_reg(pv_mmio))); \ } #define __raw_write(x, s) \ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index fcc73a6ab503..1d0eaee0780b 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -47,8 +47,852 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include +#include +#include +#include "../drm_internal.h" +#endif + static void i915_gem_flush_free_objects(struct drm_i915_private 
*i915); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +struct per_file_obj_mem_info { + int num_obj; + int num_obj_shared; + int num_obj_private; + int num_obj_gtt_bound; + int num_obj_purged; + int num_obj_purgeable; + int num_obj_allocated; + int num_obj_fault_mappable; + int num_obj_stolen; + size_t gtt_space_allocated_shared; + size_t gtt_space_allocated_priv; + size_t phys_space_allocated_shared; + size_t phys_space_allocated_priv; + size_t phys_space_purgeable; + size_t phys_space_shared_proportion; + size_t fault_mappable_size; + size_t stolen_space_allocated; + char *process_name; +}; + +struct name_entry { + struct list_head head; + struct drm_hash_item hash_item; +}; + +struct pid_stat_entry { + struct list_head head; + struct list_head namefree; + struct drm_open_hash namelist; + struct per_file_obj_mem_info stats; + struct pid *tgid; + int pid_num; +}; + +struct drm_i915_obj_virt_addr { + struct list_head head; + unsigned long user_virt_addr; +}; + +struct drm_i915_obj_pid_info { + struct list_head head; + pid_t tgid; + int open_handle_count; + struct list_head virt_addr_head; +}; + +struct get_obj_stats_buf { + struct pid_stat_entry *entry; + struct drm_i915_error_state_buf *m; +}; + +#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) +#define err_puts(e, s) i915_error_puts(e, s) + +static const char *get_tiling_flag(struct drm_i915_gem_object *obj) +{ + switch (i915_gem_object_get_tiling(obj)) { + default: + case I915_TILING_NONE: return " "; + case I915_TILING_X: return "X"; + case I915_TILING_Y: return "Y"; + } +} + +/* + * If this mmput() call is the last one, it will tear down the mmaps of the + * process and call drm_gem_vm_close(), which leads to a deadlock on the + * i915 mutex. Instead, schedule the mmput() asynchronously here, to avoid + * recursively trying to acquire i915_mutex. 
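+ * + * (Sketch of the chain this avoids, assuming the caller already holds + * struct_mutex: mmput() -> exit_mmap() -> vma->vm_ops->close() -> + * drm_gem_vm_close(), which re-enters GEM and can need the same mutex.) 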
+ */ +static void async_mmput_func(void *data, async_cookie_t cookie) +{ + struct mm_struct *mm = data; + mmput(mm); +} + +static void async_mmput(struct mm_struct *mm) +{ + async_schedule(async_mmput_func, mm); +} + +int i915_get_pid_cmdline(struct task_struct *task, char *buffer) +{ + int res = 0; + unsigned int len; + struct mm_struct *mm = get_task_mm(task); + + if (!mm) + goto out; + if (!mm->arg_end) + goto out_mm; + + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + res = access_process_vm(task, mm->arg_start, buffer, len, 0); + if (res < 0) { + async_mmput(mm); + return res; + } + + if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) + buffer[res-1] = '\0'; +out_mm: + async_mmput(mm); +out: + return 0; +} + +static int i915_obj_get_shmem_pages_alloced(struct drm_i915_gem_object *obj) +{ + if (obj->base.filp) { + struct inode *inode = file_inode(obj->base.filp); + + if (!inode) + return 0; + return inode->i_mapping->nrpages; + } + return 0; +} + +int i915_gem_obj_insert_pid(struct drm_i915_gem_object *obj) +{ + int found = 0; + struct drm_i915_obj_pid_info *entry; + pid_t current_tgid = task_tgid_nr(current); + + mutex_lock(&obj->base.dev->struct_mutex); + + list_for_each_entry(entry, &obj->pid_info, head) { + if (entry->tgid == current_tgid) { + entry->open_handle_count++; + found = 1; + break; + } + } + if (found == 0) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + mutex_unlock(&obj->base.dev->struct_mutex); + return -ENOMEM; + } + entry->tgid = current_tgid; + entry->open_handle_count = 1; + INIT_LIST_HEAD(&entry->virt_addr_head); + list_add_tail(&entry->head, &obj->pid_info); + } + + mutex_unlock(&obj->base.dev->struct_mutex); + return 0; +} + +void i915_gem_obj_remove_all_pids(struct drm_i915_gem_object *obj) +{ + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + list_for_each_entry_safe(virt_entry, + virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } +} + + int +i915_obj_insert_virt_addr(struct drm_i915_gem_object *obj, + unsigned long addr, + bool is_map_gtt, + bool is_mutex_locked) +{ + struct drm_i915_obj_pid_info *pid_entry; + pid_t current_tgid = task_tgid_nr(current); + int ret = 0, found = 0; + + if (is_map_gtt) + addr |= 1; + + if (!is_mutex_locked) { + ret = i915_mutex_lock_interruptible(obj->base.dev); + if (ret) + return ret; + } + + list_for_each_entry(pid_entry, &obj->pid_info, head) { + if (pid_entry->tgid == current_tgid) { + struct drm_i915_obj_virt_addr *virt_entry, *new_entry; + + list_for_each_entry(virt_entry, + &pid_entry->virt_addr_head, + head) { + if (virt_entry->user_virt_addr == addr) { + found = 1; + break; + } + } + if (found) + break; + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + goto out; + } + new_entry->user_virt_addr = addr; + list_add_tail(&new_entry->head, + &pid_entry->virt_addr_head); + break; + } + } + +out: + if (!is_mutex_locked) + mutex_unlock(&obj->base.dev->struct_mutex); + + return ret; +} + +static int i915_obj_virt_addr_is_invalid(struct drm_gem_object *obj, + struct pid *tgid, unsigned long addr) +{ + struct task_struct *task; + struct mm_struct *mm; + struct vm_area_struct *vma; + int locked, ret = 0; + + task 
= get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) { + DRM_DEBUG("null task for tgid=%d\n", pid_nr(tgid)); + return -EINVAL; + } + + mm = get_task_mm(task); + if (mm == NULL) { + DRM_DEBUG("null mm for tgid=%d\n", pid_nr(tgid)); + ret = -EINVAL; + goto out_task; + } + + locked = down_read_trylock(&mm->mmap_sem); + if (!locked) + goto out_mm; + + vma = find_vma(mm, addr); + if (vma) { + if (addr & 1) { /* mmap_gtt case */ + if (vma->vm_pgoff*PAGE_SIZE == (unsigned long) + drm_vma_node_offset_addr(&obj->vma_node)) + ret = 0; + else + ret = -EINVAL; + } else { /* mmap case */ + if (vma->vm_file == obj->filp) + ret = 0; + else + ret = -EINVAL; + } + } else + ret = -EINVAL; + + up_read(&mm->mmap_sem); + +out_mm: + async_mmput(mm); +out_task: + put_task_struct(task); + return ret; +} + +static void i915_obj_pidarray_validate(struct drm_gem_object *gem_obj) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + struct drm_device *dev = gem_obj->dev; + struct drm_i915_obj_virt_addr *virt_entry, *virt_next; + struct drm_i915_obj_pid_info *pid_entry, *pid_next; + struct drm_file *file; + struct drm_i915_file_private *file_priv; + struct pid *tgid; + int pid_num, present; + + /* + * Run a sanity check on pid_array. All entries in pid_array should + * be subset of the the drm filelist pid entries. + */ + list_for_each_entry_safe(pid_entry, pid_next, &obj->pid_info, head) { + if (pid_next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + break; + } + + present = 0; + list_for_each_entry(file, &dev->filelist, lhead) { + file_priv = file->driver_priv; + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + if (pid_num == pid_entry->tgid) { + present = 1; + break; + } + } + if (present == 0) { + DRM_DEBUG("stale_tgid=%d\n", pid_entry->tgid); + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + list_del(&virt_entry->head); + kfree(virt_entry); + } + list_del(&pid_entry->head); + kfree(pid_entry); + } else { + /* Validate the virtual address list */ + struct task_struct *task = + get_pid_task(tgid, PIDTYPE_PID); + if (task == NULL) + continue; + + list_for_each_entry_safe(virt_entry, virt_next, + &pid_entry->virt_addr_head, + head) { + if (i915_obj_virt_addr_is_invalid(gem_obj, tgid, + virt_entry->user_virt_addr)) { + DRM_DEBUG("stale_addr=%ld\n", + virt_entry->user_virt_addr); + list_del(&virt_entry->head); + kfree(virt_entry); + } + } + put_task_struct(task); + } + } +} + +static int i915_obj_find_insert_in_hash(struct drm_i915_gem_object *obj, + struct pid_stat_entry *pid_entry, + bool *found) +{ + struct drm_hash_item *hash_item; + int ret; + + ret = drm_ht_find_item(&pid_entry->namelist, + (unsigned long)&obj->base, &hash_item); + /* Not found, insert in hash */ + if (ret) { + struct name_entry *entry = + kzalloc(sizeof(*entry), GFP_NOWAIT); + if (entry == NULL) { + DRM_ERROR("alloc failed\n"); + return -ENOMEM; + } + entry->hash_item.key = (unsigned long)&obj->base; + drm_ht_insert_item(&pid_entry->namelist, + &entry->hash_item); + list_add_tail(&entry->head, &pid_entry->namefree); + *found = false; + } else + *found = true; + + return 0; +} + +static int i915_obj_shared_count(struct drm_i915_gem_object *obj, + struct pid_stat_entry *pid_entry, + bool *discard) +{ + struct drm_i915_obj_pid_info 
*pid_info_entry; + int ret, obj_shared_count = 0; + + /* + * The object can be shared among different processes via either the + * flink or the dma-buf mechanism, leading to a shared count greater + * than 1. For objects that are not shared, return a shared count of 1. + * Shared dma-buf objects may be external to i915; detect this + * condition through the 'import_attach' field. + */ + if (!obj->base.name && !obj->base.dma_buf) + return 1; + else if (obj->base.import_attach) { + /* not our GEM obj */ + *discard = true; + return 0; + } + + ret = i915_obj_find_insert_in_hash(obj, pid_entry, discard); + if (ret) + return ret; + + list_for_each_entry(pid_info_entry, &obj->pid_info, head) + obj_shared_count++; + + if (WARN_ON(obj_shared_count == 0)) + return -EINVAL; + + return obj_shared_count; +} + + static int +i915_describe_obj(struct get_obj_stats_buf *obj_stat_buf, + struct drm_i915_gem_object *obj) +{ + struct pid_stat_entry *pid_entry = obj_stat_buf->entry; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + + bool discard = false; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (!discard && !obj->stolen && + (obj->mm.madv != __I915_MADV_PURGED) && + (i915_obj_get_shmem_pages_alloced(obj) != 0)) { + if (obj_shared_count > 1) + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + else + stats->phys_space_allocated_priv += + obj->base.size; + } + + return 0; +} + + static int +i915_drm_gem_obj_info(int id, void *ptr, void *data) +{ + struct drm_i915_gem_object *obj = ptr; + struct get_obj_stats_buf *obj_stat_buf = data; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? "Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + return i915_describe_obj(obj_stat_buf, obj); +} + +bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o) +{ + struct i915_vma *vma; + + list_for_each_entry(vma, &o->vma_list, obj_link) + if (drm_mm_node_allocated(&vma->node)) + return true; + + return false; +} + + static int +i915_drm_gem_object_per_file_summary(int id, void *ptr, void *data) +{ + struct pid_stat_entry *pid_entry = data; + struct drm_i915_gem_object *obj = ptr; + struct per_file_obj_mem_info *stats = &pid_entry->stats; + int obj_shared_count = 0; + bool discard = false; + + if (obj->pid_info.next == NULL) { + DRM_ERROR( + "Invalid pid info. obj:%p, size:%zdK, tiling:%s, userptr=%s, stolen:%s, name:%d, handle_count=%d\n", + &obj->base, obj->base.size/1024, + get_tiling_flag(obj), + (obj->userptr.mm != 0) ? "Y" : "N", + obj->stolen ? 
"Y" : "N", obj->base.name, + obj->base.handle_count); + return 0; + } + + i915_obj_pidarray_validate(&obj->base); + + stats->num_obj++; + + obj_shared_count = i915_obj_shared_count(obj, pid_entry, &discard); + if (obj_shared_count < 0) + return obj_shared_count; + + if (discard) + return 0; + + if (obj_shared_count > 1) + stats->num_obj_shared++; + else + stats->num_obj_private++; + + if (i915_gem_obj_bound_any(obj)) { + stats->num_obj_gtt_bound++; + if (obj_shared_count > 1) + stats->gtt_space_allocated_shared += obj->base.size; + else + stats->gtt_space_allocated_priv += obj->base.size; + } + + if (obj->stolen) { + stats->num_obj_stolen++; + stats->stolen_space_allocated += obj->base.size; + } else if (obj->mm.madv == __I915_MADV_PURGED) { + stats->num_obj_purged++; + } else if (obj->mm.madv == I915_MADV_DONTNEED) { + stats->num_obj_purgeable++; + stats->num_obj_allocated++; + if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->phys_space_purgeable += obj->base.size; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } else + stats->phys_space_allocated_priv += + obj->base.size; + } else + WARN_ON(1); + } else if (i915_obj_get_shmem_pages_alloced(obj) != 0) { + stats->num_obj_allocated++; + if (obj_shared_count > 1) { + stats->phys_space_allocated_shared += + obj->base.size; + stats->phys_space_shared_proportion += + obj->base.size/obj_shared_count; + } + else + stats->phys_space_allocated_priv += obj->base.size; + } + return 0; +} + + static int +__i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + struct drm_file *file; + struct drm_i915_private *dev_priv = dev->dev_private; + + struct name_entry *entry, *next; + struct pid_stat_entry *pid_entry, *temp_entry; + struct pid_stat_entry *new_pid_entry, *new_temp_entry; + struct list_head per_pid_stats, sorted_pid_stats; + int ret = 0; + size_t total_shared_prop_space = 0, total_priv_space = 0; + + INIT_LIST_HEAD(&per_pid_stats); + INIT_LIST_HEAD(&sorted_pid_stats); + + err_puts(m, + "\n\n pid Total Shared Priv Purgeable Alloced SharedPHYsize SharedPHYprop PrivPHYsize PurgeablePHYsize process\n"); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct pid *tgid; + struct drm_i915_file_private *file_priv = file->driver_priv; + int pid_num, found = 0; + + tgid = file_priv->tgid; + pid_num = pid_nr(tgid); + + list_for_each_entry(pid_entry, &per_pid_stats, head) { + if (pid_entry->pid_num == pid_num) { + found = 1; + break; + } + } + + if (!found) { + struct pid_stat_entry *new_entry = + kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (new_entry == NULL) { + DRM_ERROR("alloc failed\n"); + ret = -ENOMEM; + break; + } + new_entry->tgid = tgid; + new_entry->pid_num = pid_num; + ret = drm_ht_create(&new_entry->namelist, + DRM_MAGIC_HASH_ORDER); + if (ret) { + kfree(new_entry); + break; + } + + list_add_tail(&new_entry->head, &per_pid_stats); + INIT_LIST_HEAD(&new_entry->namefree); + new_entry->stats.process_name = file_priv->process_name; + pid_entry = new_entry; + } + + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_object_per_file_summary, pid_entry); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + list_for_each_entry_safe(pid_entry, temp_entry, &per_pid_stats, head) { + if (list_empty(&sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, &sorted_pid_stats); + continue; + } + + 
list_for_each_entry_safe(new_pid_entry, new_temp_entry, + &sorted_pid_stats, head) { + int prev_space = + pid_entry->stats.phys_space_shared_proportion + + pid_entry->stats.phys_space_allocated_priv; + int new_space = + new_pid_entry-> + stats.phys_space_shared_proportion + + new_pid_entry->stats.phys_space_allocated_priv; + if (prev_space > new_space) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &new_pid_entry->head); + break; + } + if (list_is_last(&new_pid_entry->head, + &sorted_pid_stats)) { + list_del(&pid_entry->head); + list_add_tail(&pid_entry->head, + &sorted_pid_stats); + } + } + } + + list_for_each_entry_safe(pid_entry, temp_entry, + &sorted_pid_stats, head) { + struct task_struct *task = get_pid_task(pid_entry->tgid, + PIDTYPE_PID); + err_printf(m, + "%5d %6d %6d %6d %9d %8d %14zdK %14zdK %14zdK %14zdK %s", + pid_entry->pid_num, + pid_entry->stats.num_obj, + pid_entry->stats.num_obj_shared, + pid_entry->stats.num_obj_private, + pid_entry->stats.num_obj_purgeable, + pid_entry->stats.num_obj_allocated, + pid_entry->stats.phys_space_allocated_shared/1024, + pid_entry->stats.phys_space_shared_proportion/1024, + pid_entry->stats.phys_space_allocated_priv/1024, + pid_entry->stats.phys_space_purgeable/1024, + pid_entry->stats.process_name); + + if (task == NULL) + err_puts(m, "*\n"); + else + err_puts(m, "\n"); + + total_shared_prop_space += + pid_entry->stats.phys_space_shared_proportion/1024; + total_priv_space += + pid_entry->stats.phys_space_allocated_priv/1024; + list_del(&pid_entry->head); + + list_for_each_entry_safe(entry, next, + &pid_entry->namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry->namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry->namelist); + kfree(pid_entry); + if (task) + put_task_struct(task); + } + + err_puts(m, + "\t\t\t\t\t\t\t\t--------------\t-------------\t--------\n"); + err_printf(m, + "\t\t\t\t\t\t\t\t%13zdK\t%12zdK\tTotal\n", + total_shared_prop_space, total_priv_space); + + err_printf(m, "\nTotal used GFX Shmem Physical space %8zdK\n", + dev_priv->mm.phys_mem_total/1024); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + + return 0; +} + +#define NUM_SPACES 100 +#define INITIAL_SPACES_STR(x) #x +#define SPACES_STR(x) INITIAL_SPACES_STR(x) + + static int +__i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + struct drm_file *file; + struct drm_i915_file_private *file_priv_reqd = NULL; + int bytes_copy, ret = 0; + struct pid_stat_entry pid_entry; + struct name_entry *entry, *next; + + pid_entry.stats.phys_space_shared_proportion = 0; + pid_entry.stats.phys_space_allocated_priv = 0; + pid_entry.tgid = tgid; + pid_entry.pid_num = pid_nr(tgid); + ret = drm_ht_create(&pid_entry.namelist, DRM_MAGIC_HASH_ORDER); + if (ret) + return ret; + + INIT_LIST_HEAD(&pid_entry.namefree); + + /* + * Fill up initial few bytes with spaces, to insert summary data later + * on + */ + err_printf(m, "%"SPACES_STR(NUM_SPACES)"s\n", " "); + + list_for_each_entry(file, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv = file->driver_priv; + struct get_obj_stats_buf obj_stat_buf; + + obj_stat_buf.entry = &pid_entry; + obj_stat_buf.m = m; + + if (file_priv->tgid != tgid) + continue; + + file_priv_reqd = file_priv; + spin_lock(&file->table_lock); + ret = idr_for_each(&file->object_idr, + &i915_drm_gem_obj_info, &obj_stat_buf); + spin_unlock(&file->table_lock); + if (ret) + break; + } + + if (file_priv_reqd) 
{ + int space_remaining; + + /* Reset the bytes counter to buffer beginning */ + bytes_copy = m->bytes; + m->bytes = 0; + + err_printf(m, "\n PID GfxMem Process\n"); + err_printf(m, "%5d %8zdK ", pid_nr(file_priv_reqd->tgid), + (pid_entry.stats.phys_space_shared_proportion + + pid_entry.stats.phys_space_allocated_priv)/1024); + + space_remaining = NUM_SPACES - m->bytes - 1; + if (strlen(file_priv_reqd->process_name) > space_remaining) + file_priv_reqd->process_name[space_remaining] = '\0'; + + err_printf(m, "%s\n", file_priv_reqd->process_name); + + /* Reinstate the previous saved value of bytes counter */ + m->bytes = bytes_copy; + } else + WARN(1, "drm file corresponding to tgid:%d not found\n", + pid_nr(tgid)); + + list_for_each_entry_safe(entry, next, + &pid_entry.namefree, head) { + list_del(&entry->head); + drm_ht_remove_item(&pid_entry.namelist, + &entry->hash_item); + kfree(entry); + } + drm_ht_remove(&pid_entry.namelist); + + if (ret) + return ret; + if (m->bytes == 0 && m->err) + return m->err; + return 0; +} + +int i915_get_drm_clients_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev) +{ + int ret = 0; + + /* + * Protect the access to global drm resources such as filelist. Protect + * against their removal under our noses, while in use. + */ + mutex_lock(&drm_global_mutex); + ret = i915_mutex_lock_interruptible(dev); + if (ret) { + mutex_unlock(&drm_global_mutex); + return ret; + } + + ret = __i915_get_drm_clients_info(m, dev); + + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&drm_global_mutex); + + return ret; +} + +int i915_gem_get_obj_info(struct drm_i915_error_state_buf *m, + struct drm_device *dev, struct pid *tgid) +{ + int ret = 0; + + ret = i915_mutex_lock_interruptible(dev); + if (ret) + return ret; + + ret = __i915_gem_get_obj_info(m, dev, tgid); + + mutex_unlock(&dev->struct_mutex); + + return ret; +} +#endif + static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) { if (obj->cache_dirty) @@ -1856,6 +2700,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_object *obj; unsigned long addr; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + int ret; +#endif if (args->flags & ~(I915_MMAP_WC)) return -EINVAL; @@ -1901,6 +2748,12 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (IS_ERR((void *)addr)) return addr; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + ret = i915_obj_insert_virt_addr(obj, addr, false, false); + if (ret) + return ret; +#endif + args->addr_ptr = (uint64_t) addr; return 0; @@ -2103,6 +2956,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT, min_t(u64, vma->size, area->vm_end - area->vm_start), &ggtt->iomap); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + ret = i915_obj_insert_virt_addr(obj, (unsigned long)area->vm_start, true, true); +#endif if (ret) goto err_fence; @@ -2360,6 +3216,19 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Mark the object as not having backing pages, as physical space + * returned back to kernel + */ + if (obj->has_backing_pages == 1) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } +#endif } /* Try to discard unwanted pages */ @@ -2655,6 +3524,14 @@ static int 
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_do_bit_17_swizzle(obj, st); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (obj->has_backing_pages == 0) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total += obj->base.size; + obj->has_backing_pages = 1; + } +#endif __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; @@ -4699,6 +5576,15 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->mm.get_page.lock); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Mark the object as not having backing pages, as no allocation + * for it yet + */ + obj->has_backing_pages = 0; + INIT_LIST_HEAD(&obj->pid_info); +#endif + i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size); } @@ -4832,6 +5718,17 @@ static bool discard_backing_storage(struct drm_i915_gem_object *obj) return atomic_long_read(&obj->base.filp->f_count) == 1; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int +i915_gem_open_object(struct drm_gem_object *gem_obj, + struct drm_file *file_priv) +{ + struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); + + return i915_gem_obj_insert_pid(obj); +} +#endif + static void __i915_gem_free_objects(struct drm_i915_private *i915, struct llist_node *freed) { @@ -4885,6 +5782,16 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, if (obj->base.import_attach) drm_prime_gem_destroy(&obj->base, NULL); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (!obj->stolen && (obj->has_backing_pages == 1)) { + struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + + dev_priv->mm.phys_mem_total -= obj->base.size; + obj->has_backing_pages = 0; + } + i915_gem_obj_remove_all_pids(obj); +#endif + reservation_object_fini(&obj->__builtin_resv); drm_gem_object_release(&obj->base); i915_gem_info_remove_obj(i915, obj->base.size); @@ -5027,7 +5934,8 @@ void i915_gem_sanitize(struct drm_i915_private *i915) * of the reset, so this could be applied to even earlier gen. */ err = -ENODEV; - if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915)) + if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915) && + !intel_vgpu_active(i915)) err = WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); if (!err) intel_engines_sanitize(i915); @@ -5254,6 +6162,28 @@ static int __i915_gem_restart_engines(void *data) return 0; } +int i915_gem_init_hw_late(struct drm_i915_private *dev_priv) +{ + int ret; + + /* + * Place for things that can be delayed until the first context + * is open. For example, fw loading in android. + */ + + /* fetch firmware */ + intel_uc_init_misc(dev_priv); + + /* Load fw. We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(dev_priv); + if (ret) { + DRM_ERROR("Late init: enabling uc failed (%d)\n", ret); + return ret; + } + + return 0; +} + int i915_gem_init_hw(struct drm_i915_private *dev_priv) { int ret; @@ -5312,11 +6242,17 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv) goto out; } - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(dev_priv); - if (ret) { - DRM_ERROR("Enabling uc failed (%d)\n", ret); - goto out; + /* + * Don't call i915_gem_init_hw_late() the very first time (during + * driver load); it will get called during first open instead. + * It should only be called on subsequent (re-initialization) passes. 
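+ * (The first call is made from i915_gem_context_first_open(), once + * userspace has opened the device and the firmware files are reachable.) 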
+ */ + if (dev_priv->contexts_ready) { + ret = i915_gem_init_hw_late(dev_priv); + if (ret) + goto out; + } else { + DRM_DEBUG_DRIVER("Deferring late initialization\n"); } intel_mocs_init_l3cc_table(dev_priv); @@ -5461,7 +6397,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv) int ret; /* We need to fallback to 4K pages if host doesn't support huge gtt. */ - if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) + if ((intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv)) + || PVMMIO_LEVEL(dev_priv, PVMMIO_PPGTT_UPDATE)) mkwrite_device_info(dev_priv)->page_sizes = I915_GTT_PAGE_SIZE_4K; @@ -5479,9 +6416,13 @@ int i915_gem_init(struct drm_i915_private *dev_priv) if (ret) return ret; - ret = intel_uc_init_misc(dev_priv); - if (ret) - return ret; + /* + * ANDROID: fetch the firmware during drm_open instead, because the + * filesystem is not up yet during driver init: + * ret = intel_uc_init_misc(dev_priv); + * if (ret) + * return ret; + */ ret = intel_wopcm_init(&dev_priv->wopcm); if (ret) @@ -5837,6 +6778,11 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_request *request; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + i915_gem_remove_sysfs_file_entry(dev, file); + put_pid(file_priv->tgid); +#endif + /* Clean up our request list when the client is going away, so that * later retire_requests won't dereference our soon-to-be-gone * file_priv. @@ -5862,15 +6808,57 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file) file_priv->dev_priv = i915; file_priv->file = file; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + rcu_read_lock(); + file_priv->tgid = get_pid(find_vpid(task_tgid_nr(current))); + rcu_read_unlock(); + + file_priv->process_name = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!file_priv->process_name) { + ret = -ENOMEM; + goto out_free_file; + } + + ret = i915_get_pid_cmdline(current, file_priv->process_name); + if (ret) + goto out_free_name; +#endif + spin_lock_init(&file_priv->mm.lock); INIT_LIST_HEAD(&file_priv->mm.request_list); file_priv->bsd_engine = -1; file_priv->hang_timestamp = jiffies; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + intel_runtime_pm_get(i915); +#endif + ret = i915_gem_context_open(i915, file); +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + if (ret) { + intel_runtime_pm_put(i915); + goto out_free_name; + } + intel_runtime_pm_put(i915); + + ret = i915_gem_create_sysfs_file_entry(&i915->drm, file); + if (ret) { + i915_gem_context_close(file); + goto out_free_name; + } + + return 0; + +out_free_name: + kfree(file_priv->process_name); +out_free_file: + put_pid(file_priv->tgid); + kfree(file_priv); +#else if (ret) kfree(file_priv); +#endif return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index b10770cfccd2..0c8c68a34b7f 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -91,6 +91,7 @@ #include "i915_drv.h" #include "i915_trace.h" #include "intel_workarounds.h" +#include "i915_vgpu.h" #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1 @@ -136,7 +137,12 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) list_del(&ctx->link); - ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + if (intel_vgpu_active(ctx->i915)) + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id & + ~(0x7 << SIZE_CONTEXT_HW_ID_GVT)); + else + ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id); + kfree_rcu(ctx, rcu); } @@ -217,6 
+223,8 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) */ if (USES_GUC_SUBMISSION(dev_priv)) max = MAX_GUC_CONTEXT_HW_ID; + else if (intel_vgpu_active(dev_priv) || intel_gvt_active(dev_priv)) + max = MAX_CONTEXT_HW_ID_GVT; else max = MAX_CONTEXT_HW_ID; } @@ -236,6 +244,12 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out) return ret; } + if (intel_vgpu_active(dev_priv)) { + /* add vgpu_id to context hw_id */ + ret = ret | (I915_READ(vgtif_reg(vgt_id)) + << SIZE_CONTEXT_HW_ID_GVT); + } + *out = ret; return 0; } @@ -548,23 +562,55 @@ static int context_idr_cleanup(int id, void *p, void *data) return 0; } +int i915_gem_context_first_open(struct drm_i915_private *dev_priv) +{ + int ret; + + lockdep_assert_held(&dev_priv->drm.struct_mutex); + + DRM_DEBUG_DRIVER("Late initialization starting\n"); + + intel_runtime_pm_get(dev_priv); + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + + ret = i915_gem_init_hw_late(dev_priv); + if (ret == 0) + dev_priv->contexts_ready = true; + else + DRM_ERROR("Late initialization failed: %d\n", ret); + + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); + + return ret; +} + int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; struct i915_gem_context *ctx; + int ret = 0; idr_init(&file_priv->context_idr); mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, file_priv); - mutex_unlock(&i915->drm.struct_mutex); - if (IS_ERR(ctx)) { - idr_destroy(&file_priv->context_idr); - return PTR_ERR(ctx); + + if (!(i915->contexts_ready)) + ret = i915_gem_context_first_open(i915); + + if (ret == 0) { + ctx = i915_gem_create_context(i915, file_priv); + if (IS_ERR(ctx)) + ret = PTR_ERR(ctx); + else + GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); } - GEM_BUG_ON(i915_gem_context_is_kernel(ctx)); + mutex_unlock(&i915->drm.struct_mutex); + + if (ret) { + idr_destroy(&file_priv->context_idr); + return ret; + } return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index d548ac05ccd7..317e376cc2da 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -63,6 +63,7 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, i915_reg_t fence_reg_lo, fence_reg_hi; int fence_pitch_shift; u64 val; + struct drm_i915_private *dev_priv = fence->i915; if (INTEL_GEN(fence->i915) >= 6) { fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); @@ -92,9 +93,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence, val |= I965_FENCE_REG_VALID; } - if (!pipelined) { - struct drm_i915_private *dev_priv = fence->i915; - + if (intel_vgpu_active(dev_priv)) { + /* Use the 64-bit RW to write fence reg on VGPU mode. + * The GVT-g can trap the written val of VGPU to program the + * fence reg. And the fence write in gvt-g follows the + * sequence of off/read/double-write/read. This assures that + * the fence reg is configured as expected. + * At the same time the 64-bit op can help to reduce the num + * of VGPU trap for the fence reg. + */ + I915_WRITE64_FW(fence_reg_lo, val); + } else if (!pipelined) { /* To w/a incoherency with non-atomic 64-bit register updates, * we split the 64-bit update into two 32-bit writes. 
In order * for a partial fence not to be evaluated between writes, we diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index f00c7fbef79e..280095010286 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -998,6 +998,8 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, struct i915_pml4 *pml4 = &ppgtt->pml4; struct i915_page_directory_pointer *pdp; unsigned int pml4e; + u64 orig_start = start; + u64 orig_length = length; GEM_BUG_ON(!use_4lvl(vm)); @@ -1011,6 +1013,17 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm, free_pdp(vm, pdp); } + + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_CLEAR); + } } static inline struct sgt_dma { @@ -1250,6 +1263,18 @@ static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm, flags)) GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4); + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(&ppgtt->pml4), &pv_ppgtt->pdp); + writeq(vma->node.start, &pv_ppgtt->start); + writeq(vma->node.size, &pv_ppgtt->length); + writel(cache_level, &pv_ppgtt->cache_level); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_INSERT); + } + vma->page_sizes.gtt = I915_GTT_PAGE_SIZE; } } @@ -1498,6 +1523,8 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, u64 from = start; u32 pml4e; int ret; + u64 orig_start = start; + u64 orig_length = length; gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) { if (pml4->pdps[pml4e] == vm->scratch_pdp) { @@ -1514,6 +1541,17 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm, goto unwind_pdp; } + if (PVMMIO_LEVEL(vm->i915, PVMMIO_PPGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct pv_ppgtt_update *pv_ppgtt = + &dev_priv->shared_page->pv_ppgtt; + + writeq(px_dma(pml4), &pv_ppgtt->pdp); + writeq(orig_start, &pv_ppgtt->start); + writeq(orig_length, &pv_ppgtt->length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_PPGTT_L4_ALLOC); + } + return 0; unwind_pdp: @@ -2465,6 +2503,17 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) writeq(pte, addr); } +static void vgpu_ggtt_insert(struct drm_i915_private *dev_priv, + u64 start, int num_entries, enum i915_cache_level level) +{ + struct gvt_shared_page *shared_page = dev_priv->shared_page; + + writeq(start, &shared_page->pv_ggtt.start); + writeq(num_entries, &shared_page->pv_ggtt.length); + writel(level, &shared_page->pv_ggtt.cache_level); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_GGTT_INSERT); +} + static void gen8_ggtt_insert_page(struct i915_address_space *vm, dma_addr_t addr, u64 offset, @@ -2477,6 +2526,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm, gen8_set_pte(pte, gen8_pte_encode(addr, level, 0)); + if (PVMMIO_LEVEL(vm->i915, PVMMIO_GGTT_UPDATE)) { + vgpu_ggtt_insert(vm->i915, offset, 1, level); + return; + } + ggtt->invalidate(vm->i915); } @@ -2501,6 +2555,20 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, for_each_sgt_dma(addr, sgt_iter, vma->pages) gen8_set_pte(gtt_entries++, pte_encode | addr); + if (PVMMIO_LEVEL(vm->i915, 
PVMMIO_GGTT_UPDATE)) { + int num_entries = gtt_entries - + ((gen8_pte_t __iomem *)ggtt->gsm + + (vma->node.start >> PAGE_SHIFT)); + /* + * Sometimes number of entries does not match vma node size. + * Pass number of pte entries instead. + */ + vgpu_ggtt_insert(vm->i915, vma->node.start, + num_entries, level); + return; + } + + /* * We want to flush the TLBs only after we're certain all the PTE * updates have finished. @@ -2574,6 +2642,16 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, for (i = 0; i < num_entries; i++) gen8_set_pte(&gtt_base[i], scratch_pte); + + if (PVMMIO_LEVEL(vm->i915, PVMMIO_GGTT_UPDATE)) { + struct drm_i915_private *dev_priv = vm->i915; + struct gvt_shared_page *shared_page = dev_priv->shared_page; + + writeq(start, &shared_page->pv_ggtt.start); + writeq(length, &shared_page->pv_ggtt.length); + I915_WRITE(vgtif_reg(g2v_notify), VGT_G2V_GGTT_CLEAR); + } + } static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) @@ -2949,16 +3027,19 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) if (ret) return ret; - /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", + if (!intel_vgpu_active(dev_priv)) { + /* Clear any non-preallocated blocks */ + drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { + DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt->vm.clear_range(&ggtt->vm, hole_start, + ggtt->vm.clear_range(&ggtt->vm, hole_start, hole_end - hole_start); - } + } + + /* And finally clear the reserved guard page */ + ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); - /* And finally clear the reserved guard page */ - ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); + } if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { ret = i915_gem_init_aliasing_ppgtt(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c new file mode 100644 index 000000000000..f482eceb5c7f --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_gvtbuffer.c @@ -0,0 +1,292 @@ +/* + * Copyright © 2012 - 2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include "i915_drv.h" +#include "i915_trace.h" +#include "intel_drv.h" +#include + +#include "gvt/gvt.h" +#include "gvt/fb_decoder.h" + +static int +i915_gem_gvtbuffer_get_pages(struct drm_i915_gem_object *obj) +{ + BUG(); + return -EINVAL; +} + +static void i915_gem_gvtbuffer_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + /* like stolen memory, this should only be called during free + * after clearing pin count. + */ + sg_free_table(pages); + kfree(pages); +} + +static void +i915_gem_gvtbuffer_release(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); +} + +static const struct drm_i915_gem_object_ops i915_gem_gvtbuffer_ops = { + .get_pages = i915_gem_gvtbuffer_get_pages, + .put_pages = i915_gem_gvtbuffer_put_pages, + .release = i915_gem_gvtbuffer_release, +}; + +#define GEN8_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) >> 12) & 0x7ffffffULL) << 12)) + +#define GEN7_DECODE_PTE(pte) \ + ((dma_addr_t)(((((u64)pte) & 0x7f0) << 28) | (u64)(pte & 0xfffff000))) + +static struct sg_table * +i915_create_sg_pages_for_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct sg_table *st; + struct scatterlist *sg; + int i; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (st == NULL) + return NULL; + + if (sg_alloc_table(st, num_pages, GFP_KERNEL)) { + kfree(st); + return NULL; + } + + if (INTEL_INFO(dev_priv)->gen >= 8) { + gen8_pte_t __iomem *gtt_entries = + (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN8_DECODE_PTE(readq(&gtt_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } else { + gen6_pte_t __iomem *gtt_entries = + (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + + (start >> PAGE_SHIFT); + for_each_sg(st->sgl, sg, num_pages, i) { + sg->offset = 0; + sg->length = PAGE_SIZE; + sg_dma_address(sg) = + GEN7_DECODE_PTE(readq(&gtt_entries[i])); + sg_dma_len(sg) = PAGE_SIZE; + } + } + + return st; +} + +struct drm_i915_gem_object * +i915_gem_object_create_gvtbuffer(struct drm_device *dev, + u32 start, u32 num_pages) +{ + struct drm_i915_gem_object *obj; + obj = i915_gem_object_alloc(to_i915(dev)); + if (obj == NULL) + return NULL; + + drm_gem_private_object_init(dev, &obj->base, num_pages << PAGE_SHIFT); + i915_gem_object_init(obj, &i915_gem_gvtbuffer_ops); + + obj->mm.pages = i915_create_sg_pages_for_gvtbuffer(dev, start, num_pages); + if (obj->mm.pages == NULL) { + i915_gem_object_free(obj); + return NULL; + } + + if (i915_gem_object_pin_pages(obj)) + printk(KERN_ERR "%s:%d> Pin pages failed!\n", __func__, __LINE__); + obj->cache_level = I915_CACHE_L3_LLC; + + DRM_DEBUG_DRIVER("GVT_GEM: backing store base = 0x%x pages = 0x%x\n", + start, num_pages); + return obj; } + +static int gvt_decode_information(struct drm_device *dev, + struct drm_i915_gem_gvtbuffer *args) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_vgpu_fb_format fb; + struct intel_vgpu_primary_plane_format *p; + struct intel_vgpu_cursor_plane_format *c; + struct intel_vgpu_pipe_format *pipe; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + u32 id = args->id; + + if (intel_vgpu_decode_fb_format(dev_priv->gvt, id, &fb)) + return -EINVAL; +#else + return -EINVAL; +#endif + + pipe = ((args->pipe_id >= I915_MAX_PIPES) ? 
NULL : &fb.pipes[args->pipe_id]); + + if (!pipe || !pipe->primary.enabled) { + DRM_DEBUG_DRIVER("GVT_GEM: Invalid pipe_id: %d\n", + args->pipe_id); + return -EINVAL; + } + + if ((args->plane_id) == I915_GVT_PLANE_PRIMARY) { + p = &pipe->primary; + args->enabled = p->enabled; + args->x_offset = p->x_offset; + args->y_offset = p->y_offset; + args->start = p->base; + args->width = p->width; + args->height = p->height; + args->stride = p->stride; + args->bpp = p->bpp; + args->hw_format = p->hw_format; + args->drm_format = p->drm_format; + args->tiled = p->tiled; + } else if ((args->plane_id) == I915_GVT_PLANE_CURSOR) { + c = &pipe->cursor; + args->enabled = c->enabled; + args->x_offset = c->x_hot; + args->y_offset = c->y_hot; + args->x_pos = c->x_pos; + args->y_pos = c->y_pos; + args->start = c->base; + args->width = c->width; + args->height = c->height; + args->stride = c->width * (c->bpp / 8); + args->bpp = c->bpp; + args->tiled = 0; + } else { + DRM_DEBUG_DRIVER("GVT_GEM: Invalid plane_id: %d\n", + args->plane_id); + return -EINVAL; + } + + args->size = ALIGN(args->stride * args->height, PAGE_SIZE) >> PAGE_SHIFT; + + if (args->start & (PAGE_SIZE - 1)) { + DRM_DEBUG_DRIVER("GVT_GEM: Unaligned fb start address: " + "0x%x\n", args->start); + return -EINVAL; + } + + if (((args->start >> PAGE_SHIFT) + args->size) > + ggtt_total_entries(&dev_priv->ggtt)) { + DRM_DEBUG_DRIVER("GVT: Invalid GTT offset or size\n"); + return -EINVAL; + } + return 0; +} + +/** + * Creates a GEM object wrapping the guest framebuffer pages that GVT + * decoded from the vGPU's plane registers. + */ +int +i915_gem_gvtbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_gem_gvtbuffer *args = data; + struct drm_i915_gem_object *obj; + u32 handle; + int ret = 0; + + if (INTEL_INFO(dev_priv)->gen < 7) + return -EPERM; + + if (args->flags & I915_GVTBUFFER_CHECK_CAPABILITY) + return 0; +#if 0 + if (!gvt_check_host()) + return -EPERM; +#endif + /* if args->start != 0, do not decode, but use it as the ggtt offset */ + if (args->start == 0) { + ret = gvt_decode_information(dev, args); + if (ret) + return ret; + } + + if (ret) + return ret; + + if (args->flags & I915_GVTBUFFER_QUERY_ONLY) + return 0; + + obj = i915_gem_object_create_gvtbuffer(dev, args->start, args->size); + if (!obj) { + DRM_DEBUG_DRIVER("GVT_GEM: Failed to create gem object" + " for VM FB!\n"); + return -EINVAL; + } + + if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) || + IS_KABYLAKE(dev_priv)) { + unsigned int tiling_mode = I915_TILING_NONE; + unsigned int stride = 0; + + switch (args->tiled << 10) { + case PLANE_CTL_TILED_LINEAR: + /* Default valid value */ + break; + case PLANE_CTL_TILED_X: + tiling_mode = I915_TILING_X; + stride = args->stride; + break; + case PLANE_CTL_TILED_Y: + tiling_mode = I915_TILING_Y; + stride = args->stride; + break; + default: + DRM_ERROR("gvt: tiling mode %d not supported\n", args->tiled); + } + obj->tiling_and_stride = tiling_mode | stride; + } else { + obj->tiling_and_stride = (args->tiled ? I915_TILING_X : I915_TILING_NONE) | + (args->tiled ? 
args->stride : 0); + } + + ret = drm_gem_handle_create(file, &obj->base, &handle); + + /* drop reference from allocate - handle holds it now */ + i915_gem_object_put(obj); + + if (ret) + return ret; + + args->handle = handle; + return 0; +} diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index 83e5e01fa9ea..338709b6640e 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h @@ -147,6 +147,10 @@ struct drm_i915_gem_object { #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) unsigned int cache_dirty:1; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + unsigned int has_backing_pages:1; +#endif + /** * @read_domains: Read memory domains. * @@ -278,6 +282,10 @@ struct drm_i915_gem_object { void *gvt_info; }; +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + struct list_head pid_info; +#endif + /** for phys allocated objects */ struct drm_dma_handle *phys_handle; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a262a64f5625..a803449498f8 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -91,6 +91,13 @@ static bool __i915_error_ok(struct drm_i915_error_state_buf *e) return true; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +bool i915_error_ok(struct drm_i915_error_state_buf *e) +{ + return __i915_error_ok(e); +} +#endif + static bool __i915_error_seek(struct drm_i915_error_state_buf *e, unsigned len) { @@ -162,7 +169,7 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e, __i915_error_advance(e, len); } -static void i915_error_puts(struct drm_i915_error_state_buf *e, +void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str) { unsigned len; @@ -871,6 +878,22 @@ int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf, return 0; } +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *ebuf, + size_t count) +{ + memset(ebuf, 0, sizeof(*ebuf)); + + ebuf->buf = kmalloc(count, GFP_KERNEL); + + if (ebuf->buf == NULL) + return -ENOMEM; + + ebuf->size = count; + return 0; +} +#endif + static void i915_error_object_free(struct drm_i915_error_object *obj) { int page; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 8710fb18ed74..821bed7bd375 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -307,6 +307,13 @@ struct drm_i915_error_state_buf { }; #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +void i915_error_puts(struct drm_i915_error_state_buf *e, + const char *str); +bool i915_error_ok(struct drm_i915_error_state_buf *e); +int i915_obj_state_buf_init(struct drm_i915_error_state_buf *eb, + size_t count); +#endif __printf(2, 3) void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 29877969310d..5e0e9f189418 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -37,6 +37,10 @@ #include "i915_trace.h" #include "intel_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: interrupt handling * @@ -221,6 +225,17 @@ static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv, static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); + +#if 
IS_ENABLED(CONFIG_DRM_I915_GVT) +static inline void gvt_notify_vblank(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + if (dev_priv->gvt) + queue_work(system_highpri_wq, + &dev_priv->gvt->pipe_info[pipe].vblank_work); +} +#endif + /* For display hotplug interrupt */ static inline void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, @@ -611,11 +626,12 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, * @enabled_irq_mask: mask of interrupt bits to enable */ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, - enum pipe pipe, + unsigned int crtc_index, uint32_t interrupt_mask, uint32_t enabled_irq_mask) { uint32_t new_val; + enum pipe pipe; lockdep_assert_held(&dev_priv->irq_lock); @@ -624,6 +640,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, if (WARN_ON(!intel_irqs_enabled(dev_priv))) return; + if(get_pipe_from_crtc_index(&dev_priv->drm, crtc_index, &pipe)) + return; + new_val = dev_priv->de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); @@ -869,9 +888,14 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; } -static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) +static u32 g4x_get_vblank_counter(struct drm_device *dev, + unsigned int crtc_index) { struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe; + + if(get_pipe_from_crtc_index(dev, crtc_index, &pipe)) + return 0; return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); } @@ -987,18 +1011,21 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, - bool in_vblank_irq, int *vpos, int *hpos, - ktime_t *stime, ktime_t *etime, - const struct drm_display_mode *mode) +static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc_index, + bool in_vblank_irq, int *vpos, int *hpos, + ktime_t *stime, ktime_t *etime, + const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *intel_crtc; + enum pipe pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; + intel_crtc = get_intel_crtc_from_index(dev, crtc_index); + pipe = intel_crtc->pipe; + if (WARN_ON(!mode->crtc_clock)) { DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " "pipe %c\n", pipe_name(pipe)); @@ -1503,6 +1530,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir) tasklet |= USES_GUC_SUBMISSION(engine->i915); } + if ((iir & (GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) && + intel_vgpu_active(engine->i915)) { + queue_work(system_highpri_wq, &engine->reset_work); + return; + } + if (tasklet) tasklet_hi_schedule(&engine->execlists.tasklet); } @@ -2727,6 +2760,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) irqreturn_t ret = IRQ_NONE; u32 iir; enum pipe pipe; + struct intel_crtc *crtc; if (master_ctl & GEN8_DE_MISC_IRQ) { iir = I915_READ(GEN8_DE_MISC_IIR); @@ -2837,8 +2871,13 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) ret = IRQ_HANDLED; I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); - if (iir & GEN8_PIPE_VBLANK) - drm_handle_vblank(&dev_priv->drm, pipe); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (iir & GEN8_PIPE_VBLANK) { + 
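+ /* + * Deliver the vblank to the host by crtc index and, when GVT-g + * is active, kick the per-pipe vblank worker so vGPU guests + * receive their emulated vblank as well. + */ 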
drm_handle_vblank(&dev_priv->drm, drm_crtc_index(&crtc->base)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + gvt_notify_vblank(dev_priv, pipe); +#endif + } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) hsw_pipe_crc_irq_handler(dev_priv, pipe); @@ -3446,7 +3485,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + /* Since the guest sees all the pipes, we don't want it to disable vblank. */ + if (!dev_priv->gvt) + bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -4138,6 +4179,19 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_L3_DPF(dev_priv)) gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + if (intel_vgpu_active(dev_priv)) { + gt_interrupts[0] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_RCS_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_BCS_IRQ_SHIFT; + gt_interrupts[1] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS1_IRQ_SHIFT | + GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VCS2_IRQ_SHIFT; + gt_interrupts[3] |= GT_RENDER_CS_MASTER_ERROR_INTERRUPT << + GEN8_VECS_IRQ_SHIFT; + } + dev_priv->pm_ier = 0x0; dev_priv->pm_imr = ~dev_priv->pm_ier; GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 295e981e4a39..4ffdd533bddc 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -174,6 +174,83 @@ i915_param_named(enable_dpcd_backlight, bool, 0600, i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); +i915_param_named(domain_scaler_owner, int, 0400, + "scaler owners for each domain and for each pipe; ids can be from 0-F"); + +/* pipeA Scaler = BITS 0-7 pipeB scaler = 8-15, pipeC = 16-19 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * +----------+------------+-------------+------------+ + * 31 20 19 16 15 8 7 0 + * + * Each nibble represents a domain id. 0 for Dom0, 1,2,3...0xF for DomUs + * eg: domain_scaler_owner = 0x00030210 // 0x000|3|02|10 + * scaler domain + * scaler_owner1A -0 + * scaler_owner2A -1 + * scaler_owner1B -2 + * scaler_owner2B -0 + * scaler_owner1C -3 + * scaler_owner2C -0 + * + */ + + +i915_param_named(enable_pvmmio, uint, 0400, + "Enable the pv mmio feature and set the pvmmio level, default 1." 
+ "This parameter could only set from host, guest value is set through vgt_if"); + +/* pipeA = BITS 0-3, pipeB = BITS 8-11, pipeC = BITS 16-18 + * +----------+-------+---------+--------+--------+--------+--------+ + * |unused |unused | Pipe C | unused | Pipe B | unused | Pipe A | + * +----------+-------+---------+--------+--------+--------+--------+ + * 31 23 18 15 11 7 3 0 + * + * + * BITS 0,1,2,3 - needs to be set planes assigned for pipes A and B + * and BITs 0,1,2 - for pipe C + * eg: avail_planes_per_pipe = 0x3 - pipe A=2(planes 1 and 2) , pipeB=0 and pipeC=0 planes + * eg: avail_planes_per_pipe = 0x5 - pipe A=2(planes 1 and 3) , pipeB=0 and pipeC=0 planes + * avail_planes_per_pipe = 0x030701 - pipe A =1(plane 1, pipeB=3(planes 1,2 and 3), pipeC=2( planes 1 and 2) + * + */ +i915_param_named_unsafe(avail_planes_per_pipe, uint, 0400, + "plane mask for each pipe: \ + set BITS 0-3:pipeA 8-11:pipeB 16-18:pipeC to specify the planes that \ + are available eg: 0x030701 : planes 1:pipeA 1,2,3:pipeB \ + 1,2:pipeC (0x0 - default value)"); + +/* pipeA = BITS 0-15 pipeB = 16-31, pipeC = 32-47 + * + * +----------+------------+-------------+------------+ + * |unused | Pipe C | Pipe B | Pipe A | + * +----------+------------+-------------+------------+ + * 63 47 31 15 0 + * + * Each nibble represents domain id. 0 for Dom0, 1,2,3...0xF for DomUs + * eg: domain_plane_owners = 0x022111000010 // 0x0221|1100|0010 + * plane domain + * plane_owner1A -0 + * plane_owner2A -1 + * plane_owner3A -0 + * plane_owner4A -0 + * plane_owner1B -0 + * plane_owner2B -0 + * plane_owner3B -1 + * plane_owner4B -1 + * plane_owner1C -1 + * plane_owner2C -2 + * plane_owner3C -2 + * + * + */ +i915_param_named_unsafe(domain_plane_owners, ullong, 0400, + "plane owners for each domain and for each pipe \ + ids can be from 0-F, eg: domain_plane_owners = 0x022111000010 \ + planes owner: 3C:2 2C:2 1C:1 4B:1 3B:1 2B:1 1B:0 4A:0 3A:0 2A:1 1A:0 \ + (0x0 - default value)"); + static __always_inline void _print_param(struct drm_printer *p, const char *name, const char *type, @@ -185,6 +262,8 @@ static __always_inline void _print_param(struct drm_printer *p, drm_printf(p, "i915.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long long")) + drm_printf(p, "i915.%s=%llu\n", name, *(const unsigned long long *)x); else if (!__builtin_strcmp(type, "char *")) drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); else diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index 6c4d4a21474b..6b0f98c37761 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -55,6 +55,8 @@ struct drm_printer; param(int, edp_vswing, 0) \ param(int, reset, 2) \ param(unsigned int, inject_load_failure, 0) \ + param(unsigned int, avail_planes_per_pipe, 0) \ + param(unsigned long long, domain_plane_owners, 0) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ @@ -68,6 +70,11 @@ struct drm_printer; param(bool, nuclear_pageflip, false) \ param(bool, enable_dp_mst, true) \ param(bool, enable_dpcd_backlight, false) \ + param(int, domain_scaler_owner, 0x11100) \ + param(unsigned int, enable_pvmmio, \ + PVMMIO_ELSP_SUBMIT | PVMMIO_PLANE_UPDATE \ + | PVMMIO_PLANE_WM_UPDATE | PVMMIO_PPGTT_UPDATE \ + | PVMMIO_GGTT_UPDATE ) \ param(bool, enable_gvt, false) 
#define MEMBER(T, member, ...) T member; diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h index eeaa3d506d95..c15d4578bb5f 100644 --- a/drivers/gpu/drm/i915/i915_pvinfo.h +++ b/drivers/gpu/drm/i915/i915_pvinfo.h @@ -46,9 +46,83 @@ enum vgt_g2v_type { VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY, VGT_G2V_EXECLIST_CONTEXT_CREATE, VGT_G2V_EXECLIST_CONTEXT_DESTROY, + VGT_G2V_PPGTT_L4_ALLOC, + VGT_G2V_PPGTT_L4_CLEAR, + VGT_G2V_PPGTT_L4_INSERT, + VGT_G2V_GGTT_INSERT, + VGT_G2V_GGTT_CLEAR, VGT_G2V_MAX, }; +#define PLANE_COLOR_CTL_BIT (1 << 0) +#define PLANE_KEY_BIT (1 << 1) +#define PLANE_SCALER_BIT (1 << 2) + +struct pv_plane_update { + u32 flags; + u32 plane_color_ctl; + u32 plane_key_val; + u32 plane_key_max; + u32 plane_key_msk; + u32 plane_offset; + u32 plane_stride; + u32 plane_size; + u32 plane_aux_dist; + u32 plane_aux_offset; + u32 ps_ctrl; + u32 ps_pwr_gate; + u32 ps_win_ps; + u32 ps_win_sz; + u32 plane_pos; + u32 plane_ctl; +}; + +struct pv_plane_wm_update { + u32 max_wm_level; + u32 plane_wm_level[8]; + u32 plane_trans_wm_level; + u32 plane_buf_cfg; +}; + +struct pv_ppgtt_update { + u64 pdp; + u64 start; + u64 length; + u32 cache_level; +}; + +struct pv_ggtt_update { + u64 start; + u64 length; + u32 cache_level; +}; + +/* Shared page (4KB) between GVT and the VM, located at the first page + * after the MMIO region (normally 2MB in size). + */ +struct gvt_shared_page { + u32 elsp_data[4]; + u32 reg_addr; + struct pv_plane_update pv_plane; + struct pv_plane_wm_update pv_plane_wm; + struct pv_ppgtt_update pv_ppgtt; + struct pv_ggtt_update pv_ggtt; + u32 rsvd2[0x400 - 46]; +}; + +#define VGPU_PVMMIO(vgpu) vgpu_vreg_t(vgpu, vgtif_reg(enable_pvmmio)) + +/* + * Define the different levels of PVMMIO optimization + */ +enum pvmmio_levels { + PVMMIO_ELSP_SUBMIT = 0x1, + PVMMIO_PLANE_UPDATE = 0x2, + PVMMIO_PLANE_WM_UPDATE = 0x4, + PVMMIO_PPGTT_UPDATE = 0x10, + PVMMIO_GGTT_UPDATE = 0x20, +}; + /* * VGT capabilities type */ @@ -56,6 +130,9 @@ enum vgt_g2v_type { #define VGT_CAPS_HWSP_EMULATION BIT(3) #define VGT_CAPS_HUGE_GTT BIT(4) +#define PVMMIO_LEVEL(dev_priv, level) \ + (intel_vgpu_active(dev_priv) && (i915_modparams.enable_pvmmio & level)) + struct vgt_if { u64 magic; /* VGT_MAGIC */ u16 version_major; @@ -106,8 +183,11 @@ struct vgt_if { u32 execlist_context_descriptor_lo; u32 execlist_context_descriptor_hi; + u32 enable_pvmmio; + u32 pv_mmio; + u32 scaler_owned; - u32 rsv7[0x200 - 24]; /* pad to one page */ + u32 rsv7[0x200 - 27]; /* pad to one page */ } __packed; #define vgtif_reg(x) \ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9e63cd47b60f..3c2cc2e19826 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -10652,4 +10652,31 @@ enum skl_power_gate { _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \ _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC) +/* Some MMIO registers need a special read flow under GVT, so reads of + * them must be trapped and handled by GVT for a complete emulation. + * There are only a few such registers, so a static list covers them.
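+ * For instance, guest reads of RING_TIMESTAMP must return the vGPU's + * emulated timestamp, and GMBUS data reads must reflect the virtual + * GMBUS transaction, so both register families appear in the list below.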
+ */ +static inline bool in_mmio_read_trap_list(u32 reg) +{ + if (unlikely(reg >= PCH_GMBUS0.reg && reg <= PCH_GMBUS5.reg)) + return true; + + if (unlikely(reg == RING_TIMESTAMP(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP(VEBOX_RING_BASE).reg || + reg == RING_TIMESTAMP(GEN8_BSD2_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(RENDER_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(BLT_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(GEN6_BSD_RING_BASE).reg || + reg == RING_TIMESTAMP_UDW(VEBOX_RING_BASE).reg)) + return true; + + if (unlikely(reg == SBI_DATA.reg || reg == 0x6c060 || reg == 0x206c)) + return true; + + return false; +} + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 5c2c93cbab12..1bd2a7ef1885 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -1054,6 +1054,7 @@ void i915_request_add(struct i915_request *request) lockdep_assert_held(&request->i915->drm.struct_mutex); trace_i915_request_add(request); + trace_i915_request_add_domain(request); /* * Make sure that no request gazumped us - if it was allocated after diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index e5e6f6bb2b05..4ff644202743 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,10 @@ #include "intel_drv.h" #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#include "../drm_internal.h" +#endif + static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev) { struct drm_minor *minor = dev_get_drvdata(kdev); @@ -571,6 +575,284 @@ static void i915_teardown_error_capture(struct device *kdev) { sysfs_remove_bin_file(&kdev->kobj, &error_state_attr); } + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) +#define dev_to_drm_minor(d) dev_get_drvdata((d)) + +static ssize_t i915_gem_clients_state_read(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_error_state_buf error_str; + ssize_t ret_count = 0; + int ret; + + ret = i915_error_state_buf_init(&error_str, dev_priv, count, off); + if (ret) + return ret; + + ret = i915_get_drm_clients_info(&error_str, dev); + if (ret) + goto out; + + ret_count = count < error_str.bytes ? 
count : error_str.bytes; + + memcpy(buf, error_str.buf, ret_count); +out: + i915_error_state_buf_release(&error_str); + + return ret ?: ret_count; +} + +#define GEM_OBJ_STAT_BUF_SIZE (4*1024) /* 4KB */ +#define GEM_OBJ_STAT_BUF_SIZE_MAX (1024*1024) /* 1MB */ + +struct i915_gem_file_attr_priv { + char tgid_str[16]; + struct pid *tgid; + struct drm_i915_error_state_buf buf; +}; + +static ssize_t i915_gem_read_objects(struct file *filp, + struct kobject *memtrack_kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct kobject *kobj = memtrack_kobj->parent; + struct device *kdev = container_of(kobj, struct device, kobj); + struct drm_minor *minor = dev_to_drm_minor(kdev); + struct drm_device *dev = minor->dev; + struct i915_gem_file_attr_priv *attr_priv; + struct pid *tgid; + ssize_t ret_count = 0; + long bytes_available; + int ret = 0, buf_size = GEM_OBJ_STAT_BUF_SIZE; + unsigned long timeout = msecs_to_jiffies(500) + 1; + + /* + * There may arise a scenario where the sysfs file entry is being removed, + * and may race against a sysfs read. The sysfs file remove function would + * have taken the drm_global_mutex and would wait for the read to finish, + * which in turn is waiting to acquire the drm_global_mutex, leading to + * a deadlock. To avoid this, use mutex_trylock here with a timeout. + */ + while (!mutex_trylock(&drm_global_mutex) && --timeout) + schedule_timeout_killable(1); + if (timeout == 0) { + DRM_DEBUG_DRIVER("Unable to acquire drm global mutex.\n"); + return -EBUSY; + } + + if (!attr || !attr->private) { + ret = -EINVAL; + DRM_ERROR("attr or attr->private pointer is NULL\n"); + goto out; + } + attr_priv = attr->private; + tgid = attr_priv->tgid; + + if (off && !attr_priv->buf.buf) { + ret = -EINVAL; + DRM_ERROR( + "Buf not allocated during read with non-zero offset\n"); + goto out; + } + + if (off == 0) { +retry: + if (!attr_priv->buf.buf) { + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj state buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + } else { + /* Reset the buf parameters before filling data */ + attr_priv->buf.pos = 0; + attr_priv->buf.bytes = 0; + } + + /* Read the gfx device stats */ + ret = i915_gem_get_obj_info(&attr_priv->buf, dev, tgid); + if (ret) + goto out; + + ret = i915_error_ok(&attr_priv->buf); + if (ret) { + ret = 0; + goto copy_data; + } + if (buf_size >= GEM_OBJ_STAT_BUF_SIZE_MAX) { + DRM_DEBUG_DRIVER("obj stat buf size limit reached\n"); + ret = -ENOMEM; + goto out; + } else { + /* Try to reallocate buf of larger size */ + i915_error_state_buf_release(&attr_priv->buf); + buf_size *= 2; + + ret = i915_obj_state_buf_init(&attr_priv->buf, + buf_size); + if (ret) { + DRM_ERROR( + "obj stat buf init failed. buf_size=%d\n", + buf_size); + goto out; + } + goto retry; + } + } +copy_data: + + bytes_available = (long)attr_priv->buf.bytes - (long)off; + + if (bytes_available > 0) { + ret_count = count < bytes_available ? count : bytes_available; + memcpy(buf, attr_priv->buf.buf + off, ret_count); + } else + ret_count = 0; + +out: + mutex_unlock(&drm_global_mutex); + + return ret ?: ret_count; +} + +int i915_gem_create_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_gem_file_attr_priv *attr_priv; + struct bin_attribute *obj_attr; + struct drm_file *file_local; + int ret; + + /* + * Check for multiple drm files having same tgid.
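+ * (A single process that opens the DRM device node more than once shows up here as several drm_files sharing one tgid.)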
If found, copy the + * bin attribute into the new file priv. Otherwise allocate a new + * copy of bin attribute, and create its corresponding sysfs file. + */ + mutex_lock(&dev->struct_mutex); + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (file_priv->tgid == file_priv_local->tgid) { + file_priv->obj_attr = file_priv_local->obj_attr; + mutex_unlock(&dev->struct_mutex); + return 0; + } + } + mutex_unlock(&dev->struct_mutex); + + obj_attr = kzalloc(sizeof(*obj_attr), GFP_KERNEL); + if (!obj_attr) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out; + } + + attr_priv = kzalloc(sizeof(*attr_priv), GFP_KERNEL); + if (!attr_priv) { + DRM_ERROR("Alloc failed. Out of memory\n"); + ret = -ENOMEM; + goto out_obj_attr; + } + + snprintf(attr_priv->tgid_str, 16, "%d", task_tgid_nr(current)); + obj_attr->attr.name = attr_priv->tgid_str; + obj_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + obj_attr->size = 0; + obj_attr->read = i915_gem_read_objects; + + attr_priv->tgid = file_priv->tgid; + obj_attr->private = attr_priv; + + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + obj_attr); + if (ret) { + DRM_ERROR( + "sysfs tgid file setup failed. tgid=%d, process:%s, ret:%d\n", + pid_nr(file_priv->tgid), file_priv->process_name, ret); + + goto out_attr_priv; + } + + file_priv->obj_attr = obj_attr; + return 0; + +out_attr_priv: + kfree(attr_priv); +out_obj_attr: + kfree(obj_attr); +out: + return ret; +} + +void i915_gem_remove_sysfs_file_entry(struct drm_device *dev, + struct drm_file *file) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_file *file_local; + int open_count = 1; + + /* + * The current drm file instance is already removed from filelist at + * this point. + * Check if this particular drm file being removed is the last one for + * that particular tgid, and no other instances for this tgid exist in + * the filelist. If so, remove the corresponding sysfs file entry also. 
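+ * For example, a process holding two open fds on the node still has + * open_count == 2 when its first fd is closed, so its tgid file stays; + * only closing the last fd leaves open_count at 1 and removes the entry.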
+ */ + list_for_each_entry(file_local, &dev->filelist, lhead) { + struct drm_i915_file_private *file_priv_local = + file_local->driver_priv; + + if (pid_nr(file_priv->tgid) == pid_nr(file_priv_local->tgid)) + open_count++; + } + + if (open_count == 1) { + struct i915_gem_file_attr_priv *attr_priv; + + if (WARN_ON(file_priv->obj_attr == NULL)) + return; + attr_priv = file_priv->obj_attr->private; + + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + file_priv->obj_attr); + + i915_error_state_buf_release(&attr_priv->buf); + kfree(file_priv->obj_attr->private); + kfree(file_priv->obj_attr); + } +} + +static struct bin_attribute i915_gem_client_state_attr = { + .attr.name = "i915_gem_meminfo", + .attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH, + .size = 0, + .read = i915_gem_clients_state_read, +}; + +static struct attribute *memtrack_kobj_attrs[] = {NULL}; + +static struct kobj_type memtrack_kobj_type = { + .release = NULL, + .sysfs_ops = NULL, + .default_attrs = memtrack_kobj_attrs, +}; +#endif #else static void i915_setup_error_capture(struct device *kdev) {} static void i915_teardown_error_capture(struct device *kdev) {} @@ -623,6 +905,28 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv) DRM_ERROR("RPS sysfs setup failed\n"); i915_setup_error_capture(kdev); + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + /* + * Create the gfx_memtrack directory for memtrack sysfs files + */ + ret = kobject_init_and_add( + &dev_priv->memtrack_kobj, &memtrack_kobj_type, + &kdev->kobj, "gfx_memtrack"); + if (unlikely(ret != 0)) { + DRM_ERROR( + "i915 sysfs setup memtrack directory failed\n" + ); + kobject_put(&dev_priv->memtrack_kobj); + } else { + ret = sysfs_create_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + if (ret) + DRM_ERROR( + "i915_gem_client_state sysfs setup failed\n" + ); + } +#endif } void i915_teardown_sysfs(struct drm_i915_private *dev_priv) @@ -641,4 +945,11 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group); sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group); #endif + +#if IS_ENABLED(CONFIG_DRM_I915_MEMTRACK) + sysfs_remove_bin_file(&dev_priv->memtrack_kobj, + &i915_gem_client_state_attr); + kobject_del(&dev_priv->memtrack_kobj); + kobject_put(&dev_priv->memtrack_kobj); +#endif } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index b50c6b829715..6f25961ad9ad 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -281,7 +281,7 @@ TRACE_EVENT(i915_pipe_update_start, TP_fast_assign( __entry->pipe = crtc->pipe; __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, - crtc->pipe); + drm_crtc_index(&crtc->base)); __entry->scanline = intel_get_crtc_scanline(crtc); __entry->min = crtc->debug.min_vbl; __entry->max = crtc->debug.max_vbl; @@ -679,6 +679,53 @@ DEFINE_EVENT(i915_request, i915_request_add, TP_ARGS(rq) ); +TRACE_EVENT(i915_multi_domains, + TP_PROTO(struct i915_request *req), + TP_ARGS(req), + + TP_STRUCT__entry( + __field(u32, dev) + __field(u32, ctx) + __field(u32, ring) + __field(u32, seqno) + __field(u32, global) + __field(int, prio_req) + __field(int, prio_ctx) + __field(bool, shadow_ctx) + __field(u32, hw_id) + __field(int, vgt_id) + __field(u32, pid) + ), + + TP_fast_assign( + __entry->dev = req->i915->drm.primary->index; + __entry->ring = req->engine->id; + __entry->ctx = req->fence.context; + __entry->seqno = req->fence.seqno; + __entry->global = req->global_seqno; + __entry->prio_req = 
req->sched.attr.priority; + __entry->prio_ctx = req->sched.attr.priority; + __entry->shadow_ctx = is_shadow_context(req->gem_context); + __entry->hw_id = req->gem_context->hw_id; + __entry->vgt_id = get_vgt_id(req->gem_context); + __entry->pid = is_shadow_context(req->gem_context) ? + get_pid_shadowed(req->gem_context, req->engine) : + pid_nr(req->gem_context->pid); + ), + + TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, " + "priority=%d (%d), is_shadow_ctx=%u, hw_id=%u, " + "vgt_id=%u, pid=%u", __entry->dev, __entry->ring, + __entry->ctx, __entry->seqno, __entry->global, + __entry->prio_req, __entry->prio_ctx, __entry->shadow_ctx, + __entry->hw_id, __entry->vgt_id, __entry->pid) +); + +DEFINE_EVENT(i915_multi_domains, i915_request_add_domain, + TP_PROTO(struct i915_request *req), + TP_ARGS(req) +); + #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS) DEFINE_EVENT(i915_request, i915_request_submit, TP_PROTO(struct i915_request *rq), diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index 869cf4a3b6de..d7a328f52978 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -76,6 +76,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv) } dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps)); + dev_priv->vgpu.scaler_owned = + __raw_i915_read32(dev_priv, vgtif_reg(scaler_owned)); + + /* If the guest wants to enable pvmmio, it must request it explicitly + * through the vgt_if interface and then read back the resulting state + * from the gvt layer. + */ + __raw_i915_write32(dev_priv, vgtif_reg(enable_pvmmio), + i915_modparams.enable_pvmmio); + i915_modparams.enable_pvmmio = __raw_i915_read32(dev_priv, + vgtif_reg(enable_pvmmio)); dev_priv->vgpu.active = true; DRM_INFO("Virtual GPU for Intel GVT-g detected.\n"); diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index b04952bacf77..ec4a73e79709 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -316,7 +316,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, if (*scaler_id < 0) { /* find a free scaler */ for (j = 0; j < intel_crtc->num_scalers; j++) { - if (!scaler_state->scalers[j].in_use) { + if (!scaler_state->scalers[j].in_use && + scaler_state->scalers[j].owned == 1) { scaler_state->scalers[j].in_use = 1; *scaler_id = j; DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n", @@ -350,10 +351,13 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, * scaler 0 operates in high quality (HQ) mode.
* In this case use scaler 0 to take advantage of HQ mode */ - *scaler_id = 0; - scaler_state->scalers[0].in_use = 1; - scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ; - scaler_state->scalers[1].in_use = 0; + if (scaler_state->scalers[0].owned == 1) { + *scaler_id = 0; + scaler_state->scalers[0].in_use = 1; + scaler_state->scalers[0].mode = + PS_SCALER_MODE_HQ; + scaler_state->scalers[1].in_use = 0; + } } else { scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1faa494e2bc9..1f99373dcd77 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1726,6 +1726,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv) return; } + if (HAS_PCH_NOP(dev_priv) && !intel_vgpu_active(dev_priv)) { + DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); + return; + } + else if (HAS_PCH_NOP(dev_priv)) { + dev_priv->pch_type = PCH_NONE; + } + init_vbt_defaults(dev_priv); /* If the OpRegion does not have VBT, look in PCI ROM. */ diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index cf9b600cca79..addb223604fa 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -453,7 +453,13 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv) INIT_WORK(&dev_priv->csr.work, csr_load_work_fn); - if (!HAS_CSR(dev_priv)) + /* + * In a GVTg enabled environment, loading the CSR firmware for DomU doesn't + * make much sense since we don't allow it to control display power + * management settings. Furthermore, we can save some time for DomU bootup + * by skipping CSR loading. + */ + if (!HAS_CSR(dev_priv) || intel_vgpu_active(dev_priv)) return; if (i915_modparams.dmc_firmware_path) diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index c9af34861d9e..8b34f086a663 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -1882,7 +1882,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) goto out; } - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) cpu_transcoder = TRANSCODER_EDP; else cpu_transcoder = (enum transcoder) pipe; @@ -3009,11 +3009,6 @@ static void intel_enable_ddi(struct intel_encoder *encoder, intel_enable_ddi_hdmi(encoder, crtc_state, conn_state); else intel_enable_ddi_dp(encoder, crtc_state, conn_state); - - /* Enable hdcp if it's desired */ - if (conn_state->content_protection == - DRM_MODE_CONTENT_PROTECTION_DESIRED) - intel_hdcp_enable(to_intel_connector(conn_state->connector)); } static void intel_disable_ddi_dp(struct intel_encoder *encoder, @@ -3053,8 +3048,6 @@ static void intel_disable_ddi(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - intel_hdcp_disable(to_intel_connector(old_conn_state->connector)); - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) intel_disable_ddi_hdmi(encoder, old_crtc_state, old_conn_state); else @@ -3278,7 +3271,7 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, enum port port = encoder->port; int ret; - if (port == PORT_A) + if (port == PORT_A && !intel_vgpu_active(dev_priv)) pipe_config->cpu_transcoder = TRANSCODER_EDP; if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) @@ -3548,11 +3541,18 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) bool init_hdmi, init_dp, init_lspcon = false; - 
init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi || + /* + * For port A, check whether the vgpu is active and whether a monitor + * is attached to port A. + */ + init_hdmi = (intel_vgpu_active(dev_priv) && port == PORT_A && + (I915_READ(GEN8_DE_PORT_ISR) & BXT_DE_PORT_HP_DDIA)) || + (dev_priv->vbt.ddi_port_info[port].supports_dvi || dev_priv->vbt.ddi_port_info[port].supports_hdmi); init_dp = dev_priv->vbt.ddi_port_info[port].supports_dp; - if (intel_bios_is_lspcon_present(dev_priv, port)) { + if (!intel_vgpu_active(dev_priv) && + intel_bios_is_lspcon_present(dev_priv, port)) { /* * Lspcon device needs to be driven with DP connector * with special detection sequence. So make sure DP @@ -3648,7 +3648,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) /* In theory we don't need the encoder->type check, but leave it just in * case we have some really bad VBTs... */ - if (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi) { + if ((intel_vgpu_active(dev_priv) && IS_BROXTON(dev_priv)) || + (intel_encoder->type != INTEL_OUTPUT_EDP && init_hdmi)) { if (!intel_ddi_init_hdmi_connector(intel_dig_port)) goto err; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d2951096bca0..dd681684e0d9 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -49,6 +49,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /* Primary plane formats for gen <= 3 */ static const uint32_t i8xx_primary_formats[] = { DRM_FORMAT_C8, @@ -5237,8 +5241,8 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), crtc); struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + struct drm_plane_state *old_primary_state = primary ? + drm_atomic_get_old_plane_state(old_state, primary) : NULL; intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); @@ -5276,8 +5280,8 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_atomic_state *old_state = old_crtc_state->base.state; struct drm_plane *primary = crtc->base.primary; - struct drm_plane_state *old_primary_state = - drm_atomic_get_old_plane_state(old_state, primary); + struct drm_plane_state *old_primary_state = primary ?
+ drm_atomic_get_old_plane_state(old_state, primary) : NULL; bool modeset = needs_modeset(&pipe_config->base); struct intel_atomic_state *old_intel_state = to_intel_atomic_state(old_state); @@ -5358,15 +5362,37 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, intel_update_watermarks(crtc); } +static void disable_primary_plane(struct drm_i915_private *dev_priv, int pipe) +{ + u32 val; + + val = I915_READ(PLANE_CTL(pipe, PLANE_PRIMARY)); + if (val & PLANE_CTL_ENABLE) { + I915_WRITE(PLANE_CTL(pipe, PLANE_PRIMARY), 0); + I915_WRITE(PLANE_SURF(pipe, PLANE_PRIMARY), 0); + POSTING_READ(PLANE_SURF(pipe, PLANE_PRIMARY)); + } +} + static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *p; int pipe = intel_crtc->pipe; intel_crtc_dpms_overlay_disable(intel_crtc); + /* + * On BIOS based systems, if Dom0 doesn't own Plane 0 (Primary Plane), + * then during modeset, it wouldn't be able to disable this plane and + * this can lead to unexpected behavior after the modeset. Therefore, + * disable the primary plane if it was enabled by the BIOS/GOP. + */ + if (dev_priv->gvt && i915_modparams.avail_planes_per_pipe) + disable_primary_plane(dev_priv, pipe); + drm_for_each_plane_mask(p, dev, plane_mask) to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); @@ -8728,7 +8754,8 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc, /* find scaler attached to this pipe */ for (i = 0; i < crtc->num_scalers; i++) { ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); - if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { + if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK) && + scaler_state->scalers[i].owned) { id = i; pipe_config->pch_pfit.enabled = true; pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); @@ -11629,7 +11656,8 @@ static void verify_wm_state(struct drm_crtc *crtc, const enum pipe pipe = intel_crtc->pipe; int plane, level, max_level = ilk_wm_max_level(dev_priv); - if (INTEL_GEN(dev_priv) < 9 || !new_state->active) + if (INTEL_GEN(dev_priv) < 9 || !new_state->active || + i915_modparams.avail_planes_per_pipe) return; skl_pipe_wm_get_hw_state(crtc, &hw_wm); @@ -11874,7 +11902,16 @@ verify_crtc_state(struct drm_crtc *crtc, intel_pipe_config_sanity_check(dev_priv, pipe_config); sw_config = to_intel_crtc_state(new_crtc_state); - if (!intel_pipe_config_compare(dev_priv, sw_config, + + /* + * Only check for pipe config if we are not in a GVT guest environment, + * because such a check in a GVT guest environment doesn't make any sense + * as we don't allow the guest to do a mode set, so there can very well + * be a difference between what it has programmed vs. what the host + * truly configured the HW pipe to be in. 
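+ * (For example, the guest may believe its virtual pipe runs a mode it + * programmed itself, while the host keeps the physical pipe in the + * configuration chosen by Dom0.)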
+ */ + if (!intel_vgpu_active(dev_priv) && + !intel_pipe_config_compare(dev_priv, sw_config, pipe_config, false)) { I915_STATE_WARN(1, "pipe state doesn't match!\n"); intel_dump_pipe_config(intel_crtc, pipe_config, @@ -11935,11 +11972,13 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv, if (new_state->active) I915_STATE_WARN(!(pll->active_mask & crtc_mask), "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); else I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", - pipe_name(drm_crtc_index(crtc)), pll->active_mask); + pipe_name(to_intel_crtc(crtc)->pipe), + pll->active_mask); I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", @@ -11970,10 +12009,10 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, I915_STATE_WARN(pll->active_mask & crtc_mask, "pll active mismatch (didn't expect pipe %c in active mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, "pll enabled crtcs mismatch (found %x in enabled mask)\n", - pipe_name(drm_crtc_index(crtc))); + pipe_name(to_intel_crtc(crtc)->pipe)); } } @@ -12388,7 +12427,8 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) if (!dev->max_vblank_count) return (u32)drm_crtc_accurate_vblank_count(&crtc->base); - return dev->driver->get_vblank_counter(dev, crtc->pipe); + return dev->driver->get_vblank_counter(dev, + drm_crtc_index(&crtc->base)); } static void intel_update_crtc(struct drm_crtc *crtc, @@ -12402,8 +12442,9 @@ static void intel_update_crtc(struct drm_crtc *crtc, struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = + crtc->primary ? intel_atomic_get_new_plane_state(to_intel_atomic_state(state), - to_intel_plane(crtc->primary)); + to_intel_plane(crtc->primary)) : NULL; if (modeset) { update_scanline_offset(intel_crtc); @@ -12580,6 +12621,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_connector_state *old_conn_state, *new_conn_state; + struct drm_connector *connector; struct drm_crtc *crtc; struct intel_crtc_state *intel_cstate; u64 put_domains[I915_MAX_PIPES] = {}; @@ -12677,9 +12720,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) } } + for_each_oldnew_connector_in_state(state, connector, old_conn_state, + new_conn_state, i) + intel_hdcp_atomic_pre_commit(connector, old_conn_state, + new_conn_state); + /* Now enable the clocks, plane, pipe, and connectors that we set up. */ dev_priv->display.update_crtcs(state); + for_each_new_connector_in_state(state, connector, new_conn_state, i) + intel_hdcp_atomic_commit(connector, new_conn_state); + /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. 
To * fix this: @@ -13715,10 +13766,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) num_formats = ARRAY_SIZE(skl_primary_formats); } - if (primary->has_ccs) - modifiers = skl_format_modifiers_ccs; - else - modifiers = skl_format_modifiers_noccs; + modifiers = i9xx_format_modifiers; primary->update_plane = skl_update_plane; primary->disable_plane = skl_disable_plane; @@ -13817,6 +13865,106 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) return ERR_PTR(ret); } +static struct intel_plane * +intel_skl_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, + int plane, bool is_primary) +{ + struct intel_plane *intel_plane = NULL; + struct intel_plane_state *state = NULL; + unsigned long possible_crtcs; + const uint32_t *plane_formats; + unsigned int supported_rotations, plane_type; + unsigned int num_formats; + const uint64_t *modifiers; + int ret; + + intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); + if (!intel_plane) { + ret = -ENOMEM; + goto fail; + } + + state = intel_create_plane_state(&intel_plane->base); + if (!state) { + ret = -ENOMEM; + goto fail; + } + + intel_plane->base.state = &state->base; + intel_plane->can_scale = false; + state->scaler_id = -1; + intel_plane->pipe = pipe; + + /* + * The primary plane is indexed by pipe; any other plane keeps its + * own hardware plane id. + */ + if (is_primary) { + intel_plane->i9xx_plane = (enum i9xx_plane_id) pipe; + intel_plane->check_plane = intel_check_primary_plane; + plane_type = DRM_PLANE_TYPE_PRIMARY; + } else { + intel_plane->i9xx_plane = (enum i9xx_plane_id) plane; + intel_plane->check_plane = intel_check_sprite_plane; + plane_type = DRM_PLANE_TYPE_OVERLAY; + } + + /* All SKL planes share the same hooks, primary or not. */ + intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane); + intel_plane->update_plane = skl_update_plane; + intel_plane->disable_plane = skl_disable_plane; + intel_plane->get_hw_state = skl_plane_get_hw_state; + + intel_plane->id = plane; + plane_formats = skl_primary_formats; + + modifiers = i9xx_format_modifiers; + + num_formats = ARRAY_SIZE(skl_primary_formats); + + /* + * Drop final format (NV12) for pipes or hardware steppings + * that don't support it.
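+ * (num_formats-- works because NV12 is the last entry of + * skl_primary_formats; affected pipes and steppings simply advertise + * a format list that is one entry shorter.)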
+ */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_C0) || pipe >= PIPE_C + || plane >= 2) + num_formats--; + + + possible_crtcs = (1 << dev_priv->drm.mode_config.num_crtc); + ret = drm_universal_plane_init(&dev_priv->drm, &intel_plane->base, + possible_crtcs, &skl_plane_funcs, + plane_formats, num_formats, + modifiers, + plane_type, + "plane %d%c", plane+1, pipe_name(pipe)); + + if (ret) + goto fail; + + supported_rotations = DRM_MODE_ROTATE_0; + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&intel_plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); + + return intel_plane; + +fail: + kfree(state); + kfree(intel_plane); + + return ERR_PTR(ret); +} + static struct intel_plane * intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -13911,11 +14059,94 @@ static void intel_crtc_init_scalers(struct intel_crtc *crtc, scaler->in_use = 0; scaler->mode = PS_SCALER_MODE_DYN; + scaler->owned = 1; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (intel_gvt_active(dev_priv) && + dev_priv->gvt->pipe_info[crtc->pipe].scaler_owner[i] != 0) + scaler->owned = 0; +#endif + if (intel_vgpu_active(dev_priv) && + !(1 << (crtc->pipe * SKL_NUM_SCALERS + i) & + dev_priv->vgpu.scaler_owned)) + scaler->owned = 0; } scaler_state->scaler_id = -1; } +static int intel_crtc_init_restrict_planes(struct drm_i915_private *dev_priv, + enum pipe pipe, int planes_mask) +{ + struct intel_crtc *intel_crtc; + struct intel_crtc_state *crtc_state; + struct intel_plane *primary = NULL, *intel_plane = NULL; + bool is_primary = true; + int plane, ret, crtc_plane; + + intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); + if (!intel_crtc) + return -ENOMEM; + + crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + if (!crtc_state) { + ret = -ENOMEM; + goto fail; + } + intel_crtc->config = crtc_state; + intel_crtc->base.state = &crtc_state->base; + crtc_state->base.crtc = &intel_crtc->base; + + for_each_universal_plane(dev_priv, pipe, plane) { + if (planes_mask & BIT(plane)) { + intel_plane = intel_skl_plane_create(dev_priv, + pipe, plane, is_primary); + if (IS_ERR(intel_plane)) { + DRM_DEBUG_KMS(" plane %d failed for pipe %d\n", plane, pipe); + ret = PTR_ERR(intel_plane); + goto fail; + } + if (is_primary) { + primary = intel_plane; + is_primary = false; + } + DRM_DEBUG_KMS(" plane %d created for pipe %d\n", plane, pipe); + intel_crtc->plane_ids_mask |= BIT(intel_plane->id); + } + } + + ret = drm_crtc_init_with_planes(&dev_priv->drm, + &intel_crtc->base, + primary ? &primary->base : NULL, NULL, + &intel_crtc_funcs, + "pipe %c", pipe_name(pipe)); + if (ret) + goto fail; + + intel_crtc->pipe = pipe; + crtc_plane = primary ? primary->i9xx_plane : 0; + + dev_priv->plane_to_crtc_mapping[crtc_plane] = intel_crtc; + dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; + + drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + + intel_color_init(&intel_crtc->base); + + WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); + + return 0; + +fail: + /* + * drm_mode_config_cleanup() will free up any + * crtcs/planes already initialized. 
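+ * Only the two allocations that were never registered with the drm + * core need to be freed by hand here.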
+ */ + kfree(crtc_state); + kfree(intel_crtc); + + return ret; +} + static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) { struct intel_crtc *intel_crtc; @@ -14034,6 +14265,27 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, return 0; } +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + if (WARN_ON(!c)) + return -ENOENT; + + *pipe = (to_intel_crtc(c)->pipe); + return 0; +} + +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index) +{ + struct drm_crtc *c = drm_crtc_from_index(dev, index); + + WARN_ON(!c); + return to_intel_crtc(c); +} + + static int intel_encoder_clones(struct intel_encoder *encoder) { struct drm_device *dev = encoder->base.dev; @@ -14315,6 +14567,15 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_encoder_clones(encoder); } +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * Encoders have been initialized. If we are in VGT mode, + * let's inform the HV that it can start Dom U as Dom 0 + * is ready to accept new Dom Us. + */ + gvt_dom0_ready(dev_priv); +#endif + intel_init_pch_refclk(dev_priv); drm_helper_move_panel_connectors_to_head(&dev_priv->drm); @@ -15133,12 +15394,42 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv) DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq); } + +static int intel_sanitize_plane_restriction(struct drm_i915_private *dev_priv) +{ + unsigned int mask; + + /*plane restriction feature is only for APL and KBL for now*/ + if (!(IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv))) { + i915_modparams.avail_planes_per_pipe = 0; + DRM_INFO("Turning off Plane Restrictions feature\n"); + } + + mask = i915_modparams.avail_planes_per_pipe; + + /* make sure SOS has a (dummy) plane per pipe. */ + if ((IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) && + intel_gvt_active(dev_priv)) { + enum pipe pipe; + + for_each_pipe(dev_priv, pipe) { + if (!AVAIL_PLANE_PER_PIPE(dev_priv, mask, pipe)) + mask |= (1 << pipe * BITS_PER_PIPE); + } + DRM_INFO("Fix internal plane mask: 0x%06x --> 0x%06x", + i915_modparams.avail_planes_per_pipe, mask); + } + return mask; +} + int intel_modeset_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); struct i915_ggtt *ggtt = &dev_priv->ggtt; enum pipe pipe; struct intel_crtc *crtc; + unsigned int planes_mask[I915_MAX_PIPES]; + unsigned int avail_plane_per_pipe_mask = 0; dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); @@ -15212,10 +15503,29 @@ int intel_modeset_init(struct drm_device *dev) INTEL_INFO(dev_priv)->num_pipes, INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); + avail_plane_per_pipe_mask = intel_sanitize_plane_restriction(dev_priv); + DRM_DEBUG_KMS("avail_planes_per_pipe = 0x%x \n", i915_modparams.avail_planes_per_pipe); + DRM_DEBUG_KMS("domain_plane_owners = 0x%llx \n", i915_modparams.domain_plane_owners); + + for_each_pipe(dev_priv, pipe) { + planes_mask[pipe] = AVAIL_PLANE_PER_PIPE(dev_priv, + avail_plane_per_pipe_mask, pipe); + DRM_DEBUG_KMS("for pipe %d plane_mask = %d \n", pipe, planes_mask[pipe]); + } + for_each_pipe(dev_priv, pipe) { int ret; - ret = intel_crtc_init(dev_priv, pipe); + if (!i915_modparams.avail_planes_per_pipe) { + ret = intel_crtc_init(dev_priv, pipe); + } else { + if (!intel_vgpu_active(dev_priv) || (intel_vgpu_active(dev_priv) + && planes_mask[pipe])) { + ret = intel_crtc_init_restrict_planes(dev_priv, + pipe, + planes_mask[pipe]); + } + } if (ret) { drm_mode_config_cleanup(dev); return ret; @@ -15242,7 +15552,7 @@ int intel_modeset_init(struct drm_device *dev) for_each_intel_crtc(dev, crtc) { struct intel_initial_plane_config plane_config = {}; - if (!crtc->active) + if (!crtc->active || !crtc->base.primary) continue; /* @@ -15611,9 +15921,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->base.active; if (crtc_state->base.active) - dev_priv->active_crtcs |= 1 << crtc->pipe; + dev_priv->active_crtcs |= + 1 << drm_crtc_index(&crtc->base); - readout_plane_state(crtc); + if (crtc->base.primary) + readout_plane_state(crtc); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, @@ -15632,7 +15944,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (crtc_state->base.active && crtc_state->shared_dpll == pll) - pll->state.crtc_mask |= 1 << crtc->pipe; + pll->state.crtc_mask |= + 1 << drm_crtc_index(&crtc->base); } pll->active_mask = pll->state.crtc_mask; @@ -15647,10 +15960,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_crtc_state *crtc_state; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = to_intel_crtc_state(crtc->base.state); + if (!crtc) { + encoder->base.crtc = NULL; + } else { + crtc_state = to_intel_crtc_state(crtc->base.state); + + encoder->base.crtc = &crtc->base; + encoder->get_config(encoder, crtc_state); - encoder->base.crtc = &crtc->base; - encoder->get_config(encoder, crtc_state); + } } else { encoder->base.crtc = NULL; } @@ -15809,9 +16127,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev, for_each_pipe(dev_priv, pipe) { crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - intel_sanitize_crtc(crtc, ctx); - intel_dump_pipe_config(crtc, crtc->config, - "[setup_hw_state]"); + if (crtc) { + intel_sanitize_crtc(crtc, ctx); + intel_dump_pipe_config(crtc, crtc->config, + "[setup_hw_state]"); + } } intel_modeset_update_connector_atomic_state(dev); @@ -15925,6 +16245,7 @@ static void intel_hpd_poll_fini(struct drm_device *dev) if (connector->hdcp_shim) { cancel_delayed_work_sync(&connector->hdcp_check_work); cancel_work_sync(&connector->hdcp_prop_work); + cancel_work_sync(&connector->hdcp_enable_work); } } drm_connector_list_iter_end(&conn_iter); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 1193202766a2..23a94eedb896 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -479,7 +479,7 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes) +void intel_dp_unpack_aux(uint32_t src, 
uint8_t *dst, int dst_bytes) { int i; if (dst_bytes > 4) @@ -2045,7 +2045,12 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if (intel_wait_for_register(dev_priv, + /* + * Only wait for panel status if we are not in a GVT guest environment, + * because such a wait in a GVT guest environment doesn't make any sense + * as we are exposing virtual DP monitors to the guest. + */ + if (!intel_vgpu_active(dev_priv) && intel_wait_for_register(dev_priv, pp_stat_reg, mask, value, 5000)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index b51ad2917dbe..47b1dfe06152 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -303,7 +303,7 @@ intel_reference_shared_dpll(struct intel_shared_dpll *pll, DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name, pipe_name(crtc->pipe)); - shared_dpll[id].crtc_mask |= 1 << crtc->pipe; + shared_dpll[id].crtc_mask |= 1 << (drm_crtc_index(&crtc->base)); } /** @@ -3285,7 +3285,8 @@ void intel_release_shared_dpll(struct intel_shared_dpll *dpll, struct intel_shared_dpll_state *shared_dpll_state; shared_dpll_state = intel_atomic_get_shared_dpll_state(state); - shared_dpll_state[dpll->info->id].crtc_mask &= ~(1 << crtc->pipe); + shared_dpll_state[dpll->info->id].crtc_mask &= + ~(1 << drm_crtc_index(&crtc->base)); } /** diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8fc61e96754f..b8bae3ab7c35 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -418,6 +418,15 @@ struct intel_connector { uint64_t hdcp_value; /* protected by hdcp_mutex */ struct delayed_work hdcp_check_work; struct work_struct hdcp_prop_work; + struct work_struct hdcp_enable_work; + + /* list of Revocated KSVs and their count from SRM blob Parsing */ + unsigned int revocated_ksv_cnt; + u8 *revocated_ksv_list; + u32 srm_blob_id; + + /* Downstream info like, depth, device_count, bksv and ksv_list etc */ + struct cp_downstream_info *downstream_info; }; struct intel_digital_connector_state { @@ -564,6 +573,7 @@ struct intel_initial_plane_config { struct intel_scaler { int in_use; uint32_t mode; + int owned; }; struct intel_crtc_scaler_state { @@ -1384,6 +1394,11 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) return dev_priv->runtime_pm.irqs_enabled; } +bool is_shadow_context(struct i915_gem_context *ctx); +int get_vgt_id(struct i915_gem_context *ctx); +int get_pid_shadowed(struct i915_gem_context *ctx, + struct intel_engine_cs *engine); + int intel_get_crtc_scanline(struct intel_crtc *crtc); void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); @@ -1514,6 +1529,9 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int get_pipe_from_crtc_index(struct drm_device *dev, unsigned int index, enum pipe *pipe); +struct intel_crtc *get_intel_crtc_from_index(struct drm_device *dev, + unsigned int index); enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe); static inline bool @@ -1533,7 +1551,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, 
enum pipe pipe) { - drm_wait_one_vblank(&dev_priv->drm, pipe); + struct intel_crtc *crtc; + + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (crtc) + drm_wait_one_vblank(&dev_priv->drm, + drm_crtc_index(&crtc->base)); } static inline void intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) @@ -1708,6 +1731,7 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void intel_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); +void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes); void intel_plane_destroy(struct drm_plane *plane); void intel_edp_drrs_enable(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); @@ -1916,10 +1940,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state); +void intel_hdcp_atomic_commit(struct drm_connector *connector, + struct drm_connector_state *new_state); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); -int intel_hdcp_enable(struct intel_connector *connector); -int intel_hdcp_disable(struct intel_connector *connector); int intel_hdcp_check_link(struct intel_connector *connector); bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port); @@ -2099,6 +2126,9 @@ int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); +int intel_check_sprite_plane(struct intel_plane *plane, + struct intel_crtc_state *crtc_state, + struct intel_plane_state *state); void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb2f9fce34cd..f028a6fb33fe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -548,10 +548,17 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, /* Find the largest fb */ for_each_crtc(dev, crtc) { - struct drm_i915_gem_object *obj = - intel_fb_obj(crtc->primary->state->fb); + struct drm_i915_gem_object *obj; intel_crtc = to_intel_crtc(crtc); + if (!crtc->primary) { + DRM_DEBUG_KMS("pipe %c has no primary plane\n", + pipe_name(intel_crtc->pipe)); + continue; + } + + obj = intel_fb_obj(crtc->primary->state->fb); + if (!crtc->state->active || !obj) { DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", pipe_name(intel_crtc->pipe)); diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index 77c123cc8817..83ed6f56ed56 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -181,11 +181,18 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, enum pipe pipe, bool enable) { struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + if (!crtc) { + DRM_DEBUG("No crtc for pipe=%d\n", pipe); + return; + 
} if (enable) - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_enable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); else - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_disable_pipe_irq(dev_priv, drm_crtc_index(&crtc->base), + GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c index 2fc7a0dd0df9..1f7da1cfd4b2 100644 --- a/drivers/gpu/drm/i915/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/intel_hangcheck.c @@ -418,6 +418,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (!i915_modparams.enable_hangcheck) return; + if (intel_vgpu_active(dev_priv)) + return; + if (!READ_ONCE(dev_priv->gt.awake)) return; diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c index 0cc6a861bcf8..0c481f59b24d 100644 --- a/drivers/gpu/drm/i915/intel_hdcp.c +++ b/drivers/gpu/drm/i915/intel_hdcp.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "intel_drv.h" #include "i915_reg.h" @@ -179,16 +180,110 @@ bool intel_hdcp_is_ksv_valid(u8 *ksv) return true; } +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) +{ + return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); +} + +static inline void intel_hdcp_print_ksv(u8 *ksv) +{ + DRM_DEBUG_KMS("\t%#04x, %#04x, %#04x, %#04x, %#04x\n", *ksv, + *(ksv + 1), *(ksv + 2), *(ksv + 3), *(ksv + 4)); +} + +/* Check if any of the KSVs has been revoked by DCP LLC via the SRM table */ +static inline bool intel_hdcp_ksvs_revocated(struct intel_connector *connector, + u8 *ksvs, u32 ksv_count) +{ + u32 rev_ksv_cnt = connector->revocated_ksv_cnt; + u8 *rev_ksv_list = connector->revocated_ksv_list; + u32 cnt, i, j; + + /* If the revoked KSV list is empty */ + if (!rev_ksv_cnt || !rev_ksv_list) + return false; + + for (cnt = 0; cnt < ksv_count; cnt++) { + rev_ksv_list = connector->revocated_ksv_list; + for (i = 0; i < rev_ksv_cnt; i++) { + for (j = 0; j < DRM_HDCP_KSV_LEN; j++) + if (*(ksvs + j) != *(rev_ksv_list + j)) { + break; + } else if (j == (DRM_HDCP_KSV_LEN - 1)) { + DRM_DEBUG_KMS("Revoked KSV is "); + intel_hdcp_print_ksv(ksvs); + return true; + } + /* Move the offset to the next KSV in the revoked list */ + rev_ksv_list += DRM_HDCP_KSV_LEN; + } + + /* Iterate to next ksv_offset */ + ksvs += DRM_HDCP_KSV_LEN; + } + return false; +} + +/* Implements Part 2 of the HDCP authorization procedure */ static -int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim, - u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) +int intel_hdcp_auth_downstream(struct intel_connector *connector) { - struct drm_i915_private *dev_priv; + struct intel_digital_port *intel_dig_port = + conn_to_dig_port(connector); + const struct intel_hdcp_shim *shim = connector->hdcp_shim; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 vprime, sha_text, sha_leftovers, rep_ctl; + u8 bstatus[2], num_downstream, *ksv_fifo; int ret, i, j, sha_idx; - dev_priv = intel_dig_port->base.base.dev->dev_private; + if (intel_dig_port == NULL) + return -EINVAL; + + ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); + if (ret) { + DRM_ERROR("KSV list failed to become ready (%d)\n", ret); + return ret; + } + + ret = shim->read_bstatus(intel_dig_port, bstatus); + if (ret) + return ret; + + if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || +
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { + DRM_ERROR("Max Topology Limit Exceeded\n"); + return -EPERM; + } + + /* + * When repeater reports 0 device count, HDCP1.4 spec allows disabling + * the HDCP encryption. That implies that repeater can't have its own + * display. As there is no consumption of encrypted content in the + * repeater with 0 downstream devices, we are failing the + * authentication. + */ + num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); + if (num_downstream == 0) + return -EINVAL; + + connector->downstream_info->device_count = num_downstream; + connector->downstream_info->depth = DRM_HDCP_DEPTH(bstatus[1]); + + ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!ksv_fifo) + return -ENOMEM; + + ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); + if (ret) { + kfree(ksv_fifo); + return ret; + } + + if (intel_hdcp_ksvs_revocated(connector, ksv_fifo, num_downstream)) { + DRM_ERROR("Revoked KSV(s) in ksv_fifo\n"); + kfree(ksv_fifo); + return -EPERM; + } + + memcpy(connector->downstream_info->ksv_list, ksv_fifo, + num_downstream * DRM_HDCP_KSV_LEN); /* Process V' values from the receiver */ for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) { @@ -394,79 +489,12 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, return 0; } -/* Implements Part 2 of the HDCP authorization procedure */ -static -int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim) -{ - u8 bstatus[2], num_downstream, *ksv_fifo; - int ret, i, tries = 3; - - ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim); - if (ret) { - DRM_ERROR("KSV list failed to become ready (%d)\n", ret); - return ret; - } - - ret = shim->read_bstatus(intel_dig_port, bstatus); - if (ret) - return ret; - - if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) || - DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) { - DRM_ERROR("Max Topology Limit Exceeded\n"); - return -EPERM; - } - - /* - * When repeater reports 0 device count, HDCP1.4 spec allows disabling - * the HDCP encryption. That implies that repeater can't have its own - * display. As there is no consumption of encrypted content in the - * repeater with 0 downstream devices, we are failing the - * authentication. - */ - num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]); - if (num_downstream == 0) - return -EINVAL; - - ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL); - if (!ksv_fifo) - return -ENOMEM; - - ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo); - if (ret) - goto err; - - /* - * When V prime mismatches, DP Spec mandates re-read of - * V prime atleast twice.
- */ - for (i = 0; i < tries; i++) { - ret = intel_hdcp_validate_v_prime(intel_dig_port, shim, - ksv_fifo, num_downstream, - bstatus); - if (!ret) - break; - } - - if (i == tries) { - DRM_ERROR("V Prime validation failed.(%d)\n", ret); - goto err; - } - - DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n", - num_downstream); - ret = 0; -err: - kfree(ksv_fifo); - return ret; -} - /* Implements Part 1 of the HDCP authorization procedure */ -static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, - const struct intel_hdcp_shim *shim) +static int intel_hdcp_auth(struct intel_connector *connector) { - struct drm_i915_private *dev_priv; + struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); + const struct intel_hdcp_shim *shim = connector->hdcp_shim; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum port port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; @@ -484,7 +512,8 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, } ri; bool repeater_present, hdcp_capable; - dev_priv = intel_dig_port->base.base.dev->dev_private; + if (intel_dig_port == NULL) + return -EINVAL; port = intel_dig_port->base.port; @@ -540,15 +569,25 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, return -ENODEV; } + if (intel_hdcp_ksvs_revocated(connector, bksv.shim, 1)) { + DRM_ERROR("BKSV is revoked\n"); + return -EPERM; + } + + memcpy(connector->downstream_info->bksv, bksv.shim, + DRM_HDCP_KSV_LEN); + I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); ret = shim->repeater_present(intel_dig_port, &repeater_present); if (ret) return ret; - if (repeater_present) + if (repeater_present) { I915_WRITE(HDCP_REP_CTL, intel_hdcp_get_repeater_ctl(intel_dig_port)); + connector->downstream_info->is_repeater = true; + } ret = shim->toggle_signalling(intel_dig_port, true); if (ret) @@ -612,18 +651,12 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port, */ if (repeater_present) - return intel_hdcp_auth_downstream(intel_dig_port, shim); + return intel_hdcp_auth_downstream(connector); DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n"); return 0; } -static -struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector) -{ - return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base); -} - static int _intel_hdcp_disable(struct intel_connector *connector) { struct drm_i915_private *dev_priv = connector->base.dev->dev_private; @@ -647,6 +680,9 @@ static int _intel_hdcp_disable(struct intel_connector *connector) return ret; } + memset(connector->downstream_info, 0, + sizeof(struct cp_downstream_info)); + DRM_DEBUG_KMS("HDCP is disabled\n"); return 0; } @@ -677,10 +713,15 @@ static int _intel_hdcp_enable(struct intel_connector *connector) /* In case of authentication failures, HDCP spec expects reauth.
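* A failed attempt tears the link back down with _intel_hdcp_disable() before the next try.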
*/ for (i = 0; i < tries; i++) { - ret = intel_hdcp_auth(conn_to_dig_port(connector), - connector->hdcp_shim); - if (!ret) + ret = intel_hdcp_auth(connector); + if (!ret) { + connector->hdcp_value = + DRM_MODE_CONTENT_PROTECTION_ENABLED; + schedule_work(&connector->hdcp_prop_work); + schedule_delayed_work(&connector->hdcp_check_work, + DRM_HDCP_CHECK_PERIOD_MS); return 0; + } DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret); @@ -688,10 +729,33 @@ static int _intel_hdcp_enable(struct intel_connector *connector) _intel_hdcp_disable(connector); } + memset(connector->downstream_info, 0, + sizeof(struct cp_downstream_info)); + DRM_ERROR("HDCP authentication failed (%d tries/%d)\n", tries, ret); return ret; } +static void intel_hdcp_enable_work(struct work_struct *work) +{ + struct intel_connector *connector = container_of(work, + struct intel_connector, + hdcp_enable_work); + int ret; + + mutex_lock(&connector->hdcp_mutex); + ret = _intel_hdcp_enable(connector); + if (!ret) { + ret = drm_mode_connector_update_cp_downstream_property( + &connector->base, + connector->downstream_info); + if (ret) + DRM_ERROR("Downstream_property update failed.%d\n", + ret); + } + mutex_unlock(&connector->hdcp_mutex); +} + static void intel_hdcp_check_work(struct work_struct *work) { struct intel_connector *connector = container_of(to_delayed_work(work), @@ -744,33 +808,37 @@ int intel_hdcp_init(struct intel_connector *connector, if (ret) return ret; + ret = drm_connector_attach_cp_srm_property(&connector->base); + if (ret) + return ret; + + ret = drm_connector_attach_cp_downstream_property(&connector->base); + if (ret) + return ret; + + connector->downstream_info = kzalloc(sizeof(struct cp_downstream_info), + GFP_KERNEL); + if (!connector->downstream_info) + return -ENOMEM; + connector->hdcp_shim = hdcp_shim; mutex_init(&connector->hdcp_mutex); INIT_DELAYED_WORK(&connector->hdcp_check_work, intel_hdcp_check_work); INIT_WORK(&connector->hdcp_prop_work, intel_hdcp_prop_work); + INIT_WORK(&connector->hdcp_enable_work, intel_hdcp_enable_work); return 0; } int intel_hdcp_enable(struct intel_connector *connector) { - int ret; - if (!connector->hdcp_shim) return -ENOENT; mutex_lock(&connector->hdcp_mutex); - - ret = _intel_hdcp_enable(connector); - if (ret) - goto out; - - connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED; - schedule_work(&connector->hdcp_prop_work); - schedule_delayed_work(&connector->hdcp_check_work, - DRM_HDCP_CHECK_PERIOD_MS); -out: + schedule_work(&connector->hdcp_enable_work); mutex_unlock(&connector->hdcp_mutex); - return ret; + + return 0; } int intel_hdcp_disable(struct intel_connector *connector) @@ -798,7 +866,6 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, { uint64_t old_cp = old_state->content_protection; uint64_t new_cp = new_state->content_protection; - struct drm_crtc_state *crtc_state; if (!new_state->crtc) { /* @@ -819,10 +886,176 @@ void intel_hdcp_atomic_check(struct drm_connector *connector, (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED && new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) return; +} + +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector, + struct drm_connector_state *old_state, + struct drm_connector_state *new_state) +{ + uint64_t old_cp = old_state->content_protection; + uint64_t new_cp = new_state->content_protection; + + /* + * Disable HDCP if the connector is becoming disabled, or if requested + * via the property. 
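The SRM parsing added below walks DCP's Vector Revocation Lists: each VRL is one count byte followed by that many 5-byte KSVs, so the walk simply hops from count byte to count byte. A standalone sketch of the same traversal (layout and KSV length taken from the parser below; validation elided):

#include <stdio.h>
#include <stdint.h>

#define KSV_LEN 5 /* DRM_HDCP_KSV_LEN: an HDCP1.4 KSV is 40 bits */

static uint32_t count_revoked_ksvs(const uint8_t *buf, uint32_t vrls_length)
{
	uint32_t parsed = 0, count = 0;

	while (parsed < vrls_length) {
		uint8_t n = *buf;		/* KSVs in this VRL */
		uint32_t sz = n * KSV_LEN + 1;	/* payload plus count byte */

		count += n;
		buf += sz;
		parsed += sz;
	}
	return count;
}

int main(void)
{
	/* Two VRLs: one holding 2 KSVs, one holding 1 (payload zeroed). */
	uint8_t vrls[1 + 2 * KSV_LEN + 1 + 1 * KSV_LEN] = { 2 };

	vrls[1 + 2 * KSV_LEN] = 1;
	printf("revoked KSVs: %u\n", count_revoked_ksvs(vrls, sizeof(vrls)));
	return 0;
}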
+ */ + if ((!new_state->crtc && + old_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) || + (new_state->crtc && + old_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_cp == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) + intel_hdcp_disable(to_intel_connector(connector)); +} + +static u32 intel_hdcp_get_revocated_ksv_count(u8 *buf, u32 vrls_length) +{ + u32 parsed_bytes = 0, ksv_count = 0, vrl_ksv_cnt, vrl_sz; + + do { + vrl_ksv_cnt = *buf; + ksv_count += vrl_ksv_cnt; + + vrl_sz = (vrl_ksv_cnt * DRM_HDCP_KSV_LEN) + 1; + buf += vrl_sz; + parsed_bytes += vrl_sz; + } while (parsed_bytes < vrls_length); + + return ksv_count; +} + +static u32 intel_hdcp_get_revocated_ksvs(u8 *ksv_list, const u8 *buf, + u32 vrls_length) +{ + u32 parsed_bytes = 0, ksv_count = 0; + u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0; + + do { + vrl_ksv_cnt = *buf; + vrl_ksv_sz = vrl_ksv_cnt * DRM_HDCP_KSV_LEN; + + buf++; + + DRM_INFO("vrl: %d, Revoked KSVs: %d\n", vrl_idx++, + vrl_ksv_cnt); + memcpy(ksv_list, buf, vrl_ksv_sz); + + ksv_count += vrl_ksv_cnt; + ksv_list += vrl_ksv_sz; + buf += vrl_ksv_sz; + + parsed_bytes += (vrl_ksv_sz + 1); + } while (parsed_bytes < vrls_length); + + return ksv_count; +} + +static int intel_hdcp_parse_srm(struct drm_connector *connector, + struct drm_property_blob *blob) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct cp_srm_header *header; + u32 vrl_length, ksv_count; + u8 *buf; + + if (blob->length < (sizeof(struct cp_srm_header) + + DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE)) { + DRM_ERROR("Invalid blob length\n"); + return -EINVAL; + } + + header = (struct cp_srm_header *)blob->data; + + DRM_INFO("SRM ID: 0x%x, SRM Ver: 0x%x, SRM Gen No: 0x%x\n", + header->spec_indicator.srm_id, + __swab16(header->srm_version), + header->srm_gen_no); + + WARN_ON(header->spec_indicator.reserved_hi || + header->spec_indicator.reserved_lo); + + if (header->spec_indicator.srm_id != DRM_HDCP_1_4_SRM_ID) { + DRM_ERROR("Invalid srm_id\n"); + return -EINVAL; + } + + buf = blob->data + sizeof(*header); + + vrl_length = (*buf << 16 | *(buf + 1) << 8 | *(buf + 2)); + + if (blob->length < (sizeof(struct cp_srm_header) + vrl_length) || + vrl_length < (DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE)) { + DRM_ERROR("Invalid blob length or vrl length\n"); + return -EINVAL; + } + + /* Length of all the VRLs combined */ + vrl_length -= (DRM_HDCP_1_4_VRL_LENGTH_SIZE + + DRM_HDCP_1_4_DCP_SIG_SIZE); + + if (!vrl_length) { + DRM_DEBUG("No vrl found\n"); + return -EINVAL; + } + + buf += DRM_HDCP_1_4_VRL_LENGTH_SIZE; + + + ksv_count = intel_hdcp_get_revocated_ksv_count(buf, vrl_length); + if (!ksv_count) { + DRM_INFO("Revoked KSV count is 0\n"); + return 0; + } + + kfree(intel_connector->revocated_ksv_list); + intel_connector->revocated_ksv_list = kzalloc(ksv_count * + DRM_HDCP_KSV_LEN, GFP_KERNEL); + if (!intel_connector->revocated_ksv_list) { + DRM_ERROR("Out of Memory\n"); + return -ENOMEM; + } + + if (intel_hdcp_get_revocated_ksvs(intel_connector->revocated_ksv_list, + buf, vrl_length) != ksv_count) { + intel_connector->revocated_ksv_cnt = 0; + kfree(intel_connector->revocated_ksv_list); + intel_connector->revocated_ksv_list = NULL; + return -EINVAL; + } + + intel_connector->revocated_ksv_cnt = ksv_count; + return 0; +} + +static void intel_hdcp_update_srm(struct drm_connector *connector, + u32 srm_blob_id) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_property_blob *blob; + + blob = drm_property_lookup_blob(connector->dev, srm_blob_id); + if (!blob ||
!blob->data) + return; + + if (!intel_hdcp_parse_srm(connector, blob)) + intel_connector->srm_blob_id = srm_blob_id; + + drm_property_blob_put(blob); +} + +void intel_hdcp_atomic_commit(struct drm_connector *connector, + struct drm_connector_state *new_state) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + uint64_t new_cp = new_state->content_protection; + + if (new_state->cp_srm_blob_id && + new_state->cp_srm_blob_id != intel_connector->srm_blob_id) + intel_hdcp_update_srm(connector, new_state->cp_srm_blob_id); - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); - crtc_state->mode_changed = true; + /* Enable hdcp if it's desired */ + if (new_state->crtc && new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED) + intel_hdcp_enable(to_intel_connector(connector)); } /* Implements Part 3 of the HDCP authorization procedure */ diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 192972a7d287..a3b6d078ca24 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -2182,6 +2182,14 @@ static u8 bxt_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port) u8 ddc_pin; switch (port) { + case PORT_A: + if ((IS_GEN9_LP(dev_priv)) && (intel_vgpu_active(dev_priv))) + ddc_pin = GMBUS_PIN_3_BXT; + else { + MISSING_CASE(port); + ddc_pin = GMBUS_PIN_DPB; + } + break; case PORT_B: ddc_pin = GMBUS_PIN_1_BXT; break; @@ -2365,7 +2373,8 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port); - if (WARN_ON(port == PORT_A)) + if (!intel_vgpu_active(dev_priv) && + WARN_ON(port == PORT_A)) return; intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port); diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 174479232e94..534223874dd2 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -186,7 +186,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, const struct i915_request *last, int prio) { - return (intel_engine_has_preemption(engine) && + return (!intel_vgpu_active(engine->i915) && + intel_engine_has_preemption(engine) && __execlists_need_preempt(prio, rq_prio(last)) && !i915_request_completed(last)); } @@ -451,6 +452,8 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) struct intel_engine_execlists *execlists = &engine->execlists; struct execlist_port *port = execlists->port; unsigned int n; + u32 descs[4]; + int i = 0; /* * We can skip acquiring intel_runtime_pm_get() here as it was taken @@ -493,10 +496,27 @@ static void execlists_submit_ports(struct intel_engine_cs *engine) GEM_BUG_ON(!n); desc = 0; } + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + BUG_ON(i >= 4); + descs[i] = upper_32_bits(desc); + descs[i + 1] = lower_32_bits(desc); + i += 2; + continue; + } write_desc(execlists, desc, n); } - + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + spin_lock(&engine->i915->shared_page_lock); + writel(descs[0], elsp_data); + writel(descs[1], elsp_data + 1); + writel(descs[2], elsp_data + 2); + writel(descs[3], execlists->submit_reg); + spin_unlock(&engine->i915->shared_page_lock); + } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) writel(EL_CTRL_LOAD, execlists->ctrl_reg); @@ -552,10 +572,24 @@ static void 
inject_preempt_context(struct intel_engine_cs *engine) * the state of the GPU is known (idle). */ GEM_TRACE("%s\n", engine->name); - for (n = execlists_num_ports(execlists); --n; ) - write_desc(execlists, 0, n); - write_desc(execlists, ce->lrc_desc, n); + if (intel_vgpu_active(engine->i915) && + PVMMIO_LEVEL(engine->i915, PVMMIO_ELSP_SUBMIT)) { + u32 __iomem *elsp_data = engine->i915->shared_page->elsp_data; + + spin_lock(&engine->i915->shared_page_lock); + writel(0, elsp_data); + writel(0, elsp_data + 1); + writel(upper_32_bits(ce->lrc_desc), elsp_data + 2); + writel(lower_32_bits(ce->lrc_desc), execlists->submit_reg); + spin_unlock(&engine->i915->shared_page_lock); + + } else { + for (n = execlists_num_ports(execlists); --n; ) + write_desc(execlists, 0, n); + + write_desc(execlists, ce->lrc_desc, n); + } /* we need to manually load the submit queue */ if (execlists->ctrl_reg) @@ -2369,6 +2403,16 @@ logical_ring_default_irqs(struct intel_engine_cs *engine) engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; } +static void i915_error_reset(struct work_struct *work) { + struct intel_engine_cs *engine = + container_of(work, struct intel_engine_cs, + reset_work); + i915_handle_error(engine->i915, 1 << engine->id, + I915_ERROR_CAPTURE, + "Received error interrupt from engine %d", + engine->id); +} + static void logical_ring_setup(struct intel_engine_cs *engine) { @@ -2382,6 +2426,8 @@ logical_ring_setup(struct intel_engine_cs *engine) logical_ring_default_vfuncs(engine); logical_ring_default_irqs(engine); + + INIT_WORK(&engine->reset_work, i915_error_reset); } static bool csb_force_mmio(struct drm_i915_private *i915) @@ -2712,6 +2758,14 @@ populate_lr_context(struct i915_gem_context *ctx, _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT); + /* write the context's pid and hw_id/cid to the per-context HWS page */ + if(intel_vgpu_active(engine->i915) && pid_nr(ctx->pid)) { + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_PID_ADDR) + = pid_nr(ctx->pid) & 0x3fffff; + *(u32*)(vaddr + LRC_PPHWSP_PN * PAGE_SIZE + I915_GEM_HWS_CID_ADDR) + = ctx->hw_id & 0x3fffff; + } + err_unpin_ctx: i915_gem_object_unpin_map(ctx_obj); return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 43ae9de12ba3..07a8f8dc4935 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -33,6 +33,10 @@ #include #include +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + /** * DOC: RC6 * @@ -803,11 +807,14 @@ static int intel_wm_num_levels(struct drm_i915_private *dev_priv) static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = plane_state ? to_intel_plane(plane_state->base.plane) : NULL; /* FIXME check the 'enable' instead */ if (!crtc_state->base.active) return false; + if (!plane_state && i915_modparams.avail_planes_per_pipe) { + return true; + } /* * Treat cursor with fb as always visible since cursor updates @@ -3706,7 +3713,6 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) struct intel_crtc *crtc; struct intel_plane *plane; struct intel_crtc_state *cstate; - enum pipe pipe; int level, latency; int sagv_block_time_us; @@ -3732,8 +3738,10 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) return false; /* Since we're now guaranteed to only have one active CRTC... 
*/ - pipe = ffs(intel_state->active_crtcs) - 1; - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + crtc = get_intel_crtc_from_index(dev, + ffs(intel_state->active_crtcs) - 1); + if (!crtc) + return false; cstate = to_intel_crtc_state(crtc->base.state); if (crtc->base.state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) @@ -3755,6 +3763,7 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) latency = dev_priv->wm.skl_latency[level]; if (skl_needs_memory_bw_wa(intel_state) && + plane->base.state->fb && plane->base.state->fb->modifier == I915_FORMAT_MOD_X_TILED) latency += 15; @@ -3968,11 +3977,15 @@ static uint_fixed_16_16_t skl_plane_downscale_amount(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate) { - struct intel_plane *plane = to_intel_plane(pstate->base.plane); + struct intel_plane *plane = pstate ? to_intel_plane(pstate->base.plane) : NULL; uint32_t src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; + if (!pstate && i915_modparams.avail_planes_per_pipe) { + return mul_fixed16(u32_to_fixed16(1), u32_to_fixed16(1)); + } + if (WARN_ON(!intel_wm_plane_visible(cstate, pstate))) return u32_to_fixed16(0); @@ -4483,9 +4496,9 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, const struct intel_plane_state *intel_pstate, struct skl_wm_params *wp, int plane_id) { - struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane); - const struct drm_plane_state *pstate = &intel_pstate->base; - const struct drm_framebuffer *fb = pstate->fb; + struct intel_plane *plane = intel_pstate ? to_intel_plane(intel_pstate->base.plane) : NULL; + const struct drm_plane_state *pstate = intel_pstate ? &intel_pstate->base : NULL; + const struct drm_framebuffer *fb = pstate ? pstate->fb : NULL; uint32_t interm_pbpl; struct intel_atomic_state *state = to_intel_atomic_state(cstate->base.state); @@ -4500,6 +4513,19 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, return -EINVAL; } + if (!intel_pstate && i915_modparams.avail_planes_per_pipe) { + wp->y_tiled = false; + wp->x_tiled = true; + wp->cpp = 4; + wp->y_min_scanlines = 8; + wp->rc_surface = fb ? fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || + fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS : 0; + wp->is_planar = fb ? 
fb->format->format == DRM_FORMAT_NV12 : 0; + wp->width = cstate->pipe_src_w; + wp->dbuf_block_size = 512; + goto calculate_wm; + } + wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED || fb->modifier == I915_FORMAT_MOD_Yf_TILED || fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || @@ -4524,8 +4550,6 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, wp->width /= 2; wp->cpp = fb->format->cpp[plane_id]; - wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, - intel_pstate); if (INTEL_GEN(dev_priv) >= 11 && fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 8) @@ -4556,6 +4580,9 @@ skl_compute_plane_wm_params(const struct drm_i915_private *dev_priv, if (apply_memory_bw_wa) wp->y_min_scanlines *= 2; +calculate_wm: + wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, + intel_pstate); wp->plane_bytes_per_line = wp->width * wp->cpp; if (wp->y_tiled) { interm_pbpl = DIV_ROUND_UP(wp->plane_bytes_per_line * @@ -4594,7 +4621,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, const struct skl_wm_level *result_prev, struct skl_wm_level *result /* out */) { - const struct drm_plane_state *pstate = &intel_pstate->base; + const struct drm_plane_state *pstate = intel_pstate ? &intel_pstate->base : NULL; + const struct drm_framebuffer *fb = pstate ? pstate->fb : NULL; uint32_t latency = dev_priv->wm.skl_latency[level]; uint_fixed_16_16_t method1, method2; uint_fixed_16_16_t selected_result; @@ -4647,7 +4675,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, wp->plane_blocks_per_line); /* Display WA #1125: skl,bxt,kbl,glk */ - if (level == 0 && wp->rc_surface) + if (fb && level == 0 && wp->rc_surface) res_blocks += fixed16_to_u32_round_up(wp->y_tile_minimum); /* Display WA #1126: skl,bxt,kbl,glk */ @@ -4705,12 +4733,16 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (level) { return 0; } else { - struct drm_plane *plane = pstate->plane; + struct drm_plane *plane = pstate ? pstate->plane : NULL; DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n"); - DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", + + if (plane) { + DRM_DEBUG_KMS("[PLANE:%d:%s] blocks required = %u/%u, lines required = %u/31\n", plane->base.id, plane->name, res_blocks, ddb_allocation, res_lines); + } + return -EINVAL; } } @@ -4741,18 +4773,16 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, const struct intel_plane_state *intel_pstate, const struct skl_wm_params *wm_params, struct skl_plane_wm *wm, - int plane_id) + int plane_id, + enum plane_id intel_plane_id) { struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - struct drm_plane *plane = intel_pstate->base.plane; - struct intel_plane *intel_plane = to_intel_plane(plane); uint16_t ddb_blocks; enum pipe pipe = intel_crtc->pipe; int level, max_level = ilk_wm_max_level(dev_priv); - enum plane_id intel_plane_id = intel_plane->id; int ret; - if (WARN_ON(!intel_pstate->base.fb)) + if (WARN_ON(intel_pstate && !intel_pstate->base.fb)) return -EINVAL; ddb_blocks = plane_id ? 
@@ -4782,7 +4812,8 @@ skl_compute_wm_levels(const struct drm_i915_private *dev_priv, return ret; } - if (intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) + if (intel_pstate && + intel_pstate->base.fb->format->format == DRM_FORMAT_NV12) wm->is_planar = true; return 0; @@ -4866,16 +4897,109 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate, trans_wm->plane_en = false; } -static int skl_build_pipe_wm(struct intel_crtc_state *cstate, +static int skl_build_plane_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm, + int pipe, + enum plane_id plane_id, + struct intel_plane_state *intel_pstate) +{ + struct drm_device *dev = cstate->base.crtc->dev; + const struct drm_i915_private *dev_priv = to_i915(dev); + struct skl_plane_wm *wm; + struct skl_wm_params wm_params; + uint16_t ddb_blocks; + int ret; + + wm = &pipe_wm->planes[plane_id]; + ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); + + ret = skl_compute_plane_wm_params(dev_priv, cstate, + intel_pstate, &wm_params, 0); + if (ret) + return ret; + + ret = skl_compute_wm_levels(dev_priv, ddb, cstate, + intel_pstate, &wm_params, wm, 0, plane_id); + if (ret) + return ret; + + skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], + ddb_blocks, &wm->trans_wm); + + /* uv plane watermarks must also be validated for NV12/Planar */ + if (wm_params.is_planar) { + memset(&wm_params, 0, sizeof(struct skl_wm_params)); + wm->is_planar = true; + + ret = skl_compute_plane_wm_params(dev_priv, cstate, + intel_pstate, + &wm_params, 1); + if (ret) + return ret; + + ret = skl_compute_wm_levels(dev_priv, ddb, cstate, + intel_pstate, &wm_params, + wm, 1, plane_id); + if (ret) + return ret; + } + + return 0; +} + +static int skl_build_pipe_all_plane_wm(struct intel_crtc_state *cstate, struct skl_ddb_allocation *ddb, struct skl_pipe_wm *pipe_wm) { struct drm_device *dev = cstate->base.crtc->dev; - struct drm_crtc_state *crtc_state = &cstate->base; const struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc); + struct drm_crtc_state *crtc_state = &cstate->base; + struct drm_plane *plane; + const struct drm_plane_state *pstate; + struct intel_plane_state *intel_pstate; + int pipe = crtc->pipe; + int plane_id; + int ret; + + memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); + + /* + * Since Dom0 may not own all planes on this pipe, there will + * not be a valid intel_plane for the planes it doesn't own. + * Therefore, we have to pass NULL to skl_compute_wm_level() + * which will then know that this plane is not owned by Dom0 + * and hence will use width and height from the crtc and will + * also assume cpp = 4 and tiling = x_tiled. 
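For reference, the paravirtualized write-out further below packs each watermark level and DDB entry into single register-sized words before copying them to the shared page. A sketch of both encodings (the PLANE_WM bit positions are an assumption based on the SKL register layout, not spelled out in this hunk):

#include <stdio.h>
#include <stdint.h>

#define PLANE_WM_EN          (1u << 31)	/* assumed bit position */
#define PLANE_WM_LINES_SHIFT 14		/* assumed shift */

/* Mirrors skl_pv_write_wm_level() below. */
static uint32_t encode_wm_level(int en, uint32_t blocks, uint32_t lines)
{
	uint32_t val = 0;

	if (en)
		val = PLANE_WM_EN | blocks | (lines << PLANE_WM_LINES_SHIFT);
	return val;
}

/* Mirrors skl_pv_ddb_entry_write(): (end - 1) in the high half, start low. */
static uint32_t encode_ddb_entry(uint32_t start, uint32_t end)
{
	return end ? ((end - 1) << 16 | start) : 0;
}

int main(void)
{
	printf("wm 0x%08x ddb 0x%08x\n",
	       encode_wm_level(1, 42, 3), encode_ddb_entry(0, 160));
	return 0;
}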
+ */ + for_each_universal_plane(dev_priv, pipe, plane_id) { + intel_pstate = NULL; + + drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) { + if (plane_id == to_intel_plane(plane)->id) { + intel_pstate = to_intel_plane_state(pstate); + break; + } + } + + ret = skl_build_plane_wm(cstate, ddb, pipe_wm, + pipe, plane_id, (struct intel_plane_state *) intel_pstate); + if (ret) + return ret; + } + pipe_wm->linetime = skl_compute_linetime_wm(cstate); + + return 0; +} + +static int skl_build_pipe_wm(struct intel_crtc_state *cstate, + struct skl_ddb_allocation *ddb, + struct skl_pipe_wm *pipe_wm) +{ + struct drm_crtc_state *crtc_state = &cstate->base; struct drm_plane *plane; const struct drm_plane_state *pstate; - struct skl_plane_wm *wm; int ret; /* @@ -4888,43 +5012,12 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate, const struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); enum plane_id plane_id = to_intel_plane(plane)->id; - struct skl_wm_params wm_params; enum pipe pipe = to_intel_crtc(cstate->base.crtc)->pipe; - uint16_t ddb_blocks; - wm = &pipe_wm->planes[plane_id]; - ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]); - - ret = skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, &wm_params, 0); + ret = skl_build_plane_wm(cstate, ddb, pipe_wm, + pipe, plane_id, (struct intel_plane_state *) intel_pstate); if (ret) return ret; - - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, wm, 0); - if (ret) - return ret; - - skl_compute_transition_wm(cstate, &wm_params, &wm->wm[0], - ddb_blocks, &wm->trans_wm); - - /* uv plane watermarks must also be validated for NV12/Planar */ - if (wm_params.is_planar) { - memset(&wm_params, 0, sizeof(struct skl_wm_params)); - wm->is_planar = true; - - ret = skl_compute_plane_wm_params(dev_priv, cstate, - intel_pstate, - &wm_params, 1); - if (ret) - return ret; - - ret = skl_compute_wm_levels(dev_priv, ddb, cstate, - intel_pstate, &wm_params, - wm, 1); - if (ret) - return ret; - } } pipe_wm->linetime = skl_compute_linetime_wm(cstate); @@ -4957,6 +5050,70 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv, I915_WRITE(reg, val); } +static void skl_pv_write_wm_level(u32 *plane_wm_level, + const struct skl_wm_level *level) +{ + uint32_t val = 0; + + if (level->plane_en) { + val |= PLANE_WM_EN; + val |= level->plane_res_b; + val |= level->plane_res_l << PLANE_WM_LINES_SHIFT; + } + + *plane_wm_level = val; +} + +static void skl_pv_ddb_entry_write(u32 *plane_cfg, + const struct skl_ddb_entry *entry) +{ + if (entry->end) + *plane_cfg = (entry->end - 1) << 16 | entry->start; + else + *plane_cfg = 0; +} + +static void skl_pv_write_plane_wm(struct intel_crtc *intel_crtc, + const struct skl_plane_wm *wm, + const struct skl_ddb_allocation *ddb, + enum plane_id plane_id) +{ + int i, level; + struct pv_plane_wm_update tmp_plane_wm; + struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); + int max_level = ilk_wm_max_level(dev_priv); + u32 __iomem *pv_plane_wm = (u32 *)&(dev_priv->shared_page->pv_plane_wm); + enum pipe pipe = intel_crtc->pipe; + + memset(&tmp_plane_wm, 0, sizeof(struct pv_plane_wm_update)); + tmp_plane_wm.max_wm_level = max_level; + for (level = 0; level <= max_level; level++) { + skl_pv_write_wm_level(&tmp_plane_wm.plane_wm_level[level], + &wm->wm[level]); + } + skl_pv_write_wm_level(&tmp_plane_wm.plane_trans_wm_level, + &wm->trans_wm); + + if (wm->is_planar) { + skl_pv_ddb_entry_write(&tmp_plane_wm.plane_buf_cfg, + 
&ddb->uv_plane[pipe][plane_id]); + } else { + skl_pv_ddb_entry_write(&tmp_plane_wm.plane_buf_cfg, + &ddb->plane[pipe][plane_id]); + } + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_wm_update) / 4; i++) + writel(*((u32 *)(&tmp_plane_wm) + i), pv_plane_wm + i); + if (wm->is_planar) + skl_ddb_entry_write(dev_priv, + PLANE_NV12_BUF_CFG(pipe, plane_id), + &ddb->plane[pipe][plane_id]); + else + I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0); + spin_unlock(&dev_priv->shared_page_lock); +} + static void skl_write_plane_wm(struct intel_crtc *intel_crtc, const struct skl_plane_wm *wm, const struct skl_ddb_allocation *ddb, @@ -4968,6 +5125,21 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc, int level, max_level = ilk_wm_max_level(dev_priv); enum pipe pipe = intel_crtc->pipe; + if (INTEL_GEN(dev_priv) < 11) { + /* + * When the plane restriction feature is enabled, the SOS + * trap handlers for plane WM related registers are null. + */ + /* TODO: uncomment when plane restriction feature is enabled */ +#if 0 + if (i915_modparams.avail_planes_per_pipe) + return; +#endif + if (PVMMIO_LEVEL(dev_priv, PVMMIO_PLANE_WM_UPDATE)) + return skl_pv_write_plane_wm(intel_crtc, wm, + ddb, plane_id); + } + for (level = 0; level <= max_level; level++) { skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level), &wm->wm[level]); @@ -5059,7 +5231,10 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate, struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate); int ret; - ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); + if (i915_modparams.avail_planes_per_pipe) + ret = skl_build_pipe_all_plane_wm(intel_cstate, ddb, pipe_wm); + else + ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm); if (ret) return ret; @@ -5128,6 +5303,23 @@ skl_compute_ddb(struct drm_atomic_state *state) memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb)); +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + /* + * In a GVT environment, allocate the DDB for all planes on the + * active crtcs. When the set of active pipes changes, + * intel_state->active_crtcs is non-zero and is updated before + * dev_priv->active_crtcs, so use intel_state->active_crtcs + * whenever it is non-zero. + */ + if (dev_priv->gvt) { + unsigned int active_crtcs; + + active_crtcs = intel_state->active_crtcs ?
+ intel_state->active_crtcs : dev_priv->active_crtcs; + intel_gvt_allocate_ddb(dev_priv->gvt, ddb, active_crtcs); + return 0; + } +#endif + for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) { ret = skl_allocate_pipe_ddb(cstate, ddb); if (ret) @@ -5286,10 +5478,14 @@ skl_compute_wm(struct drm_atomic_state *state) struct drm_crtc_state *cstate; struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct skl_ddb_values *results = &intel_state->wm_results; + struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); struct skl_pipe_wm *pipe_wm; bool changed = false; int ret, i; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return 0; + /* Clear all dirty flags */ results->dirty_pipes = 0; @@ -5348,12 +5544,29 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, enum pipe pipe = crtc->pipe; enum plane_id plane_id; + if (intel_vgpu_active(dev_priv) && i915_modparams.avail_planes_per_pipe) + return; + if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); + if (i915_modparams.avail_planes_per_pipe) { + for_each_universal_plane(dev_priv, pipe, plane_id) { + skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], + ddb, plane_id); + } + + return; + } + for_each_plane_id_on_crtc(crtc, plane_id) { +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif if (plane_id != PLANE_CURSOR) skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id], ddb, plane_id); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index f5ffa6d31e82..ff42345d735b 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -567,6 +567,7 @@ struct intel_engine_cs { } semaphore; struct intel_engine_execlists execlists; + struct work_struct reset_work; /* Contexts are pinned whilst they are active on the GPU. 
The last * context executed remains active whilst the GPU is idle - the @@ -789,6 +790,11 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_GEM_HWS_SCRATCH_INDEX 0x40 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_PID_INDEX 0x50 +#define I915_GEM_HWS_PID_ADDR (I915_GEM_HWS_PID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) +#define I915_GEM_HWS_CID_INDEX 0x58 +#define I915_GEM_HWS_CID_ADDR (I915_GEM_HWS_CID_INDEX << MI_STORE_DWORD_INDEX_SHIFT) + #define I915_HWS_CSB_BUF0_INDEX 0x10 #define I915_HWS_CSB_WRITE_INDEX 0x1f #define CNL_HWS_CSB_WRITE_INDEX 0x2f diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index f7026e887fa9..e71dd44c64ab 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -41,6 +41,10 @@ #include #include "i915_drv.h" +#if IS_ENABLED(CONFIG_DRM_I915_GVT) +#include "gvt.h" +#endif + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -228,6 +232,68 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) #endif } +static void pv_update_plane_reg(struct intel_plane *plane, + u32 stride, uint32_t src_w, uint32_t src_h, + uint32_t crtc_w, uint32_t crtc_h, u32 aux_stride, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int i; + struct pv_plane_update tmp_plane; + uint32_t x = plane_state->main.x; + uint32_t y = plane_state->main.y; + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 __iomem *pv_plane = (u32 *)&(dev_priv->shared_page->pv_plane); + + memset(&tmp_plane, 0, sizeof(struct pv_plane_update)); + if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { + tmp_plane.flags |= PLANE_COLOR_CTL_BIT; + tmp_plane.plane_color_ctl = PLANE_COLOR_PIPE_GAMMA_ENABLE | + PLANE_COLOR_PIPE_CSC_ENABLE | + PLANE_COLOR_PLANE_GAMMA_DISABLE; + } + + if (plane_state->ckey.flags) { + tmp_plane.flags |= PLANE_KEY_BIT; + tmp_plane.plane_key_val = plane_state->ckey.min_value; + tmp_plane.plane_key_max = plane_state->ckey.max_value; + tmp_plane.plane_key_msk = plane_state->ckey.channel_mask; + } + + tmp_plane.plane_offset = (y << 16) | x; + tmp_plane.plane_stride = stride; + tmp_plane.plane_size = (src_h << 16) | src_w; + tmp_plane.plane_aux_dist = + (plane_state->aux.offset - plane_state->main.offset) | + aux_stride; + tmp_plane.plane_aux_offset = + (plane_state->aux.y << 16) | plane_state->aux.x; + + /* program plane scaler */ + if (plane_state->scaler_id >= 0) { + tmp_plane.flags |= PLANE_SCALER_BIT; + tmp_plane.ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane->id) | + crtc_state->scaler_state.scalers[plane_state->scaler_id].mode; + tmp_plane.ps_pwr_gate = 0; + tmp_plane.ps_win_ps = + (plane_state->base.dst.x1 << 16) | plane_state->base.dst.y1; + tmp_plane.ps_win_sz = ((crtc_w + 1) << 16) | (crtc_h + 1); + tmp_plane.plane_pos = 0; + } else { + tmp_plane.plane_pos = + (plane_state->base.dst.y1 << 16) | plane_state->base.dst.x1; + } + + tmp_plane.plane_ctl = plane_state->ctl; + + spin_lock(&dev_priv->shared_page_lock); + for (i = 0; i < sizeof(struct pv_plane_update) / 4; i++) + writel(*((u32 *)(&tmp_plane) + i), pv_plane + i); + I915_WRITE_FW(PLANE_SURF(plane->pipe, plane->id), + intel_plane_ggtt_offset(plane_state) + plane_state->main.offset); + spin_unlock(&dev_priv->shared_page_lock); +} + void skl_update_plane(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -253,12 +319,23 @@ 
skl_update_plane(struct intel_plane *plane, uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif /* Sizes are 0 based */ src_w--; src_h--; crtc_w--; crtc_h--; + if (PVMMIO_LEVEL(dev_priv, PVMMIO_PLANE_UPDATE)) { + pv_update_plane_reg(plane, stride, src_w, src_h, + crtc_w, crtc_h, aux_stride, crtc_state, plane_state); + return; + } + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -336,6 +413,12 @@ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc) enum pipe pipe = plane->pipe; unsigned long irqflags; +#if IS_ENABLED(CONFIG_DRM_I915_GVT) + if (dev_priv->gvt && + dev_priv->gvt->pipe_info[pipe].plane_owner[plane_id]) + return; +#endif + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); @@ -956,7 +1039,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane, return ret; } -static int +int intel_check_sprite_plane(struct intel_plane *plane, struct intel_crtc_state *crtc_state, struct intel_plane_state *state) @@ -1541,10 +1624,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, num_plane_formats = ARRAY_SIZE(skl_plane_formats); } - if (intel_plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + modifiers = i9xx_plane_format_modifiers; plane_funcs = &skl_plane_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { diff --git a/drivers/gpu/drm/i915/intel_wopcm.c b/drivers/gpu/drm/i915/intel_wopcm.c index 74bf76f3fddc..10d1ea189775 100644 --- a/drivers/gpu/drm/i915/intel_wopcm.c +++ b/drivers/gpu/drm/i915/intel_wopcm.c @@ -63,6 +63,9 @@ #define GEN9_GUC_FW_RESERVED (128 * 1024) #define GEN9_GUC_WOPCM_OFFSET (GUC_WOPCM_RESERVED + GEN9_GUC_FW_RESERVED) +#define GEN9_GUC_9_29_SIZE ((142 * 1024) + 768) +#define GEN9_HUC_1_07_SIZE ((150 * 1024) + 576) + /** * intel_wopcm_init_early() - Early initialization of the WOPCM. * @wopcm: pointer to intel_wopcm. @@ -155,8 +158,8 @@ static inline int check_hw_restriction(struct drm_i915_private *i915, int intel_wopcm_init(struct intel_wopcm *wopcm) { struct drm_i915_private *i915 = wopcm_to_i915(wopcm); - u32 guc_fw_size = intel_uc_fw_get_upload_size(&i915->guc.fw); - u32 huc_fw_size = intel_uc_fw_get_upload_size(&i915->huc.fw); + u32 guc_fw_size = GEN9_GUC_9_29_SIZE; + u32 huc_fw_size = GEN9_HUC_1_07_SIZE; u32 ctx_rsvd = context_reserved_size(i915); u32 guc_wopcm_base; u32 guc_wopcm_size; @@ -207,6 +210,13 @@ int intel_wopcm_init(struct intel_wopcm *wopcm) wopcm->guc.base = guc_wopcm_base; wopcm->guc.size = guc_wopcm_size; + /* + * In deferred fw loading, we defer the intel_guc_init which will + * initialize the guc.ggtt_pin_bias. 
As it relies on wopcm size, + * set the ggtt_pin_bias after wopcm initialization + */ + i915->guc.ggtt_pin_bias = i915->wopcm.size - i915->wopcm.guc.base; + return 0; } diff --git a/drivers/hwtracing/intel_th/Kconfig b/drivers/hwtracing/intel_th/Kconfig index ca0527d588e9..4be9d14de4fb 100644 --- a/drivers/hwtracing/intel_th/Kconfig +++ b/drivers/hwtracing/intel_th/Kconfig @@ -1,6 +1,6 @@ config INTEL_TH tristate "Intel(R) Trace Hub controller" - depends on HAS_DMA && HAS_IOMEM + depends on HAS_DMA && HAS_IOMEM && X86 help Intel(R) Trace Hub (TH) is a set of hardware blocks (subdevices) that produce, switch and output trace data from multiple hardware and @@ -67,6 +67,28 @@ config INTEL_TH_MSU Say Y here to enable MSU output device for Intel TH. +config INTEL_TH_MSU_DVC + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc" + depends on DVC_TRACE_BUS + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. + This provides the means to route this data over USB, + using DvC-Trace. + + Say Y here to enable DvC-Trace output device for Intel TH. + +config INTEL_TH_MSU_DVC_DEBUG + tristate "Intel Trace Hub Memory Storage Unit to USB-dvc debug" + depends on INTEL_TH_MSU_DVC + help + Memory Storage Unit (MSU) trace output device enables + storing STP traces to system memory. + This enables extensive logging and collection of + statistical data on MSU/DvC-Trace device performance. + + Say Y to enable extended debug features on MSU-DvC. + config INTEL_TH_PTI tristate "Intel(R) Trace Hub PTI output" help @@ -82,4 +104,15 @@ config INTEL_TH_DEBUG help Say Y here to enable debugging. +config INTEL_TH_EARLY_PRINTK + bool "Intel TH early printk console" + depends on INTEL_TH=y + default n + ---help--- + Enables early printk console. + When the early printk console is enabled in the kernel + command line, kernel log messages are sent to Intel TH + (hence they are aggregated with the other trace messages + from the platform). 
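The early-console parameter enabled by this Kconfig entry is parsed in early_intel_th_init() (see early_printk.c below): a ,0x<sw_bar>:<channel> token, with anything after the next comma ignored. A userspace sketch of that parsing, using an example address made up for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int parse_intel_th_param(const char *s, unsigned long *addr,
				unsigned long *chan)
{
	char buf[32] = { 0 };
	char *colon;
	size_t n;

	if (*s == ',')
		s++;
	if (strncmp(s, "0x", 2))
		return -1;

	n = strcspn(s, ",");		/* stop at a trailing option */
	if (n > sizeof(buf) - 1)
		return -1;
	memcpy(buf, s, n);

	colon = strchr(buf, ':');
	if (!colon)
		return -1;
	*colon = '\0';

	*addr = strtoul(buf, NULL, 16);	/* sw_bar, hex */
	*chan = strtoul(colon + 1, NULL, 0);
	return 0;
}

int main(void)
{
	unsigned long addr, chan;

	if (!parse_intel_th_param(",0xfe100000:16,keep", &addr, &chan))
		printf("sw_bar 0x%lx channel %lu\n", addr, chan);
	return 0;
}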
+ endif diff --git a/drivers/hwtracing/intel_th/Makefile b/drivers/hwtracing/intel_th/Makefile index d9252fa8d9ca..6d3dc7de0bb3 100644 --- a/drivers/hwtracing/intel_th/Makefile +++ b/drivers/hwtracing/intel_th/Makefile @@ -18,5 +18,12 @@ intel_th_sth-y := sth.o obj-$(CONFIG_INTEL_TH_MSU) += intel_th_msu.o intel_th_msu-y := msu.o +obj-$(CONFIG_INTEL_TH_MSU_DVC) += intel_th_msu_dvc.o +intel_th_msu_dvc-y := msu-dvc.o +subdir-ccflags-$(CONFIG_INTEL_TH_MSU_DVC_DEBUG) += -DMDD_DEBUG + obj-$(CONFIG_INTEL_TH_PTI) += intel_th_pti.o intel_th_pti-y := pti.o + +obj-$(CONFIG_INTEL_TH_EARLY_PRINTK) += intel_th_early_printk.o +intel_th_early_printk-y := early_printk.o diff --git a/drivers/hwtracing/intel_th/acpi.c b/drivers/hwtracing/intel_th/acpi.c index 87bc3744755f..5aef93d75ff2 100644 --- a/drivers/hwtracing/intel_th/acpi.c +++ b/drivers/hwtracing/intel_th/acpi.c @@ -34,6 +34,13 @@ static const struct acpi_device_id intel_th_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, intel_th_acpi_ids); +static void intel_th_acpi_reset(struct intel_th *th) +{ + /* Software reset */ + + /* Always set FON for S0ix flow */ +} + static int intel_th_acpi_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); @@ -45,7 +52,7 @@ static int intel_th_acpi_probe(struct platform_device *pdev) return -ENODEV; th = intel_th_alloc(&pdev->dev, (void *)id->driver_data, - pdev->resource, pdev->num_resources, -1); + pdev->resource, pdev->num_resources, -1,intel_th_acpi_reset); if (IS_ERR(th)) return PTR_ERR(th); diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index fc6b7f8b62fb..5a5464944e80 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c @@ -215,48 +215,75 @@ static ssize_t port_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RO(port); -static int intel_th_output_activate(struct intel_th_device *thdev) +/** + * intel_th_output_activate() - call output initialization procedure + * @output: output to activate + */ +int intel_th_output_activate(struct intel_th_output *output) { - struct intel_th_driver *thdrv = - to_intel_th_driver_or_null(thdev->dev.driver); - struct intel_th *th = to_intel_th(thdev); - int ret = 0; - - if (!thdrv) - return -ENODEV; + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + struct intel_th_driver *outdrv = + to_intel_th_driver(outdev->dev.driver); - if (!try_module_get(thdrv->driver.owner)) - return -ENODEV; + if (WARN_ON_ONCE(outdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; - pm_runtime_get_sync(&thdev->dev); + if (outdrv->activate) + return outdrv->activate(outdev); - if (th->activate) - ret = th->activate(th); - if (ret) - goto fail_put; + return 0; +} +EXPORT_SYMBOL_GPL(intel_th_output_activate); - if (thdrv->activate) - ret = thdrv->activate(thdev); - else - intel_th_trace_enable(thdev); +/** + * intel_th_first_trace() - notification callback for first trace + * + * Notify each child device that the first capture is about to begin. + * This gives a chance to save the current data as the Trace Hub may have + * already been configured by the BIOS to trace to a given output. 
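intel_th_start_trace() below uses a one-shot atomic so the first-trace notification runs exactly once across all outputs. C11 has no atomic_dec_if_positive(), so a compare-exchange stands in for it in this sketch:

#include <stdio.h>
#include <stdatomic.h>

static atomic_int first = 1;

static void notify_first_trace(void)
{
	printf("first trace: notifying child devices\n");
}

static void start_trace(void)
{
	int expected = 1;

	/* Only the caller that flips 1 -> 0 runs the notification. */
	if (atomic_compare_exchange_strong(&first, &expected, 0))
		notify_first_trace();
	printf("trace started\n");
}

int main(void)
{
	start_trace();
	start_trace();	/* second start skips the notification */
	return 0;
}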
+ * + * @dev: output device to notify + * @data: private data - unused + */ +static int intel_th_first_trace(struct device *dev, void *data) +{ + struct intel_th_device *thdev = + container_of(dev, struct intel_th_device, dev); + struct intel_th_driver *thdrv = + to_intel_th_driver(thdev->dev.driver); - if (ret) - goto fail_deactivate; + if (thdrv && thdrv->first_trace) + thdrv->first_trace(thdev); return 0; +} -fail_deactivate: - if (th->deactivate) - th->deactivate(th); +/** + * intel_th_start_trace() - start tracing to an output device + * @thdev: output device that requests tracing + */ +static int intel_th_start_trace(struct intel_th_device *thdev) +{ + struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); + struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); + static atomic_t first = { .counter = 1, }; -fail_put: - pm_runtime_put(&thdev->dev); - module_put(thdrv->driver.owner); + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return -EINVAL; - return ret; + if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) + return -EINVAL; + + if (atomic_dec_if_positive(&first) == 0) + device_for_each_child(&hub->dev, NULL, intel_th_first_trace); + + /* The hub has control over Intel Trace Hub. + * Let the hub start a trace if possible and activate the output. */ + return hubdrv->enable(hub, &thdev->output); } -static void intel_th_output_deactivate(struct intel_th_device *thdev) +static void intel_th_stop_trace(struct intel_th_device *thdev) { struct intel_th_driver *thdrv = to_intel_th_driver_or_null(thdev->dev.driver); @@ -298,9 +325,9 @@ static ssize_t active_store(struct device *dev, struct device_attribute *attr, if (!!val != thdev->output.active) { if (val) - ret = intel_th_output_activate(thdev); + ret = intel_th_start_trace(thdev); else - intel_th_output_deactivate(thdev); + intel_th_stop_trace(thdev); } return ret ? 
ret : size; @@ -369,6 +396,7 @@ intel_th_device_alloc(struct intel_th *th, unsigned int type, const char *name, thdev->id = id; thdev->type = type; + thdev->th = th; strcpy(thdev->name, name); device_initialize(&thdev->dev); @@ -811,10 +839,11 @@ static const struct file_operations intel_th_output_fops = { * @devres: parent's resources * @ndevres: number of resources * @irq: irq number + * @reset: parent's reset function */ struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq) + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)) { struct intel_th *th; int err, r; @@ -848,6 +877,7 @@ intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, th->resource = devres; th->num_resources = ndevres; th->irq = irq; + th->reset = reset; dev_set_drvdata(dev, th); @@ -902,10 +932,26 @@ void intel_th_free(struct intel_th *th) EXPORT_SYMBOL_GPL(intel_th_free); /** - * intel_th_trace_enable() - enable tracing for an output device - * @thdev: output device that requests tracing be enabled + * intel_th_reset() - reset hardware registers + * @hub: hub requesting the reset + */ +void intel_th_reset(struct intel_th_device *hub) +{ + struct intel_th *th = hub->th; + + if (WARN_ON_ONCE(hub->type != INTEL_TH_SWITCH)) + return; + + if (th->reset) + th->reset(th); +} +EXPORT_SYMBOL_GPL(intel_th_reset); + +/** + * intel_th_trace_switch() - execute a switch sequence + * @thdev: output device that requests tracing switch */ -int intel_th_trace_enable(struct intel_th_device *thdev) +int intel_th_trace_switch(struct intel_th_device *thdev) { struct intel_th_device *hub = to_intel_th_device(thdev->dev.parent); struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver); @@ -916,12 +962,11 @@ int intel_th_trace_enable(struct intel_th_device *thdev) if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT)) return -EINVAL; - pm_runtime_get_sync(&thdev->dev); - hubdrv->enable(hub, &thdev->output); + hubdrv->trig_switch(hub, &thdev->output); return 0; } -EXPORT_SYMBOL_GPL(intel_th_trace_enable); +EXPORT_SYMBOL_GPL(intel_th_trace_switch); /** * intel_th_trace_disable() - disable tracing for an output device diff --git a/drivers/hwtracing/intel_th/early_printk.c b/drivers/hwtracing/intel_th/early_printk.c new file mode 100644 index 000000000000..bbcb9f89161d --- /dev/null +++ b/drivers/hwtracing/intel_th/early_printk.c @@ -0,0 +1,98 @@ +#include +#include +#include +#include "sth.h" + +static unsigned long sth_phys_addr; + +void early_intel_th_init(const char *s) +{ + size_t n; + unsigned long addr, chan; + char buf[32] = {0, }; + char *match, *next; + + /* Expect ,0x<sw_bar>:<channel>[,keep] */ + if (*s == ',') + ++s; + if (strncmp(s, "0x", 2)) + goto fail; + + n = strcspn(s, ","); + if (n > sizeof(buf) - 1) + goto fail; + strncpy(buf, s, n); + next = buf; + + /* Get sw_bar */ + match = strsep(&next, ":"); + if (!match) + goto fail; + + if (kstrtoul(match, 16, &addr)) + goto fail; + + /* Get channel */ + if (kstrtoul(next, 0, &chan)) + goto fail; + + sth_phys_addr = addr + chan * sizeof(struct intel_th_channel); + return; + +fail: + pr_err("%s: invalid parameter %s\n", __func__, s); } + +static void intel_th_early_write(struct console *con, const char *buf, + unsigned len) +{ + struct intel_th_channel *channel; + const u8 *p = buf; + const u32 sven_header = 0x01000242; + + if (WARN_ON_ONCE(!sth_phys_addr)) + return; + + /* Software can send messages to Intel TH by writing to an MMIO space + * that is
divided in several Master/Channel regions. + * Write directly to the address provided through the cmdline. + */ + set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, sth_phys_addr); + channel = (struct intel_th_channel *) + (__fix_to_virt(FIX_EARLYCON_MEM_BASE) + + (sth_phys_addr & (PAGE_SIZE - 1))); + + /* Add hardcoded SVEN header + * type: DEBUG_STRING + * severity: SVEN_SEVERITY_NORMAL + * length: payload size + * subtype: SVEN_DEBUGSTR_Generic + */ + iowrite32(sven_header, &channel->DnTS); + iowrite16(len, &channel->Dn); + + while (len) { + if (len >= 4) { + iowrite32(*(u32 *)p, &channel->Dn); + p += 4; + len -= 4; + } else if (len >= 2) { + iowrite16(*(u16 *)p, &channel->Dn); + p += 2; + len -= 2; + } else { + iowrite8(*(u8 *)p, &channel->Dn); + p += 1; + len -= 1; + } + } + + iowrite32(0, &channel->FLAG); +} + +struct console intel_th_early_console = { + .name = "earlyintelth", + .write = intel_th_early_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c index 8426b7970c14..2bc9b9f0b752 100644 --- a/drivers/hwtracing/intel_th/gth.c +++ b/drivers/hwtracing/intel_th/gth.c @@ -27,14 +27,16 @@ struct gth_device; * @output: link to output device's output descriptor * @index: output port number * @port_type: one of GTH_* port type values - * @master: bitmap of masters configured for this output + * @config: output configuration backup + * @smcfreq: maintenance packet frequency backup */ struct gth_output { struct gth_device *gth; struct intel_th_output *output; unsigned int index; unsigned int port_type; - DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1); + u32 config; + u32 smcfreq; }; /** @@ -65,6 +67,8 @@ static void gth_output_set(struct gth_device *gth, int port, u32 val; int shift = (port & 3) * 8; + gth->output[port].config = config; + val = ioread32(gth->base + reg); val &= ~(0xff << shift); val |= config << shift; @@ -91,6 +95,8 @@ static void gth_smcfreq_set(struct gth_device *gth, int port, int shift = (port & 1) * 16; u32 val; + gth->output[port].smcfreq = freq; + val = ioread32(gth->base + reg); val &= ~(0xffff << shift); val |= freq << shift; @@ -139,6 +145,24 @@ gth_master_set(struct gth_device *gth, unsigned int master, int port) iowrite32(val, gth->base + reg); } +static int gth_master_get(struct gth_device *gth, unsigned int master) +{ + unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u); + unsigned int shift = (master & 0x7) * 4; + u32 val; + + if (master >= 256) { + reg = REG_GTH_GSWTDEST; + shift = 0; + } + + val = ioread32(gth->base + reg); + val &= (0xf << shift); + val >>= shift; + + return val ? 
val & 0x7 : -1; +} + static ssize_t master_attr_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -182,14 +206,7 @@ static ssize_t master_attr_store(struct device *dev, old_port = gth->master[ma->master]; if (old_port >= 0) { gth->master[ma->master] = -1; - clear_bit(ma->master, gth->output[old_port].master); - - /* - * if the port is active, program this setting, - * implies that runtime PM is on - */ - if (gth->output[old_port].output->active) - gth_master_set(gth, ma->master, -1); + gth_master_set(gth, ma->master, -1); } /* connect to the new output port, if any */ @@ -200,11 +217,8 @@ static ssize_t master_attr_store(struct device *dev, goto unlock; } - set_bit(ma->master, gth->output[port].master); - - /* if the port is active, program this setting, see above */ - if (gth->output[port].output->active) - gth_master_set(gth, ma->master, port); + gth_master_set(gth, ma->master, port); } gth->master[ma->master] = port; @@ -272,45 +286,6 @@ gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm) return config; } -/* - * Reset outputs and sources - */ -static int intel_th_gth_reset(struct gth_device *gth) -{ - u32 reg; - int port, i; - - reg = ioread32(gth->base + REG_GTH_SCRPD0); - if (reg & SCRPD_DEBUGGER_IN_USE) - return -EBUSY; - - /* Always save/restore STH and TU registers in S0ix entry/exit */ - reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; - iowrite32(reg, gth->base + REG_GTH_SCRPD0); - - /* output ports */ - for (port = 0; port < 8; port++) { - if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) == - GTH_NONE) - continue; - - gth_output_set(gth, port, 0); - gth_smcfreq_set(gth, port, 16); - } - /* disable overrides */ - iowrite32(0, gth->base + REG_GTH_DESTOVR); - - /* masters swdest_0~31 and gswdest */ - for (i = 0; i < 33; i++) - iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4); - - /* sources */ - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfc, gth->base + REG_GTH_SCR2); - - return 0; -} - /* * "outputs" attribute group */ @@ -456,6 +431,66 @@ static int intel_th_output_attributes(struct gth_device *gth) return sysfs_create_group(&gth->dev->kobj, &gth->output_group); } +/** + * intel_th_gth_stop() - stop tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * @capture_done: set when no more traces will be captured + * + * This will stop tracing using force storeEn off signal and wait for the + * pipelines to be empty for the corresponding output port. + */ +static void intel_th_gth_stop(struct gth_device *gth, + struct intel_th_output *output, + bool capture_done) +{ + struct intel_th_device *outdev = + container_of(output, struct intel_th_device, output); + unsigned long count; + u32 reg; + u32 scr2 = 0xfc | (capture_done ?
1 : 0); + + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(scr2, gth->base + REG_GTH_SCR2); + + /* wait on pipeline empty for the given port */ + for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; + count && !(reg & BIT(output->port)); count--) { + reg = ioread32(gth->base + REG_GTH_STAT); + cpu_relax(); + } + + if (!count) + dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n", + output->port); + + /* wait on output pipeline empty */ + if (output->wait_empty) + output->wait_empty(outdev); + + /* clear force capture done for next captures */ + iowrite32(0xfc, gth->base + REG_GTH_SCR2); +} + +/** + * intel_th_gth_start() - start tracing to an output device + * @gth: GTH device + * @output: output device's descriptor + * + * This will start tracing using force storeEn signal. + */ +static void intel_th_gth_start(struct gth_device *gth, + struct intel_th_output *output) +{ + u32 scr = 0xfc0000; + + if (output->multiblock) + scr |= 0xff; + + iowrite32(scr, gth->base + REG_GTH_SCR); + iowrite32(0, gth->base + REG_GTH_SCR2); +} + /** * intel_th_gth_disable() - disable tracing to an output device * @thdev: GTH device @@ -469,48 +504,49 @@ static void intel_th_gth_disable(struct intel_th_device *thdev, struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - unsigned long count; - int master; + int i; u32 reg; spin_lock(&gth->gth_lock); output->active = false; - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS) { - gth_master_set(gth, master, -1); - } - spin_unlock(&gth->gth_lock); - - iowrite32(0, gth->base + REG_GTH_SCR); - iowrite32(0xfd, gth->base + REG_GTH_SCR2); - - /* wait on pipeline empty for the given port */ - for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH; - count && !(reg & BIT(output->port)); count--) { - reg = ioread32(gth->base + REG_GTH_STAT); - cpu_relax(); - } + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + if (gth->master[i] == output->port) + gth_master_set(gth, i, -1); - /* clear force capture done for next captures */ - iowrite32(0xfc, gth->base + REG_GTH_SCR2); + spin_unlock(&gth->gth_lock); - if (!count) - dev_dbg(&thdev->dev, "timeout waiting for GTH[%d] PLE\n", - output->port); + intel_th_gth_stop(gth, output, true); reg = ioread32(gth->base + REG_GTH_SCRPD0); reg &= ~output->scratchpad; iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Workaround for PTI pipeline empty not set by hardware */ + if (output->type == GTH_PTI && + !(BIT(output->port) & ioread32(gth->base + REG_GTH_STAT))) + intel_th_reset(thdev); } -static void gth_tscu_resync(struct gth_device *gth) +/* + * Set default configuration. + */ +static void intel_th_gth_reset(struct gth_device *gth) { u32 reg; - reg = ioread32(gth->base + REG_TSCU_TSUCTRL); - reg &= ~TSUCTRL_CTCRESYNC; - iowrite32(reg, gth->base + REG_TSCU_TSUCTRL); + /* Always save/restore STH and TU registers in S0ix entry/exit */ + reg = ioread32(gth->base + REG_GTH_SCRPD0); + reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED; + iowrite32(reg, gth->base + REG_GTH_SCRPD0); + + /* Force sources off */ + iowrite32(0, gth->base + REG_GTH_SCR); + iowrite32(0xfc, gth->base + REG_GTH_SCR2); + + /* Setup CTS for single trigger */ + iowrite32(0x80000000, gth->base + REG_CTS_C0S0_EN); + iowrite32(0x40000010, gth->base + REG_CTS_C0S0_ACT); } /** @@ -521,35 +557,91 @@ static void gth_tscu_resync(struct gth_device *gth) * This will configure all masters set to output to this device and * enable tracing using force storeEn signal.
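The routing state that the disable path above tears down lives in per-master nibbles of the SWDEST registers; gth_master_get() earlier in this file recovers a port from its nibble. A sketch of that packing (the get-side offset and shift math mirrors gth_master_get(); the valid bit used on the set side is an assumption inferred from the val ? val & 0x7 : -1 read-out):

#include <stdio.h>
#include <stdint.h>

static uint32_t swdest[32];	/* stand-in for the SWDEST register file */

static void master_set(unsigned int master, int port)
{
	unsigned int reg = (master >> 1) & ~3u;	/* byte offset of the u32 */
	unsigned int shift = (master & 0x7) * 4;
	uint32_t val = swdest[reg / 4] & ~(0xfu << shift);

	if (port >= 0)
		val |= (0x8u | (uint32_t)port) << shift; /* assumed valid bit */
	swdest[reg / 4] = val;
}

static int master_get(unsigned int master)
{
	unsigned int reg = (master >> 1) & ~3u;
	unsigned int shift = (master & 0x7) * 4;
	uint32_t val = (swdest[reg / 4] >> shift) & 0xf;

	return val ? (int)(val & 0x7) : -1;
}

int main(void)
{
	master_set(16, 1);
	printf("master 16 -> %d, master 17 -> %d\n",
	       master_get(16), master_get(17));
	return 0;
}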
*/ -static void intel_th_gth_enable(struct intel_th_device *thdev, - struct intel_th_output *output) +static int intel_th_gth_enable(struct intel_th_device *thdev, + struct intel_th_output *output) { struct gth_device *gth = dev_get_drvdata(&thdev->dev); - struct intel_th *th = to_intel_th(thdev); - u32 scr = 0xfc0000, scrpd; - int master; + u32 scrpd; + int i; + int ret = -EBUSY; + + /* No operation allowed while a debugger is connected */ + scrpd = ioread32(gth->base + REG_GTH_SCRPD0); + if (scrpd & SCRPD_DEBUGGER_IN_USE) + return ret; spin_lock(&gth->gth_lock); - for_each_set_bit(master, gth->output[output->port].master, - TH_CONFIGURABLE_MASTERS + 1) { - gth_master_set(gth, master, output->port); + + /* Only allow one output active at a time */ + for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { + if (gth->output[i].output && + gth->output[i].output->active) { + spin_unlock(&gth->gth_lock); + return ret; + } } - if (output->multiblock) - scr |= 0xff; + intel_th_reset(thdev); + intel_th_gth_reset(gth); + + /* Re-configure output */ + gth_output_set(gth, output->port, gth->output[output->port].config); + gth_smcfreq_set(gth, output->port, gth->output[output->port].smcfreq); + + /* Enable masters for the output, disable others */ + for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) + gth_master_set(gth, i, gth->master[i] == output->port ? + output->port : -1); output->active = true; spin_unlock(&gth->gth_lock); - if (INTEL_TH_CAP(th, tscu_enable)) - gth_tscu_resync(gth); + /* Setup the output */ + ret = intel_th_output_activate(output); + if (ret) + return ret; scrpd = ioread32(gth->base + REG_GTH_SCRPD0); scrpd |= output->scratchpad; iowrite32(scrpd, gth->base + REG_GTH_SCRPD0); - iowrite32(scr, gth->base + REG_GTH_SCR); - iowrite32(0, gth->base + REG_GTH_SCR2); + /* Enable sources */ + intel_th_gth_start(gth, output); + + return 0; +} + +/** + * intel_th_gth_switch() - execute a switch sequence + * @thdev: GTH device + * @output: output device's descriptor + * + * This will execute a switch sequence that will trigger a switch window + * when tracing to MSC in multi-block mode.
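intel_th_gth_stop() above and the CTS trigger wait in intel_th_gth_switch() below both poll a status register with a fixed iteration budget rather than a timer. The skeleton of that pattern, with read_stat() as a hypothetical register read:

#include <stdio.h>
#include <stdint.h>

#define WAITLOOP_DEPTH 10000	/* budget, as in CTS_TRIG_WAITLOOP_DEPTH */

static uint32_t read_stat(void)
{
	static int calls;

	return ++calls > 3 ? 0x1 : 0x0;	/* bit asserts on the 4th read */
}

/* Returns 0 once the bit asserts, -1 if the budget runs out. */
static int wait_for_bit(uint32_t mask)
{
	unsigned long count;
	uint32_t reg = 0;

	for (count = WAITLOOP_DEPTH; count && !(reg & mask); count--)
		reg = read_stat();	/* the driver adds cpu_relax() here */

	return count ? 0 : -1;
}

int main(void)
{
	printf("wait: %s\n", wait_for_bit(0x1) ? "timeout" : "ok");
	return 0;
}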
+ */ +static void intel_th_gth_switch(struct intel_th_device *thdev, + struct intel_th_output *output) +{ + struct gth_device *gth = dev_get_drvdata(&thdev->dev); + unsigned long count, flags; + u32 reg; + + /* trigger */ + iowrite32(0, gth->base + REG_CTS_CTL); + iowrite32(1, gth->base + REG_CTS_CTL); + /* wait on trigger status */ + for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH; + count && !(reg & BIT(4)); count--) { + reg = ioread32(gth->base + REG_CTS_STAT); + cpu_relax(); + } + if (!count) + dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n"); + + local_irq_save(flags); + intel_th_gth_stop(gth, output, false); + intel_th_gth_start(gth, output); + local_irq_restore(flags); } /** @@ -566,9 +658,16 @@ static void intel_th_gth_enable(struct intel_th_device *thdev, static int intel_th_gth_assign(struct intel_th_device *thdev, struct intel_th_device *othdev) { - struct gth_device *gth = dev_get_drvdata(&thdev->dev); + struct gth_device *gth; int i, id; + if (!thdev || !othdev) + return -EINVAL; + + gth = dev_get_drvdata(&thdev->dev); + if (!gth) + return -EINVAL; + if (thdev->host_mode) return -EBUSY; @@ -632,10 +731,9 @@ intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master) master = TH_CONFIGURABLE_MASTERS; spin_lock(&gth->gth_lock); - if (gth->master[master] == -1) { - set_bit(master, gth->output[port].master); + if (gth->master[master] == -1) gth->master[master] = port; - } + spin_unlock(&gth->gth_lock); return 0; @@ -666,29 +764,19 @@ static int intel_th_gth_probe(struct intel_th_device *thdev) gth->base = base; spin_lock_init(&gth->gth_lock); - dev_set_drvdata(dev, gth); - - /* - * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE - * bit. Either way, don't reset HW in this case, and don't export any - * capture configuration attributes. Also, refuse to assign output - * drivers to ports, see intel_th_gth_assign(). - */ - if (thdev->host_mode) - return 0; + dev_set_drvdata(dev, gth); - ret = intel_th_gth_reset(gth); - if (ret) { - if (ret != -EBUSY) - return ret; - - thdev->host_mode = true; - - return 0; - } + /* + * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE + * bit. Either way, don't reset HW in this case, and don't export any + * capture configuration attributes. Also, refuse to assign output + * drivers to ports, see intel_th_gth_assign().
+ */ + if (thdev->host_mode) + return 0; for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) - gth->master[i] = -1; + gth->master[i] = gth_master_get(gth, i); for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) { gth->output[i].gth = gth; @@ -731,6 +819,7 @@ static struct intel_th_driver intel_th_gth_driver = { .unassign = intel_th_gth_unassign, .set_output = intel_th_gth_set_output, .enable = intel_th_gth_enable, + .trig_switch = intel_th_gth_switch, .disable = intel_th_gth_disable, .driver = { .name = "gth", diff --git a/drivers/hwtracing/intel_th/gth.h b/drivers/hwtracing/intel_th/gth.h index 6f2b0b930875..1f7d0d886320 100644 --- a/drivers/hwtracing/intel_th/gth.h +++ b/drivers/hwtracing/intel_th/gth.h @@ -49,6 +49,11 @@ enum { REG_GTH_SCRPD3 = 0xec, /* ScratchPad[3] */ REG_TSCU_TSUCTRL = 0x2000, /* TSCU control register */ REG_TSCU_TSCUSTAT = 0x2004, /* TSCU status register */ + /* Common Capture Sequencer (CTS) registers */ + REG_CTS_C0S0_EN = 0x30c0, /* clause_event_enable_c0s0 */ + REG_CTS_C0S0_ACT = 0x3180, /* clause_action_control_c0s0 */ + REG_CTS_STAT = 0x32a0, /* cts_status */ + REG_CTS_CTL = 0x32a4, /* cts_control */ }; /* waiting for Pipeline Empty bit(s) to assert for GTH */ @@ -56,5 +61,7 @@ enum { #define TSUCTRL_CTCRESYNC BIT(0) #define TSCUSTAT_CTCSYNCING BIT(1) +/* waiting for Trigger status to assert for CTS */ +#define CTS_TRIG_WAITLOOP_DEPTH 10000 #endif /* __INTEL_TH_GTH_H__ */ diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h index 780206dc9012..f385471fe56a 100644 --- a/drivers/hwtracing/intel_th/intel_th.h +++ b/drivers/hwtracing/intel_th/intel_th.h @@ -18,6 +18,8 @@ enum { INTEL_TH_SWITCH, }; +struct intel_th_device; + /** * struct intel_th_output - descriptor INTEL_TH_OUTPUT type devices * @port: output port number, assigned by the switch @@ -25,6 +27,7 @@ enum { * @scratchpad: scratchpad bits to flag when this output is enabled * @multiblock: true for multiblock output configuration * @active: true when this output is enabled + * @wait_empty: wait for device pipeline to be empty * * Output port descriptor, used by switch driver to tell which output * port this output device corresponds to. 
Filled in at output device's @@ -37,6 +40,7 @@ struct intel_th_output { unsigned int scratchpad; bool multiblock; bool active; + void (*wait_empty)(struct intel_th_device *); }; /** @@ -55,6 +59,7 @@ struct intel_th_drvdata { * struct intel_th_device - device on the intel_th bus * @dev: device * @drvdata: hardware capabilities/quirks + * @th: core device * @resource: array of resources available to this device * @num_resources: number of resources in @resource array * @type: INTEL_TH_{SOURCE,OUTPUT,SWITCH} @@ -64,12 +69,13 @@ struct intel_th_drvdata { * @name: device name to match the driver */ struct intel_th_device { - struct device dev; + struct device dev; struct intel_th_drvdata *drvdata; - struct resource *resource; - unsigned int num_resources; - unsigned int type; - int id; + struct intel_th *th; + struct resource *resource; + unsigned int num_resources; + unsigned int type; + int id; /* INTEL_TH_SWITCH specific */ bool host_mode; @@ -148,6 +154,7 @@ intel_th_output_assigned(struct intel_th_device *thdev) */ struct intel_th_driver { struct device_driver driver; + void (*first_trace)(struct intel_th_device *thdev); int (*probe)(struct intel_th_device *thdev); void (*remove)(struct intel_th_device *thdev); /* switch (GTH) ops */ @@ -155,8 +162,10 @@ struct intel_th_driver { struct intel_th_device *othdev); void (*unassign)(struct intel_th_device *thdev, struct intel_th_device *othdev); - void (*enable)(struct intel_th_device *thdev, + int (*enable)(struct intel_th_device *thdev, struct intel_th_output *output); + void (*trig_switch)(struct intel_th_device *thdev, + struct intel_th_output *output); void (*disable)(struct intel_th_device *thdev, struct intel_th_output *output); /* output ops */ @@ -213,13 +222,15 @@ static inline struct intel_th *to_intel_th(struct intel_th_device *thdev) struct intel_th * intel_th_alloc(struct device *dev, struct intel_th_drvdata *drvdata, - struct resource *devres, unsigned int ndevres, int irq); + struct resource *devres, unsigned int ndevres, int irq, void (*reset)(struct intel_th *th)); void intel_th_free(struct intel_th *th); int intel_th_driver_register(struct intel_th_driver *thdrv); void intel_th_driver_unregister(struct intel_th_driver *thdrv); -int intel_th_trace_enable(struct intel_th_device *thdev); +int intel_th_output_activate(struct intel_th_output *output); +void intel_th_reset(struct intel_th_device *hub); +int intel_th_trace_switch(struct intel_th_device *thdev); int intel_th_trace_disable(struct intel_th_device *thdev); int intel_th_set_output(struct intel_th_device *thdev, unsigned int master); @@ -246,6 +257,7 @@ enum { * @num_thdevs: number of devices in the @thdev array * @num_resources: number of resources in the @resource array * @irq: irq number + * @reset: reset function of the core device * @id: this Intel TH controller's device ID in the system * @major: device node major for output devices */ @@ -263,6 +275,8 @@ struct intel_th { unsigned int num_resources; int irq; + void (*reset)(struct intel_th *th); + int id; int major; #ifdef CONFIG_MODULES @@ -290,11 +304,11 @@ to_intel_th_hub(struct intel_th_device *thdev) enum { /* Global Trace Hub (GTH) */ REG_GTH_OFFSET = 0x0000, - REG_GTH_LENGTH = 0x2000, + REG_GTH_LENGTH = 0x4000, /* Timestamp counter unit (TSCU) */ REG_TSCU_OFFSET = 0x2000, - REG_TSCU_LENGTH = 0x1000, + REG_TSCU_LENGTH = 0x2000, /* Software Trace Hub (STH) [0x4000..0x4fff] */ REG_STH_OFFSET = 0x4000, diff --git a/drivers/hwtracing/intel_th/msu-dvc.c b/drivers/hwtracing/intel_th/msu-dvc.c new file mode 100755
index 000000000000..91693c0f8175 --- /dev/null +++ b/drivers/hwtracing/intel_th/msu-dvc.c @@ -0,0 +1,1184 @@ +/* + * Intel Trace Hub to USB dvc-trace driver + * + * Copyright (C) 2015, Intel Corporation. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "msu.h" + +#ifdef MDD_DEBUG +#define MDD_F_DEBUG() pr_debug("\n") +#else +#define MDD_F_DEBUG() do {} while (0) +#endif + +#define DTC_DRV_NAME "dvcith" + +#define MDD_MIN_TRANSFER_DEF 2048 +#define MDD_RETRY_TIMEOUT_DEF 2 +#define MDD_MAX_RETRY_CNT_DEF 150 + +/* The DWC3 gadget can handle a maximum of 32 TRBs per endpoint (an sg-based + * request counts as one TRB per sg entry). + * This should be updated in case some other UDC has a lower limit. */ +#define MDD_MAX_TRB_CNT 32 + +#define mdd_err(mdd, ...) dev_err(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_warn(mdd, ...) dev_warn(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_info(mdd, ...) dev_info(&(mdd)->ddev.device, ## __VA_ARGS__) +#define mdd_debug(mdd, ...) dev_dbg(&(mdd)->ddev.device, ## __VA_ARGS__) + +#ifdef MDD_DEBUG +struct msu_dvc_stats { + unsigned long work_start; + unsigned long work_end; + + unsigned long loop_count; + unsigned long hits; + + u64 full_block_size; + u64 valid_block_size; + u64 valid_data_size; + + u32 transfer_type:2; + u32 process_type:2; + + enum usb_device_speed speed; +}; +#endif + +enum { + MDD_TRANSFER_NO_CHANGE, + MDD_TRANSFER_AUTO, + MDD_TRANSFER_MIN = MDD_TRANSFER_AUTO, + MDD_TRANSFER_SINGLE, + MDD_TRANSFER_MULTI, + MDD_TRANSFER_MAX = MDD_TRANSFER_MULTI, +}; + +static const char *const transfer_type_name[] = { + [MDD_TRANSFER_AUTO] = "Auto", + [MDD_TRANSFER_SINGLE] = "Single", + [MDD_TRANSFER_MULTI] = "Multi", +}; + +enum { + MDD_PROC_NO_CHANGE, + MDD_PROC_NONE, + MDD_PROC_MIN = MDD_PROC_NONE, + MDD_PROC_REM_TAIL, + MDD_PROC_REM_ALL, + MDD_PROC_MAX = MDD_PROC_REM_ALL, +}; + +static const char *const process_type_name[] = { + [MDD_PROC_NONE] = "Full-Blocks", + [MDD_PROC_REM_TAIL] = "Trimmed-Blocks", + [MDD_PROC_REM_ALL] = "STP", +}; + +struct mdd_transfer_data { + u8 *buffer; + u8 *buffer_sg; + size_t buffer_sg_len; + dma_addr_t buffer_dma; + size_t buffer_len; + struct scatterlist *sg_raw; + struct scatterlist *sg_proc; + struct scatterlist *sg_trans; /* never allocated: aliases sg_raw or sg_proc */ + size_t block_count; + size_t block_size; + spinlock_t lock; +}; + +#define mdd_lock_transfer(mdd) spin_lock(&mdd->tdata.lock) +#define mdd_unlock_transfer(mdd) spin_unlock(&mdd->tdata.lock) + +struct msu_dvc_dev { + struct dvct_source_device ddev; + atomic_t *dtc_status; + struct usb_ep *ep; + struct usb_function *func; + enum usb_device_speed speed; + struct intel_th_device *th_dev; + + struct workqueue_struct *wrq; + struct work_struct work; + struct usb_request *req; + wait_queue_head_t wq; + atomic_t req_ongoing; + + /* attributes */ + u32 retry_timeout; + u32 max_retry_count; + u32 transfer_type:2; + u32 process_type:2; + u32 min_transfer; + +#ifdef MDD_DEBUG + struct msu_dvc_stats
stats; +#endif + struct mdd_transfer_data tdata; + + struct list_head mdd_list; +}; + +static LIST_HEAD(mdd_devs); +static DEFINE_SPINLOCK(mdd_devs_lock); + +static inline struct usb_gadget *mdd_gadget(struct msu_dvc_dev *mdd) +{ + BUG_ON(!mdd->func); + BUG_ON(!mdd->func->config); + BUG_ON(!mdd->func->config->cdev); + BUG_ON(!mdd->func->config->cdev->gadget); + return mdd->func->config->cdev->gadget; +} + +/* Back-cast to msu_dvc_dev */ +static inline struct msu_dvc_dev *dtc_to_mdd(struct dvct_source_device *p_dtc) +{ + return container_of(p_dtc, struct msu_dvc_dev, ddev); +} + +static ssize_t mdd_min_transfer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->min_transfer); +} + +static ssize_t mdd_min_transfer_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + /* 48 is the size of the sync frames generated by a window switch; + * only past that point do we have "real data". Going under this + * value could result in unneeded switching. */ + if (!kstrtou32(buf, 10, &mdd->min_transfer)) { + if (mdd->min_transfer < 48) + mdd->min_transfer = 48; + return count; + } + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_min_transfer); + + +static ssize_t mdd_retry_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->retry_timeout); +} + +static ssize_t mdd_retry_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->retry_timeout)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_retry_timeout); + +static ssize_t mdd_max_retry_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%u\n", mdd->max_retry_count); +} + +static ssize_t mdd_max_retry_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + if (!kstrtou32(buf, 10, &mdd->max_retry_count)) + return count; + + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_max_retry); + +static ssize_t mdd_transfer_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->transfer_type, + transfer_type_name[mdd->transfer_type]); +} + +static ssize_t mdd_transfer_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_TRANSFER_MAX +
&& tmp >= MDD_TRANSFER_MIN) { + mdd->transfer_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_transfer_type); + +static ssize_t mdd_proc_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + return sprintf(buf, "%d %s\n", mdd->process_type, + process_type_name[mdd->process_type]); +} + +static ssize_t mdd_proc_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct msu_dvc_dev *mdd; + u8 tmp; + + MDD_F_DEBUG(); + + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + if (mdd->dtc_status + && dvct_get_status(mdd->dtc_status, DVCT_MASK_TRANS)) + return -EBUSY; + + if (!kstrtou8(buf, 10, &tmp) && tmp <= MDD_PROC_MAX + && tmp >= MDD_PROC_MIN) { + mdd->process_type = tmp; + return count; + } + return -EINVAL; +} + +static DEVICE_ATTR_RW(mdd_proc_type); + +#ifdef MDD_DEBUG + +static ssize_t mdd_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct msu_dvc_dev *mdd; + int len = 0; + + static const char *const u_speed_names[] = { + [USB_SPEED_UNKNOWN] = "?", + [USB_SPEED_LOW] = "LS", + [USB_SPEED_FULL] = "FS", + [USB_SPEED_HIGH] = "HS", + [USB_SPEED_WIRELESS] = "WR", + [USB_SPEED_SUPER] = "SS", + }; + + MDD_F_DEBUG(); + mdd = container_of(dev, struct msu_dvc_dev, ddev.device); + + len += snprintf(buf + len, PAGE_SIZE - len, "R.count\tR.hits\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "T.tot_j\tT.tot_ms\t"); + len += + snprintf(buf + len, PAGE_SIZE - len, "D.total\tD.block\tT.stp\t"); + len += snprintf(buf + len, PAGE_SIZE - len, "Tr.type\tProc.type\t"); + + len += snprintf(buf + len, PAGE_SIZE - len, "USB.speed\n"); + + /* Actual values start here */ + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%lu\t", + mdd->stats.loop_count, mdd->stats.hits); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%lu\t%u\t", + (mdd->stats.work_end - mdd->stats.work_start), + jiffies_to_msecs(mdd->stats.work_end - + mdd->stats.work_start)); + len += + snprintf(buf + len, PAGE_SIZE - len, "%llu\t%llu\t%llu\t", + mdd->stats.full_block_size, mdd->stats.valid_block_size, + mdd->stats.valid_data_size); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + transfer_type_name[mdd->transfer_type]); + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\t", + process_type_name[mdd->process_type]); + + len += + snprintf(buf + len, PAGE_SIZE - len, "%s\n", + u_speed_names[mdd->stats.speed]); + + return len; +} + +static DEVICE_ATTR_RO(mdd_stats); + +static void init_stats_start(struct msu_dvc_dev *mdd) +{ + mdd->stats.loop_count = 0; + mdd->stats.hits = 0; + + mdd->stats.work_start = jiffies; + + mdd->stats.full_block_size = 0; + mdd->stats.valid_block_size = 0; + mdd->stats.valid_data_size = 0; + + mdd->stats.process_type = mdd->process_type; + mdd->stats.transfer_type = mdd->transfer_type; + mdd->stats.speed = mdd->speed; +} + +#define stats_loop(mdd) ((mdd)->stats.loop_count++) +#define stats_hit(mdd) ((mdd)->stats.hits++) +#else +#define init_stats_start(n) do {} while (0) +#define stats_loop(mdd) do {} while (0) +#define stats_hit(mdd) do {} while (0) +#endif + +static void mdd_complete(struct usb_ep *ep, struct usb_request *req) +{ + struct msu_dvc_dev *mdd = (struct msu_dvc_dev *)req->context; + + mdd_lock_transfer(mdd); + + if (req->status != 0) { + mdd_err(mdd, "USB request error %d\n", req->status); + dvct_clr_status(mdd->dtc_status,
DVCT_MASK_TRANS); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + } + atomic_set(&mdd->req_ongoing, 0); + wake_up(&mdd->wq); + mdd_unlock_transfer(mdd); +} + +static int mdd_setup_transfer_data(struct msu_dvc_dev *mdd) +{ + int ret = -EINVAL; + + MDD_F_DEBUG(); + + if (!mdd->ep || !mdd->req) { + mdd_err(mdd, "Invalid endpoint data\n"); + goto err; + } + + mdd->tdata.block_count = msc_max_blocks(mdd->th_dev); + if (mdd->tdata.block_count == 0) { + mdd_err(mdd, "Invalid block count %zu\n", + mdd->tdata.block_count); + goto err; + } + + mdd->tdata.block_size = msc_block_max_size(mdd->th_dev); + if (mdd->tdata.block_size == 0) { + mdd_err(mdd, "Invalid block size %zu\n", mdd->tdata.block_size); + goto err; + } + + mdd->tdata.sg_raw = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_raw), + GFP_KERNEL); + if (!mdd->tdata.sg_raw) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_raw; + } + + if (mdd->process_type != MDD_PROC_NONE) { + mdd->tdata.sg_proc = kmalloc_array(mdd->tdata.block_count, + sizeof(*mdd->tdata.sg_proc), + GFP_KERNEL); + if (!mdd->tdata.sg_proc) { + mdd_err(mdd, "Cannot allocate sg memory %zu\n", + mdd->tdata.block_size); + goto err_sg_proc; + } + mdd->tdata.sg_trans = mdd->tdata.sg_proc; + } else { + mdd->tdata.sg_trans = mdd->tdata.sg_raw; + } + + if (mdd->transfer_type == MDD_TRANSFER_SINGLE) { + mdd->tdata.buffer_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer = + dma_alloc_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, + &mdd->tdata.buffer_dma, GFP_KERNEL); + if (!mdd->tdata.buffer) { + mdd_err(mdd, "Cannot allocate DMA memory\n"); + goto err_l_buf; + } + } else { + mdd->tdata.buffer_sg_len = + mdd->tdata.block_count * mdd->tdata.block_size; + mdd->tdata.buffer_sg = kmalloc(mdd->tdata.buffer_sg_len, GFP_KERNEL); + if (!mdd->tdata.buffer_sg) + mdd->tdata.buffer_sg_len = 0; + + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + return 0; +err_l_buf: + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; +err_sg_proc: + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; +err_sg_raw: + ret = -ENOMEM; +err: + return ret; +} + +static void mdd_reset_transfer_data(struct msu_dvc_dev *mdd) +{ + MDD_F_DEBUG(); + kfree(mdd->tdata.sg_proc); + mdd->tdata.sg_proc = NULL; + kfree(mdd->tdata.sg_raw); + mdd->tdata.sg_raw = NULL; + if (mdd->tdata.buffer) { + dma_free_coherent(&(mdd_gadget(mdd)->dev), + mdd->tdata.buffer_len, mdd->tdata.buffer, + mdd->tdata.buffer_dma); + mdd->tdata.buffer = NULL; + mdd->tdata.buffer_dma = 0; + mdd->tdata.buffer_len = 0; + } + + if (mdd->tdata.buffer_sg) { + mdd->tdata.buffer_sg_len = 0; + kfree(mdd->tdata.buffer_sg); + mdd->tdata.buffer_sg = NULL; + } +} + +static unsigned int mdd_sg_len(struct scatterlist *sgl, int nents) +{ + int i; + struct scatterlist *sg; + unsigned int ret = 0; + + /*MDD_F_DEBUG(); */ + for_each_sg(sgl, sg, nents, i) { + ret += sg->length; + } + return ret; +} + +static int mdd_send_sg_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer_sg, + mdd->tdata.buffer_sg_len); + + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + mdd->req->buf = mdd->tdata.buffer_sg; + mdd->req->length = transfer_len; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; +
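/* mdd_complete() clears req_ongoing and wakes the wait queue below once + * the UDC finishes (or fails) this request */ +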
mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /* wait for done, stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_sg(struct msu_dvc_dev *mdd, int nents) +{ + struct scatterlist *sgl = mdd->tdata.sg_trans; + + if (mdd->tdata.buffer_sg) + return mdd_send_sg_buffer(mdd, nents); + + /*MDD_F_DEBUG(); */ + while (nents) { + int trans_ents; + + mdd_lock_transfer(mdd); + + if (nents > MDD_MAX_TRB_CNT) { + trans_ents = MDD_MAX_TRB_CNT; + sg_mark_end(&sgl[trans_ents - 1]); + } else { + trans_ents = nents; + } + + if (trans_ents == 1) { + mdd->req->buf = sg_virt(sgl); + mdd->req->length = sgl->length; + mdd->req->dma = 0; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + } else { + mdd->req->buf = NULL; + mdd->req->length = mdd_sg_len(sgl, trans_ents); + mdd->req->dma = 0; + mdd->req->sg = sgl; + mdd->req->num_sgs = trans_ents; + } + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + nents -= trans_ents; + sgl += trans_ents; + + mdd_unlock_transfer(mdd); + /* wait for done, stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + } + return 0; +} + +static int mdd_send_buffer(struct msu_dvc_dev *mdd, int nents) +{ + size_t transfer_len; + + /*MDD_F_DEBUG(); */ + mdd_lock_transfer(mdd); + transfer_len = + sg_copy_to_buffer(mdd->tdata.sg_trans, nents, mdd->tdata.buffer, + mdd->tdata.buffer_len); + if (!transfer_len) { + mdd_err(mdd, "Cannot copy into nonsg memory\n"); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + mdd->req->buf = mdd->tdata.buffer; + mdd->req->length = transfer_len; + mdd->req->dma = mdd->tdata.buffer_dma; + mdd->req->sg = NULL; + mdd->req->num_sgs = 0; + + mdd->req->context = mdd; + mdd->req->complete = mdd_complete; + mdd->req->zero = 1; + + if (usb_ep_queue(mdd->ep, mdd->req, GFP_KERNEL)) { + mdd_err(mdd, "Cannot queue request\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + mdd_unlock_transfer(mdd); + return -EINVAL; + } + + atomic_set(&mdd->req_ongoing, 1); + mdd_unlock_transfer(mdd); + /* wait for done, stop or disable */ + wait_event(mdd->wq, (!atomic_read(&mdd->req_ongoing) || + (atomic_read(mdd->dtc_status) != + DVCT_MASK_ONLINE_TRANS))); + return 0; +} + +static int mdd_send_auto(struct msu_dvc_dev *mdd, int nents) +{ + /*MDD_F_DEBUG(); */ + if (!mdd_gadget(mdd)->sg_supported) + return mdd_send_buffer(mdd, nents); + else + return mdd_send_sg(mdd, nents); +} + +static int (*send_funcs[])(struct msu_dvc_dev *, int) = { + [MDD_TRANSFER_AUTO] = mdd_send_auto, + [MDD_TRANSFER_SINGLE] = mdd_send_buffer, + [MDD_TRANSFER_MULTI] = mdd_send_sg, +}; + +#ifdef MDD_DEBUG +static int mdd_proc_add_stats(struct msu_dvc_dev *mdd, int nents) +{ + int i, count; + struct scatterlist *sg; + + /*MDD_F_DEBUG(); */ + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + count = msc_data_sz((struct msc_block_desc *)sg_virt(sg)); +
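/* each raw sg entry covers one whole trace block: a struct msc_block_desc + * header followed by payload; msc_data_sz() reads the payload size from + * that header */ +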
mdd->stats.full_block_size += sg->length; + mdd->stats.valid_block_size += (count + MSC_BDESC); + mdd->stats.valid_data_size += count; + } + + return i; +} +#else +#define mdd_proc_add_stats(m, n) do {} while (0) +#endif + +static int mdd_proc_trimmed_blocks(struct msu_dvc_dev *mdd, int nents) +{ + u8 *ptr; + size_t len; + int i, out_cnt = 0; + struct scatterlist *sg, *sg_dest = NULL; + + /*MDD_F_DEBUG(); */ + mdd_proc_add_stats(mdd, nents); + + sg_init_table(mdd->tdata.sg_proc, nents); + + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + ptr = sg_virt(sg); + len = msc_data_sz((struct msc_block_desc *)ptr); + if (!len) { + mdd_err(mdd, "Zero length block\n"); + continue; + } + + len += MSC_BDESC; + + if (!sg_dest) + sg_dest = mdd->tdata.sg_proc; + else + sg_dest = sg_next(sg_dest); + sg_set_buf(sg_dest, ptr, len); + out_cnt++; + } + if (sg_dest) + sg_mark_end(sg_dest); + + return out_cnt; +} + +static int mdd_proc_stp_only(struct msu_dvc_dev *mdd, int nents) +{ + u8 *ptr; + size_t len; + int i, out_cnt = 0; + struct scatterlist *sg, *sg_dest = NULL; + + /*MDD_F_DEBUG(); */ + mdd_proc_add_stats(mdd, nents); + + sg_init_table(mdd->tdata.sg_proc, nents); + + for_each_sg(mdd->tdata.sg_raw, sg, nents, i) { + ptr = sg_virt(sg); + len = msc_data_sz((struct msc_block_desc *)ptr); + ptr += MSC_BDESC; + if (!len) { + mdd_err(mdd, "Zero data length block\n"); + } else { + if (!sg_dest) + sg_dest = mdd->tdata.sg_proc; + else + sg_dest = sg_next(sg_dest); + sg_set_buf(sg_dest, ptr, len); + out_cnt++; + } + } + if (sg_dest) + sg_mark_end(sg_dest); + + return out_cnt; +} + +static int (*proc_funcs[]) (struct msu_dvc_dev *, int) = { +#ifdef MDD_DEBUG + [MDD_PROC_NONE] = mdd_proc_add_stats, +#endif + [MDD_PROC_REM_TAIL] = mdd_proc_trimmed_blocks, + [MDD_PROC_REM_ALL] = mdd_proc_stp_only, +}; + +static void mdd_work(struct work_struct *work) +{ + int nents, current_bytes, retry_cnt = 0; + struct msu_dvc_dev *mdd; + + MDD_F_DEBUG(); + mdd = container_of(work, struct msu_dvc_dev, work); + init_stats_start(mdd); + + if (mdd_setup_transfer_data(mdd)) { + mdd_err(mdd, "Cannot setup transfer data\n"); + return; + } + mdd_info(mdd, "Start transfer loop\n"); + while (atomic_read(mdd->dtc_status) == DVCT_MASK_ONLINE_TRANS) { + sg_init_table(mdd->tdata.sg_raw, mdd->tdata.block_count); + /* It might be better if msc_sg_oldest_win() switched windows + * when the "oldest" one contains data and is also the + * current one. */ + + preempt_disable(); + current_bytes = msc_current_win_bytes(mdd->th_dev); + if (current_bytes > mdd->min_transfer || + (current_bytes && retry_cnt >= mdd->max_retry_count)) { + msc_switch_window(mdd->th_dev); + nents = msc_sg_oldest_win(mdd->th_dev, + mdd->tdata.sg_raw); + retry_cnt = 0; + } else { + if (unlikely(current_bytes < 0)) { + mdd_warn(mdd, "Unexpected state (%d), switch\n", + current_bytes); + msc_switch_window(mdd->th_dev); + } else { + if (retry_cnt < mdd->max_retry_count) + retry_cnt++; + } + nents = 0; + } + preempt_enable(); + stats_loop(mdd); + + if (nents < 0) { + mdd_err(mdd, "Cannot get ith data\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + break; + } + + if (nents && proc_funcs[mdd->process_type]) { + nents = proc_funcs[mdd->process_type] (mdd, nents); + if (nents < 0) { + mdd_err(mdd, "Cannot process data\n"); + dvct_set_status(mdd->dtc_status, DVCT_MASK_ERR); + break; + } + } + + if (nents) { + stats_hit(mdd); + if (send_funcs[mdd->transfer_type] (mdd, nents)) + break; + } else { + /* wait for stop or timeout */ + wait_event_timeout(mdd->wq, + (atomic_read(mdd->dtc_status) !=
DVCT_MASK_ONLINE_TRANS), + msecs_to_jiffies(mdd-> + retry_timeout)); + } + } + mdd_info(mdd, "End transfer loop\n"); + if (atomic_read(&mdd->req_ongoing)) { + usb_ep_dequeue(mdd->ep, mdd->req); + atomic_set(&mdd->req_ongoing, 0); + } + +#ifdef MDD_DEBUG + mdd->stats.work_end = jiffies; +#endif + mdd_reset_transfer_data(mdd); +} + +static int mdd_activate(struct dvct_source_device *client, atomic_t *status) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + + mdd->dtc_status = status; + + return 0; +} + +static int mdd_binded(struct dvct_source_device *client, struct usb_ep *ep, + struct usb_function *func) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->ep = ep; + mdd->func = func; + + mdd->req = usb_ep_alloc_request(mdd->ep, GFP_KERNEL); + if (!mdd->req) { + mdd_err(mdd, "Cannot allocate usb request\n"); + return -ENOMEM; + } + return 0; +} + +static void mdd_connected(struct dvct_source_device *client, + enum usb_device_speed speed) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->speed = speed; +} + +union mdd_config { + u8 config; + struct { + u8 enable:1; /* always 1 */ + u8 tr_type:2; + u8 proc_type:2; + } params; +}; + +static int mdd_start_transfer(struct dvct_source_device *client, u8 config) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + union mdd_config cfg; + + MDD_F_DEBUG(); + /* If we shared resources with node-based reading, this is + * where we would need to lock. */ + + cfg.config = config; + + if (cfg.params.proc_type <= MDD_PROC_MAX + && cfg.params.proc_type >= MDD_PROC_MIN) { + mdd_info(mdd, "Set process type %d\n", cfg.params.proc_type); + mdd->process_type = cfg.params.proc_type; + } + + if (cfg.params.tr_type <= MDD_TRANSFER_MAX + && cfg.params.tr_type >= MDD_TRANSFER_MIN) { + mdd_info(mdd, "Set transfer type %d\n", cfg.params.tr_type); + mdd->transfer_type = cfg.params.tr_type; + } + + /* Force linear buffer transfer if the gadget does not support SGs */ + if (mdd->transfer_type != MDD_TRANSFER_SINGLE + && !mdd_gadget(mdd)->sg_supported) { + mdd_info(mdd, "Force linear buffer transfer\n"); + mdd->transfer_type = MDD_TRANSFER_SINGLE; + } + + dvct_clr_status(mdd->dtc_status, DVCT_MASK_ERR); + dvct_set_status(mdd->dtc_status, DVCT_MASK_TRANS); + queue_work(mdd->wrq, &mdd->work); + return 0; +} + +static int mdd_stop_transfer(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + dvct_clr_status(mdd->dtc_status, DVCT_MASK_TRANS); + wake_up(&mdd->wq); + + return 0; +} + +static void mdd_disconnected(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->speed = USB_SPEED_UNKNOWN; +} + +static void mdd_unbinded(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + + if (mdd->req) { + usb_ep_free_request(mdd->ep, mdd->req); + mdd->req = NULL; + } + mdd->ep = NULL; +} + +static void mdd_deactivate(struct dvct_source_device *client) +{ + struct msu_dvc_dev *mdd = dtc_to_mdd(client); + + MDD_F_DEBUG(); + mdd->dtc_status = NULL; +} + +/* the driver */ +static struct dvct_source_driver mdd_drv = { + .activate = mdd_activate, + .binded = mdd_binded, + .connected = mdd_connected, + .start_transfer = mdd_start_transfer, + .stop_transfer = mdd_stop_transfer, + .disconnected = mdd_disconnected, + .unbinded = mdd_unbinded, + .deactivate = mdd_deactivate, + .driver.name = DTC_DRV_NAME, +}; + +static struct msu_dvc_dev *mdd_alloc_device(const char *name)
+{ + struct msu_dvc_dev *mdd; + + mdd = kzalloc(sizeof(*mdd), GFP_KERNEL); + + if (!mdd) + return ERR_PTR(-ENOMEM); + + mdd->ddev.name_add = kstrdup(name, GFP_KERNEL); + if (!mdd->ddev.name_add) { + kfree(mdd); + return ERR_PTR(-ENOMEM); + } + + /* mdd->ddev.protocol = 0; */ + /* mdd->ddev.desc = NULL; */ + /* mdd->dtc_status = NULL; */ + /* mdd->ep = NULL; */ + mdd->speed = USB_SPEED_UNKNOWN; + /* mdd->msu_dev = NULL; */ + /* mdd->wrq = NULL; */ + mdd->retry_timeout = MDD_RETRY_TIMEOUT_DEF; + mdd->max_retry_count = MDD_MAX_RETRY_CNT_DEF; + /* mdd->req = NULL; */ + atomic_set(&mdd->req_ongoing, 0); + /* mdd->tdata is all zeroes */ + mdd->transfer_type = MDD_TRANSFER_AUTO; + mdd->process_type = MDD_PROC_REM_ALL; + mdd->min_transfer = MDD_MIN_TRANSFER_DEF; + + spin_lock_init(&mdd->tdata.lock); + + return mdd; +} + +static void mdd_free_device(struct msu_dvc_dev *mdd) +{ + kfree(mdd->ddev.name_add); + kfree(mdd); +} + +static struct attribute *mdd_attrs[] = { + &dev_attr_mdd_min_transfer.attr, + &dev_attr_mdd_retry_timeout.attr, + &dev_attr_mdd_max_retry.attr, + &dev_attr_mdd_transfer_type.attr, + &dev_attr_mdd_proc_type.attr, +#ifdef MDD_DEBUG + &dev_attr_mdd_stats.attr, +#endif + NULL, +}; + +static struct attribute_group mdd_attrs_group = { + .attrs = mdd_attrs, +}; + +void mdd_msc_probe(struct intel_th_device *thdev) +{ + int ret; + struct msu_dvc_dev *mdd; + struct device *dev; + + pr_info("New th-msc device %s\n", dev_name(&thdev->dev)); + mdd = mdd_alloc_device(dev_name(&thdev->dev)); + + if (IS_ERR_OR_NULL(mdd)) { + pr_err("Cannot allocate device %s (%ld)\n", dev_name(&thdev->dev), + PTR_ERR(mdd)); + return; + } + + ret = dvct_source_device_add(&mdd->ddev, &mdd_drv); + if (ret) { + pr_err("Cannot register dvc device %d\n", ret); + mdd_free_device(mdd); + return; + } + + mdd->th_dev = thdev; + dev = &mdd->ddev.device; + + mdd->wrq = alloc_workqueue("%s_workqueue", WQ_MEM_RECLAIM | WQ_HIGHPRI, + 1, dev_name(&mdd->ddev.device)); + if (!mdd->wrq) { + mdd_err(mdd, "Cannot allocate work queue\n"); + dvct_source_device_del(&mdd->ddev); + mdd_free_device(mdd); + return; + } + + INIT_WORK(&mdd->work, mdd_work); + + init_waitqueue_head(&mdd->wq); + + /* Attributes */ + ret = sysfs_create_group(&dev->kobj, &mdd_attrs_group); + if (ret) + mdd_warn(mdd, "Cannot add attribute group %d\n", ret); + + ret = sysfs_create_link(&dev->kobj, &thdev->dev.kobj, "msc"); + if (ret) + mdd_warn(mdd, "Cannot add msc link %d\n", ret); + + spin_lock(&mdd_devs_lock); + list_add(&mdd->mdd_list, &mdd_devs); + spin_unlock(&mdd_devs_lock); +} + +void mdd_msc_remove(struct intel_th_device *thdev) +{ + struct msu_dvc_dev *mdd = NULL; + struct msu_dvc_dev *mdd_iter = NULL; + + spin_lock(&mdd_devs_lock); + list_for_each_entry(mdd_iter, &mdd_devs, mdd_list) { + if (mdd_iter->th_dev == thdev) + mdd = mdd_iter; + } + + if (!mdd) { + pr_err("No such mdd device, %s\n", dev_name(&thdev->dev)); + spin_unlock(&mdd_devs_lock); + return; + } + list_del(&mdd->mdd_list); + + spin_unlock(&mdd_devs_lock); + + flush_workqueue(mdd->wrq); + destroy_workqueue(mdd->wrq); + + sysfs_remove_group(&mdd->ddev.device.kobj, &mdd_attrs_group); + sysfs_remove_link(&mdd->ddev.device.kobj, "msc"); + + mdd->wrq = NULL; + + dvct_source_device_del(&mdd->ddev); + mdd_free_device(mdd); +} + +struct msc_probe_rem_cb mdd_msc_cbs = { + .probe = mdd_msc_probe, + .remove = mdd_msc_remove, +}; + +static int __init msu_dvc_init(void) +{ + int ret; + + MDD_F_DEBUG(); + ret = dvct_source_driver_register(&mdd_drv); + if (ret) { + pr_err("Cannot register dvc driver %d\n", ret); + return ret; + } + +
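/* hook into the MSC driver: its probe callback creates a dvctrace source + * device for every MSC instance, including ones that already exist */ +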
msc_register_callbacks(mdd_msc_cbs); + return 0; +} + +static void __exit msu_dvc_exit(void) +{ + MDD_F_DEBUG(); + msc_unregister_callbacks(); + dvct_source_driver_unregister(&mdd_drv); +} + +module_init(msu_dvc_init); +module_exit(msu_dvc_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Traian Schiau "); diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c index d293e55553bd..70d41a0e2e0a 100644 --- a/drivers/hwtracing/intel_th/msu.c +++ b/drivers/hwtracing/intel_th/msu.c @@ -23,6 +23,9 @@ #include #endif +#include +#include + #include "intel_th.h" #include "msu.h" @@ -89,6 +92,7 @@ struct msc_iter { * @single_wrap: single mode wrap occurred * @base: buffer's base pointer * @base_addr: buffer's base address + * @nwsa: next window start address backup * @user_count: number of users of the buffer * @mmap_count: number of mappings * @buf_mutex: mutex to serialize access to buffer-related bits @@ -98,6 +102,8 @@ struct msc_iter { * @mode: MSC operating mode * @burst_len: write burst length * @index: number of this MSC in the MSU + * + * @max_blocks: Maximum number of blocks in a window */ struct msc { void __iomem *reg_base; @@ -109,6 +115,7 @@ struct msc { unsigned int single_wrap : 1; void *base; dma_addr_t base_addr; + unsigned long nwsa; /* <0: no buffer, 0: no users, >0: active users */ atomic_t user_count; @@ -124,8 +131,99 @@ struct msc { unsigned int mode; unsigned int burst_len; unsigned int index; + unsigned int max_blocks; +}; + +static struct msc_probe_rem_cb msc_probe_rem_cb; + +struct msc_device_instance { + struct list_head list; + struct intel_th_device *thdev; }; + +static LIST_HEAD(msc_dev_instances); +static DEFINE_MUTEX(msc_dev_reg_lock); +/** + * msc_register_callbacks() - register MSC probe/remove callbacks + * @cbs: the probe/remove callback pair to install + * + * The probe callback is also invoked for MSC devices that already exist. + */ +int msc_register_callbacks(struct msc_probe_rem_cb cbs) +{ + struct msc_device_instance *it; + + mutex_lock(&msc_dev_reg_lock); + + msc_probe_rem_cb.probe = cbs.probe; + msc_probe_rem_cb.remove = cbs.remove; + /* Call the probe callback for the already existing ones */ + list_for_each_entry(it, &msc_dev_instances, list) { + cbs.probe(it->thdev); + } + + mutex_unlock(&msc_dev_reg_lock); + return 0; +} +EXPORT_SYMBOL_GPL(msc_register_callbacks); + +/** + * msc_unregister_callbacks() - remove the registered probe/remove callbacks + */ +void msc_unregister_callbacks(void) +{ + mutex_lock(&msc_dev_reg_lock); + + msc_probe_rem_cb.probe = NULL; + msc_probe_rem_cb.remove = NULL; + + mutex_unlock(&msc_dev_reg_lock); +} +EXPORT_SYMBOL_GPL(msc_unregister_callbacks); + +static void msc_add_instance(struct intel_th_device *thdev) +{ + struct msc_device_instance *instance; + + instance = kmalloc(sizeof(*instance), GFP_KERNEL); + if (!instance) + return; + + mutex_lock(&msc_dev_reg_lock); + + instance->thdev = thdev; + list_add(&instance->list, &msc_dev_instances); + + if (msc_probe_rem_cb.probe) + msc_probe_rem_cb.probe(thdev); + + mutex_unlock(&msc_dev_reg_lock); +} + +static void msc_rm_instance(struct intel_th_device *thdev) +{ + struct msc_device_instance *instance = NULL, *it; + + mutex_lock(&msc_dev_reg_lock); + + if (msc_probe_rem_cb.remove) + msc_probe_rem_cb.remove(thdev); + + list_for_each_entry(it, &msc_dev_instances, list) { + if (it->thdev == thdev) { + instance = it; + break; + } + } + + if (instance) { + list_del(&instance->list); + kfree(instance); + } else { + pr_warn("msu: cannot remove %p (not found)\n", thdev); + } + + mutex_unlock(&msc_dev_reg_lock); +} + static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) { /* header hasn't been written */ @@ -139,6 +237,37 @@ static inline bool
msc_block_is_empty(struct msc_block_desc *bdesc) return false; } +/** + * msc_current_window() - locate the window in use + * @msc: MSC device + * + * This should only be used in multiblock mode. Caller should hold the + * msc::user_count reference. + * + * Return: the current output window + */ +static struct msc_window *msc_current_window(struct msc *msc) +{ + struct msc_window *win, *prev = NULL; + /* BAR never changes, so the current window is the one before the next */ + u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT; + + if (list_empty(&msc->win_list)) + return NULL; + + list_for_each_entry(win, &msc->win_list, entry) { + if (win->block[0].addr == win_addr) + break; + prev = win; + } + if (!prev) + prev = list_entry(msc->win_list.prev, struct msc_window, entry); + + return prev; +} + + /** * msc_oldest_window() - locate the window with oldest data * @msc: MSC device @@ -151,20 +280,26 @@ static inline bool msc_block_is_empty(struct msc_block_desc *bdesc) static struct msc_window *msc_oldest_window(struct msc *msc) { struct msc_window *win; - u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); - unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT; unsigned int found = 0; + unsigned long nwsa; if (list_empty(&msc->win_list)) return NULL; + if (msc->enabled) { + u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + + nwsa = (unsigned long)reg << PAGE_SHIFT; + } else { + nwsa = msc->nwsa; + } /* * we might need a radix tree for this, depending on how * many windows a typical user would allocate; ideally it's * something like 2, in which case we're good */ list_for_each_entry(win, &msc->win_list, entry) { - if (win->block[0].addr == win_addr) + if (win->block[0].addr == nwsa) found++; /* skip the empty ones */ @@ -207,6 +342,160 @@ static unsigned int msc_win_oldest_block(struct msc_window *win) return 0; } +/** + * msc_max_blocks() - get the maximum number of blocks + * @thdev: the sub-device + * + * Return: the maximum number of blocks per window + */ +unsigned int msc_max_blocks(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + + return msc->max_blocks; +} +EXPORT_SYMBOL_GPL(msc_max_blocks); + +/** + * msc_block_max_size() - get the size of the biggest block + * @thdev: the sub-device + * + * Return: the size of the biggest block + */ +unsigned int msc_block_max_size(struct intel_th_device *thdev) +{ + return PAGE_SIZE; +} +EXPORT_SYMBOL_GPL(msc_block_max_size); + +/** + * msc_switch_window() - perform a window switch + * @thdev: the sub-device + */ +int msc_switch_window(struct intel_th_device *thdev) +{ + intel_th_trace_switch(thdev); + return 0; +} +EXPORT_SYMBOL_GPL(msc_switch_window); + +/** + * msc_current_win_bytes() - get the current window data size + * @thdev: the sub-device + * + * Get the number of valid data bytes in the current window. + * Based on this the dvc-source part can decide to request a window switch.
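+ * + * Return: the number of valid data bytes in the current window, or -EINVAL + * if the MSC is not actively capturing in multi-window mode.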
+ */ +int msc_current_win_bytes(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + struct msc_window *win; + u32 reg_mwp, blk, offset, i; + int size = 0; + + /* proceed only if actively storing in multi-window mode */ + if (!msc->enabled || + (msc->mode != MSC_MODE_MULTI) || + !atomic_inc_unless_negative(&msc->user_count)) + return -EINVAL; + + win = msc_current_window(msc); + reg_mwp = ioread32(msc->reg_base + REG_MSU_MSC0MWP); + + if (!win) { + atomic_dec(&msc->user_count); + return -EINVAL; + } + + blk = 0; + while (blk < win->nr_blocks) { + if (win->block[blk].addr == (reg_mwp & PAGE_MASK)) + break; + blk++; + } + + if (blk >= win->nr_blocks) { + atomic_dec(&msc->user_count); + return -EINVAL; + } + + offset = (reg_mwp & (PAGE_SIZE - 1)); + + + /* if wrapped */ + if (msc_block_wrapped(win->block[blk].bdesc)) { + for (i = blk+1; i < win->nr_blocks; i++) + size += msc_data_sz(win->block[i].bdesc); + } + + for (i = 0; i < blk; i++) + size += msc_data_sz(win->block[i].bdesc); + + /* finally the current one */ + size += (offset - MSC_BDESC); + + atomic_dec(&msc->user_count); + return size; +} +EXPORT_SYMBOL_GPL(msc_current_win_bytes); + +/** + * msc_sg_oldest_win() - get the data from the oldest window + * @thdev: the sub-device + * @sg_array: destination sg array + * + * Return: sg count + */ +int msc_sg_oldest_win(struct intel_th_device *thdev, + struct scatterlist *sg_array) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + struct msc_window *win, *c_win; + struct msc_block_desc *bdesc; + unsigned int blk, sg = 0; + + /* proceed only if actively storing in multi-window mode */ + if (!msc->enabled || + (msc->mode != MSC_MODE_MULTI) || + !atomic_inc_unless_negative(&msc->user_count)) + return -EINVAL; + + win = msc_oldest_window(msc); + if (!win) { + atomic_dec(&msc->user_count); + return 0; + } + + c_win = msc_current_window(msc); + + if (win == c_win) { + atomic_dec(&msc->user_count); + return 0; + } + + blk = msc_win_oldest_block(win); + + /* start with the first block containing only oldest data */ + if (msc_block_wrapped(win->block[blk].bdesc)) + if (++blk == win->nr_blocks) + blk = 0; + + do { + bdesc = win->block[blk].bdesc; + sg_set_buf(&sg_array[sg++], bdesc, PAGE_SIZE); + + if (bdesc->hw_tag & MSC_HW_TAG_ENDBIT) + break; + + if (++blk == win->nr_blocks) + blk = 0; + + } while (sg <= win->nr_blocks); + + sg_mark_end(&sg_array[sg - 1]); + + atomic_dec(&msc->user_count); + + return sg; +} +EXPORT_SYMBOL_GPL(msc_sg_oldest_win); + /** * msc_is_last_win() - check if a window is the last one for a given MSC * @win: window @@ -478,9 +767,9 @@ static void msc_buffer_clear_hw_header(struct msc *msc) * msc_configure() - set up MSC hardware * @msc: the MSC device to configure * - * Program storage mode, wrapping, burst length and trace buffer address - * into a given MSC. Then, enable tracing and set msc::enabled. - * The latter is serialized on msc::buf_mutex, so make sure to hold it. + * Program all relevant registers for a given MSC. + * Programming registers must be delayed until this stage since the hardware + * will be reset before a capture is started.
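+ * + * As before, msc::enabled is serialized on msc::buf_mutex, so callers must + * hold it.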
*/ static int msc_configure(struct msc *msc) { @@ -515,10 +804,8 @@ static int msc_configure(struct msc *msc) iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; - intel_th_trace_enable(msc->thdev); msc->enabled = 1; - return 0; } @@ -531,23 +818,14 @@ static int msc_configure(struct msc *msc) */ static void msc_disable(struct msc *msc) { - unsigned long count; u32 reg; lockdep_assert_held(&msc->buf_mutex); intel_th_trace_disable(msc->thdev); - for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; - count && !(reg & MSCSTS_PLE); count--) { - reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); - cpu_relax(); - } - - if (!count) - dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); - if (msc->mode == MSC_MODE_SINGLE) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT); reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); @@ -556,6 +834,10 @@ static void msc_disable(struct msc *msc) reg, msc->single_sz, msc->single_wrap); } + /* Save next window start address before disabling */ + reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA); + msc->nwsa = (unsigned long)reg << PAGE_SHIFT; + reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); reg &= ~MSC_EN; iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); @@ -564,8 +846,7 @@ static void msc_disable(struct msc *msc) iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR); iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE); - dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n", - ioread32(msc->reg_base + REG_MSU_MSC0NWSA)); + dev_dbg(msc_dev(msc), "MSCnNWSA: %08lx\n", msc->nwsa); reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg); @@ -1061,16 +1342,19 @@ static int intel_th_msc_release(struct inode *inode, struct file *file) } static ssize_t -msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) +msc_single_to_user(void *in_buf, unsigned long in_pages, + unsigned long in_sz, bool wrapped, + char __user *buf, loff_t off, size_t len) { - unsigned long size = msc->nr_pages << PAGE_SHIFT, rem = len; + unsigned long size = in_pages << PAGE_SHIFT, rem = len; unsigned long start = off, tocopy = 0; - if (msc->single_wrap) { - start += msc->single_sz; + /* With wrapping, copy the end of the buffer first */ + if (wrapped) { + start += in_sz; if (start < size) { tocopy = min(rem, size - start); - if (copy_to_user(buf, msc->base + start, tocopy)) + if (copy_to_user(buf, in_buf + start, tocopy)) return -EFAULT; buf += tocopy; @@ -1079,21 +1363,17 @@ msc_single_to_user(struct msc *msc, char __user *buf, loff_t off, size_t len) } start &= size - 1; - if (rem) { - tocopy = min(rem, msc->single_sz - start); - if (copy_to_user(buf, msc->base + start, tocopy)) - return -EFAULT; - - rem -= tocopy; - } - - return len - rem; } + /* Copy the beginning of the buffer */ + if (rem) { + tocopy = min(rem, in_sz - start); + if (copy_to_user(buf, in_buf + start, tocopy)) + return -EFAULT; - if (copy_to_user(buf, msc->base + start, rem)) - return -EFAULT; + rem -= tocopy; + } - return len; + return len - rem; } static ssize_t intel_th_msc_read(struct file *file, char __user *buf, @@ -1123,8 +1403,10 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf, len = size - off; if (msc->mode == MSC_MODE_SINGLE) { - ret = msc_single_to_user(msc, buf, off, len); - if (ret >= 0) + ret = msc_single_to_user(msc->base, msc->nr_pages, + msc->single_sz, msc->single_wrap, + buf, off, len); + if (ret > 0) *ppos += ret; } else if (msc->mode == 
MSC_MODE_MULTI) { struct msc_win_to_user_struct u = { @@ -1250,6 +1532,283 @@ static const struct file_operations intel_th_msc_fops = { .owner = THIS_MODULE, }; +static void msc_wait_ple(struct intel_th_device *thdev) +{ + struct msc *msc = dev_get_drvdata(&thdev->dev); + unsigned long count; + u32 reg; + + for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH; + count && !(reg & MSCSTS_PLE); count--) { + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); + cpu_relax(); + } + + if (!count) + dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n"); +} + +#ifdef CONFIG_ACPI +#define ACPI_SIG_NPKT "NPKT" + +/* Buffers that may be handed through NPKT ACPI table */ +enum NPKT_BUF_TYPE { + NPKT_MTB = 0, + NPKT_MTB_REC, + NPKT_CSR, + NPKT_CSR_REC, + NPKT_NBUF +}; +static const char * const npkt_buf_name[NPKT_NBUF] = { + [NPKT_MTB] = "mtb", + [NPKT_MTB_REC] = "mtb_rec", + [NPKT_CSR] = "csr", + [NPKT_CSR_REC] = "csr_rec" +}; + +/* CSR capture still active */ +#define NPKT_CSR_USED BIT(4) + +struct acpi_npkt_buf { + u64 addr; + u32 size; + u32 offset; +}; + +/* NPKT ACPI table */ +struct acpi_table_npkt { + struct acpi_table_header header; + struct acpi_npkt_buf buffers[NPKT_NBUF]; + u8 flags; +} __packed; + +/* Trace buffer obtained from NPKT table */ +struct npkt_buf { + dma_addr_t phy; + void *buf; + u32 size; + u32 offset; + bool wrapped; + atomic_t active; + struct msc *msc; +}; + +static struct npkt_buf *npkt_bufs; +static struct dentry *npkt_dump_dir; +static DEFINE_MUTEX(npkt_lock); + +/** + * Stop current trace if a buffer was marked with a capture in progress. + * + * Update buffer write offset and wrap status after stopping the trace. + */ +static void stop_buffer_trace(struct npkt_buf *buf) +{ + u32 reg, mode; + struct msc *msc = buf->msc; + + mutex_lock(&npkt_lock); + if (!atomic_read(&buf->active)) + goto unlock; + + reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); + mode = (reg & MSC_MODE) >> __ffs(MSC_MODE); + if (!(reg & MSC_EN) || mode != MSC_MODE_SINGLE) { + /* Assume full buffer */ + pr_warn("NPKT reported CSR in use but not tracing to CSR\n"); + buf->offset = 0; + buf->wrapped = true; + atomic_set(&buf->active, 0); + goto unlock; + } + + /* The hub must be able to stop a capture not started by the driver */ + intel_th_trace_disable(msc->thdev); + + /* Update offset and wrap status */ + reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP); + buf->offset = reg - (u32)buf->phy; + reg = ioread32(msc->reg_base + REG_MSU_MSC0STS); + buf->wrapped = !!(reg & MSCSTS_WRAPSTAT); + atomic_set(&buf->active, 0); + +unlock: + mutex_unlock(&npkt_lock); +} + +/** + * Copy re-ordered data from an NPKT buffer to a user buffer. + */ +static ssize_t read_npkt_dump_buf(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct npkt_buf *buf = file->private_data; + size_t size = buf->size; + loff_t off = *ppos; + ssize_t ret; + + if (atomic_read(&buf->active)) + stop_buffer_trace(buf); + + if (off >= size) + return 0; + + ret = msc_single_to_user(buf->buf, size >> PAGE_SHIFT, + buf->offset, buf->wrapped, + user_buf, off, count); + if (ret > 0) + *ppos += ret; + + return ret; +} + +static const struct file_operations npkt_dump_buf_fops = { + .read = read_npkt_dump_buf, + .open = simple_open, + .llseek = noop_llseek, +}; + +/** + * Prepare a buffer with remapped address for a given NPKT buffer and add + * an entry for it in debugfs.
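+ * + * The entry is created read-only (S_IRUGO) under the npkt_dump debugfs + * directory; reads go through read_npkt_dump_buf(), which stops a live CSR + * capture and re-orders wrapped data before copying it out.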
+ */ +static void npkt_bind_buffer(enum NPKT_BUF_TYPE type, + struct acpi_npkt_buf *abuf, u8 flags, + struct npkt_buf *buf, struct msc *msc) +{ + const char *name = npkt_buf_name[type]; + + /* No buffer handed through ACPI */ + if (!abuf->addr || !abuf->size) + return; + + /* Only expect multiples of page size */ + if (abuf->size & (PAGE_SIZE - 1)) { + pr_warn("invalid size 0x%x for buffer %s\n", + abuf->size, name); + return; + } + + buf->size = abuf->size; + buf->offset = abuf->offset; + buf->wrapped = !!(flags & BIT(type)); + /* CSR may still be active */ + if (type == NPKT_CSR && (flags & NPKT_CSR_USED)) { + atomic_set(&buf->active, 1); + buf->msc = msc; + } + + buf->phy = abuf->addr; + buf->buf = (__force void *)ioremap(buf->phy, buf->size); + if (!buf->buf) { + pr_err("ioremap failed for buffer %s 0x%llx size:0x%x\n", + name, buf->phy, buf->size); + return; + } + + debugfs_create_file(name, S_IRUGO, npkt_dump_dir, buf, + &npkt_dump_buf_fops); +} + +static void npkt_bind_buffers(struct acpi_table_npkt *npkt, + struct npkt_buf *bufs, struct msc *msc) +{ + int i; + + for (i = 0; i < NPKT_NBUF; i++) + npkt_bind_buffer(i, &npkt->buffers[i], npkt->flags, + &bufs[i], msc); +} + +static void npkt_unbind_buffers(struct npkt_buf *bufs) +{ + int i; + + for (i = 0; i < NPKT_NBUF; i++) + if (bufs[i].buf) + iounmap((__force void __iomem *)bufs[i].buf); +} + +/** + * Prepare debugfs access to NPKT buffers. + */ +static void intel_th_npkt_init(struct msc *msc) +{ + acpi_status status; + struct acpi_table_npkt *npkt; + + /* Associate NPKT to msc0 */ + if (npkt_bufs || msc->index != 0) + return; + + status = acpi_get_table(ACPI_SIG_NPKT, 0, + (struct acpi_table_header **)&npkt); + if (ACPI_FAILURE(status)) { + pr_warn("Failed to get NPKT table, %s\n", + acpi_format_exception(status)); + return; + } + + npkt_bufs = kcalloc(NPKT_NBUF, sizeof(*npkt_bufs), GFP_KERNEL); + if (!npkt_bufs) + return; + + npkt_dump_dir = debugfs_create_dir("npkt_dump", NULL); + if (!npkt_dump_dir) { + pr_err("npkt_dump debugfs create dir failed\n"); + goto free_npkt_bufs; + } + + npkt_bind_buffers(npkt, npkt_bufs, msc); + + return; + +free_npkt_bufs: + kfree(npkt_bufs); + npkt_bufs = NULL; +} + +/** + * Remove debugfs access to NPKT buffers and release resources. + */ +static void intel_th_npkt_remove(struct msc *msc) +{ + /* Only clean for msc 0 if necessary */ + if (!npkt_bufs || msc->index != 0) + return; + + npkt_unbind_buffers(npkt_bufs); + debugfs_remove_recursive(npkt_dump_dir); + kfree(npkt_bufs); + npkt_bufs = NULL; +} + +/** + * First trace callback. + * + * If NPKT notified a CSR capture is in progress, stop it and update buffer + * write offset and wrap status.
+ */ +static void intel_th_msc_first_trace(struct intel_th_device *thdev) +{ + struct device *dev = &thdev->dev; + struct msc *msc = dev_get_drvdata(dev); + struct npkt_buf *buf; + + if (!npkt_bufs || msc->index != 0) + return; + + buf = &npkt_bufs[NPKT_CSR]; + if (atomic_read(&buf->active)) + stop_buffer_trace(buf); +} + +#else /* !CONFIG_ACPI */ +static inline void intel_th_npkt_init(struct msc *msc) {} +static inline void intel_th_npkt_remove(struct msc *msc) {} +#define intel_th_msc_first_trace NULL +#endif /* !CONFIG_ACPI */ + static int intel_th_msc_init(struct msc *msc) { atomic_set(&msc->user_count, -1); @@ -1263,6 +1822,8 @@ static int intel_th_msc_init(struct msc *msc) (ioread32(msc->reg_base + REG_MSU_MSC0CTL) & MSC_LEN) >> __ffs(MSC_LEN); + msc->thdev->output.wait_empty = msc_wait_ple; + return 0; } @@ -1386,6 +1947,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, if (ret) return ret; + msc->max_blocks = 0; + /* scan the comma-separated list of allocation sizes */ end = memchr(buf, '\n', len); if (end) @@ -1420,6 +1983,9 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, win = rewin; win[nr_wins - 1] = val; + msc->max_blocks = + (val > msc->max_blocks) ? val : msc->max_blocks; + if (!end) break; @@ -1439,10 +2005,32 @@ nr_pages_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(nr_pages); +static ssize_t +win_switch_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + struct msc *msc = dev_get_drvdata(dev); + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret) + return ret; + + if (val != 1) + return -EINVAL; + + intel_th_trace_switch(msc->thdev); + return size; +} + +static DEVICE_ATTR_WO(win_switch); + static struct attribute *msc_output_attrs[] = { &dev_attr_wrap.attr, &dev_attr_mode.attr, &dev_attr_nr_pages.attr, + &dev_attr_win_switch.attr, NULL, }; @@ -1479,28 +2067,25 @@ static int intel_th_msc_probe(struct intel_th_device *thdev) if (err) return err; + msc->max_blocks = 0; dev_set_drvdata(dev, msc); + intel_th_npkt_init(msc); + msc_add_instance(thdev); + return 0; } static void intel_th_msc_remove(struct intel_th_device *thdev) { struct msc *msc = dev_get_drvdata(&thdev->dev); - int ret; - - intel_th_msc_deactivate(thdev); - - /* - * Buffers should not be used at this point except if the - * output character device is still open and the parent - * device gets detached from its bus, which is a FIXME. 
- */
-	ret = msc_buffer_free_unless_used(msc);
-	WARN_ON_ONCE(ret);
+	intel_th_npkt_remove(msc);
+	msc_rm_instance(thdev);
+	sysfs_remove_group(&thdev->dev.kobj, &msc_output_group);
 }
 
 static struct intel_th_driver intel_th_msc_driver = {
+	.first_trace	= intel_th_msc_first_trace,
 	.probe	= intel_th_msc_probe,
 	.remove	= intel_th_msc_remove,
 	.activate	= intel_th_msc_activate,
diff --git a/drivers/hwtracing/intel_th/msu.h b/drivers/hwtracing/intel_th/msu.h
index 9cc8aced6116..ccfed662a726 100644
--- a/drivers/hwtracing/intel_th/msu.h
+++ b/drivers/hwtracing/intel_th/msu.h
@@ -8,6 +8,8 @@
 #ifndef __INTEL_TH_MSU_H__
 #define __INTEL_TH_MSU_H__
 
+#include "intel_th.h"
+
 enum {
 	REG_MSU_MSUPARAMS	= 0x0000,
 	REG_MSU_MSUSTS		= 0x0008,
@@ -105,4 +107,19 @@ static inline bool msc_block_last_written(struct msc_block_desc *bdesc)
 /* waiting for Pipeline Empty bit(s) to assert for MSC */
 #define MSC_PLE_WAITLOOP_DEPTH	10000
 
+/* API exposed to other MSU consumers */
+struct msc_probe_rem_cb {
+	void (*probe)(struct intel_th_device *thdev);
+	void (*remove)(struct intel_th_device *thdev);
+};
+
+int msc_register_callbacks(struct msc_probe_rem_cb cbs);
+void msc_unregister_callbacks(void);
+unsigned int msc_max_blocks(struct intel_th_device *thdev);
+unsigned int msc_block_max_size(struct intel_th_device *thdev);
+int msc_switch_window(struct intel_th_device *thdev);
+int msc_sg_oldest_win(struct intel_th_device *thdev,
+		      struct scatterlist *sg_array);
+int msc_current_win_bytes(struct intel_th_device *thdev);
+
 #endif /* __INTEL_TH_MSU_H__ */
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 1cf6290d6435..460850b16d19 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -61,6 +61,35 @@ static void intel_th_pci_deactivate(struct intel_th *th)
 	if (err)
 		dev_err(&pdev->dev, "failed to read NPKDSC register\n");
 }
+/*
+ * PCI Configuration Registers
+ */
+enum {
+	REG_PCI_NPKDSC	= 0x80,	/* NPK Device Specific Control */
+	REG_PCI_NPKDSD	= 0x90,	/* NPK Device Specific Defeature */
+};
+
+/* Trace Hub software reset */
+#define NPKDSC_RESET	BIT(1)
+
+/* Force On */
+#define NPKDSD_FON	BIT(0)
+
+static void intel_th_pci_reset(struct intel_th *th)
+{
+	struct pci_dev *pdev = container_of(th->dev, struct pci_dev, dev);
+	u32 val;
+
+	/* Software reset */
+	pci_read_config_dword(pdev, REG_PCI_NPKDSC, &val);
+	val |= NPKDSC_RESET;
+	pci_write_config_dword(pdev, REG_PCI_NPKDSC, val);
+
+	/* Always set FON for S0ix flow */
+	pci_read_config_dword(pdev, REG_PCI_NPKDSD, &val);
+	val |= NPKDSD_FON;
+	pci_write_config_dword(pdev, REG_PCI_NPKDSD, val);
+}
 
 static int intel_th_pci_probe(struct pci_dev *pdev,
 			      const struct pci_device_id *id)
@@ -78,7 +107,7 @@
 		return err;
 
 	th = intel_th_alloc(&pdev->dev, drvdata, pdev->resource,
-			    DEVICE_COUNT_RESOURCE, pdev->irq);
+			    DEVICE_COUNT_RESOURCE, pdev->irq, intel_th_pci_reset);
 	if (IS_ERR(th))
 		return PTR_ERR(th);
 
@@ -170,11 +199,34 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
 
 MODULE_DEVICE_TABLE(pci, intel_th_pci_id_table);
 
+static int intel_th_suspend(struct device *dev)
+{
+	/*
+	 * Stub the call to avoid disabling the device.
+	 * Suspend is fully handled by firmware.
+	 */
+	return 0;
+}
+
+static int intel_th_resume(struct device *dev)
+{
+	/*
+	 * Firmware has already restored the device state.
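+	 * As with suspend, this stub keeps the PCI core from touching
+	 * the device.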
+	 */
+	return 0;
+}
+
+static const struct dev_pm_ops intel_th_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(intel_th_suspend,
+				intel_th_resume)
+};
+
 static struct pci_driver intel_th_pci_driver = {
 	.name		= DRIVER_NAME,
 	.id_table	= intel_th_pci_id_table,
 	.probe		= intel_th_pci_probe,
 	.remove		= intel_th_pci_remove,
+	.driver = {
+		.pm = &intel_th_pm_ops,
+	},
 };
 
 module_pci_driver(intel_th_pci_driver);
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 56694339cb06..9b6224b22a5f 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -161,8 +161,6 @@ static int intel_th_pti_activate(struct intel_th_device *thdev)
 
 	iowrite32(ctl, pti->base + REG_PTI_CTL);
 
-	intel_th_trace_enable(thdev);
-
 	return 0;
 }
diff --git a/drivers/hwtracing/stm/console.c b/drivers/hwtracing/stm/console.c
index a00f65e21747..a363467ac4a8 100644
--- a/drivers/hwtracing/stm/console.c
+++ b/drivers/hwtracing/stm/console.c
@@ -31,8 +31,22 @@ static void stm_console_write(struct console *con, const char *buf,
 			      unsigned len)
 {
 	struct stm_console *sc = container_of(con, struct stm_console, console);
+	static char svenbuf[1024];
+	char *p = svenbuf;
+	unsigned int towrite;
+	u16 textlen;
+	const u32 sven_header = 0x01000242;
 
-	stm_source_write(&sc->data, 0, buf, len);
+	/* Clamp the payload so header + length + text fit in svenbuf. */
+	textlen = min_t(u16, len, 1024 - sizeof(sven_header) - sizeof(textlen));
+	towrite = textlen + sizeof(sven_header) + sizeof(textlen);
+
+	memcpy(p, &sven_header, sizeof(sven_header));
+	p += sizeof(sven_header);
+	memcpy(p, &textlen, sizeof(textlen));
+	p += sizeof(textlen);
+	memcpy(p, buf, textlen);
+
+	stm_source_write(&sc->data, 0, svenbuf, towrite);
 }
 
 static int stm_console_link(struct stm_source_data *data)
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index c5992cd195a1..a78c8309bb3f 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -184,6 +184,19 @@ config INPUT_APMPOWER
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	bool "Reset key"
+	depends on INPUT
+	select INPUT_KEYCOMBO
+	---help---
+	  Say Y here if you want to reboot when some keys are pressed.
+
+config INPUT_KEYCOMBO
+	bool "Key combo"
+	depends on INPUT
+	---help---
+	  Say Y here if you want to take action when some keys are pressed.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 40de6a7be641..f0351af763bd 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -27,5 +27,7 @@ obj-$(CONFIG_INPUT_TOUCHSCREEN) += touchscreen/
 obj-$(CONFIG_INPUT_MISC)	+= misc/
 
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)	+= keycombo.o
 
 obj-$(CONFIG_RMI4_CORE)		+= rmi4/
diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c
new file mode 100644
index 000000000000..2fba451b91d5
--- /dev/null
+++ b/drivers/input/keycombo.c
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+struct keycombo_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	spinlock_t lock;
+	struct workqueue_struct *wq;
+	int key_down_target;
+	int key_down;
+	int key_up;
+	struct delayed_work key_down_work;
+	int delay;
+	struct work_struct key_up_work;
+	void (*key_up_fn)(void *);
+	void (*key_down_fn)(void *);
+	void *priv;
+	int key_is_down;
+	struct wakeup_source combo_held_wake_source;
+	struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+						  work);
+	struct keycombo_state *state = container_of(dwork,
+					struct keycombo_state, key_down_work);
+
+	if (state->key_down_fn)
+		state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+	struct keycombo_state *state = container_of(work,
+					struct keycombo_state, key_up_work);
+
+	if (state->key_up_fn)
+		state->key_up_fn(state->priv);
+	__pm_relax(&state->combo_up_wake_source);
+}
+
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keycombo_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	/* No state change (e.g. auto-repeat); nothing to do. */
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value)
+			state->key_up++;
+		else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	if (state->key_down == state->key_down_target && state->key_up == 0) {
+		__pm_stay_awake(&state->combo_held_wake_source);
+		state->key_is_down = 1;
+		if (!queue_delayed_work(state->wq, &state->key_down_work,
+					state->delay))
+			pr_debug("Key down work already queued!");
+	} else if (state->key_is_down) {
+		if (!cancel_delayed_work(&state->key_down_work)) {
+			__pm_stay_awake(&state->combo_up_wake_source);
+			queue_work(state->wq, &state->key_up_work);
+		}
+		__pm_relax(&state->combo_held_wake_source);
+		state->key_is_down = 0;
+	}
done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+			    struct input_dev *dev,
+			    const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keycombo_state *state =
+		container_of(handler, struct keycombo_state, input_handler);
+
+	/* Only attach to devices that can emit at least one combo key. */
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCOMBO_NAME;
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keycombo_state *state;
+	struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	state->wq = alloc_ordered_workqueue("keycombo", 0);
+	if (!state->wq) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	state->priv = pdata->priv;
+
+	if (pdata->key_down_fn)
+		state->key_down_fn = pdata->key_down_fn;
+	INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+	if (pdata->key_up_fn)
+		state->key_up_fn = pdata->key_up_fn;
+	INIT_WORK(&state->key_up_work, do_key_up);
+
+	wakeup_source_init(&state->combo_held_wake_source, "key combo");
+	wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+	state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+	state->input_handler.event = keycombo_event;
+	state->input_handler.connect = keycombo_connect;
+	state->input_handler.disconnect = keycombo_disconnect;
+	state->input_handler.name = KEYCOMBO_NAME;
+	state->input_handler.id_table = keycombo_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		wakeup_source_trash(&state->combo_up_wake_source);
+		wakeup_source_trash(&state->combo_held_wake_source);
+		destroy_workqueue(state->wq);
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+int keycombo_remove(struct platform_device *pdev)
+{
+	struct keycombo_state *state = platform_get_drvdata(pdev);
+
+	input_unregister_handler(&state->input_handler);
+	destroy_workqueue(state->wq);
+	wakeup_source_trash(&state->combo_up_wake_source);
+	wakeup_source_trash(&state->combo_held_wake_source);
+	kfree(state);
+	return 0;
+}
+
+struct platform_driver keycombo_driver = {
+	.driver.name = KEYCOMBO_NAME,
+	.probe = keycombo_probe,
+	.remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+	return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+	platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 000000000000..7e5222aec7c1
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,144 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+struct keyreset_state {
+	int restart_requested;
+	int (*reset_fn)(void);
+	struct platform_device *pdev_child;
+	struct work_struct restart_work;
+};
+
+static void do_restart(struct work_struct *unused)
+{
+	orderly_reboot();
+}
+
+static void do_reset_fn(void *priv)
+{
+	struct keyreset_state *state = priv;
+
+	if (state->restart_requested)
+		panic("keyboard reset failed, %d", state->restart_requested);
+	if (state->reset_fn) {
+		state->restart_requested = state->reset_fn();
+	} else {
+		pr_info("keyboard reset\n");
+		schedule_work(&state->restart_work);
+		state->restart_requested = 1;
+	}
+}
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct keycombo_platform_data *pdata_child;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+	int up_size = 0, down_size = 0, size;
+	int key, *keyp;
+	struct keyreset_state *state;
+
+	if (!pdata)
+		return -EINVAL;
+	state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+						  PLATFORM_DEVID_AUTO);
+	if (!state->pdev_child)
+		return -ENOMEM;
+	state->pdev_child->dev.parent = &pdev->dev;
+	INIT_WORK(&state->restart_work, do_restart);
+
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		down_size++;
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			up_size++;
+		}
+	}
+	size = sizeof(struct keycombo_platform_data)
+			+ sizeof(int) * (down_size + 1);
+	pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!pdata_child)
+		goto error;
+	memcpy(pdata_child->keys_down, pdata->keys_down,
+	       sizeof(int) * down_size);
+	if (up_size > 0) {
+		pdata_child->keys_up = devm_kzalloc(&pdev->dev,
+				sizeof(int) * (up_size + 1), GFP_KERNEL);
+		if (!pdata_child->keys_up)
+			goto error;
+		memcpy(pdata_child->keys_up, pdata->keys_up,
+		       sizeof(int) * up_size);
+	}
+	state->reset_fn = pdata->reset_fn;
+	pdata_child->key_down_fn = do_reset_fn;
+	pdata_child->priv = state;
+	pdata_child->key_down_delay = pdata->key_down_delay;
+	ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+	if (ret)
+		goto error;
+	platform_set_drvdata(pdev, state);
+	return platform_device_add(state->pdev_child);
error:
+	platform_device_put(state->pdev_child);
+	return ret;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+
+	platform_device_put(state->pdev_child);
+	return 0;
+}
+
+struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index ca59a2be9bc5..9e0232c517d0 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -521,6 +521,11 @@ config INPUT_SGI_BTNS
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO based keys, wheels, etc.
+ config HP_SDC_RTC tristate "HP SDC Real Time Clock" depends on (GSC || HP300) && SERIO diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index 9d0f9d1ff68f..02e9edcde799 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile @@ -36,6 +36,7 @@ obj-$(CONFIG_INPUT_DRV2667_HAPTICS) += drv2667.o obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o obj-$(CONFIG_INPUT_GPIO_BEEPER) += gpio-beeper.o obj-$(CONFIG_INPUT_GPIO_DECODER) += gpio_decoder.o +obj-$(CONFIG_INPUT_GPIO) += gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o obj-$(CONFIG_INPUT_HISI_POWERKEY) += hisi_powerkey.o obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o obj-$(CONFIG_INPUT_IMS_PCU) += ims-pcu.o diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c new file mode 100644 index 000000000000..0acf4a576f53 --- /dev/null +++ b/drivers/input/misc/gpio_axis.c @@ -0,0 +1,192 @@ +/* drivers/input/misc/gpio_axis.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include + +struct gpio_axis_state { + struct gpio_event_input_devs *input_devs; + struct gpio_event_axis_info *info; + uint32_t pos; +}; + +uint16_t gpio_axis_4bit_gray_map_table[] = { + [0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */ + [0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */ + [0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */ + [0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */ + [0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */ + [0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */ + [0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */ + [0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */ +}; +uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_4bit_gray_map_table[in]; +} + +uint16_t gpio_axis_5bit_singletrack_map_table[] = { + [0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /* 10000 10100 11100 */ + [0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /* 11110 11010 11000 */ + [0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /* 01000 01010 01110 */ + [0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /* 01111 01101 01100 */ + [0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /* 00100 00101 00111 */ + [0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /* 10111 10110 00110 */ + [0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /* 00010 10010 10011 */ + [0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /* 11011 01011 00011 */ + [0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001 */ + [0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001 */ +}; +uint16_t gpio_axis_5bit_singletrack_map( + struct gpio_event_axis_info *info, uint16_t in) +{ + return gpio_axis_5bit_singletrack_map_table[in]; +} + +static void gpio_event_update_axis(struct gpio_axis_state *as, int report) +{ + struct gpio_event_axis_info *ai = as->info; + int i; + int change; + uint16_t state = 0; + uint16_t pos; + uint16_t old_pos = as->pos; + for (i = ai->count - 1; i >= 0; i--) + state = (state << 1) | gpio_get_value(ai->gpio[i]); + pos = ai->map(ai, state); + if (ai->flags & GPIOEAF_PRINT_RAW) + pr_info("axis %d-%d raw %x, pos %d -> %d\n", + ai->type, ai->code, state, old_pos, 
pos); + if (report && pos != old_pos) { + if (ai->type == EV_REL) { + change = (ai->decoded_size + pos - old_pos) % + ai->decoded_size; + if (change > ai->decoded_size / 2) + change -= ai->decoded_size; + if (change == ai->decoded_size / 2) { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d unknown direction, " + "pos %d -> %d\n", ai->type, + ai->code, old_pos, pos); + change = 0; /* no closest direction */ + } + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d change %d\n", + ai->type, ai->code, change); + input_report_rel(as->input_devs->dev[ai->dev], + ai->code, change); + } else { + if (ai->flags & GPIOEAF_PRINT_EVENT) + pr_info("axis %d-%d now %d\n", + ai->type, ai->code, pos); + input_event(as->input_devs->dev[ai->dev], + ai->type, ai->code, pos); + } + input_sync(as->input_devs->dev[ai->dev]); + } + as->pos = pos; +} + +static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id) +{ + struct gpio_axis_state *as = dev_id; + gpio_event_update_axis(as, 1); + return IRQ_HANDLED; +} + +int gpio_event_axis_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + int irq; + struct gpio_event_axis_info *ai; + struct gpio_axis_state *as; + + ai = container_of(info, struct gpio_event_axis_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND) { + for (i = 0; i < ai->count; i++) + disable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + for (i = 0; i < ai->count; i++) + enable_irq(gpio_to_irq(ai->gpio[i])); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + *data = as = kmalloc(sizeof(*as), GFP_KERNEL); + if (as == NULL) { + ret = -ENOMEM; + goto err_alloc_axis_state_failed; + } + as->input_devs = input_devs; + as->info = ai; + if (ai->dev >= input_devs->count) { + pr_err("gpio_event_axis: bad device index %d >= %d " + "for %d:%d\n", ai->dev, input_devs->count, + ai->type, ai->code); + ret = -EINVAL; + goto err_bad_device_index; + } + + input_set_capability(input_devs->dev[ai->dev], + ai->type, ai->code); + if (ai->type == EV_ABS) { + input_set_abs_params(input_devs->dev[ai->dev], ai->code, + 0, ai->decoded_size - 1, 0, 0); + } + for (i = 0; i < ai->count; i++) { + ret = gpio_request(ai->gpio[i], "gpio_event_axis"); + if (ret < 0) + goto err_request_gpio_failed; + ret = gpio_direction_input(ai->gpio[i]); + if (ret < 0) + goto err_gpio_direction_input_failed; + ret = irq = gpio_to_irq(ai->gpio[i]); + if (ret < 0) + goto err_get_irq_num_failed; + ret = request_irq(irq, gpio_axis_irq_handler, + IRQF_TRIGGER_RISING | + IRQF_TRIGGER_FALLING, + "gpio_event_axis", as); + if (ret < 0) + goto err_request_irq_failed; + } + gpio_event_update_axis(as, 0); + return 0; + } + + ret = 0; + as = *data; + for (i = ai->count - 1; i >= 0; i--) { + free_irq(gpio_to_irq(ai->gpio[i]), as); +err_request_irq_failed: +err_get_irq_num_failed: +err_gpio_direction_input_failed: + gpio_free(ai->gpio[i]); +err_request_gpio_failed: + ; + } +err_bad_device_index: + kfree(as); + *data = NULL; +err_alloc_axis_state_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c new file mode 100644 index 000000000000..90f07eba3ce9 --- /dev/null +++ b/drivers/input/misc/gpio_event.c @@ -0,0 +1,228 @@ +/* drivers/input/misc/gpio_event.c + * + * Copyright (C) 2007 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_event { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_platform_data *info; + void *state[0]; +}; + +static int gpio_input_event( + struct input_dev *dev, unsigned int type, unsigned int code, int value) +{ + int i; + int devnr; + int ret = 0; + int tmp_ret; + struct gpio_event_info **ii; + struct gpio_event *ip = input_get_drvdata(dev); + + for (devnr = 0; devnr < ip->input_devs->count; devnr++) + if (ip->input_devs->dev[devnr] == dev) + break; + if (devnr == ip->input_devs->count) { + pr_err("gpio_input_event: unknown device %p\n", dev); + return -EIO; + } + + for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) { + if ((*ii)->event) { + tmp_ret = (*ii)->event(ip->input_devs, *ii, + &ip->state[i], + devnr, type, code, value); + if (tmp_ret) + ret = tmp_ret; + } + } + return ret; +} + +static int gpio_event_call_all_func(struct gpio_event *ip, int func) +{ + int i; + int ret; + struct gpio_event_info **ii; + + if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) { + ii = ip->info->info; + for (i = 0; i < ip->info->info_count; i++, ii++) { + if ((*ii)->func == NULL) { + ret = -ENODEV; + pr_err("gpio_event_probe: Incomplete pdata, " + "no function\n"); + goto err_no_func; + } + if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend) + continue; + ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i], + func); + if (ret) { + pr_err("gpio_event_probe: function failed\n"); + goto err_func_failed; + } + } + return 0; + } + + ret = 0; + i = ip->info->info_count; + ii = ip->info->info + i; + while (i > 0) { + i--; + ii--; + if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend) + continue; + (*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1); +err_func_failed: +err_no_func: + ; + } + return ret; +} + +static void __maybe_unused gpio_event_suspend(struct gpio_event *ip) +{ + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND); + if (ip->info->power) + ip->info->power(ip->info, 0); +} + +static void __maybe_unused gpio_event_resume(struct gpio_event *ip) +{ + if (ip->info->power) + ip->info->power(ip->info, 1); + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME); +} + +static int gpio_event_probe(struct platform_device *pdev) +{ + int err; + struct gpio_event *ip; + struct gpio_event_platform_data *event_info; + int dev_count = 1; + int i; + int registered = 0; + + event_info = pdev->dev.platform_data; + if (event_info == NULL) { + pr_err("gpio_event_probe: No pdata\n"); + return -ENODEV; + } + if ((!event_info->name && !event_info->names[0]) || + !event_info->info || !event_info->info_count) { + pr_err("gpio_event_probe: Incomplete pdata\n"); + return -ENODEV; + } + if (!event_info->name) + while (event_info->names[dev_count]) + dev_count++; + ip = kzalloc(sizeof(*ip) + + sizeof(ip->state[0]) * event_info->info_count + + sizeof(*ip->input_devs) + + sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL); + if (ip == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: Failed to 
allocate private data\n"); + goto err_kp_alloc_failed; + } + ip->input_devs = (void*)&ip->state[event_info->info_count]; + platform_set_drvdata(pdev, ip); + + for (i = 0; i < dev_count; i++) { + struct input_dev *input_dev = input_allocate_device(); + if (input_dev == NULL) { + err = -ENOMEM; + pr_err("gpio_event_probe: " + "Failed to allocate input device\n"); + goto err_input_dev_alloc_failed; + } + input_set_drvdata(input_dev, ip); + input_dev->name = event_info->name ? + event_info->name : event_info->names[i]; + input_dev->event = gpio_input_event; + ip->input_devs->dev[i] = input_dev; + } + ip->input_devs->count = dev_count; + ip->info = event_info; + if (event_info->power) + ip->info->power(ip->info, 1); + + err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT); + if (err) + goto err_call_all_func_failed; + + for (i = 0; i < dev_count; i++) { + err = input_register_device(ip->input_devs->dev[i]); + if (err) { + pr_err("gpio_event_probe: Unable to register %s " + "input device\n", ip->input_devs->dev[i]->name); + goto err_input_register_device_failed; + } + registered++; + } + + return 0; + +err_input_register_device_failed: + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); +err_call_all_func_failed: + if (event_info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < registered; i++) + input_unregister_device(ip->input_devs->dev[i]); + for (i = dev_count - 1; i >= registered; i--) { + input_free_device(ip->input_devs->dev[i]); +err_input_dev_alloc_failed: + ; + } + kfree(ip); +err_kp_alloc_failed: + return err; +} + +static int gpio_event_remove(struct platform_device *pdev) +{ + struct gpio_event *ip = platform_get_drvdata(pdev); + int i; + + gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT); + if (ip->info->power) + ip->info->power(ip->info, 0); + for (i = 0; i < ip->input_devs->count; i++) + input_unregister_device(ip->input_devs->dev[i]); + kfree(ip); + return 0; +} + +static struct platform_driver gpio_event_driver = { + .probe = gpio_event_probe, + .remove = gpio_event_remove, + .driver = { + .name = GPIO_EVENT_DEV_NAME, + }, +}; + +module_platform_driver(gpio_event_driver); + +MODULE_DESCRIPTION("GPIO Event Driver"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c new file mode 100644 index 000000000000..5875d739c550 --- /dev/null +++ b/drivers/input/misc/gpio_input.c @@ -0,0 +1,390 @@ +/* drivers/input/misc/gpio_input.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + DEBOUNCE_UNSTABLE = BIT(0), /* Got irq, while debouncing */ + DEBOUNCE_PRESSED = BIT(1), + DEBOUNCE_NOTPRESSED = BIT(2), + DEBOUNCE_WAIT_IRQ = BIT(3), /* Stable irq state */ + DEBOUNCE_POLL = BIT(4), /* Stable polling state */ + + DEBOUNCE_UNKNOWN = + DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED, +}; + +struct gpio_key_state { + struct gpio_input_state *ds; + uint8_t debounce; +}; + +struct gpio_input_state { + struct gpio_event_input_devs *input_devs; + const struct gpio_event_input_info *info; + struct hrtimer timer; + int use_irq; + int debounce_count; + spinlock_t irq_lock; + struct wakeup_source *ws; + struct gpio_key_state key_state[0]; +}; + +static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer) +{ + int i; + int pressed; + struct gpio_input_state *ds = + container_of(timer, struct gpio_input_state, timer); + unsigned gpio_flags = ds->info->flags; + unsigned npolarity; + int nkeys = ds->info->keymap_size; + const struct gpio_event_direct_entry *key_entry; + struct gpio_key_state *key_state; + unsigned long irqflags; + uint8_t debounce; + bool sync_needed; + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); +#endif + key_entry = ds->info->keymap; + key_state = ds->key_state; + sync_needed = false; + spin_lock_irqsave(&ds->irq_lock, irqflags); + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + debounce = key_state->debounce; + if (debounce & DEBOUNCE_WAIT_IRQ) + continue; + if (key_state->debounce & DEBOUNCE_UNSTABLE) { + debounce = key_state->debounce = DEBOUNCE_UNKNOWN; + enable_irq(gpio_to_irq(key_entry->gpio)); + if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) continue debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH); + pressed = gpio_get_value(key_entry->gpio) ^ npolarity; + if (debounce & DEBOUNCE_POLL) { + if (pressed == !(debounce & DEBOUNCE_PRESSED)) { + ds->debounce_count++; + key_state->debounce = DEBOUNCE_UNKNOWN; + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-" + "%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + } + continue; + } + if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 1\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_PRESSED; + continue; + } + if (!pressed && (debounce & DEBOUNCE_PRESSED)) { + if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_keys_scan_keys: key %x-%x, %d " + "(%d) debounce pressed 0\n", + ds->info->type, key_entry->code, + i, key_entry->gpio); + key_state->debounce = DEBOUNCE_NOTPRESSED; + continue; + } + /* key is stable */ + ds->debounce_count--; + if (ds->use_irq) + key_state->debounce |= DEBOUNCE_WAIT_IRQ; + else + key_state->debounce |= DEBOUNCE_POLL; + if (gpio_flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) " + "changed to %d\n", ds->info->type, + key_entry->code, i, key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + sync_needed = true; + 
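+		/* input_sync() is deferred until after the scan loop below. */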
} + if (sync_needed) { + for (i = 0; i < ds->input_devs->count; i++) + input_sync(ds->input_devs->dev[i]); + } + +#if 0 + key_entry = kp->keys_info->keymap; + key_state = kp->key_state; + for (i = 0; i < nkeys; i++, key_entry++, key_state++) { + pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio, + gpio_read_detect_status(key_entry->gpio)); + } +#endif + + if (ds->debounce_count) + hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL); + else if (!ds->use_irq) + hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL); + else + __pm_relax(ds->ws); + + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id) +{ + struct gpio_key_state *ks = dev_id; + struct gpio_input_state *ds = ks->ds; + int keymap_index = ks - ds->key_state; + const struct gpio_event_direct_entry *key_entry; + unsigned long irqflags; + int pressed; + + if (!ds->use_irq) + return IRQ_HANDLED; + + key_entry = &ds->info->keymap[keymap_index]; + + if (ds->info->debounce_time) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ks->debounce & DEBOUNCE_WAIT_IRQ) { + ks->debounce = DEBOUNCE_UNKNOWN; + if (ds->debounce_count++ == 0) { + __pm_stay_awake(ds->ws); + hrtimer_start( + &ds->timer, ds->info->debounce_time, + HRTIMER_MODE_REL); + } + if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE) + pr_info("gpio_event_input_irq_handler: " + "key %x-%x, %d (%d) start debounce\n", + ds->info->type, key_entry->code, + keymap_index, key_entry->gpio); + } else { + disable_irq_nosync(irq); + ks->debounce = DEBOUNCE_UNSTABLE; + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + } else { + pressed = gpio_get_value(key_entry->gpio) ^ + !(ds->info->flags & GPIOEDF_ACTIVE_HIGH); + if (ds->info->flags & GPIOEDF_PRINT_KEYS) + pr_info("gpio_event_input_irq_handler: key %x-%x, %d " + "(%d) changed to %d\n", + ds->info->type, key_entry->code, keymap_index, + key_entry->gpio, pressed); + input_event(ds->input_devs->dev[key_entry->dev], ds->info->type, + key_entry->code, pressed); + input_sync(ds->input_devs->dev[key_entry->dev]); + } + return IRQ_HANDLED; +} + +static int gpio_event_input_request_irqs(struct gpio_input_state *ds) +{ + int i; + int err; + unsigned int irq; + unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + + for (i = 0; i < ds->info->keymap_size; i++) { + err = irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_event_input_irq_handler, + req_flags, "gpio_keys", &ds->key_state[i]); + if (err) { + pr_err("gpio_event_input_request_irqs: request_irq " + "failed for input %d, irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_request_irq_failed; + } + if (ds->info->info.no_suspend) { + err = enable_irq_wake(irq); + if (err) { + pr_err("gpio_event_input_request_irqs: " + "enable_irq_wake failed for input %d, " + "irq %d\n", + ds->info->keymap[i].gpio, irq); + goto err_enable_irq_wake_failed; + } + } + } + return 0; + + for (i = ds->info->keymap_size - 1; i >= 0; i--) { + irq = gpio_to_irq(ds->info->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); +err_enable_irq_wake_failed: + free_irq(irq, &ds->key_state[i]); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_input_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int ret; + int i; + unsigned long irqflags; + struct gpio_event_input_info 
*di; + struct gpio_input_state *ds = *data; + char *wlname; + + di = container_of(info, struct gpio_event_input_info, info); + + if (func == GPIO_EVENT_FUNC_SUSPEND) { + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + disable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_cancel(&ds->timer); + return 0; + } + if (func == GPIO_EVENT_FUNC_RESUME) { + spin_lock_irqsave(&ds->irq_lock, irqflags); + if (ds->use_irq) + for (i = 0; i < di->keymap_size; i++) + enable_irq(gpio_to_irq(di->keymap[i].gpio)); + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (ktime_to_ns(di->poll_time) <= 0) + di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC); + + *data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) * + di->keymap_size, GFP_KERNEL); + if (ds == NULL) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate private data\n"); + goto err_ds_alloc_failed; + } + ds->debounce_count = di->keymap_size; + ds->input_devs = input_devs; + ds->info = di; + wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s", + input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : ""); + + ds->ws = wakeup_source_register(wlname); + kfree(wlname); + if (!ds->ws) { + ret = -ENOMEM; + pr_err("gpio_event_input_func: " + "Failed to allocate wakeup source\n"); + goto err_ws_failed; + } + + spin_lock_init(&ds->irq_lock); + + for (i = 0; i < di->keymap_size; i++) { + int dev = di->keymap[i].dev; + if (dev >= input_devs->count) { + pr_err("gpio_event_input_func: bad device " + "index %d >= %d for key code %d\n", + dev, input_devs->count, + di->keymap[i].code); + ret = -EINVAL; + goto err_bad_keymap; + } + input_set_capability(input_devs->dev[dev], di->type, + di->keymap[i].code); + ds->key_state[i].ds = ds; + ds->key_state[i].debounce = DEBOUNCE_UNKNOWN; + } + + for (i = 0; i < di->keymap_size; i++) { + ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in"); + if (ret) { + pr_err("gpio_event_input_func: gpio_request " + "failed for %d\n", di->keymap[i].gpio); + goto err_gpio_request_failed; + } + ret = gpio_direction_input(di->keymap[i].gpio); + if (ret) { + pr_err("gpio_event_input_func: " + "gpio_direction_input failed for %d\n", + di->keymap[i].gpio); + goto err_gpio_configure_failed; + } + } + + ret = gpio_event_input_request_irqs(ds); + + spin_lock_irqsave(&ds->irq_lock, irqflags); + ds->use_irq = ret == 0; + + pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s " + "mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + ret == 0 ? 
"interrupt" : "polling"); + + hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + ds->timer.function = gpio_event_input_timer_func; + hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + return 0; + } + + ret = 0; + spin_lock_irqsave(&ds->irq_lock, irqflags); + hrtimer_cancel(&ds->timer); + if (ds->use_irq) { + for (i = di->keymap_size - 1; i >= 0; i--) { + int irq = gpio_to_irq(di->keymap[i].gpio); + if (ds->info->info.no_suspend) + disable_irq_wake(irq); + free_irq(irq, &ds->key_state[i]); + } + } + spin_unlock_irqrestore(&ds->irq_lock, irqflags); + + for (i = di->keymap_size - 1; i >= 0; i--) { +err_gpio_configure_failed: + gpio_free(di->keymap[i].gpio); +err_gpio_request_failed: + ; + } +err_bad_keymap: + wakeup_source_unregister(ds->ws); +err_ws_failed: + kfree(ds); +err_ds_alloc_failed: + return ret; +} diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c new file mode 100644 index 000000000000..08769dd88f56 --- /dev/null +++ b/drivers/input/misc/gpio_matrix.c @@ -0,0 +1,440 @@ +/* drivers/input/misc/gpio_matrix.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include + +struct gpio_kp { + struct gpio_event_input_devs *input_devs; + struct gpio_event_matrix_info *keypad_info; + struct hrtimer timer; + struct wakeup_source wake_src; + int current_output; + unsigned int use_irq:1; + unsigned int key_state_changed:1; + unsigned int last_key_state_changed:1; + unsigned int some_keys_pressed:2; + unsigned int disabled_irq:1; + unsigned long keys_pressed[0]; +}; + +static void clear_phantom_key(struct gpio_kp *kp, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int key_index = out * mi->ninputs + in; + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + __clear_bit(key_index, kp->keys_pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS) + pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) " + "not cleared\n", keycode, out, in, + mi->output_gpios[out], mi->input_gpios[in]); + } +} + +static int restore_keys_for_input(struct gpio_kp *kp, int out, int in) +{ + int rv = 0; + int key_index; + + key_index = out * kp->keypad_info->ninputs + in; + while (out < kp->keypad_info->noutputs) { + if (test_bit(key_index, kp->keys_pressed)) { + rv = 1; + clear_phantom_key(kp, out, in); + } + key_index += kp->keypad_info->ninputs; + out++; + } + return rv; +} + +static void remove_phantom_keys(struct gpio_kp *kp) +{ + int out, in, inp; + int key_index; + + if (kp->some_keys_pressed < 3) + return; + + for (out = 0; out < kp->keypad_info->noutputs; out++) { + inp = -1; + key_index = out * 
kp->keypad_info->ninputs; + for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) { + if (test_bit(key_index, kp->keys_pressed)) { + if (inp == -1) { + inp = in; + continue; + } + if (inp >= 0) { + if (!restore_keys_for_input(kp, out + 1, + inp)) + break; + clear_phantom_key(kp, out, inp); + inp = -2; + } + restore_keys_for_input(kp, out, in); + } + } + } +} + +static void report_key(struct gpio_kp *kp, int key_index, int out, int in) +{ + struct gpio_event_matrix_info *mi = kp->keypad_info; + int pressed = test_bit(key_index, kp->keys_pressed); + unsigned short keyentry = mi->keymap[key_index]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + + if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) { + if (keycode == KEY_RESERVED) { + if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS) + pr_info("gpiomatrix: unmapped key, %d-%d " + "(%d-%d) changed to %d\n", + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + } else { + if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS) + pr_info("gpiomatrix: key %x, %d-%d (%d-%d) " + "changed to %d\n", keycode, + out, in, mi->output_gpios[out], + mi->input_gpios[in], pressed); + input_report_key(kp->input_devs->dev[dev], keycode, pressed); + } + } +} + +static void report_sync(struct gpio_kp *kp) +{ + int i; + + for (i = 0; i < kp->input_devs->count; i++) + input_sync(kp->input_devs->dev[i]); +} + +static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer) +{ + int out, in; + int key_index; + int gpio; + struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer); + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH); + + out = kp->current_output; + if (out == mi->noutputs) { + out = 0; + kp->last_key_state_changed = kp->key_state_changed; + kp->key_state_changed = 0; + kp->some_keys_pressed = 0; + } else { + key_index = out * mi->ninputs; + for (in = 0; in < mi->ninputs; in++, key_index++) { + gpio = mi->input_gpios[in]; + if (gpio_get_value(gpio) ^ !polarity) { + if (kp->some_keys_pressed < 3) + kp->some_keys_pressed++; + kp->key_state_changed |= !__test_and_set_bit( + key_index, kp->keys_pressed); + } else + kp->key_state_changed |= __test_and_clear_bit( + key_index, kp->keys_pressed); + } + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, !polarity); + else + gpio_direction_input(gpio); + out++; + } + kp->current_output = out; + if (out < mi->noutputs) { + gpio = mi->output_gpios[out]; + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(gpio, polarity); + else + gpio_direction_output(gpio, polarity); + hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) { + if (kp->key_state_changed) { + hrtimer_start(&kp->timer, mi->debounce_delay, + HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + kp->key_state_changed = kp->last_key_state_changed; + } + if (kp->key_state_changed) { + if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS) + remove_phantom_keys(kp); + key_index = 0; + for (out = 0; out < mi->noutputs; out++) + for (in = 0; in < mi->ninputs; in++, key_index++) + report_key(kp, key_index, out, in); + report_sync(kp); + } + if (!kp->use_irq || kp->some_keys_pressed) { + hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL); + return HRTIMER_NORESTART; + } + + /* No keys are pressed, reenable 
interrupt */ + for (out = 0; out < mi->noutputs; out++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[out], polarity); + else + gpio_direction_output(mi->output_gpios[out], polarity); + } + for (in = 0; in < mi->ninputs; in++) + enable_irq(gpio_to_irq(mi->input_gpios[in])); + __pm_relax(&kp->wake_src); + return HRTIMER_NORESTART; +} + +static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id) +{ + int i; + struct gpio_kp *kp = dev_id; + struct gpio_event_matrix_info *mi = kp->keypad_info; + unsigned gpio_keypad_flags = mi->flags; + + if (!kp->use_irq) { + /* ignore interrupt while registering the handler */ + kp->disabled_irq = 1; + disable_irq_nosync(irq_in); + return IRQ_HANDLED; + } + + for (i = 0; i < mi->ninputs; i++) + disable_irq_nosync(gpio_to_irq(mi->input_gpios[i])); + for (i = 0; i < mi->noutputs; i++) { + if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE) + gpio_set_value(mi->output_gpios[i], + !(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH)); + else + gpio_direction_input(mi->output_gpios[i]); + } + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + return IRQ_HANDLED; +} + +static int gpio_keypad_request_irqs(struct gpio_kp *kp) +{ + int i; + int err; + unsigned int irq; + unsigned long request_flags; + struct gpio_event_matrix_info *mi = kp->keypad_info; + + switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) { + default: + request_flags = IRQF_TRIGGER_FALLING; + break; + case GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_RISING; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ: + request_flags = IRQF_TRIGGER_LOW; + break; + case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH: + request_flags = IRQF_TRIGGER_HIGH; + break; + } + + for (i = 0; i < mi->ninputs; i++) { + err = irq = gpio_to_irq(mi->input_gpios[i]); + if (err < 0) + goto err_gpio_get_irq_num_failed; + err = request_irq(irq, gpio_keypad_irq_handler, request_flags, + "gpio_kp", kp); + if (err) { + pr_err("gpiomatrix: request_irq failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + goto err_request_irq_failed; + } + err = enable_irq_wake(irq); + if (err) { + pr_err("gpiomatrix: set_irq_wake failed for input %d, " + "irq %d\n", mi->input_gpios[i], irq); + } + disable_irq(irq); + if (kp->disabled_irq) { + kp->disabled_irq = 0; + enable_irq(irq); + } + } + return 0; + + for (i = mi->noutputs - 1; i >= 0; i--) { + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); +err_request_irq_failed: +err_gpio_get_irq_num_failed: + ; + } + return err; +} + +int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs, + struct gpio_event_info *info, void **data, int func) +{ + int i; + int err; + int key_count; + struct gpio_kp *kp; + struct gpio_event_matrix_info *mi; + + mi = container_of(info, struct gpio_event_matrix_info, info); + if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) { + /* TODO: disable scanning */ + return 0; + } + + if (func == GPIO_EVENT_FUNC_INIT) { + if (mi->keymap == NULL || + mi->input_gpios == NULL || + mi->output_gpios == NULL) { + err = -ENODEV; + pr_err("gpiomatrix: Incomplete pdata\n"); + goto err_invalid_platform_data; + } + key_count = mi->ninputs * mi->noutputs; + + *data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) * + BITS_TO_LONGS(key_count), GFP_KERNEL); + if (kp == NULL) { + err = -ENOMEM; + pr_err("gpiomatrix: Failed to allocate private data\n"); + goto err_kp_alloc_failed; + } + kp->input_devs = input_devs; + kp->keypad_info = mi; + 
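+		/*
+		 * Each keymap entry packs a target device index and a key
+		 * code; e.g. a hypothetical entry routing a key to input
+		 * device 0:
+		 *   mi->keymap[out * mi->ninputs + in] =
+		 *		(0 << MATRIX_CODE_BITS) | KEY_ENTER;
+		 * The loop below validates the device index of every entry.
+		 */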
for (i = 0; i < key_count; i++) { + unsigned short keyentry = mi->keymap[i]; + unsigned short keycode = keyentry & MATRIX_KEY_MASK; + unsigned short dev = keyentry >> MATRIX_CODE_BITS; + if (dev >= input_devs->count) { + pr_err("gpiomatrix: bad device index %d >= " + "%d for key code %d\n", + dev, input_devs->count, keycode); + err = -EINVAL; + goto err_bad_keymap; + } + if (keycode && keycode <= KEY_MAX) + input_set_capability(input_devs->dev[dev], + EV_KEY, keycode); + } + + for (i = 0; i < mi->noutputs; i++) { + err = gpio_request(mi->output_gpios[i], "gpio_kp_out"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "output %d\n", mi->output_gpios[i]); + goto err_request_output_gpio_failed; + } + if (gpio_cansleep(mi->output_gpios[i])) { + pr_err("gpiomatrix: unsupported output gpio %d," + " can sleep\n", mi->output_gpios[i]); + err = -EINVAL; + goto err_output_gpio_configure_failed; + } + if (mi->flags & GPIOKPF_DRIVE_INACTIVE) + err = gpio_direction_output(mi->output_gpios[i], + !(mi->flags & GPIOKPF_ACTIVE_HIGH)); + else + err = gpio_direction_input(mi->output_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_configure failed for " + "output %d\n", mi->output_gpios[i]); + goto err_output_gpio_configure_failed; + } + } + for (i = 0; i < mi->ninputs; i++) { + err = gpio_request(mi->input_gpios[i], "gpio_kp_in"); + if (err) { + pr_err("gpiomatrix: gpio_request failed for " + "input %d\n", mi->input_gpios[i]); + goto err_request_input_gpio_failed; + } + err = gpio_direction_input(mi->input_gpios[i]); + if (err) { + pr_err("gpiomatrix: gpio_direction_input failed" + " for input %d\n", mi->input_gpios[i]); + goto err_gpio_direction_input_failed; + } + } + kp->current_output = mi->noutputs; + kp->key_state_changed = 1; + + hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + kp->timer.function = gpio_keypad_timer_func; + wakeup_source_init(&kp->wake_src, "gpio_kp"); + err = gpio_keypad_request_irqs(kp); + kp->use_irq = err == 0; + + pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for " + "%s%s in %s mode\n", input_devs->dev[0]->name, + (input_devs->count > 1) ? "..." : "", + kp->use_irq ? "interrupt" : "polling"); + + if (kp->use_irq) + __pm_stay_awake(&kp->wake_src); + hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL); + + return 0; + } + + err = 0; + kp = *data; + + if (kp->use_irq) + for (i = mi->noutputs - 1; i >= 0; i--) + free_irq(gpio_to_irq(mi->input_gpios[i]), kp); + + hrtimer_cancel(&kp->timer); + wakeup_source_trash(&kp->wake_src); + for (i = mi->noutputs - 1; i >= 0; i--) { +err_gpio_direction_input_failed: + gpio_free(mi->input_gpios[i]); +err_request_input_gpio_failed: + ; + } + for (i = mi->noutputs - 1; i >= 0; i--) { +err_output_gpio_configure_failed: + gpio_free(mi->output_gpios[i]); +err_request_output_gpio_failed: + ; + } +err_bad_keymap: + kfree(kp); +err_kp_alloc_failed: +err_invalid_platform_data: + return err; +} diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c new file mode 100644 index 000000000000..2aac2fad0a17 --- /dev/null +++ b/drivers/input/misc/gpio_output.c @@ -0,0 +1,97 @@ +/* drivers/input/misc/gpio_output.c + * + * Copyright (C) 2007 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int i;
+	struct gpio_event_output_info *oi;
+
+	oi = container_of(info, struct gpio_event_output_info, info);
+	if (type != oi->type)
+		return 0;
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+	for (i = 0; i < oi->keymap_size; i++)
+		if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+			gpio_set_value(oi->keymap[i].gpio, value);
+	return 0;
+}
+
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device index %d >= %d for key code %d\n",
+				       dev, input_devs->count,
+				       oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request failed for %d\n",
+				       oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_direction_output failed for %d\n",
+				       oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
err_gpio_request_failed:
+		;
+	}
err_bad_keymap:
+	return ret;
+}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8b8c123cae66..7a0fea9633fb 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -479,6 +479,21 @@ config DM_VERITY
 
 	  If unsure, say N.
 
+config DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
+	bool "Prefetch size 128"
+
+config DM_VERITY_HASH_PREFETCH_MIN_SIZE
+	int "Verity hash prefetch minimum size"
+	depends on DM_VERITY
+	range 1 4096
+	default 128 if DM_VERITY_HASH_PREFETCH_MIN_SIZE_128
+	default 1
+	---help---
+	  This sets the minimum number of hash blocks to prefetch for
+	  dm-verity. For devices such as eMMC, a larger prefetch size
+	  such as 128 can improve performance, at the cost of the extra
+	  memory used to keep more hashes in RAM.
+
 config DM_VERITY_FEC
 	bool "Verity forward error correction support"
 	depends on DM_VERITY
@@ -559,4 +574,51 @@ config DM_ZONED
 
 	  If unsure, say N.
 
+config DM_VERITY_AVB
+	tristate "Support AVB specific verity error behavior"
+	depends on DM_VERITY
+	---help---
+	  Enables Android Verified Boot platform-specific error
+	  behavior. In particular, it will modify the vbmeta partition
+	  specified on the kernel command line when a non-transient
+	  error occurs (followed by a panic).
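+
+	  If unsure, say N.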
+ +config DM_ANDROID_VERITY + bool "Android verity target support" + depends on BLK_DEV_DM=y + depends on DM_VERITY=y + depends on X509_CERTIFICATE_PARSER + depends on SYSTEM_TRUSTED_KEYRING + depends on CRYPTO_RSA + depends on KEYS + depends on ASYMMETRIC_KEY_TYPE + depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select DM_VERITY_HASH_PREFETCH_MIN_SIZE_128 + ---help--- + This device-mapper target is essentially a VERITY target. The + target is set up by reading the metadata contents piggybacked + onto the actual data blocks in the block device. The signature + of the metadata contents is verified against the key included + in the system keyring. Upon success, the underlying verity + target is set up. + +config DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + bool "Verity will validate blocks at most once" + depends on DM_VERITY + ---help--- + Enables the at_most_once option for dm-verity by default. + + Verify data blocks only the first time they are read from the + data device, rather than every time. This reduces the overhead + of dm-verity so that it can be used on systems that are memory + and/or CPU constrained. However, it provides a reduced level + of security because only offline tampering of the data device's + content will be detected, not online tampering. + + Hash blocks are still verified each time they are read from the + hash device, since their verification is less performance + critical than that of data blocks, and a hash block is not verified + again once all the data blocks it covers have been validated anyway. + + If unsure, say N. endif # MD diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 822f4e8753bc..8e371cd1d2ed 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -76,3 +76,11 @@ endif ifeq ($(CONFIG_DM_VERITY_FEC),y) dm-verity-objs += dm-verity-fec.o endif + +ifeq ($(CONFIG_DM_VERITY_AVB),y) +dm-verity-objs += dm-verity-avb.o +endif + +ifeq ($(CONFIG_DM_ANDROID_VERITY),y) +dm-verity-objs += dm-android-verity.o +endif diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c new file mode 100644 index 000000000000..20e05936551f --- /dev/null +++ b/drivers/md/dm-android-verity.c @@ -0,0 +1,925 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "dm-verity.h" +#include "dm-android-verity.h" + +static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH]; +static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH]; +static char buildvariant[BUILD_VARIANT]; + +static bool target_added; +static bool verity_enabled = true; +struct dentry *debug_dir; +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv); + +static struct target_type android_verity_target = { + .name = "android-verity", + .version = {1, 0, 0}, + .module = THIS_MODULE, + .ctr = android_verity_ctr, + .dtr = verity_dtr, + .map = verity_map, + .status = verity_status, + .prepare_ioctl = verity_prepare_ioctl, + .iterate_devices = verity_iterate_devices, + .io_hints = verity_io_hints, +}; + +static int __init verified_boot_state_param(char *line) +{ + strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate)); + return 1; +} + +__setup("androidboot.verifiedbootstate=", verified_boot_state_param); + +static int __init verity_mode_param(char *line) +{ + strlcpy(veritymode, line, sizeof(veritymode)); + return 1; +} + +__setup("androidboot.veritymode=", verity_mode_param); + +static int __init verity_keyid_param(char *line) +{ + strlcpy(veritykeyid, line, sizeof(veritykeyid)); + return 1; +} + +__setup("veritykeyid=", verity_keyid_param); + +static int __init verity_buildvariant(char *line) +{ + strlcpy(buildvariant, line, sizeof(buildvariant)); + return 1; +} + +__setup("buildvariant=", verity_buildvariant); + +static inline bool default_verity_key_id(void) +{ + return veritykeyid[0] != '\0'; +} + +static inline bool is_eng(void) +{ + static const char typeeng[] = "eng"; + + return !strncmp(buildvariant, typeeng, sizeof(typeeng)); +} + +static inline bool is_userdebug(void) +{ + static const char typeuserdebug[] = "userdebug"; + + return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug)); +} + +static inline bool is_unlocked(void) +{ + static const char unlocked[] = "orange"; + + return !strncmp(verifiedbootstate, unlocked, sizeof(unlocked)); +} + +static int read_block_dev(struct bio_read *payload, struct block_device *bdev, + sector_t offset, int length) +{ + struct bio *bio; + int err = 0, i; + + payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE); + + bio = bio_alloc(GFP_KERNEL, payload->number_of_pages); + if (!bio) { + DMERR("Error while allocating bio"); + return -ENOMEM; + } + + bio_set_dev(bio, bdev); + bio->bi_iter.bi_sector = offset; + bio_set_op_attrs(bio, REQ_OP_READ, 0); + + payload->page_io = kzalloc(sizeof(struct page *) * + payload->number_of_pages, GFP_KERNEL); + if (!payload->page_io) { + DMERR("page_io array alloc failed"); + err = -ENOMEM; + goto free_bio; + } + + for (i = 0; i < payload->number_of_pages; i++) { + payload->page_io[i] = alloc_page(GFP_KERNEL); + if (!payload->page_io[i]) { + DMERR("alloc_page failed"); + err = -ENOMEM; + goto free_pages; + } + if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) { + DMERR("bio_add_page error"); + err = -EIO; + goto free_pages; + } + } + + if (!submit_bio_wait(bio)) + /* success */ + goto free_bio; + DMERR("bio read failed"); + err = -EIO; + +free_pages: + for (i = 0; i < payload->number_of_pages; i++) + if (payload->page_io[i]) + 
__free_page(payload->page_io[i]); + kfree(payload->page_io); +free_bio: + bio_put(bio); + return err; +} + +static inline u64 fec_div_round_up(u64 x, u64 y) +{ + u64 remainder; + + return div64_u64_rem(x, y, &remainder) + + (remainder > 0 ? 1 : 0); +} + +static inline void populate_fec_metadata(struct fec_header *header, + struct fec_ecc_metadata *ecc) +{ + ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size), + FEC_BLOCK_SIZE); + ecc->roots = le32_to_cpu(header->roots); + ecc->start = le64_to_cpu(header->inp_size); +} + +static inline int validate_fec_header(struct fec_header *header, u64 offset) +{ + /* move offset to make the sanity check work for backup header + * as well. */ + offset -= offset % FEC_BLOCK_SIZE; + if (le32_to_cpu(header->magic) != FEC_MAGIC || + le32_to_cpu(header->version) != FEC_VERSION || + le32_to_cpu(header->size) != sizeof(struct fec_header) || + le32_to_cpu(header->roots) == 0 || + le32_to_cpu(header->roots) >= FEC_RSM) + return -EINVAL; + + return 0; +} + +static int extract_fec_header(dev_t dev, struct fec_header *fec, + struct fec_ecc_metadata *ecc) +{ + u64 device_size; + struct bio_read payload; + int i, err = 0; + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("bdev get error"); + return PTR_ERR(bdev); + } + + device_size = i_size_read(bdev->bd_inode); + + /* fec metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + */ + BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE); + /* 512 byte sector alignment */ + BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0); + + err = read_block_dev(&payload, bdev, (device_size - + FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto error; + } + + BUG_ON(sizeof(struct fec_header) > PAGE_SIZE); + memcpy(fec, page_address(payload.page_io[0]), + sizeof(*fec)); + + ecc->valid = true; + if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) { + /* Try the backup header */ + memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE + - sizeof(*fec) , + sizeof(*fec)); + if (validate_fec_header(fec, device_size - + sizeof(struct fec_header))) + ecc->valid = false; + } + + if (ecc->valid) + populate_fec_metadata(fec, ecc); + + for (i = 0; i < payload.number_of_pages; i++) + __free_page(payload.page_io[i]); + kfree(payload.page_io); + +error: + blkdev_put(bdev, FMODE_READ); + return err; +} +static void find_metadata_offset(struct fec_header *fec, + struct block_device *bdev, u64 *metadata_offset) +{ + u64 device_size; + + device_size = i_size_read(bdev->bd_inode); + + if (le32_to_cpu(fec->magic) == FEC_MAGIC) + *metadata_offset = le64_to_cpu(fec->inp_size) - + VERITY_METADATA_SIZE; + else + *metadata_offset = device_size - VERITY_METADATA_SIZE; +} + +static int find_size(dev_t dev, u64 *device_size) +{ + struct block_device *bdev; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return PTR_ERR(bdev); + } + + *device_size = i_size_read(bdev->bd_inode); + *device_size >>= SECTOR_SHIFT; + + DMINFO("blkdev size in sectors: %llu", *device_size); + blkdev_put(bdev, FMODE_READ); + return 0; +} + +static int verify_header(struct android_metadata_header *header) +{ + int retval = -EINVAL; + + if (is_userdebug() && le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE) + return VERITY_STATE_DISABLE; + + if (!(le32_to_cpu(header->magic_number) == + 
VERITY_METADATA_MAGIC_NUMBER) || + (le32_to_cpu(header->magic_number) == + VERITY_METADATA_MAGIC_DISABLE)) { + DMERR("Incorrect magic number"); + return retval; + } + + if (le32_to_cpu(header->protocol_version) != + VERITY_METADATA_VERSION) { + DMERR("Unsupported version %u", + le32_to_cpu(header->protocol_version)); + return retval; + } + + return 0; +} + +static int extract_metadata(dev_t dev, struct fec_header *fec, + struct android_metadata **metadata, + bool *verity_enabled) +{ + struct block_device *bdev; + struct android_metadata_header *header; + int i; + u32 table_length, copy_length, offset; + u64 metadata_offset; + struct bio_read payload; + int err = 0; + + bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL); + + if (IS_ERR_OR_NULL(bdev)) { + DMERR("blkdev_get_by_dev failed"); + return -ENODEV; + } + + find_metadata_offset(fec, bdev, &metadata_offset); + + /* Verity metadata size is a power of 2 and PAGE_SIZE + * is a power of 2 as well. + * PAGE_SIZE is also a multiple of 512 bytes. + */ + if (VERITY_METADATA_SIZE > PAGE_SIZE) + BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0); + /* 512 byte sector alignment */ + BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0); + + err = read_block_dev(&payload, bdev, metadata_offset / + (1 << SECTOR_SHIFT), VERITY_METADATA_SIZE); + if (err) { + DMERR("Error while reading verity metadata"); + goto blkdev_release; + } + + header = kzalloc(sizeof(*header), GFP_KERNEL); + if (!header) { + DMERR("kzalloc failed for header"); + err = -ENOMEM; + goto free_payload; + } + + memcpy(header, page_address(payload.page_io[0]), + sizeof(*header)); + + DMINFO("bio magic_number:%u protocol_version:%d table_length:%u", + le32_to_cpu(header->magic_number), + le32_to_cpu(header->protocol_version), + le32_to_cpu(header->table_length)); + + err = verify_header(header); + + if (err == VERITY_STATE_DISABLE) { + DMERR("Mounting root with verity disabled"); + *verity_enabled = false; + /* we would still have to read the metadata to figure out + * the data blocks size. Or may be could map the entire + * partition similar to mounting the device. + * + * Reset error as well as the verity_enabled flag is changed. 
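+ *
+ * For reference: VERITY_METADATA_MAGIC_DISABLE (0x46464f56) is the
+ * little-endian encoding of the ASCII bytes "VOFF", presumably stamped
+ * over the metadata magic by the disable-verity tooling. With
+ * verity_enabled cleared, the constructor later sets this target up as
+ * a plain linear mapping instead of a verity one.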
+ */ + err = 0; + } else if (err) + goto free_header; + + *metadata = kzalloc(sizeof(**metadata), GFP_KERNEL); + if (!*metadata) { + DMERR("kzalloc for metadata failed"); + err = -ENOMEM; + goto free_header; + } + + (*metadata)->header = header; + table_length = le32_to_cpu(header->table_length); + + if (table_length == 0 || + table_length > (VERITY_METADATA_SIZE - + sizeof(struct android_metadata_header))) { + DMERR("table_length too long"); + err = -EINVAL; + goto free_metadata; + } + + (*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL); + + if (!(*metadata)->verity_table) { + DMERR("kzalloc verity_table failed"); + err = -ENOMEM; + goto free_metadata; + } + + if (sizeof(struct android_metadata_header) + + table_length <= PAGE_SIZE) { + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + table_length); + } else { + copy_length = PAGE_SIZE - + sizeof(struct android_metadata_header); + memcpy((*metadata)->verity_table, + page_address(payload.page_io[0]) + + sizeof(struct android_metadata_header), + copy_length); + table_length -= copy_length; + offset = copy_length; + i = 1; + while (table_length != 0) { + if (table_length > PAGE_SIZE) { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + PAGE_SIZE); + offset += PAGE_SIZE; + table_length -= PAGE_SIZE; + } else { + memcpy((*metadata)->verity_table + offset, + page_address(payload.page_io[i]), + table_length); + table_length = 0; + } + i++; + } + } + (*metadata)->verity_table[table_length] = '\0'; + + DMINFO("verity_table: %s", (*metadata)->verity_table); + goto free_payload; + +free_metadata: + kfree(*metadata); +free_header: + kfree(header); +free_payload: + for (i = 0; i < payload.number_of_pages; i++) + if (payload.page_io[i]) + __free_page(payload.page_io[i]); + kfree(payload.page_io); +blkdev_release: + blkdev_put(bdev, FMODE_READ); + return err; +} + +/* helper functions to extract properties from dts */ +const char *find_dt_value(const char *name) +{ + struct device_node *firmware; + const char *value; + + firmware = of_find_node_by_path("/firmware/android"); + if (!firmware) + return NULL; + value = of_get_property(firmware, name, NULL); + of_node_put(firmware); + + return value; +} + +static int verity_mode(void) +{ + static const char enforcing[] = "enforcing"; + static const char verified_mode_prop[] = "veritymode"; + const char *value; + + value = find_dt_value(verified_mode_prop); + if (!value) + value = veritymode; + if (!strncmp(value, enforcing, sizeof(enforcing) - 1)) + return DM_VERITY_MODE_RESTART; + + return DM_VERITY_MODE_EIO; +} + +static void handle_error(void) +{ + int mode = verity_mode(); + if (mode == DM_VERITY_MODE_RESTART) { + DMERR("triggering restart"); + kernel_restart("dm-verity device corrupted"); + } else { + DMERR("Mounting verity root failed"); + } +} + +static struct public_key_signature *table_make_digest( + enum hash_algo hash, + const void *table, + unsigned long table_len) +{ + struct public_key_signature *pks = NULL; + struct crypto_shash *tfm; + struct shash_desc *desc; + size_t digest_size, desc_size; + int ret; + + /* Allocate the hashing algorithm we're going to need and find out how + * big the hash operational data will be. 
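+ *
+ * The single allocation below packs three regions back to back; the
+ * layout follows directly from the pointer arithmetic underneath:
+ *
+ *   [ struct public_key_signature | shash_desc + tfm context | digest ]
+ *        (pks)                        (desc_size bytes)       (digest_size)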
+ */ + tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0); + if (IS_ERR(tfm)) + return ERR_CAST(tfm); + + desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); + digest_size = crypto_shash_digestsize(tfm); + + /* We allocate the hash operational data storage on the end of out + * context data and the digest output buffer on the end of that. + */ + ret = -ENOMEM; + pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL); + if (!pks) + goto error; + + pks->pkey_algo = "rsa"; + pks->hash_algo = hash_algo_name[hash]; + pks->digest = (u8 *)pks + sizeof(*pks) + desc_size; + pks->digest_size = digest_size; + + desc = (struct shash_desc *)(pks + 1); + desc->tfm = tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + ret = crypto_shash_init(desc); + if (ret < 0) + goto error; + + ret = crypto_shash_finup(desc, table, table_len, pks->digest); + if (ret < 0) + goto error; + + crypto_free_shash(tfm); + return pks; + +error: + kfree(pks); + crypto_free_shash(tfm); + return ERR_PTR(ret); +} + + +static int verify_verity_signature(char *key_id, + struct android_metadata *metadata) +{ + struct public_key_signature *pks = NULL; + int retval = -EINVAL; + + if (!key_id) + goto error; + + pks = table_make_digest(HASH_ALGO_SHA256, + (const void *)metadata->verity_table, + le32_to_cpu(metadata->header->table_length)); + if (IS_ERR(pks)) { + DMERR("hashing failed"); + retval = PTR_ERR(pks); + pks = NULL; + goto error; + } + + pks->s = kmemdup(&metadata->header->signature[0], RSANUMBYTES, GFP_KERNEL); + if (!pks->s) { + DMERR("Error allocating memory for signature"); + goto error; + } + pks->s_size = RSANUMBYTES; + + retval = verify_signature_one(pks, NULL, key_id); + kfree(pks->s); +error: + kfree(pks); + return retval; +} + +static inline bool test_mult_overflow(sector_t a, u32 b) +{ + sector_t r = (sector_t)~0ULL; + + sector_div(r, b); + return a > r; +} + +static int add_as_linear_device(struct dm_target *ti, char *dev) +{ + /*Move to linear mapping defines*/ + char *linear_table_args[DM_LINEAR_ARGS] = {dev, + DM_LINEAR_TARGET_OFFSET}; + int err = 0; + + android_verity_target.dtr = dm_linear_dtr, + android_verity_target.map = dm_linear_map, + android_verity_target.status = dm_linear_status, + android_verity_target.end_io = dm_linear_end_io, + android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl, + android_verity_target.iterate_devices = dm_linear_iterate_devices, + android_verity_target.direct_access = dm_linear_dax_direct_access, + android_verity_target.dax_copy_from_iter = dm_linear_dax_copy_from_iter, + android_verity_target.io_hints = NULL; + + set_disk_ro(dm_disk(dm_table_get_md(ti->table)), 0); + + err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args); + + if (!err) { + DMINFO("Added android-verity as a linear target"); + target_added = true; + } else + DMERR("Failed to add android-verity as linear target"); + + return err; +} + +static int create_linear_device(struct dm_target *ti, dev_t dev, + char *target_device) +{ + u64 device_size = 0; + int err = find_size(dev, &device_size); + + if (err) { + DMERR("error finding bdev size"); + handle_error(); + return err; + } + + ti->len = device_size; + err = add_as_linear_device(ti, target_device); + if (err) { + handle_error(); + return err; + } + verity_enabled = false; + return 0; +} + +/* + * Target parameters: + * Key id of the public key in the system keyring. + * Verity metadata's signature would be verified against + * this. If the key id contains spaces, replace them + * with '#'. 
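+ * (For instance, an illustrative key id "Android: verity key" would be
+ * passed as "Android:#verity#key"; the constructor's strreplace() maps
+ * '#' back to ' ' before the keyring lookup.)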
+ * The block device for which dm-verity is being setup. + */ +static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +{ + dev_t uninitialized_var(dev); + struct android_metadata *metadata = NULL; + int err = 0, i, mode; + char *key_id = NULL, *table_ptr, dummy, *target_device; + char *verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS]; + /* One for specifying number of opt args and one for mode */ + sector_t data_sectors; + u32 data_block_size; + unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS; + struct fec_header uninitialized_var(fec); + struct fec_ecc_metadata uninitialized_var(ecc); + char buf[FEC_ARG_LENGTH], *buf_ptr; + unsigned long long tmpll; + + if (argc == 1) { + /* Use the default keyid */ + if (default_verity_key_id()) + key_id = veritykeyid; + else if (!is_eng()) { + DMERR("veritykeyid= is not set"); + handle_error(); + return -EINVAL; + } + target_device = argv[0]; + } else if (argc == 2) { + key_id = argv[0]; + target_device = argv[1]; + } else { + DMERR("Incorrect number of arguments"); + handle_error(); + return -EINVAL; + } + + dev = name_to_dev_t(target_device); + if (!dev) { + DMERR("no dev found for %s", target_device); + handle_error(); + return -EINVAL; + } + + if (is_eng()) + return create_linear_device(ti, dev, target_device); + + strreplace(key_id, '#', ' '); + + DMINFO("key:%s dev:%s", key_id, target_device); + + if (extract_fec_header(dev, &fec, &ecc)) { + DMERR("Error while extracting fec header"); + handle_error(); + return -EINVAL; + } + + err = extract_metadata(dev, &fec, &metadata, &verity_enabled); + + if (err) { + /* Allow invalid metadata when the device is unlocked */ + if (is_unlocked()) { + DMWARN("Allow invalid metadata when unlocked"); + return create_linear_device(ti, dev, target_device); + } + DMERR("Error while extracting metadata"); + handle_error(); + goto free_metadata; + } + + if (verity_enabled) { + err = verify_verity_signature(key_id, metadata); + + if (err) { + DMERR("Signature verification failed"); + handle_error(); + goto free_metadata; + } else + DMINFO("Signature verification success"); + } + + table_ptr = metadata->verity_table; + + for (i = 0; i < VERITY_TABLE_ARGS; i++) { + verity_table_args[i] = strsep(&table_ptr, " "); + if (verity_table_args[i] == NULL) + break; + } + + if (i != VERITY_TABLE_ARGS) { + DMERR("Verity table not in the expected format"); + err = -EINVAL; + handle_error(); + goto free_metadata; + } + + if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (tmpll > ULONG_MAX) { + DMERR(" too large. 
Forgot to turn on CONFIG_LBDAF?"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + data_sectors = tmpll; + + if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy) + != 1) { + DMERR("Verity table not in the expected format"); + handle_error(); + err = -EINVAL; + goto free_metadata; + } + + if (test_mult_overflow(data_sectors, data_block_size >> + SECTOR_SHIFT)) { + DMERR("data_sectors too large"); + handle_error(); + err = -EOVERFLOW; + goto free_metadata; + } + + data_sectors *= data_block_size >> SECTOR_SHIFT; + DMINFO("Data sectors %llu", (unsigned long long)data_sectors); + + /* update target length */ + ti->len = data_sectors; + + /* Setup linear target and free */ + if (!verity_enabled) { + err = add_as_linear_device(ti, target_device); + goto free_metadata; + } + + /*substitute data_dev and hash_dev*/ + verity_table_args[1] = target_device; + verity_table_args[2] = target_device; + + mode = verity_mode(); + + if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) { + if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u %s " VERITY_TABLE_OPT_FEC_FORMAT, + 1 + VERITY_TABLE_OPT_FEC_ARGS, + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : + VERITY_TABLE_OPT_LOGGING, + target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, + "%u " VERITY_TABLE_OPT_FEC_FORMAT, + VERITY_TABLE_OPT_FEC_ARGS, target_device, + ecc.start / FEC_BLOCK_SIZE, ecc.blocks, + ecc.roots); + } + } else if (mode) { + err = snprintf(buf, FEC_ARG_LENGTH, + "2 " VERITY_TABLE_OPT_IGNZERO " %s", + mode == DM_VERITY_MODE_RESTART ? + VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING); + } else { + err = snprintf(buf, FEC_ARG_LENGTH, "1 %s", + "ignore_zero_blocks"); + } + + if (err < 0 || err >= FEC_ARG_LENGTH) + goto free_metadata; + + buf_ptr = buf; + + for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS + + VERITY_TABLE_OPT_FEC_ARGS + 2); i++) { + verity_table_args[i] = strsep(&buf_ptr, " "); + if (verity_table_args[i] == NULL) { + no_of_args = i; + break; + } + } + + err = verity_ctr(ti, no_of_args, verity_table_args); + if (err) { + DMERR("android-verity failed to create a verity target"); + } else { + target_added = true; + DMINFO("android-verity created as verity target"); + } + +free_metadata: + if (metadata) { + kfree(metadata->header); + kfree(metadata->verity_table); + } + kfree(metadata); + return err; +} + +static int __init dm_android_verity_init(void) +{ + int r; + struct dentry *file; + + r = dm_register_target(&android_verity_target); + if (r < 0) + DMERR("register failed %d", r); + + /* Tracks the status of the last added target */ + debug_dir = debugfs_create_dir("android_verity", NULL); + + if (IS_ERR_OR_NULL(debug_dir)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + goto end; + } + + file = debugfs_create_bool("target_added", S_IRUGO, debug_dir, + &target_added); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + goto end; + } + + file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir, + &verity_enabled); + + if (IS_ERR_OR_NULL(file)) { + DMERR("Cannot create android_verity debugfs directory: %ld", + PTR_ERR(debug_dir)); + debugfs_remove_recursive(debug_dir); + } + +end: + return r; +} + +static void __exit dm_android_verity_exit(void) +{ + if (!IS_ERR_OR_NULL(debug_dir)) + debugfs_remove_recursive(debug_dir); + + 
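+	/* Finally drop the target registered in dm_android_verity_init();
+	 * the debugfs entries that mirror its state were removed above.
+	 */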
dm_unregister_target(&android_verity_target); +} + +module_init(dm_android_verity_init); +module_exit(dm_android_verity_exit); diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h new file mode 100644 index 000000000000..8f6f5e777187 --- /dev/null +++ b/drivers/md/dm-android-verity.h @@ -0,0 +1,132 @@ +/* + * Copyright (C) 2015 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef DM_ANDROID_VERITY_H +#define DM_ANDROID_VERITY_H + +#include + +#define RSANUMBYTES 256 +#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001 +#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56 +#define VERITY_METADATA_VERSION 0 +#define VERITY_STATE_DISABLE 1 +#define DATA_BLOCK_SIZE (4 * 1024) +#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE) +#define VERITY_TABLE_ARGS 10 +#define VERITY_COMMANDLINE_PARAM_LENGTH 20 +#define BUILD_VARIANT 20 + +/* + * <subject>:<sha1-id> is the format for the identifier. + * subject can either be the Common Name(CN) + Organization Name(O) or + * just the CN if it is prefixed with O + * From https://tools.ietf.org/html/rfc5280#appendix-A + * ub-organization-name-length INTEGER ::= 64 + * ub-common-name-length INTEGER ::= 64 + * + * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278 + * ctx->o_size + 2 + ctx->cn_size + 1 + * + 41 characters for ":" and sha1 id + * 64 + 2 + 64 + 1 + 1 + 40 (172) + * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters. + */ +#define VERITY_DEFAULT_KEY_ID_LENGTH 200 + +#define FEC_MAGIC 0xFECFECFE +#define FEC_BLOCK_SIZE (4 * 1024) +#define FEC_VERSION 0 +#define FEC_RSM 255 +#define FEC_ARG_LENGTH 300 + +#define VERITY_TABLE_OPT_RESTART "restart_on_corruption" +#define VERITY_TABLE_OPT_LOGGING "ignore_corruption" +#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks" + +#define VERITY_TABLE_OPT_FEC_FORMAT \ + "use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks" +#define VERITY_TABLE_OPT_FEC_ARGS 9 + +#define VERITY_DEBUG 0 + +#define DM_MSG_PREFIX "android-verity" + +#define DM_LINEAR_ARGS 2 +#define DM_LINEAR_TARGET_OFFSET "0" + +/* + * There can be two formats.
+ * if fec is present + * + * if fec is not present + * + */ +struct fec_header { + __le32 magic; + __le32 version; + __le32 size; + __le32 roots; + __le32 fec_size; + __le64 inp_size; + u8 hash[SHA256_DIGEST_SIZE]; +} __attribute__((packed)); + +struct android_metadata_header { + __le32 magic_number; + __le32 protocol_version; + char signature[RSANUMBYTES]; + __le32 table_length; +}; + +struct android_metadata { + struct android_metadata_header *header; + char *verity_table; +}; + +struct fec_ecc_metadata { + bool valid; + u32 roots; + u64 blocks; + u64 rounds; + u64 start; +}; + +struct bio_read { + struct page **page_io; + int number_of_pages; +}; + +extern struct target_type linear_target; + +extern void dm_linear_dtr(struct dm_target *ti); +extern int dm_linear_map(struct dm_target *ti, struct bio *bio); +extern int dm_linear_end_io(struct dm_target *ti, struct bio *bio, + blk_status_t *error); +extern void dm_linear_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int dm_linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev); +extern int dm_linear_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv); +#if IS_ENABLED(CONFIG_DAX_DRIVER) +extern long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, + long nr_pages, void **kaddr, + pfn_t *pfn); +extern size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, + void *addr, size_t bytes, struct iov_iter *i); +#else +#define dm_linear_dax_direct_access NULL +#define dm_linear_dax_copy_from_iter NULL +#endif +#endif /* DM_ANDROID_VERITY_H */ diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index b810ea77e6b1..350aa7a7a591 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1993,6 +1993,45 @@ void dm_interface_exit(void) dm_hash_exit(); } + +/** + * dm_ioctl_export - Permanently export a mapped device via the ioctl interface + * @md: Pointer to mapped_device + * @name: Buffer (size DM_NAME_LEN) for name + * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired + */ +int dm_ioctl_export(struct mapped_device *md, const char *name, + const char *uuid) +{ + int r = 0; + struct hash_cell *hc; + + if (!md) { + r = -ENXIO; + goto out; + } + + /* The name and uuid can only be set once. */ + mutex_lock(&dm_hash_cells_mutex); + hc = dm_get_mdptr(md); + mutex_unlock(&dm_hash_cells_mutex); + if (hc) { + DMERR("%s: already exported", dm_device_name(md)); + r = -ENXIO; + goto out; + } + + r = dm_hash_insert(name, uuid, md); + if (r) { + DMERR("%s: could not bind to '%s'", dm_device_name(md), name); + goto out; + } + + /* Let udev know we've changed. 
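+ * (a KOBJ_CHANGE uevent carrying the current dm event number)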
*/ + dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md)); +out: + return r; +} /** * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers * @md: Pointer to mapped_device diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index d10964d41fd7..ae9d111f519f 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -26,7 +26,7 @@ struct linear_c { /* * Construct a linear mapping: */ -static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) +int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct linear_c *lc; unsigned long long tmp; @@ -70,7 +70,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv) return ret; } -static void linear_dtr(struct dm_target *ti) +void dm_linear_dtr(struct dm_target *ti) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -95,14 +95,14 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) linear_map_sector(ti, bio->bi_iter.bi_sector); } -static int linear_map(struct dm_target *ti, struct bio *bio) +int dm_linear_map(struct dm_target *ti, struct bio *bio) { linear_map_bio(ti, bio); return DM_MAPIO_REMAPPED; } -static int linear_end_io(struct dm_target *ti, struct bio *bio, +int dm_linear_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error) { struct linear_c *lc = ti->private; @@ -112,8 +112,9 @@ static int linear_end_io(struct dm_target *ti, struct bio *bio, return DM_ENDIO_DONE; } +EXPORT_SYMBOL_GPL(dm_linear_end_io); -static void linear_status(struct dm_target *ti, status_type_t type, +void dm_linear_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct linear_c *lc = (struct linear_c *) ti->private; @@ -130,7 +131,7 @@ static void linear_status(struct dm_target *ti, status_type_t type, } } -static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +int dm_linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct linear_c *lc = (struct linear_c *) ti->private; struct dm_dev *dev = lc->dev; @@ -146,7 +147,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev return 0; } -static int linear_iterate_devices(struct dm_target *ti, +int dm_linear_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct linear_c *lc = ti->private; @@ -155,7 +156,7 @@ static int linear_iterate_devices(struct dm_target *ti, } #if IS_ENABLED(CONFIG_DAX_DRIVER) -static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, +long dm_linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { long ret; @@ -170,8 +171,9 @@ static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff, return ret; return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn); } +EXPORT_SYMBOL_GPL(dm_linear_dax_direct_access); -static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, +size_t dm_linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i) { struct linear_c *lc = ti->private; @@ -184,8 +186,9 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff, return 0; return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i); } +EXPORT_SYMBOL_GPL(dm_linear_dax_copy_from_iter); -static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, +static size_t dm_linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, void 
*addr, size_t bytes, struct iov_iter *i) { struct linear_c *lc = ti->private; @@ -200,9 +203,9 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, } #else -#define linear_dax_direct_access NULL -#define linear_dax_copy_from_iter NULL -#define linear_dax_copy_to_iter NULL +#define dm_linear_dax_direct_access NULL +#define dm_linear_dax_copy_from_iter NULL +#define dm_linear_dax_copy_to_iter NULL #endif static struct target_type linear_target = { @@ -210,16 +213,16 @@ static struct target_type linear_target = { .version = {1, 4, 0}, .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM, .module = THIS_MODULE, - .ctr = linear_ctr, - .dtr = linear_dtr, - .map = linear_map, - .end_io = linear_end_io, - .status = linear_status, - .prepare_ioctl = linear_prepare_ioctl, - .iterate_devices = linear_iterate_devices, - .direct_access = linear_dax_direct_access, - .dax_copy_from_iter = linear_dax_copy_from_iter, - .dax_copy_to_iter = linear_dax_copy_to_iter, + .ctr = dm_linear_ctr, + .dtr = dm_linear_dtr, + .map = dm_linear_map, + .status = dm_linear_status, + .end_io = dm_linear_end_io, + .prepare_ioctl = dm_linear_prepare_ioctl, + .iterate_devices = dm_linear_iterate_devices, + .direct_access = dm_linear_dax_direct_access, + .dax_copy_from_iter = dm_linear_dax_copy_from_iter, + .dax_copy_to_iter = dm_linear_dax_copy_to_iter, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 3d0e2c198f06..485626d5b9d7 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/md/dm-verity-avb.c b/drivers/md/dm-verity-avb.c new file mode 100644 index 000000000000..a9f102aa379e --- /dev/null +++ b/drivers/md/dm-verity-avb.c @@ -0,0 +1,229 @@ +/* + * Copyright (C) 2017 Google. + * + * This file is released under the GPLv2. + * + * Based on drivers/md/dm-verity-chromeos.c + */ + +#include +#include +#include + +#define DM_MSG_PREFIX "verity-avb" + +/* Set via module parameters. */ +static char avb_vbmeta_device[64]; +static char avb_invalidate_on_error[4]; + +static void invalidate_vbmeta_endio(struct bio *bio) +{ + if (bio->bi_status) + DMERR("invalidate_vbmeta_endio: error %d", bio->bi_status); + complete(bio->bi_private); +} + +static int invalidate_vbmeta_submit(struct bio *bio, + struct block_device *bdev, + int op, int access_last_sector, + struct page *page) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + bio->bi_private = &wait; + bio->bi_end_io = invalidate_vbmeta_endio; + bio_set_dev(bio, bdev); + bio_set_op_attrs(bio, op, REQ_SYNC); + + bio->bi_iter.bi_sector = 0; + if (access_last_sector) { + sector_t last_sector; + + last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1; + bio->bi_iter.bi_sector = last_sector; + } + if (!bio_add_page(bio, page, PAGE_SIZE, 0)) { + DMERR("invalidate_vbmeta_submit: bio_add_page error"); + return -EIO; + } + + submit_bio(bio); + /* Wait up to 2 seconds for completion or fail. */ + if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000))) + return -EIO; + return 0; +} + +static int invalidate_vbmeta(dev_t vbmeta_devt) +{ + int ret = 0; + struct block_device *bdev; + struct bio *bio; + struct page *page; + fmode_t dev_mode; + /* Ensure we do synchronous unblocked I/O. We may also need + * sync_bdev() on completion, but it really shouldn't. 
+ */ + int access_last_sector = 0; + + DMINFO("invalidate_vbmeta: acting on device %d:%d", + MAJOR(vbmeta_devt), MINOR(vbmeta_devt)); + + /* First we open the device for reading. */ + dev_mode = FMODE_READ | FMODE_EXCL; + bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode, + invalidate_vbmeta); + if (IS_ERR(bdev)) { + DMERR("invalidate_kernel: could not open device for reading"); + dev_mode = 0; + ret = -ENOENT; + goto failed_to_read; + } + + bio = bio_alloc(GFP_NOIO, 1); + if (!bio) { + ret = -ENOMEM; + goto failed_bio_alloc; + } + + page = alloc_page(GFP_NOIO); + if (!page) { + ret = -ENOMEM; + goto failed_to_alloc_page; + } + + access_last_sector = 0; + ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ, + access_last_sector, page); + if (ret) { + DMERR("invalidate_vbmeta: error reading"); + goto failed_to_submit_read; + } + + /* We have a page. Let's make sure it looks right. */ + if (memcmp("AVB0", page_address(page), 4) == 0) { + /* Stamp it. */ + memcpy(page_address(page), "AVE0", 4); + DMINFO("invalidate_vbmeta: found vbmeta partition"); + } else { + /* Could be this is on a AVB footer, check. Also, since the + * AVB footer is in the last 64 bytes, adjust for the fact that + * we're dealing with 512-byte sectors. + */ + size_t offset = (1<bi_remaining. + */ + bio_reset(bio); + + ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_WRITE, + access_last_sector, page); + if (ret) { + DMERR("invalidate_vbmeta: error writing"); + goto failed_to_submit_write; + } + + DMERR("invalidate_vbmeta: completed."); + ret = 0; +failed_to_submit_write: +failed_to_write: +invalid_header: + __free_page(page); +failed_to_submit_read: + /* Technically, we'll leak a page with the pending bio, but + * we're about to reboot anyway. + */ +failed_to_alloc_page: + bio_put(bio); +failed_bio_alloc: + if (dev_mode) + blkdev_put(bdev, dev_mode); +failed_to_read: + return ret; +} + +void dm_verity_avb_error_handler(void) +{ + dev_t dev; + + DMINFO("AVB error handler called for %s", avb_vbmeta_device); + + if (strcmp(avb_invalidate_on_error, "yes") != 0) { + DMINFO("Not configured to invalidate"); + return; + } + + if (avb_vbmeta_device[0] == '\0') { + DMERR("avb_vbmeta_device parameter not set"); + goto fail_no_dev; + } + + dev = name_to_dev_t(avb_vbmeta_device); + if (!dev) { + DMERR("No matching partition for device: %s", + avb_vbmeta_device); + goto fail_no_dev; + } + + invalidate_vbmeta(dev); + +fail_no_dev: + ; +} + +static int __init dm_verity_avb_init(void) +{ + DMINFO("AVB error handler initialized with vbmeta device: %s", + avb_vbmeta_device); + return 0; +} + +static void __exit dm_verity_avb_exit(void) +{ +} + +module_init(dm_verity_avb_init); +module_exit(dm_verity_avb_exit); + +MODULE_AUTHOR("David Zeuthen "); +MODULE_DESCRIPTION("AVB-specific error handler for dm-verity"); +MODULE_LICENSE("GPL"); + +/* Declare parameter with no module prefix */ +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "androidboot.vbmeta." 
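+/*
+ * With MODULE_PARAM_PREFIX redefined above, the two parameters below are
+ * picked up from the kernel command line as androidboot.vbmeta.device=
+ * and androidboot.vbmeta.invalidate_on_error=, both of which the boot
+ * loader is expected to append at boot.
+ */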
+module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0); +module_param_string(invalidate_on_error, avb_invalidate_on_error, + sizeof(avb_invalidate_on_error), 0); diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 684af08d0747..8306ee0b2d0c 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -11,6 +11,7 @@ #include "dm-verity-fec.h" #include +#include #define DM_MSG_PREFIX "verity-fec" @@ -175,9 +176,11 @@ static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio, if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); - else if (r > 0) + else if (r > 0) { DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", v->data_dev->name, (unsigned long long)rsb, r); + atomic_add_unless(&v->fec->corrected, 1, INT_MAX); + } return r; } @@ -545,6 +548,7 @@ unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz, void verity_fec_dtr(struct dm_verity *v) { struct dm_verity_fec *f = v->fec; + struct kobject *kobj = &f->kobj_holder.kobj; if (!verity_fec_is_enabled(v)) goto out; @@ -561,6 +565,12 @@ void verity_fec_dtr(struct dm_verity *v) if (f->dev) dm_put_device(v->ti, f->dev); + + if (kobj->state_initialized) { + kobject_put(kobj); + wait_for_completion(dm_get_completion_from_kobject(kobj)); + } + out: kfree(f); v->fec = NULL; @@ -649,6 +659,28 @@ int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v, return 0; } +static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec, + kobj_holder.kobj); + + return sprintf(buf, "%d\n", atomic_read(&f->corrected)); +} + +static struct kobj_attribute attr_corrected = __ATTR_RO(corrected); + +static struct attribute *fec_attrs[] = { + &attr_corrected.attr, + NULL +}; + +static struct kobj_type fec_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = fec_attrs, + .release = dm_kobject_release +}; + /* * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr. */ @@ -672,8 +704,10 @@ int verity_fec_ctr_alloc(struct dm_verity *v) */ int verity_fec_ctr(struct dm_verity *v) { + int r; struct dm_verity_fec *f = v->fec; struct dm_target *ti = v->ti; + struct mapped_device *md = dm_table_get_md(ti->table); u64 hash_blocks; int ret; @@ -682,6 +716,16 @@ int verity_fec_ctr(struct dm_verity *v) return 0; } + /* Create a kobject and sysfs attributes */ + init_completion(&f->kobj_holder.completion); + + r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype, + &disk_to_dev(dm_disk(md))->kobj, "%s", "fec"); + if (r) { + ti->error = "Cannot create kobject"; + return r; + } + /* * FEC is computed over data blocks, possible metadata, and * hash blocks. 
In other words, FEC covers total of fec_blocks diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 6ad803b2b36c..93af41777b4f 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -12,6 +12,8 @@ #ifndef DM_VERITY_FEC_H #define DM_VERITY_FEC_H +#include "dm.h" +#include "dm-core.h" #include "dm-verity.h" #include @@ -51,6 +53,8 @@ struct dm_verity_fec { mempool_t extra_pool; /* mempool for extra buffers */ mempool_t output_pool; /* mempool for output */ struct kmem_cache *cache; /* cache for buffers */ + atomic_t corrected; /* corrected errors */ + struct dm_kobject_holder kobj_holder; /* for sysfs attributes */ }; /* per-bio data */ diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index fc65f0dedf7f..ea131ea0d8ea 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -251,8 +251,12 @@ static int verity_handle_err(struct dm_verity *v, enum verity_block_type type, if (v->mode == DM_VERITY_MODE_LOGGING) return 0; - if (v->mode == DM_VERITY_MODE_RESTART) + if (v->mode == DM_VERITY_MODE_RESTART) { +#ifdef CONFIG_DM_VERITY_AVB + dm_verity_avb_error_handler(); +#endif kernel_restart("dm-verity device corrupted"); + } return 1; } @@ -580,6 +584,7 @@ static void verity_prefetch_io(struct work_struct *work) container_of(work, struct dm_verity_prefetch_work, work); struct dm_verity *v = pw->v; int i; + sector_t prefetch_size; for (i = v->levels - 2; i >= 0; i--) { sector_t hash_block_start; @@ -602,8 +607,14 @@ static void verity_prefetch_io(struct work_struct *work) hash_block_end = v->hash_blocks - 1; } no_prefetch_cluster: + // for emmc, it is more efficient to send bigger read + prefetch_size = max((sector_t)CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE, + hash_block_end - hash_block_start + 1); + if ((hash_block_start + prefetch_size) >= (v->hash_start + v->hash_blocks)) { + prefetch_size = hash_block_end - hash_block_start + 1; + } dm_bufio_prefetch(v->bufio, hash_block_start, - hash_block_end - hash_block_start + 1); + prefetch_size); } kfree(pw); @@ -630,7 +641,7 @@ static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io) * Bio map function. It allocates dm_verity_io structure and bio vector and * fills them. Then it issues prefetches and the I/O. 
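+ *
+ * (With this patch, verity_prefetch_io widens each hash prefetch to at
+ * least CONFIG_DM_VERITY_HASH_PREFETCH_MIN_SIZE blocks, falling back to
+ * the exact range when widening would run past the end of the hash area.)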
*/ -static int verity_map(struct dm_target *ti, struct bio *bio) +int verity_map(struct dm_target *ti, struct bio *bio) { struct dm_verity *v = ti->private; struct dm_verity_io *io; @@ -675,7 +686,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio) /* * Status: V (valid) or C (corruption found) */ -static void verity_status(struct dm_target *ti, status_type_t type, +void verity_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct dm_verity *v = ti->private; @@ -739,7 +750,7 @@ static void verity_status(struct dm_target *ti, status_type_t type, } } -static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) +int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev) { struct dm_verity *v = ti->private; @@ -751,7 +762,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev return 0; } -static int verity_iterate_devices(struct dm_target *ti, +int verity_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct dm_verity *v = ti->private; @@ -759,7 +770,7 @@ static int verity_iterate_devices(struct dm_target *ti, return fn(ti, v->data_dev, v->data_start, ti->len, data); } -static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) +void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct dm_verity *v = ti->private; @@ -772,7 +783,7 @@ static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, limits->logical_block_size); } -static void verity_dtr(struct dm_target *ti) +void verity_dtr(struct dm_target *ti) { struct dm_verity *v = ti->private; @@ -927,7 +938,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v) * * Hex string or "-" if no salt. 
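+ *
+ * An illustrative table line as assembled by android_verity_ctr
+ * (hypothetical devices; digest and salt left as placeholders):
+ *   1 /dev/sda34 /dev/sda34 4096 4096 262144 262145 sha256 <digest> <salt>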
*/ -static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) +int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) { struct dm_verity *v; struct dm_arg_set as; @@ -1091,6 +1102,14 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } +#ifdef CONFIG_DM_ANDROID_VERITY_AT_MOST_ONCE_DEFAULT_ENABLED + if (!v->validated_blocks) { + r = verity_alloc_most_once(v); + if (r) + goto bad; + } +#endif + v->hash_per_block_bits = __fls((1 << v->hash_dev_block_bits) / v->digest_size); diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 3441c10b840c..233cc99d440d 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -127,4 +127,14 @@ extern int verity_hash(struct dm_verity *v, struct ahash_request *req, extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io, sector_t block, u8 *digest, bool *is_zero); +extern void verity_status(struct dm_target *ti, status_type_t type, + unsigned status_flags, char *result, unsigned maxlen); +extern int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev); +extern int verity_iterate_devices(struct dm_target *ti, + iterate_devices_callout_fn fn, void *data); +extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits); +extern void verity_dtr(struct dm_target *ti); +extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv); +extern int verity_map(struct dm_target *ti, struct bio *bio); +extern void dm_verity_avb_error_handler(void); #endif /* DM_VERITY_H */ diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 114a81b27c37..d8db76afa622 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -80,8 +80,6 @@ void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type); enum dm_queue_mode dm_get_md_type(struct mapped_device *md); struct target_type *dm_get_immutable_target_type(struct mapped_device *md); -int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t); - /* * To check the return value from dm_table_find_target(). 
*/ diff --git a/drivers/mfd/arizona-i2c.c b/drivers/mfd/arizona-i2c.c index 5fe12961cfe5..84d29dec0ae1 100644 --- a/drivers/mfd/arizona-i2c.c +++ b/drivers/mfd/arizona-i2c.c @@ -23,6 +23,141 @@ #include "arizona.h" +/************************************************************/ +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD1", "0-001a"), + REGULATOR_SUPPLY("LDOVDD", "0-001a"), + REGULATOR_SUPPLY("CPVDD", "0-001a"), + REGULATOR_SUPPLY("DBVDD2", "0-001a"), + REGULATOR_SUPPLY("DBVDD3", "0-001a"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "0-001a"), + REGULATOR_SUPPLY("SPKVDDR", "0-001a"), +}; +#else +/***********WM8280 1.8V REGULATOR*************/ +static struct regulator_consumer_supply vflorida1_consumer[] = { + REGULATOR_SUPPLY("AVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD1", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("LDOVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("CPVDD", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD2", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("DBVDD3", "i2c-INT34C1:00"), +}; + +/***********WM8280 5V REGULATOR*************/ +static struct regulator_consumer_supply vflorida2_consumer[] = { + REGULATOR_SUPPLY("SPKVDDL", "i2c-INT34C1:00"), + REGULATOR_SUPPLY("SPKVDDR", "i2c-INT34C1:00"), +}; +#endif + +static struct regulator_init_data vflorida1_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida1_consumer), + .consumer_supplies = vflorida1_consumer, +}; + +static struct fixed_voltage_config vflorida1_config = { + .supply_name = "DC_1V8", + .microvolts = 1800000, + .gpio = -EINVAL, + .init_data = &vflorida1_data, +}; + +static struct platform_device vflorida1_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida1_config, + }, +}; + +static struct regulator_init_data vflorida2_data = { + .constraints = { + .always_on = 1, + }, + .num_consumer_supplies = ARRAY_SIZE(vflorida2_consumer), + .consumer_supplies = vflorida2_consumer, +}; + +static struct fixed_voltage_config vflorida2_config = { + .supply_name = "DC_5V", + .microvolts = 3700000, + .gpio = -EINVAL, + .init_data = &vflorida2_data, +}; + +static struct platform_device vflorida2_device = { + .name = "reg-fixed-voltage", + .id = PLATFORM_DEVID_AUTO, + .dev = { + .platform_data = &vflorida2_config, + }, +}; + +/***********WM8280 Codec Driver platform data*************/ +static const struct arizona_micd_range micd_ctp_ranges[] = { + { .max = 11, .key = BTN_0 }, + { .max = 28, .key = BTN_1 }, + { .max = 54, .key = BTN_2 }, + { .max = 100, .key = BTN_3 }, + { .max = 186, .key = BTN_4 }, + { .max = 430, .key = BTN_5 }, +}; + +static struct arizona_micd_config micd_modes[] = { + /*{Acc Det on Micdet1, Use Micbias2 for detection, + * Set GPIO to 1 to selecte this polarity}*/ + { 0, 2, 1 }, +}; + +static struct arizona_pdata __maybe_unused florida_pdata = { + .reset = 0, /*No Reset GPIO from AP, use SW reset*/ + .irq_flags = IRQF_TRIGGER_LOW | IRQF_ONESHOT, + .clk32k_src = ARIZONA_32KZ_MCLK2, /*Onboard OSC provides 32K on MCLK2*/ + /* + * IN1 uses both MICBIAS1 and MICBIAS2 based on jack polarity, + * the below values in dmic_ref only has meaning for DMIC's and not + * AMIC's + */ +#ifdef 
CONFIG_SND_SOC_INTEL_CNL_FPGA + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, ARIZONA_DMIC_MICBIAS3, 0, 0}, + .inmode = {ARIZONA_INMODE_DIFF, ARIZONA_INMODE_DMIC, 0, 0}, +#else + .dmic_ref = {ARIZONA_DMIC_MICBIAS1, 0, ARIZONA_DMIC_MICVDD, 0}, + .inmode = {ARIZONA_INMODE_SE, 0, ARIZONA_INMODE_DMIC, 0}, +#endif + .gpio_base = 0, /* Base allocated by gpio core */ + .micd_pol_gpio = 2, /* GPIO3 (offset 2 from gpio_base) of the codec */ + .micd_configs = micd_modes, + .num_micd_configs = ARRAY_SIZE(micd_modes), + .micd_force_micbias = true, +}; + +/************************************************************/ +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct i2c_board_info arizona_i2c_device = { + I2C_BOARD_INFO("wm8280", 0x1A), + .platform_data = &florida_pdata, +}; +#endif + static int arizona_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -31,10 +166,16 @@ static int arizona_i2c_probe(struct i2c_client *i2c, unsigned long type; int ret; + pr_debug("%s:%d\n", __func__, __LINE__); if (i2c->dev.of_node) type = arizona_of_get_type(&i2c->dev); +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + else + type = WM8280; +#else else type = id->driver_data; +#endif switch (type) { case WM5102: @@ -105,6 +246,13 @@ static const struct i2c_device_id arizona_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, arizona_i2c_id); +#ifndef CONFIG_SND_SOC_INTEL_CNL_FPGA +static struct acpi_device_id __maybe_unused arizona_acpi_match[] = { + { "INT34C1", WM8280 }, + { } +}; +#endif + static struct i2c_driver arizona_i2c_driver = { .driver = { .name = "arizona", @@ -116,7 +264,53 @@ static struct i2c_driver arizona_i2c_driver = { .id_table = arizona_i2c_id, }; -module_i2c_driver(arizona_i2c_driver); +static int __init arizona_modinit(void) +{ + int ret = 0; +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + struct i2c_adapter *adapter; + struct i2c_client *client; +#endif + + pr_debug("%s Entry\n", __func__); + /***********WM8280 Register Regulator*************/ + platform_device_register(&vflorida1_device); + platform_device_register(&vflorida2_device); + +#ifdef CONFIG_SND_SOC_INTEL_CNL_FPGA + adapter = i2c_get_adapter(0); + pr_debug("%s:%d\n", __func__, __LINE__); + if (adapter) { + client = i2c_new_device(adapter, &arizona_i2c_device); + pr_debug("%s:%d\n", __func__, __LINE__); + if (!client) { + pr_err("can't create i2c device %s\n", + arizona_i2c_device.type); + i2c_put_adapter(adapter); + pr_debug("%s:%d\n", __func__, __LINE__); + return -ENODEV; + } + } else { + pr_err("adapter is NULL\n"); + return -ENODEV; + } +#endif + pr_debug("%s:%d\n", __func__, __LINE__); + ret = i2c_add_driver(&arizona_i2c_driver); + + pr_debug("%s Exit\n", __func__); + + return ret; +} + +module_init(arizona_modinit); + +static void __exit arizona_modexit(void) +{ + i2c_del_driver(&arizona_i2c_driver); +} + +module_exit(arizona_modexit); MODULE_DESCRIPTION("Arizona I2C bus interface"); MODULE_AUTHOR("Mark Brown "); diff --git a/drivers/mfd/arizona-irq.c b/drivers/mfd/arizona-irq.c index a307832d7e45..bf50bb41e4a7 100644 --- a/drivers/mfd/arizona-irq.c +++ b/drivers/mfd/arizona-irq.c @@ -377,7 +377,8 @@ int arizona_irq_init(struct arizona *arizona) ret = request_threaded_irq(arizona->irq, NULL, arizona_irq_thread, flags, "arizona", arizona); - if (ret != 0) { + /* FPGA board doesn't have irq line */ + if (ret != 0 && !IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)) { dev_err(arizona->dev, "Failed to request primary IRQ %d: %d\n", arizona->irq, ret); goto err_main_irq; diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 
3726eacdf65d..3242af01f8fa 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -513,6 +513,27 @@ config MISC_RTSX tristate default MISC_RTSX_PCI || MISC_RTSX_USB +config UID_SYS_STATS + bool "Per-UID statistics" + depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING + help + Per-UID CPU time statistics exported to /proc/uid_cputime + Per-UID I/O statistics exported to /proc/uid_io + Per-UID procstat control in /proc/uid_procstat + +config UID_SYS_STATS_DEBUG + bool "Per-TASK statistics" + depends on UID_SYS_STATS + default n + help + Per-task I/O statistics exported to /proc/uid_io + +config MEMORY_STATE_TIME + tristate "Memory freq/bandwidth time statistics" + depends on PROFILING + help + Memory time statistics exported to /sys/kernel/memory_state_time + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index af22bbc3d00c..f4d0fd9afcb8 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -58,3 +58,5 @@ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o obj-$(CONFIG_PCI_ENDPOINT_TEST) += pci_endpoint_test.o obj-$(CONFIG_OCXL) += ocxl/ obj-$(CONFIG_MISC_RTSX) += cardreader/ +obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o +obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index c49e1d2269af..2a82520f3e4e 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -43,3 +43,5 @@ config INTEL_MEI_TXE Supported SoCs: Intel Bay Trail + +source "drivers/misc/mei/spd/Kconfig" diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index cd6825afa8e1..f83e2fdc01d3 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -23,3 +23,5 @@ mei-txe-objs += hw-txe.o mei-$(CONFIG_EVENT_TRACING) += mei-trace.o CFLAGS_mei-trace.o = -I$(src) + +obj-$(CONFIG_INTEL_MEI_SPD) += spd/ diff --git a/drivers/misc/mei/spd/Kconfig b/drivers/misc/mei/spd/Kconfig new file mode 100644 index 000000000000..085f9caa8c66 --- /dev/null +++ b/drivers/misc/mei/spd/Kconfig @@ -0,0 +1,12 @@ +# +# Storage proxy device configuration +# +config INTEL_MEI_SPD + tristate "Intel MEI Host Storage Proxy Driver" + depends on INTEL_MEI && BLOCK && RPMB + help + A driver for the host storage proxy ME client. + The driver enables the ME firmware to store data on storage + devices that are accessible only from the host. + + To compile this driver as a module, choose M here. diff --git a/drivers/misc/mei/spd/Makefile b/drivers/misc/mei/spd/Makefile new file mode 100644 index 000000000000..72d0bca2974e --- /dev/null +++ b/drivers/misc/mei/spd/Makefile @@ -0,0 +1,12 @@ +# +# Makefile for the Storage Proxy device driver. +# + +obj-$(CONFIG_INTEL_MEI_SPD) += mei_spd.o +mei_spd-objs := main.o +mei_spd-objs += cmd.o +mei_spd-objs += gpp.o +mei_spd-objs += rpmb.o +mei_spd-$(CONFIG_DEBUG_FS) += debugfs.o + +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/misc/mei/spd/cmd.c b/drivers/misc/mei/spd/cmd.c new file mode 100644 index 000000000000..3f45902e23da --- /dev/null +++ b/drivers/misc/mei/spd/cmd.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved.
+ */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "cmd.h" +#include "spd.h" + +#define spd_cmd_size(_cmd) \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_##_cmd)) +#define spd_cmd_rpmb_size(_cmd) \ + (spd_cmd_size(_cmd) + SPD_CLIENT_RPMB_DATA_MAX_SIZE) + +#define to_spd_hdr(_buf) (struct spd_cmd_hdr *)(_buf) +#define to_spd_cmd(_cmd, _buf) \ + (struct spd_cmd_##_cmd *)((_buf) + sizeof(struct spd_cmd_hdr)) + +const char *spd_cmd_str(enum spd_cmd_type cmd) +{ +#define __SPD_CMD(_cmd) SPD_##_cmd##_CMD +#define SPD_CMD(cmd) case __SPD_CMD(cmd): return #cmd + switch (cmd) { + SPD_CMD(NONE); + SPD_CMD(START_STOP); + SPD_CMD(RPMB_WRITE); + SPD_CMD(RPMB_READ); + SPD_CMD(RPMB_GET_COUNTER); + SPD_CMD(GPP_WRITE); + SPD_CMD(GPP_READ); + SPD_CMD(TRIM); + SPD_CMD(INIT); + SPD_CMD(STORAGE_STATUS); + SPD_CMD(MAX); + default: + return "unknown"; + } +#undef SPD_CMD +#undef __SPD_CMD +} + +const char *mei_spd_dev_str(enum spd_storage_type type) +{ +#define SPD_TYPE(type) case SPD_TYPE_##type: return #type + switch (type) { + SPD_TYPE(UNDEF); + SPD_TYPE(EMMC); + SPD_TYPE(UFS); + default: + return "unknown"; + } +#undef SPD_TYPE +} + +const char *mei_spd_state_str(enum mei_spd_state state) +{ +#define SPD_STATE(state) case MEI_SPD_STATE_##state: return #state + switch (state) { + SPD_STATE(INIT); + SPD_STATE(INIT_WAIT); + SPD_STATE(INIT_DONE); + SPD_STATE(RUNNING); + SPD_STATE(STOPPING); + default: + return "unknown"; + } +#undef SPD_STATE +} + +/** + * mei_spd_cmd_init_req - send init request + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_init_req(struct mei_spd *spd) +{ + const int req_len = sizeof(struct spd_cmd_hdr); + struct spd_cmd_hdr *hdr; + u32 cmd_type = SPD_INIT_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state != MEI_SPD_STATE_INIT) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + spd->state = MEI_SPD_STATE_INIT_WAIT; + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "start send failed ret = %zd\n", ret); + return ret; + } + + return 0; +} + +/** + * mei_spd_cmd_init_rsp - handle init response message + * + * @spd: spd device + * @cmd: received spd command + * @cmd_sz: received command size + * + * Return: 0 on success; < 0 otherwise + */ +static int mei_spd_cmd_init_rsp(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t cmd_sz) +{ + int type; + int gpp_id; + int i; + + if (cmd_sz < spd_cmd_size(init_resp)) { + spd_err(spd, "Wrong init response size\n"); + return -EINVAL; + } + + if (spd->state != MEI_SPD_STATE_INIT_WAIT) + return -EPROTO; + + type = cmd->init_rsp.type; + gpp_id = cmd->init_rsp.gpp_partition_id; + + switch (type) { + case SPD_TYPE_EMMC: + if (gpp_id < 1 || gpp_id > 4) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + case SPD_TYPE_UFS: + if (gpp_id < 1 || gpp_id > 6) { + spd_err(spd, "%s unsupported gpp id %d\n", + mei_spd_dev_str(type), gpp_id); + return -EINVAL; + } + break; + + default: + spd_err(spd, "unsupported storage type %d\n", + cmd->init_rsp.type); + return -EINVAL; + } + + spd->dev_type = type; + spd->gpp_partition_id = gpp_id; + + if (cmd->init_rsp.serial_no_sz != 0) { + if (cmd->init_rsp.serial_no_sz != + cmd_sz -
spd_cmd_size(init_resp)) { + spd_err(spd, "wrong serial no size %u != %zu\n", + cmd->init_rsp.serial_no_sz, + cmd_sz - spd_cmd_size(init_resp)); + return -EMSGSIZE; + } + + if (cmd->init_rsp.serial_no_sz > 256) { + spd_err(spd, "serial no is too large %u\n", + cmd->init_rsp.serial_no_sz); + return -EMSGSIZE; + } + + spd->dev_id = kzalloc(cmd->init_rsp.serial_no_sz, GFP_KERNEL); + if (!spd->dev_id) + return -ENOMEM; + + spd->dev_id_sz = cmd->init_rsp.serial_no_sz; + if (type == SPD_TYPE_EMMC) { + /* FW has this in be32 format */ + __be32 *sno = (__be32 *)cmd->init_rsp.serial_no; + u32 *dev_id = (u32 *)spd->dev_id; + + for (i = 0; i < spd->dev_id_sz / sizeof(u32); i++) + dev_id[i] = be32_to_cpu(sno[i]); + } else { + memcpy(spd->dev_id, &cmd->init_rsp.serial_no, + cmd->init_rsp.serial_no_sz); + } + } + + spd->state = MEI_SPD_STATE_INIT_DONE; + + return 0; +} + +/** + * mei_spd_cmd_storage_status_req - send storage status message + * + * @spd: spd device + * + * Return: 0 on success + * -EPROTO if called in wrong state + * < 0 on write error + */ +int mei_spd_cmd_storage_status_req(struct mei_spd *spd) +{ + struct spd_cmd_hdr *hdr; + struct spd_cmd_storage_status_req *req; + const int req_len = spd_cmd_size(storage_status_req); + u32 cmd_type = SPD_STORAGE_STATUS_CMD; + ssize_t ret; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_INIT_DONE) + return -EPROTO; + + memset(spd->buf, 0, req_len); + hdr = to_spd_hdr(spd->buf); + + hdr->command_type = cmd_type; + hdr->is_response = 0; + hdr->len = req_len; + + req = to_spd_cmd(storage_status_req, spd->buf); + req->gpp_on = mei_spd_gpp_is_open(spd); + req->rpmb_on = mei_spd_rpmb_is_open(spd); + + ret = mei_cldev_send(spd->cldev, spd->buf, req_len); + if (ret != req_len) { + spd_err(spd, "send storage status failed ret = %zd\n", ret); + return ret; + } + + if (req->gpp_on || req->rpmb_on) + spd->state = MEI_SPD_STATE_RUNNING; + else + spd->state = MEI_SPD_STATE_INIT_DONE; + + spd_dbg(spd, "cmd [%d] %s : state [%d] %s\n", + cmd_type, spd_cmd_str(cmd_type), + spd->state, mei_spd_state_str(spd->state)); + + return 0; +} + +static int mei_spd_cmd_gpp_write(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len = SPD_GPP_WRITE_DATA_LEN(*cmd); + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_write_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_write(spd, cmd->gpp_write_req.offset, + cmd->gpp_write_req.data, len); + if (ret) { + spd_err(spd, "Failed to write to gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "wrote %zu bytes of data\n", len); + + cmd->header.len = spd_cmd_size(gpp_write_rsp); + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_gpp_read(struct mei_spd *spd, struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + size_t len; + int ret; + + if (out_buf_sz < spd_cmd_size(gpp_read_req)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + len = cmd->gpp_read_req.size_to_read; + if (len > SPD_CLIENT_GPP_DATA_MAX_SIZE) { + spd_err(spd, "Block is too large to read\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + ret = mei_spd_gpp_read(spd, cmd->gpp_read_req.offset, + cmd->gpp_read_resp.data, len); + + if (ret) { + spd_err(spd, "Failed to read from gpp ret = %d\n", ret); + return SPD_STATUS_GENERAL_FAILURE; + } + + spd_dbg(spd, "read %zu bytes of data\n", len); + + cmd->header.len =
spd_cmd_size(gpp_read_rsp) + len; + + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_read(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_read.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_read)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_READ_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "read RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_write(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_write.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_write)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + if (mei_spd_rpmb_cmd_req(spd, RPMB_WRITE_DATA, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "write RPMB frame performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_rpmb_get_counter(struct mei_spd *spd, + struct spd_cmd *cmd, + ssize_t out_buf_sz) +{ + u8 *frame = cmd->rpmb_get_counter.rpmb_frame; + + if (out_buf_sz != spd_cmd_rpmb_size(rpmb_get_counter)) { + spd_err(spd, "Wrong request size\n"); + return SPD_STATUS_INVALID_COMMAND; + } + + /* a counter request is a plain write followed by a read back */ + if (mei_spd_rpmb_cmd_req(spd, RPMB_GET_WRITE_COUNTER, frame)) + return SPD_STATUS_GENERAL_FAILURE; + + spd_dbg(spd, "get RPMB counter performed\n"); + return SPD_STATUS_SUCCESS; +} + +static int mei_spd_cmd_response(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "rsp [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + switch (spd_cmd) { + case SPD_INIT_CMD: + ret = mei_spd_cmd_init_rsp(spd, cmd, out_buf_sz); + if (ret) + break; + /* + * Drop the lock before registering the class interfaces: + * their add_dev callbacks take spd->lock themselves. + */ + mutex_unlock(&spd->lock); + mei_spd_rpmb_init(spd); + mei_spd_gpp_init(spd); + mutex_lock(&spd->lock); + break; + default: + ret = -EINVAL; + spd_err(spd, "Wrong response command %d\n", spd_cmd); + break; + } + + return ret; +} + +/** + * mei_spd_cmd_request - dispatch command requests from the SPD device + * + * @spd: spd device + * @out_buf_sz: buffer size + * + * Return: 0 on success, -EINVAL if the request was rejected or the + * reply could not be sent + */ +static int mei_spd_cmd_request(struct mei_spd *spd, ssize_t out_buf_sz) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t written; + u32 spd_cmd; + int ret; + + spd_cmd = cmd->header.command_type; + + spd_dbg(spd, "req [%d] %s : state [%d] %s\n", + spd_cmd, spd_cmd_str(spd_cmd), + spd->state, mei_spd_state_str(spd->state)); + + if (spd->state < MEI_SPD_STATE_RUNNING) { + spd_err(spd, "Wrong state %d\n", spd->state); + ret = SPD_STATUS_INVALID_COMMAND; + goto reply; + } + + switch (spd_cmd) { + case SPD_RPMB_WRITE_CMD: + ret = mei_spd_cmd_rpmb_write(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_READ_CMD: + ret = mei_spd_cmd_rpmb_read(spd, cmd, out_buf_sz); + break; + case SPD_RPMB_GET_COUNTER_CMD: + ret = mei_spd_cmd_rpmb_get_counter(spd, cmd, out_buf_sz); + break; + case SPD_GPP_WRITE_CMD: + ret = mei_spd_cmd_gpp_write(spd, cmd, out_buf_sz); + break; + case SPD_GPP_READ_CMD: + ret = mei_spd_cmd_gpp_read(spd, cmd, out_buf_sz); + break; + case SPD_TRIM_CMD: + spd_err(spd, "Command %d is not supported\n", spd_cmd); + ret = SPD_STATUS_NOT_SUPPORTED; + break; + default: + spd_err(spd, "Wrong request command %d\n", spd_cmd); + ret = SPD_STATUS_INVALID_COMMAND; + break; + } +reply: + cmd->header.is_response = 1; +
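/* at this point ret carries an SPD status code, not a kernel errno */ +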
cmd->header.status = ret; + if (ret != SPD_STATUS_SUCCESS) + cmd->header.len = sizeof(struct spd_cmd_hdr); + + written = mei_cldev_send(spd->cldev, spd->buf, cmd->header.len); + if (written != cmd->header.len) { + ret = SPD_STATUS_GENERAL_FAILURE; + spd_err(spd, "Failed to send reply written = %zd\n", written); + } + + /* FIXME: translate ret to errno */ + if (ret) + return -EINVAL; + + return 0; +} + +ssize_t mei_spd_cmd(struct mei_spd *spd) +{ + struct spd_cmd *cmd = (struct spd_cmd *)spd->buf; + ssize_t out_buf_sz; + int ret; + + out_buf_sz = mei_cldev_recv(spd->cldev, spd->buf, spd->buf_sz); + if (out_buf_sz < 0) { + spd_err(spd, "failure in receive ret = %zd\n", out_buf_sz); + return out_buf_sz; + } + + if (out_buf_sz == 0) { + spd_err(spd, "received empty msg\n"); + return 0; + } + + /* check that we've received at least sizeof(header) */ + if (out_buf_sz < sizeof(struct spd_cmd_hdr)) { + spd_err(spd, "Request is too short\n"); + return -EFAULT; + } + + if (cmd->header.is_response) + ret = mei_spd_cmd_response(spd, out_buf_sz); + else + ret = mei_spd_cmd_request(spd, out_buf_sz); + + return ret; +} + +static void mei_spd_status_send_work(struct work_struct *work) +{ + struct mei_spd *spd = + container_of(work, struct mei_spd, status_send_w); + + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); +} + +void mei_spd_free(struct mei_spd *spd) +{ + if (!spd) + return; + + cancel_work_sync(&spd->status_send_w); + + kfree(spd->buf); + kfree(spd); +} + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev) +{ + struct mei_spd *spd; + u8 *buf; + + spd = kzalloc(sizeof(*spd), GFP_KERNEL); + if (!spd) + return NULL; + + spd->buf_sz = sizeof(struct spd_cmd) + SPD_CLIENT_GPP_DATA_MAX_SIZE; + buf = kmalloc(spd->buf_sz, GFP_KERNEL); + if (!buf) + goto free; + + spd->cldev = cldev; + spd->buf = buf; + spd->state = MEI_SPD_STATE_INIT; + mutex_init(&spd->lock); + INIT_WORK(&spd->status_send_w, mei_spd_status_send_work); + + return spd; +free: + kfree(spd); + return NULL; +} diff --git a/drivers/misc/mei/spd/cmd.h b/drivers/misc/mei/spd/cmd.h new file mode 100644 index 000000000000..3f77550f44ab --- /dev/null +++ b/drivers/misc/mei/spd/cmd.h @@ -0,0 +1,230 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifndef _SPD_CMD_H +#define _SPD_CMD_H + +#include + +/** + * enum spd_cmd_type - available commands + * + * @SPD_NONE_CMD : Lower command sentinel. + * @SPD_START_STOP_CMD : start stop command (deprecated). [Host -> TEE] + * @SPD_RPMB_WRITE_CMD : RPMB write request. [TEE -> Host] + * @SPD_RPMB_READ_CMD : RPMB read request. [TEE -> Host] + * @SPD_RPMB_GET_COUNTER_CMD: get counter request [TEE -> Host] + * @SPD_GPP_WRITE_CMD : GPP write request. [TEE -> Host] + * @SPD_GPP_READ_CMD : GPP read request. [TEE -> Host] + * @SPD_TRIM_CMD : TRIM command [TEE -> Host] + * @SPD_INIT_CMD : initial handshake between host and fw. [Host -> TEE] + * @SPD_STORAGE_STATUS_CMD : the backing storage status. [Host -> TEE] + * @SPD_MAX_CMD: Upper command sentinel. 
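+ * + * A typical session as implemented by this driver: the host opens with + * SPD_INIT_CMD and the FW replies with the storage type and GPP partition + * id; the host then reports backing-store availability with + * SPD_STORAGE_STATUS_CMD; once running, the FW issues RPMB and GPP + * read/write requests which the host executes and acknowledges.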
+ */ +enum spd_cmd_type { + SPD_NONE_CMD = 0, + SPD_START_STOP_CMD, + SPD_RPMB_WRITE_CMD, + SPD_RPMB_READ_CMD, + SPD_RPMB_GET_COUNTER_CMD, + SPD_GPP_WRITE_CMD, + SPD_GPP_READ_CMD, + SPD_TRIM_CMD, + SPD_INIT_CMD, + SPD_STORAGE_STATUS_CMD, + SPD_MAX_CMD, +}; + +enum spd_status { + SPD_STATUS_SUCCESS = 0, + SPD_STATUS_GENERAL_FAILURE = 1, + SPD_STATUS_NOT_READY = 2, + SPD_STATUS_NOT_SUPPORTED = 3, + SPD_STATUS_INVALID_COMMAND = 4, +}; + +/** + * enum spd_storage_type - storage device type + * + * @SPD_TYPE_UNDEF: lower enum sentinel + * @SPD_TYPE_EMMC: emmc device + * @SPD_TYPE_UFS: ufs device + * @SPD_TYPE_MAX: upper enum sentinel + */ +enum spd_storage_type { + SPD_TYPE_UNDEF = 0, + SPD_TYPE_EMMC = 1, + SPD_TYPE_UFS = 2, + SPD_TYPE_MAX +}; + +/** + * struct spd_cmd_hdr - Host storage Command Header + * + * @command_type: command type, one of &enum spd_cmd_type + * @is_response: 1 == Response, 0 == Request + * @len: total command length in bytes, header included + * @status: command status, one of &enum spd_status + * @reserved: reserved + */ +struct spd_cmd_hdr { + u32 command_type : 7; + u32 is_response : 1; + u32 len : 13; + u32 status : 8; + u32 reserved : 3; +} __packed; + +/** + * RPMB Frame Size as defined by the JEDEC spec + */ +#define SPD_CLIENT_RPMB_DATA_MAX_SIZE (512) + +/** + * struct spd_cmd_init_resp + * command_type == SPD_INIT_CMD + * + * @gpp_partition_id: GPP partition: + * UFS: LUN number (1-6) + * EMMC: 1-4. + * 0xff: GPP not supported + * @type: storage hw type + * SPD_TYPE_EMMC + * SPD_TYPE_UFS + * @serial_no_sz: serial_no size + * @serial_no: device serial number + */ +struct spd_cmd_init_resp { + u32 gpp_partition_id; + u32 type; + u32 serial_no_sz; + u8 serial_no[0]; +}; + +/** + * struct spd_cmd_storage_status_req + * command_type == SPD_STORAGE_STATUS_CMD + * + * @gpp_on: availability of the gpp backing storage + * 1 - GP partition is accessible + * 0 - GP partition is not accessible + * @rpmb_on: availability of the rpmb backing storage + * 1 - RPMB partition is accessible + * 0 - RPMB partition is not accessible + */ +struct spd_cmd_storage_status_req { + u32 gpp_on; + u32 rpmb_on; +} __packed; + +/** + * struct spd_cmd_rpmb_write + * command_type == SPD_RPMB_WRITE_CMD + * + * @rpmb_frame: RPMB frames are a constant size (512 bytes) + */ +struct spd_cmd_rpmb_write { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_read + * command_type == SPD_RPMB_READ_CMD + * + * @rpmb_frame: RPMB frames are a constant size (512 bytes) + */ +struct spd_cmd_rpmb_read { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_rpmb_get_counter + * command_type == SPD_RPMB_GET_COUNTER_CMD + * + * @rpmb_frame: frame containing the write counter + */ +struct spd_cmd_rpmb_get_counter { + u8 rpmb_frame[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_req + * command_type == SPD_GPP_WRITE_CMD + * + * @offset: frame offset in partition + * @data: 4K page + */ +struct spd_cmd_gpp_write_req { + u32 offset; + u8 data[0]; +} __packed; + +/** + * struct spd_cmd_gpp_write_rsp + * command_type == SPD_GPP_WRITE_CMD + * + * @reserved: reserved + */ +struct spd_cmd_gpp_write_rsp { + u32 reserved[2]; +} __packed; + +/** + * struct spd_cmd_gpp_read_req + * command_type == SPD_GPP_READ_CMD + * + * @offset: offset of a frame on GPP partition + * @size_to_read: data length to read (must not exceed + * SPD_CLIENT_GPP_DATA_MAX_SIZE) + */ +struct spd_cmd_gpp_read_req { + u32 offset; + u32 size_to_read; +} __packed; + +/** + * struct spd_cmd_gpp_read_rsp + * command_type == SPD_GPP_READ_CMD + * + * @reserved: reserved + * @data: data + */ +struct spd_cmd_gpp_read_rsp { + u32 reserved; + u8 data[0]; +} __packed; + +#define
SPD_GPP_READ_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_read_rsp))) + +#define SPD_GPP_WRITE_DATA_LEN(cmd) ((cmd).header.len - \ + (sizeof(struct spd_cmd_hdr) + \ + sizeof(struct spd_cmd_gpp_write_req))) + +struct spd_cmd { + struct spd_cmd_hdr header; + + union { + struct spd_cmd_rpmb_write rpmb_write; + struct spd_cmd_rpmb_read rpmb_read; + struct spd_cmd_rpmb_get_counter rpmb_get_counter; + + struct spd_cmd_gpp_write_req gpp_write_req; + struct spd_cmd_gpp_write_rsp gpp_write_rsp; + + struct spd_cmd_gpp_read_req gpp_read_req; + struct spd_cmd_gpp_read_rsp gpp_read_resp; + + struct spd_cmd_init_resp init_rsp; + struct spd_cmd_storage_status_req status_req; + }; +} __packed; + +/* GPP Max data 4K */ +#define SPD_CLIENT_GPP_DATA_MAX_SIZE (4096) + +const char *spd_cmd_str(enum spd_cmd_type cmd); +const char *mei_spd_dev_str(enum spd_storage_type type); + +#endif /* _SPD_CMD_H */ diff --git a/drivers/misc/mei/spd/debugfs.c b/drivers/misc/mei/spd/debugfs.c new file mode 100644 index 000000000000..dfbb62a49fcc --- /dev/null +++ b/drivers/misc/mei/spd/debugfs.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. + */ +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static ssize_t mei_spd_dbgfs_read_info(struct file *fp, char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + struct mei_spd *spd = fp->private_data; + size_t bufsz = 4095; + char *buf; + int pos = 0; + int ret; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pos += scnprintf(buf + pos, bufsz - pos, "DEV STATE: [%d] %s\n", + spd->state, mei_spd_state_str(spd->state)); + pos += scnprintf(buf + pos, bufsz - pos, "DEV TYPE : [%d] %s\n", + spd->dev_type, mei_spd_dev_str(spd->dev_type)); + pos += scnprintf(buf + pos, bufsz - pos, " ID SIZE : %d\n", + spd->dev_id_sz); + pos += scnprintf(buf + pos, bufsz - pos, " ID : '%s'\n", "N/A"); + pos += scnprintf(buf + pos, bufsz - pos, "GPP\n"); + pos += scnprintf(buf + pos, bufsz - pos, " id : %d\n", + spd->gpp_partition_id); + pos += scnprintf(buf + pos, bufsz - pos, " opened : %1d\n", + mei_spd_gpp_is_open(spd)); + + ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, pos); + kfree(buf); + return ret; +} + +static const struct file_operations mei_spd_dbgfs_fops_info = { + .open = simple_open, + .read = mei_spd_dbgfs_read_info, + .llseek = generic_file_llseek, +}; + +void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ + if (!spd->dbgfs_dir) + return; + debugfs_remove_recursive(spd->dbgfs_dir); + spd->dbgfs_dir = NULL; +} + +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + struct dentry *dir, *f; + + dir = debugfs_create_dir(name, NULL); + if (!dir) + return -ENOMEM; + + spd->dbgfs_dir = dir; + + f = debugfs_create_file("info", 0400, dir, + spd, &mei_spd_dbgfs_fops_info); + if (!f) { + spd_err(spd, "info: registration failed\n"); + goto err; + } + + return 0; +err: + mei_spd_dbgfs_deregister(spd); + return -ENODEV; +} diff --git a/drivers/misc/mei/spd/gpp.c b/drivers/misc/mei/spd/gpp.c new file mode 100644 index 000000000000..b5d1a27a50ee --- /dev/null +++ b/drivers/misc/mei/spd/gpp.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
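+ * + * GPP backend: reads and writes go through the page cache of the backing + * block device, and the partition itself is discovered via a block + * class_interface matching either an eMMC GPP partition (mmcblkXgpY) or a + * UFS LUN.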
+ */ +#include +#include +#include +#include +#include +#include + +#include "cmd.h" +#include "spd.h" + +static struct page *page_read(struct address_space *mapping, int index) +{ + return read_mapping_page(mapping, index, NULL); +} + +static int mei_spd_bd_read(struct mei_spd *spd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct page *page; + int index = from >> PAGE_SHIFT; + int offset = from & (PAGE_SIZE - 1); + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(spd->gpp->bd_inode->i_mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + memcpy(buf, page_address(page) + offset, cpylen); + put_page(page); + + if (retlen) + *retlen += cpylen; + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int _mei_spd_bd_write(struct block_device *dev, const u_char *buf, + loff_t to, size_t len, size_t *retlen) +{ + struct page *page; + struct address_space *mapping = dev->bd_inode->i_mapping; + int index = to >> PAGE_SHIFT; /* page index */ + int offset = to & ~PAGE_MASK; /* page offset */ + int cpylen; + + while (len) { + if ((offset + len) > PAGE_SIZE) + cpylen = PAGE_SIZE - offset; + else + cpylen = len; + len = len - cpylen; + + page = page_read(mapping, index); + if (IS_ERR(page)) + return PTR_ERR(page); + + if (memcmp(page_address(page) + offset, buf, cpylen)) { + lock_page(page); + memcpy(page_address(page) + offset, buf, cpylen); + set_page_dirty(page); + unlock_page(page); + balance_dirty_pages_ratelimited(mapping); + } + put_page(page); + + if (retlen) + *retlen += cpylen; + + buf += cpylen; + offset = 0; + index++; + } + return 0; +} + +static int mei_spd_bd_write(struct mei_spd *spd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + int ret; + + ret = _mei_spd_bd_write(spd->gpp, buf, to, len, retlen); + if (ret > 0) + ret = 0; + + sync_blockdev(spd->gpp); + + return ret; +} + +static void mei_spd_bd_sync(struct mei_spd *spd) +{ + sync_blockdev(spd->gpp); +} + +#define GPP_FMODE (FMODE_WRITE | FMODE_READ | FMODE_EXCL) + +bool mei_spd_gpp_is_open(struct mei_spd *spd) +{ + struct request_queue *q; + + if (!spd->gpp) + return false; + + q = spd->gpp->bd_queue; + if (q && !blk_queue_stopped(q)) + return true; + + return false; +} + +static int mei_spd_gpp_open(struct mei_spd *spd, struct device *dev) +{ + int ret; + + if (spd->gpp) + return 0; + + spd->gpp = blkdev_get_by_dev(dev->devt, GPP_FMODE, spd); + if (IS_ERR(spd->gpp)) { + ret = PTR_ERR(spd->gpp); + spd->gpp = NULL; + spd_dbg(spd, "Can't get GPP block device %s ret = %d\n", + dev_name(dev), ret); + return ret; + } + + spd_dbg(spd, "gpp partition created\n"); + return 0; +} + +static int mei_spd_gpp_close(struct mei_spd *spd) +{ + if (!spd->gpp) + return 0; + + mei_spd_bd_sync(spd); + blkdev_put(spd->gpp, GPP_FMODE); + spd->gpp = NULL; + + spd_dbg(spd, "gpp partition removed\n"); + return 0; +} + +#define UFSHCD "ufshcd" +static bool mei_spd_lun_ufs_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + struct scsi_device *sdev; + + switch (disk->major) { + case SCSI_DISK0_MAJOR: + case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR: + case SCSI_DISK8_MAJOR ... 
SCSI_DISK15_MAJOR: + break; + default: + return false; + } + + sdev = to_scsi_device(dev->parent); + + if (!sdev->host || + strncmp(sdev->host->hostt->name, UFSHCD, strlen(UFSHCD))) + return false; + + return sdev->lun == spd->gpp_partition_id; +} + +static bool mei_spd_gpp_mmc_match(struct mei_spd *spd, struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + int idx, part_id; + + if (disk->major != MMC_BLOCK_MAJOR) + return false; + + if (sscanf(disk->disk_name, "mmcblk%dgp%d", &idx, &part_id) != 2) + return false; + + return part_id == spd->gpp_partition_id - 1; +} + +static bool mei_spd_gpp_match(struct mei_spd *spd, struct device *dev) +{ + /* we are only interested in physical partitions */ + if (strncmp(dev->type->name, "disk", sizeof("disk"))) + return false; + + if (spd->dev_type == SPD_TYPE_EMMC) + return mei_spd_gpp_mmc_match(spd, dev); + else if (spd->dev_type == SPD_TYPE_UFS) + return mei_spd_lun_ufs_match(spd, dev); + else + return false; +} + +static int gpp_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_open(spd, dev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void gpp_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = container_of(intf, struct mei_spd, gpp_interface); + + if (!mei_spd_gpp_match(spd, dev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_gpp_close(spd)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP read offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_read(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP read failed ret = %d\n", ret); + + return ret; +} + +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size) +{ + int ret; + + spd_dbg(spd, "GPP write offset = %zx, size = %zx\n", off, size); + + if (!mei_spd_gpp_is_open(spd)) + return -ENODEV; + + ret = mei_spd_bd_write(spd, off, size, NULL, data); + if (ret) + spd_err(spd, "GPP write failed ret = %d\n", ret); + + return ret; +} + +void mei_spd_gpp_prepare(struct mei_spd *spd) +{ + spd->gpp_interface.add_dev = gpp_add_device; + spd->gpp_interface.remove_dev = gpp_remove_device; + spd->gpp_interface.class = &block_class; +} + +int mei_spd_gpp_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->gpp_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +void mei_spd_gpp_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->gpp_interface); +} diff --git a/drivers/misc/mei/spd/main.c b/drivers/misc/mei/spd/main.c new file mode 100644 index 000000000000..468cceffb7a0 --- /dev/null +++ b/drivers/misc/mei/spd/main.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2015 - 2018 Intel Corporation. All rights reserved. 
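+ * + * Driver glue: probe() allocates the mei_spd state, enables the MEI + * client device, registers the rx callback and starts the INIT handshake; + * remove() reports the backing storage as gone and tears the state down.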
+ */ +#include <linux/module.h> + +#include "spd.h" + +static void mei_spd_rx_cb(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + mutex_lock(&spd->lock); + mei_spd_cmd(spd); + mutex_unlock(&spd->lock); +} + +static int mei_spd_probe(struct mei_cl_device *cldev, + const struct mei_cl_device_id *id) +{ + struct mei_spd *spd; + u8 ver = mei_cldev_ver(cldev); + int ret; + + dev_dbg(&cldev->dev, "probing mei spd ver = %d\n", ver); + + if (ver < 2) { + dev_warn(&cldev->dev, "unsupported protocol version %d\n", ver); + return -ENODEV; + } + + spd = mei_spd_alloc(cldev); + if (!spd) + return -ENOMEM; + + mei_cldev_set_drvdata(cldev, spd); + + ret = mei_spd_dbgfs_register(spd, "spd"); + if (ret) + goto free; + + ret = mei_cldev_enable(cldev); + if (ret < 0) { + dev_err(&cldev->dev, "Could not enable device ret = %d\n", ret); + goto free; + } + + ret = mei_cldev_register_rx_cb(cldev, mei_spd_rx_cb); + if (ret) { + dev_err(&cldev->dev, "Error registering rx callback %d\n", ret); + goto disable; + } + + spd_dbg(spd, "protocol version %d\n", ver); + mei_spd_gpp_prepare(spd); + mei_spd_rpmb_prepare(spd); + mutex_lock(&spd->lock); + ret = mei_spd_cmd_init_req(spd); + mutex_unlock(&spd->lock); + if (ret) { + dev_err(&cldev->dev, "Could not start ret = %d\n", ret); + goto disable; + } + + return 0; + +disable: + mei_cldev_disable(cldev); + +free: + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + return ret; +} + +static int mei_spd_remove(struct mei_cl_device *cldev) +{ + struct mei_spd *spd = mei_cldev_get_drvdata(cldev); + + if (spd->state == MEI_SPD_STATE_RUNNING) { + spd->state = MEI_SPD_STATE_STOPPING; + mei_spd_gpp_exit(spd); + mei_spd_rpmb_exit(spd); + mutex_lock(&spd->lock); + mei_spd_cmd_storage_status_req(spd); + mutex_unlock(&spd->lock); + } + + mei_cldev_disable(cldev); + mei_spd_dbgfs_deregister(spd); + mei_cldev_set_drvdata(cldev, NULL); + mei_spd_free(spd); + + return 0; +} + +#define MEI_SPD_UUID UUID_LE(0x2a39291f, 0x5551, 0x482f, \ + 0x99, 0xcb, 0x9e, 0x22, 0x74, 0x97, 0x8c, 0xa8) + +static struct mei_cl_device_id mei_spd_tbl[] = { + { .uuid = MEI_SPD_UUID, .version = MEI_CL_VERSION_ANY}, + /* required last entry */ + { } +}; +MODULE_DEVICE_TABLE(mei, mei_spd_tbl); + +static struct mei_cl_driver mei_spd_driver = { + .id_table = mei_spd_tbl, + .name = "mei_spd", + + .probe = mei_spd_probe, + .remove = mei_spd_remove, +}; + +module_mei_cl_driver(mei_spd_driver); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("Storage Proxy driver based on mei bus"); diff --git a/drivers/misc/mei/spd/rpmb.c b/drivers/misc/mei/spd/rpmb.c new file mode 100644 index 000000000000..b74d0cd8f802 --- /dev/null +++ b/drivers/misc/mei/spd/rpmb.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Intel Host Storage Interface Linux driver + * Copyright (c) 2015 - 2018, Intel Corporation.
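+ * + * RPMB backend: frames built by the FW are passed unmodified to a + * matching struct rpmb_dev via rpmb_cmd_seq(); devices are discovered + * through the rpmb class interface.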
+ */ + +#include "cmd.h" +#include "spd.h" +#include + +static int mei_spd_rpmb_start(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->rdev == rdev) + return 0; + + if (spd->rdev) { + spd_warn(spd, "rpmb device already registered\n"); + return -EEXIST; + } + + spd->rdev = rpmb_dev_get(rdev); + spd_dbg(spd, "rpmb partition created\n"); + return 0; +} + +static int mei_spd_rpmb_stop(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (!spd->rdev) { + spd_dbg(spd, "Already stopped\n"); + return -EPROTO; + } + + if (rdev && spd->rdev != rdev) { + spd_dbg(spd, "Wrong RPMB on stop\n"); + return -EINVAL; + } + + rpmb_dev_put(spd->rdev); + spd->rdev = NULL; + + spd_dbg(spd, "rpmb partition removed\n"); + return 0; +} + +static int mei_spd_rpmb_match(struct mei_spd *spd, struct rpmb_dev *rdev) +{ + if (spd->dev_id_sz && rdev->ops->dev_id) { + if (rdev->ops->dev_id_len != spd->dev_id_sz || + memcmp(rdev->ops->dev_id, spd->dev_id, + rdev->ops->dev_id_len)) { + spd_dbg(spd, "ignore request for another rpmb\n"); + /* return 0; FW sends garbage now, ignore it */ + } + } + + switch (rdev->ops->type) { + case RPMB_TYPE_EMMC: + if (spd->dev_type != SPD_TYPE_EMMC) + return 0; + break; + case RPMB_TYPE_UFS: + if (spd->dev_type != SPD_TYPE_UFS) + return 0; + break; + default: + return 0; + } + + return 1; +} + +static int rpmb_add_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return 0; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_start(spd, rdev)) { + mutex_unlock(&spd->lock); + return 0; + } + + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); + + return 0; +} + +static void rpmb_remove_device(struct device *dev, struct class_interface *intf) +{ + struct mei_spd *spd = + container_of(intf, struct mei_spd, rpmb_interface); + struct rpmb_dev *rdev = to_rpmb_dev(dev); + + if (!mei_spd_rpmb_match(spd, rdev)) + return; + + mutex_lock(&spd->lock); + if (mei_spd_rpmb_stop(spd, rdev)) { + mutex_unlock(&spd->lock); + return; + } + + if (spd->state != MEI_SPD_STATE_STOPPING) + schedule_work(&spd->status_send_w); + mutex_unlock(&spd->lock); +} + +void mei_spd_rpmb_prepare(struct mei_spd *spd) +{ + spd->rpmb_interface.add_dev = rpmb_add_device; + spd->rpmb_interface.remove_dev = rpmb_remove_device; + spd->rpmb_interface.class = &rpmb_class; +} + +/** + * mei_spd_rpmb_init - init RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + * Returns: 0 if initialized successfully, <0 otherwise + */ +int mei_spd_rpmb_init(struct mei_spd *spd) +{ + int ret; + + ret = class_interface_register(&spd->rpmb_interface); + if (ret) + spd_err(spd, "Can't register interface\n"); + return ret; +} + +/** + * mei_spd_rpmb_exit - clean RPMB connection + * + * @spd: device + * + * Locking: spd->lock should not be held + */ +void mei_spd_rpmb_exit(struct mei_spd *spd) +{ + class_interface_unregister(&spd->rpmb_interface); +} + +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req, void *buf) +{ + struct rpmb_cmd cmd[3]; + struct rpmb_frame_jdec *frame_res = NULL; + u32 flags; + unsigned int i; + int ret; + + if (!spd->rdev) { + spd_err(spd, "RPMB not ready\n"); + return -ENODEV; + } + + i = 0; + flags = RPMB_F_WRITE; + if (req == RPMB_WRITE_DATA || req == RPMB_PROGRAM_KEY) + flags |= RPMB_F_REL_WRITE; + cmd[i].flags = flags; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + if (req == RPMB_WRITE_DATA || req 
== RPMB_PROGRAM_KEY) { + frame_res = kzalloc(sizeof(*frame_res), GFP_KERNEL); + if (!frame_res) + return -ENOMEM; + frame_res->req_resp = cpu_to_be16(RPMB_RESULT_READ); + cmd[i].flags = RPMB_F_WRITE; + cmd[i].nframes = 1; + cmd[i].frames = frame_res; + i++; + } + + cmd[i].flags = 0; + cmd[i].nframes = 1; + cmd[i].frames = buf; + i++; + + ret = rpmb_cmd_seq(spd->rdev, cmd, i); + if (ret) + spd_err(spd, "RPMB req failed ret = %d\n", ret); + + kfree(frame_res); + return ret; +} + +bool mei_spd_rpmb_is_open(struct mei_spd *spd) +{ + return !!spd->rdev; +} diff --git a/drivers/misc/mei/spd/spd.h b/drivers/misc/mei/spd/spd.h new file mode 100644 index 000000000000..b919a5cb7a4c --- /dev/null +++ b/drivers/misc/mei/spd/spd.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ +/* + * Copyright (C) 2015-2018 Intel Corp. All rights reserved + */ +#ifndef _MEI_SPD_H +#define _MEI_SPD_H + +#include +#include +#include + +enum mei_spd_state { + MEI_SPD_STATE_INIT, + MEI_SPD_STATE_INIT_WAIT, + MEI_SPD_STATE_INIT_DONE, + MEI_SPD_STATE_RUNNING, + MEI_SPD_STATE_STOPPING, +}; + +/** + * struct mei_spd - spd device struct + * + * @cldev: client bus device + * @gpp: GPP partition block device + * @gpp_partition_id: GPP partition id (1-6) + * @gpp_interface: gpp class interface for discovery + * @dev_type: storage device type + * @dev_id_sz: device id size + * @dev_id: device id string + * @rdev: RPMB device + * @rpmb_interface: gpp class interface for discovery + * @lock: mutex to sync request processing + * @state: driver state + * @status_send_w: workitem for sending status to the FW + * @buf_sz: receive/transmit buffer allocated size + * @buf: receive/transmit buffer + * @dbgfs_dir: debugfs directory entry + */ +struct mei_spd { + struct mei_cl_device *cldev; + struct block_device *gpp; + u32 gpp_partition_id; + struct class_interface gpp_interface; + u32 dev_type; + u32 dev_id_sz; + u8 *dev_id; + struct rpmb_dev *rdev; + struct class_interface rpmb_interface; + struct mutex lock; /* mutex to sync request processing */ + enum mei_spd_state state; + struct work_struct status_send_w; + size_t buf_sz; + u8 *buf; + +#if IS_ENABLED(CONFIG_DEBUG_FS) + struct dentry *dbgfs_dir; +#endif /* CONFIG_DEBUG_FS */ +}; + +struct mei_spd *mei_spd_alloc(struct mei_cl_device *cldev); +void mei_spd_free(struct mei_spd *spd); + +int mei_spd_cmd_init_req(struct mei_spd *spd); +int mei_spd_cmd_storage_status_req(struct mei_spd *spd); +ssize_t mei_spd_cmd(struct mei_spd *spd); + +void mei_spd_gpp_prepare(struct mei_spd *spd); +bool mei_spd_gpp_is_open(struct mei_spd *spd); +int mei_spd_gpp_init(struct mei_spd *spd); +void mei_spd_gpp_exit(struct mei_spd *spd); +int mei_spd_gpp_read(struct mei_spd *spd, size_t off, u8 *data, size_t size); +int mei_spd_gpp_write(struct mei_spd *spd, size_t off, u8 *data, size_t size); + +void mei_spd_rpmb_prepare(struct mei_spd *spd); +bool mei_spd_rpmb_is_open(struct mei_spd *spd); +int mei_spd_rpmb_init(struct mei_spd *spd); +void mei_spd_rpmb_exit(struct mei_spd *spd); +int mei_spd_rpmb_cmd_req(struct mei_spd *spd, u16 req_type, void *buf); + +#if IS_ENABLED(CONFIG_DEBUG_FS) +int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name); +void mei_spd_dbgfs_deregister(struct mei_spd *spd); +#else +static inline int mei_spd_dbgfs_register(struct mei_spd *spd, const char *name) +{ + return 0; +} + +static inline void mei_spd_dbgfs_deregister(struct mei_spd *spd) +{ +} + +#endif /* CONFIG_DEBUG_FS */ + +const char *mei_spd_state_str(enum mei_spd_state state); + +#define 
spd_err(spd, fmt, ...) \ + dev_err(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_warn(spd, fmt, ...) \ + dev_warn(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) +#define spd_dbg(spd, fmt, ...) \ + dev_dbg(&(spd)->cldev->dev, fmt, ##__VA_ARGS__) + +#endif /* _MEI_SPD_H */ diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c new file mode 100644 index 000000000000..ba94dcf09169 --- /dev/null +++ b/drivers/misc/memory_state_time.c @@ -0,0 +1,462 @@ +/* drivers/misc/memory_state_time.c + * + * Copyright (C) 2016 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define KERNEL_ATTR_RO(_name) \ +static struct kobj_attribute _name##_attr = __ATTR_RO(_name) + +#define KERNEL_ATTR_RW(_name) \ +static struct kobj_attribute _name##_attr = \ + __ATTR(_name, 0644, _name##_show, _name##_store) + +#define FREQ_HASH_BITS 4 +DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS); + +static DEFINE_MUTEX(mem_lock); + +#define TAG "memory_state_time" +#define BW_NODE "/soc/memory-state-time" +#define FREQ_TBL "freq-tbl" +#define BW_TBL "bw-buckets" +#define NUM_SOURCES "num-sources" + +#define LOWEST_FREQ 2 + +static int curr_bw; +static int curr_freq; +static u32 *bw_buckets; +static u32 *freq_buckets; +static int num_freqs; +static int num_buckets; +static int registered_bw_sources; +static u64 last_update; +static bool init_success; +static struct workqueue_struct *memory_wq; +static u32 num_sources = 10; +static int *bandwidths; + +struct freq_entry { + int freq; + u64 *buckets; /* Bandwidth buckets. 
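Indexed by find_bucket(); each cell accumulates the time spent at this frequency in the corresponding bandwidth bucket (see update_table()).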
*/ + struct hlist_node hash; +}; + +struct queue_container { + struct work_struct update_state; + int value; + u64 time_now; + int id; + struct mutex *lock; +}; + +static int find_bucket(int bw) +{ + int i; + + if (bw_buckets != NULL) { + for (i = 0; i < num_buckets; i++) { + if (bw_buckets[i] > bw) { + pr_debug("Found bucket %d for bandwidth %d\n", + i, bw); + return i; + } + } + return num_buckets - 1; + } + return 0; +} + +static u64 get_time_diff(u64 time_now) +{ + u64 ms; + + ms = time_now - last_update; + last_update = time_now; + return ms; +} + +static ssize_t show_stat_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + int i, j; + int len = 0; + struct freq_entry *freq_entry; + + for (i = 0; i < num_freqs; i++) { + hash_for_each_possible(freq_hash_table, freq_entry, hash, + freq_buckets[i]) { + if (freq_entry->freq == freq_buckets[i]) { + len += scnprintf(buf + len, PAGE_SIZE - len, + "%d ", freq_buckets[i]); + if (len >= PAGE_SIZE) + break; + for (j = 0; j < num_buckets; j++) { + len += scnprintf(buf + len, + PAGE_SIZE - len, + "%llu ", + freq_entry->buckets[j]); + } + len += scnprintf(buf + len, PAGE_SIZE - len, + "\n"); + } + } + } + pr_debug("Current Time: %llu\n", ktime_get_boot_ns()); + return len; +} +KERNEL_ATTR_RO(show_stat); + +static void update_table(u64 time_now) +{ + struct freq_entry *freq_entry; + + pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq); + hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) { + if (curr_freq == freq_entry->freq) { + freq_entry->buckets[find_bucket(curr_bw)] + += get_time_diff(time_now); + break; + } + } +} + +static bool freq_exists(int freq) +{ + int i; + + for (i = 0; i < num_freqs; i++) { + if (freq == freq_buckets[i]) + return true; + } + return false; +} + +static int calculate_total_bw(int bw, int index) +{ + int i; + int total_bw = 0; + + pr_debug("memory_state_time New bw %d for id %d\n", bw, index); + bandwidths[index] = bw; + for (i = 0; i < registered_bw_sources; i++) + total_bw += bandwidths[i]; + return total_bw; +} + +static void freq_update_do_work(struct work_struct *work) +{ + struct queue_container *freq_state_update + = container_of(work, struct queue_container, + update_state); + if (freq_state_update) { + mutex_lock(&mem_lock); + update_table(freq_state_update->time_now); + curr_freq = freq_state_update->value; + mutex_unlock(&mem_lock); + kfree(freq_state_update); + } +} + +static void bw_update_do_work(struct work_struct *work) +{ + struct queue_container *bw_state_update + = container_of(work, struct queue_container, + update_state); + if (bw_state_update) { + mutex_lock(&mem_lock); + update_table(bw_state_update->time_now); + curr_bw = calculate_total_bw(bw_state_update->value, + bw_state_update->id); + mutex_unlock(&mem_lock); + kfree(bw_state_update); + } +} + +static void memory_state_freq_update(struct memory_state_update_block *ub, + int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (freq_exists(value) && init_success) { + struct queue_container *freq_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!freq_container) + return; + INIT_WORK(&freq_container->update_state, + freq_update_do_work); + freq_container->time_now = ktime_get_boot_ns(); + freq_container->value = value; + pr_debug("Scheduling freq update in work queue\n"); + queue_work(memory_wq, &freq_container->update_state); + } else { + pr_debug("Freq does not exist.\n"); + } + } +} + +static void memory_state_bw_update(struct memory_state_update_block *ub, + 
int value) +{ + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + if (init_success) { + struct queue_container *bw_container + = kmalloc(sizeof(struct queue_container), + GFP_KERNEL); + if (!bw_container) + return; + INIT_WORK(&bw_container->update_state, + bw_update_do_work); + bw_container->time_now = ktime_get_boot_ns(); + bw_container->value = value; + bw_container->id = ub->id; + pr_debug("Scheduling bandwidth update in work queue\n"); + queue_work(memory_wq, &bw_container->update_state); + } + } +} + +struct memory_state_update_block *memory_state_register_frequency_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating frequency source\n"); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_freq_update; + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_frequency_source); + +struct memory_state_update_block *memory_state_register_bandwidth_source(void) +{ + struct memory_state_update_block *block; + + if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) { + pr_debug("Allocating bandwidth source %d\n", + registered_bw_sources); + block = kmalloc(sizeof(struct memory_state_update_block), + GFP_KERNEL); + if (!block) + return NULL; + block->update_call = memory_state_bw_update; + if (registered_bw_sources < num_sources) { + block->id = registered_bw_sources++; + } else { + pr_err("Unable to allocate source; max number reached\n"); + kfree(block); + return NULL; + } + return block; + } + pr_err("Config option disabled.\n"); + return NULL; +} +EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source); + +/* Buckets are designated by their maximum. + * Reads the bucket table advertised in the device tree and sizes the + * per-source bandwidth array accordingly; returns 0 or a negative errno. + */ +static int get_bw_buckets(struct device *dev) +{ + int ret, lenb; + struct device_node *node = dev->of_node; + + of_property_read_u32(node, NUM_SOURCES, &num_sources); + if (!of_find_property(node, BW_TBL, &lenb)) { + pr_err("Missing %s property\n", BW_TBL); + return -ENODATA; + } + + bandwidths = devm_kzalloc(dev, + sizeof(*bandwidths) * num_sources, GFP_KERNEL); + if (!bandwidths) + return -ENOMEM; + lenb /= sizeof(*bw_buckets); + bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets), + GFP_KERNEL); + if (!bw_buckets) { + devm_kfree(dev, bandwidths); + return -ENOMEM; + } + ret = of_property_read_u32_array(node, BW_TBL, bw_buckets, + lenb); + if (ret < 0) { + devm_kfree(dev, bandwidths); + devm_kfree(dev, bw_buckets); + pr_err("Unable to read bandwidth table from device tree.\n"); + return ret; + } + + curr_bw = 0; + num_buckets = lenb; + return 0; +} + +/* Adds struct freq_entry nodes to the hashtable for each compatible frequency. + * Stores the number of supported frequencies in num_freqs and returns 0 + * on success or a negative errno.
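+ * + * Illustrative device-tree node consumed by this driver (property names + * match the FREQ_TBL, BW_TBL and NUM_SOURCES defines above; the values + * here are made up): + * + * memory-state-time { + * compatible = "memory-state-time"; + * freq-tbl = <1017600 1296000 1555200>; + * bw-buckets = <381 762 1144 1525>; + * num-sources = <2>; + * };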
+ */ +static int freq_buckets_init(struct device *dev) +{ + struct freq_entry *freq_entry; + int i; + int ret, lenf; + struct device_node *node = dev->of_node; + + if (!of_find_property(node, FREQ_TBL, &lenf)) { + pr_err("Missing %s property\n", FREQ_TBL); + return -ENODATA; + } + + lenf /= sizeof(*freq_buckets); + freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets), + GFP_KERNEL); + if (!freq_buckets) + return -ENOMEM; + pr_debug("freqs found len %d\n", lenf); + ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets, + lenf); + if (ret < 0) { + devm_kfree(dev, freq_buckets); + pr_err("Unable to read frequency table from device tree.\n"); + return ret; + } + pr_debug("ret freq %d\n", ret); + + num_freqs = lenf; + curr_freq = freq_buckets[LOWEST_FREQ]; + + for (i = 0; i < num_freqs; i++) { + freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry), + GFP_KERNEL); + if (!freq_entry) + return -ENOMEM; + freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets, + GFP_KERNEL); + if (!freq_entry->buckets) { + devm_kfree(dev, freq_entry); + return -ENOMEM; + } + pr_debug("memory_state_time Adding freq to ht %d\n", + freq_buckets[i]); + freq_entry->freq = freq_buckets[i]; + hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]); + } + return 0; +} + +struct kobject *memory_kobj; +EXPORT_SYMBOL_GPL(memory_kobj); + +static struct attribute *memory_attrs[] = { + &show_stat_attr.attr, + NULL +}; + +static struct attribute_group memory_attr_group = { + .attrs = memory_attrs, +}; + +static int memory_state_time_probe(struct platform_device *pdev) +{ + int error; + + error = get_bw_buckets(&pdev->dev); + if (error) + return error; + error = freq_buckets_init(&pdev->dev); + if (error) + return error; + last_update = ktime_get_boot_ns(); + init_success = true; + + pr_debug("memory_state_time initialized with num_freqs %d\n", + num_freqs); + return 0; +} + +static const struct of_device_id match_table[] = { + { .compatible = "memory-state-time" }, + {} +}; + +static struct platform_driver memory_state_time_driver = { + .probe = memory_state_time_probe, + .driver = { + .name = "memory-state-time", + .of_match_table = match_table, + .owner = THIS_MODULE, + }, +}; + +static int __init memory_state_time_init(void) +{ + int error; + + hash_init(freq_hash_table); + memory_wq = create_singlethread_workqueue("memory_wq"); + if (!memory_wq) { + pr_err("Unable to create workqueue.\n"); + return -EINVAL; + } + /* + * Create sys/kernel directory for memory_state_time. + */ + memory_kobj = kobject_create_and_add(TAG, kernel_kobj); + if (!memory_kobj) { + pr_err("Unable to allocate memory_kobj for sysfs directory.\n"); + error = -ENOMEM; + goto wq; + } + error = sysfs_create_group(memory_kobj, &memory_attr_group); + if (error) { + pr_err("Unable to create sysfs folder.\n"); + goto kobj; + } + + error = platform_driver_register(&memory_state_time_driver); + if (error) { + pr_err("Unable to register memory_state_time platform driver.\n"); + goto group; + } + return 0; + +group: sysfs_remove_group(memory_kobj, &memory_attr_group); +kobj: kobject_put(memory_kobj); +wq: destroy_workqueue(memory_wq); + return error; +} +module_init(memory_state_time_init); diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c new file mode 100644 index 000000000000..88dc1cd3a204 --- /dev/null +++ b/drivers/misc/uid_sys_stats.c @@ -0,0 +1,703 @@ +/* drivers/misc/uid_sys_stats.c + * + * Copyright (C) 2014 - 2015 Google, Inc. 
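+ * + * Per-UID (and, with CONFIG_UID_SYS_STATS_DEBUG, per-task) CPU time and + * I/O accounting exported through /proc/uid_cputime, /proc/uid_io and + * /proc/uid_procstat.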
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define UID_HASH_BITS 10 +DECLARE_HASHTABLE(hash_table, UID_HASH_BITS); + +static DEFINE_RT_MUTEX(uid_lock); +static struct proc_dir_entry *cpu_parent; +static struct proc_dir_entry *io_parent; +static struct proc_dir_entry *proc_parent; + +struct io_stats { + u64 read_bytes; + u64 write_bytes; + u64 rchar; + u64 wchar; + u64 fsync; +}; + +#define UID_STATE_FOREGROUND 0 +#define UID_STATE_BACKGROUND 1 +#define UID_STATE_BUCKET_SIZE 2 + +#define UID_STATE_TOTAL_CURR 2 +#define UID_STATE_TOTAL_LAST 3 +#define UID_STATE_DEAD_TASKS 4 +#define UID_STATE_SIZE 5 + +#define MAX_TASK_COMM_LEN 256 + +struct task_entry { + char comm[MAX_TASK_COMM_LEN]; + pid_t pid; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +}; + +struct uid_entry { + uid_t uid; + u64 utime; + u64 stime; + u64 active_utime; + u64 active_stime; + int state; + struct io_stats io[UID_STATE_SIZE]; + struct hlist_node hash; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + DECLARE_HASHTABLE(task_entries, UID_HASH_BITS); +#endif +}; + +static u64 compute_write_bytes(struct task_struct *task) +{ + if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes) + return 0; + + return task->ioac.write_bytes - task->ioac.cancelled_write_bytes; +} + +static void compute_io_bucket_stats(struct io_stats *io_bucket, + struct io_stats *io_curr, + struct io_stats *io_last, + struct io_stats *io_dead) +{ + /* tasks could switch to another uid group, but its io_last in the + * previous uid group could still be positive. + * therefore before each update, do an overflow check first + */ + int64_t delta; + + delta = io_curr->read_bytes + io_dead->read_bytes - + io_last->read_bytes; + io_bucket->read_bytes += delta > 0 ? delta : 0; + delta = io_curr->write_bytes + io_dead->write_bytes - + io_last->write_bytes; + io_bucket->write_bytes += delta > 0 ? delta : 0; + delta = io_curr->rchar + io_dead->rchar - io_last->rchar; + io_bucket->rchar += delta > 0 ? delta : 0; + delta = io_curr->wchar + io_dead->wchar - io_last->wchar; + io_bucket->wchar += delta > 0 ? delta : 0; + delta = io_curr->fsync + io_dead->fsync - io_last->fsync; + io_bucket->fsync += delta > 0 ? 
delta : 0; + + io_last->read_bytes = io_curr->read_bytes; + io_last->write_bytes = io_curr->write_bytes; + io_last->rchar = io_curr->rchar; + io_last->wchar = io_curr->wchar; + io_last->fsync = io_curr->fsync; + + memset(io_dead, 0, sizeof(struct io_stats)); +} + +#ifdef CONFIG_UID_SYS_STATS_DEBUG +static void get_full_task_comm(struct task_entry *task_entry, + struct task_struct *task) +{ + int i = 0, offset = 0, len = 0; + /* save one byte for terminating null character */ + int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1; + char buf[unused_len]; + struct mm_struct *mm = task->mm; + + /* fill the first TASK_COMM_LEN bytes with thread name */ + __get_task_comm(task_entry->comm, TASK_COMM_LEN, task); + i = strlen(task_entry->comm); + while (i < TASK_COMM_LEN) + task_entry->comm[i++] = ' '; + + /* next the executable file name */ + if (mm) { + down_read(&mm->mmap_sem); + if (mm->exe_file) { + char *pathname = d_path(&mm->exe_file->f_path, buf, + unused_len); + + if (!IS_ERR(pathname)) { + len = strlcpy(task_entry->comm + i, pathname, + unused_len); + i += len; + task_entry->comm[i++] = ' '; + unused_len--; + } + } + up_read(&mm->mmap_sem); + } + unused_len -= len; + + /* fill the rest with command line argument + * replace each null or new line character + * between args in argv with whitespace */ + len = get_cmdline(task, buf, unused_len); + while (offset < len) { + if (buf[offset] != '\0' && buf[offset] != '\n') + task_entry->comm[i++] = buf[offset]; + else + task_entry->comm[i++] = ' '; + offset++; + } + + /* get rid of trailing whitespaces in case when arg is memset to + * zero before being reset in userspace + */ + while (task_entry->comm[i-1] == ' ') + i--; + task_entry->comm[i] = '\0'; +} + +static struct task_entry *find_task_entry(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + + hash_for_each_possible(uid_entry->task_entries, task_entry, hash, + task->pid) { + if (task->pid == task_entry->pid) { + /* if thread name changed, update the entire command */ + int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN) + - task_entry->comm; + + if (strncmp(task_entry->comm, task->comm, len)) + get_full_task_comm(task_entry, task); + return task_entry; + } + } + return NULL; +} + +static struct task_entry *find_or_register_task(struct uid_entry *uid_entry, + struct task_struct *task) +{ + struct task_entry *task_entry; + pid_t pid = task->pid; + + task_entry = find_task_entry(uid_entry, task); + if (task_entry) + return task_entry; + + task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC); + if (!task_entry) + return NULL; + + get_full_task_comm(task_entry, task); + + task_entry->pid = pid; + hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid); + + return task_entry; +} + +static void remove_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + struct hlist_node *tmp_task; + + hash_for_each_safe(uid_entry->task_entries, bkt_task, + tmp_task, task_entry, hash) { + hash_del(&task_entry->hash); + kfree(task_entry); + } +} + +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + } +} + +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct task_entry *task_entry = 
find_or_register_task(uid_entry, task); + struct io_stats *task_io_slot = &task_entry->io[slot]; + + task_io_slot->read_bytes += task->ioac.read_bytes; + task_io_slot->write_bytes += compute_write_bytes(task); + task_io_slot->rchar += task->ioac.rchar; + task_io_slot->wchar += task->ioac.wchar; + task_io_slot->fsync += task->ioac.syscfs; +} + +static void compute_io_uid_tasks(struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + compute_io_bucket_stats(&task_entry->io[uid_entry->state], + &task_entry->io[UID_STATE_TOTAL_CURR], + &task_entry->io[UID_STATE_TOTAL_LAST], + &task_entry->io[UID_STATE_DEAD_TASKS]); + } +} + +static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry) +{ + struct task_entry *task_entry; + unsigned long bkt_task; + + hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) { + /* Separated by comma because space exists in task comm */ + seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", + task_entry->comm, + (unsigned long)task_entry->pid, + task_entry->io[UID_STATE_FOREGROUND].rchar, + task_entry->io[UID_STATE_FOREGROUND].wchar, + task_entry->io[UID_STATE_FOREGROUND].read_bytes, + task_entry->io[UID_STATE_FOREGROUND].write_bytes, + task_entry->io[UID_STATE_BACKGROUND].rchar, + task_entry->io[UID_STATE_BACKGROUND].wchar, + task_entry->io[UID_STATE_BACKGROUND].read_bytes, + task_entry->io[UID_STATE_BACKGROUND].write_bytes, + task_entry->io[UID_STATE_FOREGROUND].fsync, + task_entry->io[UID_STATE_BACKGROUND].fsync); + } +} +#else +static void remove_uid_tasks(struct uid_entry *uid_entry) {}; +static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}; +static void add_uid_tasks_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) {}; +static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}; +static void show_io_uid_tasks(struct seq_file *m, + struct uid_entry *uid_entry) {} +#endif + +static struct uid_entry *find_uid_entry(uid_t uid) +{ + struct uid_entry *uid_entry; + hash_for_each_possible(hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +static struct uid_entry *find_or_register_uid(uid_t uid) +{ + struct uid_entry *uid_entry; + + uid_entry = find_uid_entry(uid); + if (uid_entry) + return uid_entry; + + uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; +#ifdef CONFIG_UID_SYS_STATS_DEBUG + hash_init(uid_entry->task_entries); +#endif + hash_add(hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static int uid_cputime_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + u64 utime; + u64 stime; + unsigned long bkt; + uid_t uid; + + rt_mutex_lock(&uid_lock); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + uid_entry->active_stime = 0; + uid_entry->active_utime = 0; + } + + rcu_read_lock(); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + rcu_read_unlock(); + rt_mutex_unlock(&uid_lock); + pr_err("%s: failed to find the uid_entry for uid %d\n", + __func__, uid); + return -ENOMEM; + } + task_cputime_adjusted(task, &utime, &stime); + uid_entry->active_utime += utime; + 
uid_entry->active_stime += stime; + } while_each_thread(temp, task); + rcu_read_unlock(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + u64 total_utime = uid_entry->utime + + uid_entry->active_utime; + u64 total_stime = uid_entry->stime + + uid_entry->active_stime; + seq_printf(m, "%d: %llu %llu\n", uid_entry->uid, + ktime_to_ms(total_utime), ktime_to_ms(total_stime)); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_cputime_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_cputime_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_cputime_fops = { + .open = uid_cputime_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_remove_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_remove_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + char uids[128]; + char *start_uid, *end_uid = NULL; + long int uid_start = 0, uid_end = 0; + + if (count >= sizeof(uids)) + count = sizeof(uids) - 1; + + if (copy_from_user(uids, buffer, count)) + return -EFAULT; + + uids[count] = '\0'; + end_uid = uids; + start_uid = strsep(&end_uid, "-"); + + if (!start_uid || !end_uid) + return -EINVAL; + + if (kstrtol(start_uid, 10, &uid_start) != 0 || + kstrtol(end_uid, 10, &uid_end) != 0) { + return -EINVAL; + } + + /* Also remove uids from /proc/uid_time_in_state */ + cpufreq_task_times_remove_uids(uid_start, uid_end); + + rt_mutex_lock(&uid_lock); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(hash_table, uid_entry, tmp, + hash, (uid_t)uid_start) { + if (uid_start == uid_entry->uid) { + remove_uid_tasks(uid_entry); + hash_del(&uid_entry->hash); + kfree(uid_entry); + } + } + } + + rt_mutex_unlock(&uid_lock); + return count; +} + +static const struct file_operations uid_remove_fops = { + .open = uid_remove_open, + .release = single_release, + .write = uid_remove_write, +}; + + +static void add_uid_io_stats(struct uid_entry *uid_entry, + struct task_struct *task, int slot) +{ + struct io_stats *io_slot = &uid_entry->io[slot]; + + io_slot->read_bytes += task->ioac.read_bytes; + io_slot->write_bytes += compute_write_bytes(task); + io_slot->rchar += task->ioac.rchar; + io_slot->wchar += task->ioac.wchar; + io_slot->fsync += task->ioac.syscfs; + + add_uid_tasks_io_stats(uid_entry, task, slot); +} + +static void update_io_stats_all_locked(void) +{ + struct uid_entry *uid_entry = NULL; + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + unsigned long bkt; + uid_t uid; + + hash_for_each(hash_table, bkt, uid_entry, hash) { + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + } + + rcu_read_lock(); + do_each_thread(temp, task) { + uid = from_kuid_munged(user_ns, task_uid(task)); + if (!uid_entry || uid_entry->uid != uid) + uid_entry = find_or_register_uid(uid); + if (!uid_entry) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); + } +} + +static void update_io_stats_uid_locked(struct 
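/*
 * Illustrative usage sketch, not part of the patch: uid_remove_write()
 * above takes an inclusive "<start>-<end>" decimal UID range, so the
 * accounting for a retired range of UIDs can be dropped from userspace
 * like this (the range value is just an example):
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char range[] = "10000-10010";     /* inclusive on both ends */
        int fd = open("/proc/uid_cputime/remove_uid_range", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, range, strlen(range)) < 0)
                perror("write");
        close(fd);
        return 0;
}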
uid_entry *uid_entry) +{ + struct task_struct *task, *temp; + struct user_namespace *user_ns = current_user_ns(); + + memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0, + sizeof(struct io_stats)); + set_io_uid_tasks_zero(uid_entry); + + rcu_read_lock(); + do_each_thread(temp, task) { + if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid) + continue; + add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR); + } while_each_thread(temp, task); + rcu_read_unlock(); + + compute_io_bucket_stats(&uid_entry->io[uid_entry->state], + &uid_entry->io[UID_STATE_TOTAL_CURR], + &uid_entry->io[UID_STATE_TOTAL_LAST], + &uid_entry->io[UID_STATE_DEAD_TASKS]); + compute_io_uid_tasks(uid_entry); +} + + +static int uid_io_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + unsigned long bkt; + + rt_mutex_lock(&uid_lock); + + update_io_stats_all_locked(); + + hash_for_each(hash_table, bkt, uid_entry, hash) { + seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n", + uid_entry->uid, + uid_entry->io[UID_STATE_FOREGROUND].rchar, + uid_entry->io[UID_STATE_FOREGROUND].wchar, + uid_entry->io[UID_STATE_FOREGROUND].read_bytes, + uid_entry->io[UID_STATE_FOREGROUND].write_bytes, + uid_entry->io[UID_STATE_BACKGROUND].rchar, + uid_entry->io[UID_STATE_BACKGROUND].wchar, + uid_entry->io[UID_STATE_BACKGROUND].read_bytes, + uid_entry->io[UID_STATE_BACKGROUND].write_bytes, + uid_entry->io[UID_STATE_FOREGROUND].fsync, + uid_entry->io[UID_STATE_BACKGROUND].fsync); + + show_io_uid_tasks(m, uid_entry); + } + + rt_mutex_unlock(&uid_lock); + return 0; +} + +static int uid_io_open(struct inode *inode, struct file *file) +{ + return single_open(file, uid_io_show, PDE_DATA(inode)); +} + +static const struct file_operations uid_io_fops = { + .open = uid_io_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int uid_procstat_open(struct inode *inode, struct file *file) +{ + return single_open(file, NULL, NULL); +} + +static ssize_t uid_procstat_write(struct file *file, + const char __user *buffer, size_t count, loff_t *ppos) +{ + struct uid_entry *uid_entry; + uid_t uid; + int argc, state; + char input[128]; + + if (count >= sizeof(input)) + return -EINVAL; + + if (copy_from_user(input, buffer, count)) + return -EFAULT; + + input[count] = '\0'; + + argc = sscanf(input, "%u %d", &uid, &state); + if (argc != 2) + return -EINVAL; + + if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND) + return -EINVAL; + + rt_mutex_lock(&uid_lock); + + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + rt_mutex_unlock(&uid_lock); + return -EINVAL; + } + + if (uid_entry->state == state) { + rt_mutex_unlock(&uid_lock); + return count; + } + + update_io_stats_uid_locked(uid_entry); + + uid_entry->state = state; + + rt_mutex_unlock(&uid_lock); + + return count; +} + +static const struct file_operations uid_procstat_fops = { + .open = uid_procstat_open, + .release = single_release, + .write = uid_procstat_write, +}; + +static int process_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + struct task_struct *task = v; + struct uid_entry *uid_entry; + u64 utime, stime; + uid_t uid; + + if (!task) + return NOTIFY_OK; + + rt_mutex_lock(&uid_lock); + uid = from_kuid_munged(current_user_ns(), task_uid(task)); + uid_entry = find_or_register_uid(uid); + if (!uid_entry) { + pr_err("%s: failed to find uid %d\n", __func__, uid); + goto exit; + } + + task_cputime_adjusted(task, &utime, &stime); + uid_entry->utime += utime; + uid_entry->stime += 
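/*
 * Illustrative usage sketch, not part of the patch: uid_procstat_write()
 * above expects "<uid> <state>"; switching state first flushes the
 * pending I/O delta into the bucket of the state being left. The
 * numeric state values are assumed here to be UID_STATE_FOREGROUND = 0
 * and UID_STATE_BACKGROUND = 1, per the enum earlier in this driver.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[32];
        int n = snprintf(buf, sizeof(buf), "%u %d", 10012u, 1 /* background */);
        int fd = open("/proc/uid_procstat/set", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, buf, n) < 0)
                perror("write");
        close(fd);
        return 0;
}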
stime; + + add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS); + +exit: + rt_mutex_unlock(&uid_lock); + return NOTIFY_OK; +} + +static struct notifier_block process_notifier_block = { + .notifier_call = process_notifier, +}; + +static int __init proc_uid_sys_stats_init(void) +{ + hash_init(hash_table); + + cpu_parent = proc_mkdir("uid_cputime", NULL); + if (!cpu_parent) { + pr_err("%s: failed to create uid_cputime proc entry\n", + __func__); + goto err; + } + + proc_create_data("remove_uid_range", 0222, cpu_parent, + &uid_remove_fops, NULL); + proc_create_data("show_uid_stat", 0444, cpu_parent, + &uid_cputime_fops, NULL); + + io_parent = proc_mkdir("uid_io", NULL); + if (!io_parent) { + pr_err("%s: failed to create uid_io proc entry\n", + __func__); + goto err; + } + + proc_create_data("stats", 0444, io_parent, + &uid_io_fops, NULL); + + proc_parent = proc_mkdir("uid_procstat", NULL); + if (!proc_parent) { + pr_err("%s: failed to create uid_procstat proc entry\n", + __func__); + goto err; + } + + proc_create_data("set", 0222, proc_parent, + &uid_procstat_fops, NULL); + + profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block); + + return 0; + +err: + remove_proc_subtree("uid_cputime", NULL); + remove_proc_subtree("uid_io", NULL); + remove_proc_subtree("uid_procstat", NULL); + return -ENOMEM; +} + +early_initcall(proc_uid_sys_stats_init); diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index 42e89060cd41..96c7ff63178c 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -36,6 +36,7 @@ config PWRSEQ_SIMPLE config MMC_BLOCK tristate "MMC block device driver" depends on BLOCK + select RPMB default y help Say Y here to enable the MMC block device driver support. diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index a0b9102c4c6e..354fdb883d3c 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -409,8 +410,8 @@ static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr, return 0; } -static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, - u32 retries_max) +static int ioctl_mmc_blk_rpmb_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) { int err; u32 retry_count = 0; @@ -612,7 +613,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, * Ensure RPMB command has completed by polling CMD13 * "Send Status". */ - err = ioctl_rpmb_card_status_poll(card, &status, 5); + err = ioctl_mmc_blk_rpmb_status_poll(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", @@ -1116,6 +1117,217 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK); } +static int mmc_blk_rpmb_process(struct mmc_blk_data *md, + struct mmc_blk_ioc_data *idata[], + u64 num_of_cmds) +{ + struct mmc_card *card; + struct mmc_queue *mq; + int err = 0; + struct request *req; + int op_mode; + + card = md->queue.card; + if (IS_ERR(card)) { + err = PTR_ERR(card); + goto cmd_err; + } + + /* + * Dispatch the ioctl()s into the block request queue. + */ + mq = &md->queue; + op_mode = idata[0]->ic.write_flag ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN; + req = blk_get_request(mq->queue, op_mode, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto cmd_err; + } + + req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL_RPMB; + req_to_mmc_queue_req(req)->drv_op_data = idata; + req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; + + blk_execute_rq(mq->queue, NULL, req, 0); + + err = req_to_mmc_queue_req(req)->drv_op_result; + + blk_put_request(req); + +cmd_err: + return err; +} + +static +struct mmc_blk_ioc_data *mmc_blk_rpmb_cmd_to_ioc_data(struct rpmb_cmd *cmd) +{ + struct mmc_blk_ioc_data *idata; + int err; + + idata = kzalloc(sizeof(*idata), GFP_KERNEL); + if (!idata) { + err = -ENOMEM; + goto out; + } + + if (cmd->flags & RPMB_F_WRITE) { + idata->ic.opcode = MMC_WRITE_MULTIPLE_BLOCK; + idata->ic.write_flag = 1; + if (cmd->flags & RPMB_F_REL_WRITE) + idata->ic.write_flag |= 1 << 31; + } else { + idata->ic.opcode = MMC_READ_MULTIPLE_BLOCK; + } + + /* nframes == 0 in case there is only meta data in the frame */ + idata->ic.blocks = cmd->nframes ?: 1; + idata->ic.blksz = 512; + + idata->buf_bytes = (u64)idata->ic.blksz * idata->ic.blocks; + if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { + err = -EOVERFLOW; + goto out; + } + + idata->buf = (unsigned char *)cmd->frames; + + return idata; +out: + kfree(idata); + return ERR_PTR(err); +} + +static int mmc_blk_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, + u32 num_of_cmds) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + struct mmc_blk_ioc_data **idata; + int err = 0; + u32 i; + + if (!rpmb) + return -ENODEV; + + idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL); + if (!idata) + return -ENOMEM; + + /* Take the references before any path that can reach cmd_err, + * so the put/release below is balanced on all paths. + */ + get_device(&rpmb->dev); + mmc_blk_get(rpmb->md->disk); + + for (i = 0; i < num_of_cmds; i++) { + idata[i] = mmc_blk_rpmb_cmd_to_ioc_data(&cmds[i]); + if (IS_ERR(idata[i])) { + err = PTR_ERR(idata[i]); + num_of_cmds = i; + goto cmd_err; + } + idata[i]->rpmb = rpmb; + } + + err = mmc_blk_rpmb_process(rpmb->md, idata, num_of_cmds); + +cmd_err: + for (i = 0; i < num_of_cmds; i++) + kfree(idata[i]); + + kfree(idata); + + put_device(&rpmb->dev); + mmc_blk_put(rpmb->md); + + return err; +} + +static int mmc_blk_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + struct mmc_card *card; + + card = rpmb->md->queue.card; + return card->ext_csd.raw_rpmb_size_mult; +} + +static struct rpmb_ops mmc_rpmb_dev_ops = { + .cmd_seq = mmc_blk_rpmb_cmd_seq, + .get_capacity = mmc_blk_rpmb_get_capacity, + .type = RPMB_TYPE_EMMC, + .auth_method = RPMB_HMAC_ALGO_SHA_256, +}; + +static void mmc_blk_rpmb_unset_dev_id(struct rpmb_ops *ops) +{ + kfree(ops->dev_id); + ops->dev_id = NULL; +} + +static int mmc_blk_rpmb_set_dev_id(struct rpmb_ops *ops, struct mmc_card *card) +{ + char *id; + + id = kmalloc(sizeof(card->raw_cid), GFP_KERNEL); + if (!id) + return -ENOMEM; + + memcpy(id, card->raw_cid, sizeof(card->raw_cid)); + ops->dev_id = id; + ops->dev_id_len = sizeof(card->raw_cid); + + return 0; +} + +static void mmc_blk_rpmb_set_cap(struct rpmb_ops *ops, + struct mmc_card *card) +{ + u16 rel_wr_cnt; + + /* RPMB blocks are written in half sectors hence '* 2' */ + rel_wr_cnt = card->ext_csd.rel_sectors * 2; + /* eMMC 5.1 may support RPMB 8K (32) frames */ + if (card->ext_csd.rev >= 8) { + if (card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) + rel_wr_cnt = 32; + else + rel_wr_cnt = 2; + } + ops->wr_cnt_max = rel_wr_cnt; + ops->rd_cnt_max = card->host->max_blk_count; + ops->block_size = 1; /* 256B */ +} + 
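/*
 * Illustrative sketch, not part of the patch: a worked example of the
 * wr_cnt_max computation in mmc_blk_rpmb_set_cap() above. RPMB frames
 * carry 256-byte half sectors, hence the doubling of the reliable
 * write sector count; eMMC 5.1 (ext_csd.rev >= 8) instead advertises
 * either 32 frames (8K RPMB writes) or the baseline 2, depending on
 * EXT_CSD_WR_REL_PARAM_EN.
 */
#include <stdio.h>

static unsigned int rpmb_wr_cnt_max(unsigned int rel_sectors,
                                    unsigned int ext_csd_rev,
                                    int wr_rel_param_en)
{
        unsigned int cnt = rel_sectors * 2;     /* sectors -> 256B half sectors */

        if (ext_csd_rev >= 8)                   /* eMMC 5.1 or later */
                cnt = wr_rel_param_en ? 32 : 2;
        return cnt;
}

int main(void)
{
        printf("eMMC 5.0, rel_sectors=1: %u frames\n", rpmb_wr_cnt_max(1, 7, 1));
        printf("eMMC 5.1, 8K writes:     %u frames\n", rpmb_wr_cnt_max(1, 8, 1));
        return 0;
}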
+static void mmc_blk_rpmb_add(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct rpmb_dev *rdev; + struct mmc_rpmb_data *rpmb; + u8 i = 0; + + mmc_blk_rpmb_set_dev_id(&mmc_rpmb_dev_ops, card); + mmc_blk_rpmb_set_cap(&mmc_rpmb_dev_ops, card); + + /* Add RPMB partitions */ + list_for_each_entry(rpmb, &md->rpmbs, node) { + rdev = rpmb_dev_register(&rpmb->dev, i++, &mmc_rpmb_dev_ops); + if (IS_ERR(rdev)) { + pr_warn("%s: cannot register to rpmb %ld\n", + dev_name(&rpmb->dev), PTR_ERR(rdev)); + } + } +} + +static void mmc_blk_rpmb_remove(struct mmc_card *card) +{ + struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + struct mmc_rpmb_data *rpmb; + u8 i = 0; + + list_for_each_entry(rpmb, &md->rpmbs, node) + rpmb_dev_unregister_by_device(&rpmb->dev, i++); + + mmc_blk_rpmb_unset_dev_id(&mmc_rpmb_dev_ops); +} + static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->blkdata; @@ -2936,6 +3148,9 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } + /* Add rpmb layer */ + mmc_blk_rpmb_add(card); + /* Add two debugfs entries */ mmc_blk_add_debugfs(card, md); @@ -2964,6 +3179,7 @@ static void mmc_blk_remove(struct mmc_card *card) struct mmc_blk_data *md = dev_get_drvdata(&card->dev); mmc_blk_remove_debugfs(card, md); + mmc_blk_rpmb_remove(card); mmc_blk_remove_parts(card, md); pm_runtime_get_sync(&card->dev); if (md->part_curr != md->part_type) { diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index f57f5de54206..f3dc49fa078d 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -441,7 +441,8 @@ int mmc_add_host(struct mmc_host *host) #endif mmc_start_host(host); - mmc_register_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_register_pm_notifier(host); return 0; } @@ -458,7 +459,8 @@ EXPORT_SYMBOL(mmc_add_host); */ void mmc_remove_host(struct mmc_host *host) { - mmc_unregister_pm_notifier(host); + if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY)) + mmc_unregister_pm_notifier(host); mmc_stop_host(host); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index d40744bbafa9..d8c119e50644 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -15,6 +15,7 @@ #include #include +#include "host.h" #include "sdio_ops.h" #include "core.h" #include "card.h" @@ -725,3 +726,15 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags) return 0; } EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags); + +void sdio_retune_hold_now(struct sdio_func *func) +{ + mmc_retune_hold_now(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_hold_now); + +void sdio_retune_release(struct sdio_func *func) +{ + mmc_retune_release(func->card->host); +} +EXPORT_SYMBOL_GPL(sdio_retune_release); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 7bfd366d970d..84644a102344 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -738,7 +738,8 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES, slot->host->mmc_host_ops.hs400_enhanced_strobe = intel_hs400_enhanced_strobe; - slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; + if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_CNP_EMMC) + slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } return ret; @@ -1502,6 +1503,10 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, 
ICP_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, ICPN_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, ICPH_SD, intel_byt_sd), + SDHCI_PCI_DEVICE(INTEL, EHL_EMMC, intel_glk_emmc), + SDHCI_PCI_DEVICE(INTEL, EHL_SD, intel_byt_sd), SDHCI_PCI_DEVICE(O2, 8120, o2), SDHCI_PCI_DEVICE(O2, 8220, o2), SDHCI_PCI_DEVICE(O2, 8221, o2), diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 2ef0bdca9197..66322cefe390 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -50,6 +50,10 @@ #define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375 #define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4 #define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8 +#define PCI_DEVICE_ID_INTEL_ICPN_SD 0x38f8 +#define PCI_DEVICE_ID_INTEL_ICPH_SD 0x3df8 +#define PCI_DEVICE_ID_INTEL_EHL_EMMC 0x4b47 +#define PCI_DEVICE_ID_INTEL_EHL_SD 0x4b48 #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000 #define PCI_DEVICE_ID_VIA_95D0 0x95d0 diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 50e9cc19023a..467b51b1570b 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2840,6 +2840,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, int ret; bool do_notify = false; +#ifdef CONFIG_ANDROID_PARANOID_NETWORK + if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) { + return -EPERM; + } +#endif + if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) { if (copy_from_user(&ifr, argp, ifreq_len)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index a907d7b065fa..1ac89386aabe 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -668,10 +668,16 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) brcmf_dbg(TRACE, "Enter: on=%d\n", on); wr_val = (on << SBSDIO_FUNC1_SLEEPCSR_KSO_SHIFT); + + /* Cannot re-tune if device is asleep */ + if (on) + sdio_retune_hold_now(bus->sdiodev->func1); + /* 1st KSO write goes to AOS wake up core if device is asleep */ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); if (on) { + sdio_retune_release(bus->sdiodev->func1); /* device WAKEUP through KSO: * write bit 0 & read back until * both bits 0 (kso bit) & 1 (dev on status) are set diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index 58898b99d3f7..145e10a8be55 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -549,6 +549,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) { int ret; + /* Disable filtering */ + ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0); + if (ret < 0) + return ret; + ret = wl1271_acx_ap_max_tx_retry(wl, wlvif); if (ret < 0) return ret; diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c index d8d70dd830b0..a3bc8f0e56ab 100644 --- a/drivers/nfc/fdp/i2c.c +++ b/drivers/nfc/fdp/i2c.c @@ -176,6 +176,16 @@ static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) /* Packet that contains a length */ if (tmp[0] == 0 && tmp[1] == 0) { phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3; + /* + * Ensure next_read_size does not exceed sizeof(tmp) + * for reading that many bytes during next iteration + */ + if (phy->next_read_size > FDP_NCI_I2C_MAX_PAYLOAD) { + dev_dbg(&client->dev, "%s: corrupted packet\n", + __func__); + phy->next_read_size = 5; + goto flush; + } } else { 
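/*
 * Illustrative sketch, not part of the patch: the bounds check added to
 * fdp_nci_i2c_read() above. The device-controlled 16-bit length in
 * bytes 2-3 of the header decides how many bytes the next iteration
 * reads, so it must be clamped to the receive buffer before it is
 * reused. The MAX_PAYLOAD value below is an assumption for the sketch,
 * not the driver's actual constant.
 */
#include <stdio.h>

#define MAX_PAYLOAD     261     /* assumed buffer size */

static unsigned int next_read_size(const unsigned char *hdr)
{
        unsigned int size = (hdr[2] << 8) + hdr[3] + 3;

        if (size > MAX_PAYLOAD)
                size = 5;       /* corrupted packet: fall back and flush */
        return size;
}

int main(void)
{
        const unsigned char ok[]  = { 0, 0, 0x00, 0x20 };       /* 0x20 + 3 = 35 */
        const unsigned char bad[] = { 0, 0, 0xff, 0xff };       /* would be 65538 */

        printf("%u %u\n", next_read_size(ok), next_read_size(bad));
        return 0;
}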
phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; diff --git a/drivers/nfc/st21nfca/dep.c b/drivers/nfc/st21nfca/dep.c index fd08be2917e6..3420c5104c94 100644 --- a/drivers/nfc/st21nfca/dep.c +++ b/drivers/nfc/st21nfca/dep.c @@ -217,7 +217,8 @@ static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, atr_req = (struct st21nfca_atr_req *)skb->data; - if (atr_req->length < sizeof(struct st21nfca_atr_req)) { + if (atr_req->length < sizeof(struct st21nfca_atr_req) || + atr_req->length > skb->len) { r = -EPROTO; goto exit; } diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 4bed9e842db3..acdce231e227 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -322,23 +322,33 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, * AID 81 5 to 16 * PARAMETERS 82 0 to 255 */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && + if (skb->len < NFC_MIN_AID_LENGTH + 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; + /* + * Buffer should have enough space for at least + * two tag fields + two length fields + aid_len (skb->data[1]) + */ + if (skb->len < skb->data[1] + 4) + return -EPROTO; + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); transaction->aid_len = skb->data[1]; memcpy(transaction->aid, &skb->data[2], transaction->aid_len); + transaction->params_len = skb->data[transaction->aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ + /* Check next byte is PARAMETERS tag (82) and the length field */ if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + NFC_EVT_TRANSACTION_PARAMS_TAG || + skb->len < transaction->aid_len + transaction->params_len + 4) { + devm_kfree(dev, transaction); return -EPROTO; + } - transaction->params_len = skb->data[transaction->aid_len + 3]; memcpy(transaction->params, skb->data + transaction->aid_len + 4, transaction->params_len); diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 88a8b5916624..a0027cebf2db 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -5,6 +5,7 @@ config BLK_DEV_NVME tristate "NVM Express block device" depends on PCI && BLOCK select NVME_CORE + select RPMB ---help--- The NVM Express driver is for solid state drives directly connected to the PCI or PCI Express bus. If you know you diff --git a/drivers/nvme/host/Makefile b/drivers/nvme/host/Makefile index aea459c65ae1..99f99e87b82b 100644 --- a/drivers/nvme/host/Makefile +++ b/drivers/nvme/host/Makefile @@ -15,6 +15,7 @@ nvme-core-$(CONFIG_NVM) += lightnvm.o nvme-core-$(CONFIG_FAULT_INJECTION_DEBUG_FS) += fault_inject.o nvme-y += pci.o +nvme-y += rpmb.o nvme-fabrics-y += fabrics.o diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index dd8ec1dd9219..43aa98f729d6 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -11,7 +11,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
*/ - #include #include #include @@ -1659,25 +1658,57 @@ static const struct pr_ops nvme_pr_ops = { .pr_clear = nvme_pr_clear, }; -#ifdef CONFIG_BLK_SED_OPAL -int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, - bool send) +int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) { - struct nvme_ctrl *ctrl = data; struct nvme_command cmd; + dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + memset(&cmd, 0, sizeof(cmd)); - if (send) - cmd.common.opcode = nvme_admin_security_send; - else - cmd.common.opcode = nvme_admin_security_recv; + cmd.common.opcode = nvme_admin_security_send; + cmd.common.nsid = 0; + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); + cmd.common.cdw10[1] = cpu_to_le32(len); + + return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, + ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); +} +EXPORT_SYMBOL_GPL(nvme_sec_send); + +int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len) +{ + struct nvme_command cmd; + + dev_dbg(ctrl->device, "%s target = %hhu SPSP = %hu SECP = %hhX len=%zd\n", + __func__, nssf, spsp, secp, len); + + memset(&cmd, 0, sizeof(cmd)); + cmd.common.opcode = nvme_admin_security_recv; cmd.common.nsid = 0; - cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8); + cmd.common.cdw10[0] = + cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8 | nssf); cmd.common.cdw10[1] = cpu_to_le32(len); return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len, ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0); } +EXPORT_SYMBOL_GPL(nvme_sec_recv); + +#ifdef CONFIG_BLK_SED_OPAL +int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len, + bool send) +{ + struct nvme_ctrl *ctrl = data; + + if (send) + return nvme_sec_send(ctrl, 0, spsp, secp, buffer, len); + else + return nvme_sec_recv(ctrl, 0, spsp, secp, buffer, len); +} EXPORT_SYMBOL_GPL(nvme_sec_submit); #endif /* CONFIG_BLK_SED_OPAL */ @@ -2468,7 +2499,10 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } + ctrl->rpmbs = le32_to_cpu(id->rpmbs); + ret = nvme_mpath_init(ctrl, id); + kfree(id); if (ret < 0) diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index bb4a2003c097..a596e9e84d1e 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -167,6 +168,7 @@ struct nvme_ctrl { struct list_head subsys_entry; struct opal_dev *opal_dev; + struct rpmb_dev *rdev; char name[12]; u16 cntlid; @@ -193,6 +195,7 @@ struct nvme_ctrl { u8 apsta; u32 oaes; u32 aen_result; + u32 rpmbs; unsigned int shutdown_timeout; unsigned int kato; bool subsystem; @@ -420,6 +423,12 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl); void nvme_stop_ctrl(struct nvme_ctrl *ctrl); void nvme_put_ctrl(struct nvme_ctrl *ctrl); int nvme_init_identify(struct nvme_ctrl *ctrl); +int nvme_sec_send(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len); +int nvme_sec_recv(struct nvme_ctrl *ctrl, u8 nssf, u16 spsp, u8 secp, + void *buffer, size_t len); +int nvme_init_rpmb(struct nvme_ctrl *ctrl); +void nvme_exit_rpmb(struct nvme_ctrl *ctrl); void nvme_remove_namespaces(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d668682f91df..6c19fa7525e3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2289,6 
+2289,10 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; + result = nvme_init_rpmb(&dev->ctrl); + if (result < 0) + goto out; + if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { if (!dev->ctrl.opal_dev) dev->ctrl.opal_dev = diff --git a/drivers/nvme/host/rpmb.c b/drivers/nvme/host/rpmb.c new file mode 100644 index 000000000000..34e807bfc4f9 --- /dev/null +++ b/drivers/nvme/host/rpmb.c @@ -0,0 +1,119 @@ +// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 +/* + * Copyright(c) 2018 Intel Corporation. All rights reserved. + */ +#include +#include "nvme.h" +#define NVME_SECP_RPMB 0xEA /* Security Protocol EAh is assigned + * for NVMe use (refer to ACS-4) + */ +#define NVME_SPSP_RPMB 0x0001 /* RPMB Target */ +static int nvme_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + struct nvme_ctrl *ctrl; + struct rpmb_cmd *cmd; + u32 size; + int ret; + int i; + + ctrl = dev_get_drvdata(dev); + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + size = rpmb_ioc_frames_len_nvme(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = nvme_sec_send(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + else + ret = nvme_sec_recv(ctrl, target, + NVME_SPSP_RPMB, NVME_SECP_RPMB, + cmd->frames, size); + } + + return ret; +} + +static int nvme_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct nvme_ctrl *ctrl; + + ctrl = dev_get_drvdata(dev); + + return ((ctrl->rpmbs >> 16) & 0xFF) + 1; +} + +static struct rpmb_ops nvme_rpmb_dev_ops = { + .cmd_seq = nvme_rpmb_cmd_seq, + .get_capacity = nvme_rpmb_get_capacity, + .type = RPMB_TYPE_NVME, +}; + +static void nvme_rpmb_set_cap(struct nvme_ctrl *ctrl, + struct rpmb_ops *ops) +{ + ops->wr_cnt_max = ((ctrl->rpmbs >> 24) & 0xFF) + 1; + ops->rd_cnt_max = ops->wr_cnt_max; + ops->block_size = 2; /* 1 sector == 2 half sectors */ + ops->auth_method = (ctrl->rpmbs >> 3) & 0x3; +} + +static void nvme_rpmb_add(struct nvme_ctrl *ctrl) +{ + struct rpmb_dev *rdev; + int ndevs = ctrl->rpmbs & 0x7; + int i; + + nvme_rpmb_set_cap(ctrl, &nvme_rpmb_dev_ops); + + /* Add RPMB partitions */ + for (i = 0; i < ndevs; i++) { + rdev = rpmb_dev_register(ctrl->device, i, &nvme_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(ctrl->device, "%s: cannot register to rpmb %ld\n", + dev_name(ctrl->device), PTR_ERR(rdev)); + /* don't dereference an ERR_PTR below */ + continue; + } + dev_set_drvdata(&rdev->dev, ctrl); + } +} + +static void nvme_rpmb_remove(struct nvme_ctrl *ctrl) +{ + int ndevs = ctrl->rpmbs & 0x7; + int i; + + /* FIXME: target */ + for (i = 0; i < ndevs; i++) + rpmb_dev_unregister_by_device(ctrl->device, i); +} + +int nvme_init_rpmb(struct nvme_ctrl *ctrl) +{ + dev_err(ctrl->device, "RPMBS %X\n", ctrl->rpmbs); + + if ((ctrl->rpmbs & 0x7) == 0x0) { + dev_err(ctrl->device, "RPMBS No partitions\n"); + return 0; + } + + dev_err(ctrl->device, "RPMBS Number of partitions %d\n", + ctrl->rpmbs & 0x7); + dev_err(ctrl->device, "RPMBS Authentication Method: %d\n", + (ctrl->rpmbs >> 3) & 0x3); + dev_err(ctrl->device, "RPMBS Total Size: %d %dK\n", + (ctrl->rpmbs >> 16) & 0xFF, + (((ctrl->rpmbs >> 16) & 0xFF) + 1) * 128); + dev_err(ctrl->device, "RPMBS Access Size: %d %dB\n", + (ctrl->rpmbs >> 24) & 0xFF, + (((ctrl->rpmbs >> 24) & 0xFF) + 1) * 512); + + nvme_rpmb_add(ctrl); + + return 0; +} + +void nvme_exit_rpmb(struct nvme_ctrl *ctrl) +{ + nvme_rpmb_remove(ctrl); +} diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 800ad252cf9c..2aa4261d3e8f 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1072,42 +1072,66 @@ int __init 
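/*
 * Illustrative sketch, not part of the patch: decoding the NVMe RPMBS
 * field the way drivers/nvme/host/rpmb.c above does. Bits 2:0 give the
 * number of RPMB targets, bits 4:3 the authentication method, bits
 * 23:16 the total size in 128KiB units minus one, and bits 31:24 the
 * access size in 512B units minus one. The register value is made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t rpmbs = 0x01030001;    /* sample RPMBS value */

        printf("targets:     %u\n", rpmbs & 0x7);
        printf("auth method: %u\n", (rpmbs >> 3) & 0x3);
        printf("total size:  %u KiB\n", (((rpmbs >> 16) & 0xff) + 1) * 128);
        printf("access size: %u B\n", (((rpmbs >> 24) & 0xff) + 1) * 512);
        return 0;
}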
early_init_dt_scan_memory(unsigned long node, const char *uname, return 0; } +/* + * Convert configs to something easy to use in C code + */ +#if defined(CONFIG_CMDLINE_FORCE) +static const int overwrite_incoming_cmdline = 1; +static const int read_dt_cmdline; +static const int concat_cmdline; +#elif defined(CONFIG_CMDLINE_EXTEND) +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline = 1; +#else /* CMDLINE_FROM_BOOTLOADER */ +static const int overwrite_incoming_cmdline; +static const int read_dt_cmdline = 1; +static const int concat_cmdline; +#endif + +#ifdef CONFIG_CMDLINE +static const char *config_cmdline = CONFIG_CMDLINE; +#else +static const char *config_cmdline = ""; +#endif + int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, int depth, void *data) { - int l; - const char *p; + int l = 0; + const char *p = NULL; + char *cmdline = data; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); - if (depth != 1 || !data || + if (depth != 1 || !cmdline || (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) return 0; early_init_dt_check_for_initrd(node); - /* Retrieve command line */ - p = of_get_flat_dt_prop(node, "bootargs", &l); - if (p != NULL && l > 0) - strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE)); - - /* - * CONFIG_CMDLINE is meant to be a default in case nothing else - * managed to set the command line, unless CONFIG_CMDLINE_FORCE - * is set in which case we override whatever was found earlier. - */ -#ifdef CONFIG_CMDLINE -#if defined(CONFIG_CMDLINE_EXTEND) - strlcat(data, " ", COMMAND_LINE_SIZE); - strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#elif defined(CONFIG_CMDLINE_FORCE) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#else - /* No arguments from boot loader, use kernel's cmdl*/ - if (!((char *)data)[0]) - strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE); -#endif -#endif /* CONFIG_CMDLINE */ + /* Put CONFIG_CMDLINE in if forced or if data had nothing in it to start */ + if (overwrite_incoming_cmdline || !cmdline[0]) + strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE); + + /* Retrieve command line unless forcing */ + if (read_dt_cmdline) + p = of_get_flat_dt_prop(node, "bootargs", &l); + + if (p != NULL && l > 0) { + if (concat_cmdline) { + int cmdline_len; + int copy_len; + strlcat(cmdline, " ", COMMAND_LINE_SIZE); + cmdline_len = strlen(cmdline); + copy_len = COMMAND_LINE_SIZE - cmdline_len - 1; + copy_len = min((int)l, copy_len); + strncpy(cmdline + cmdline_len, p, copy_len); + cmdline[cmdline_len + copy_len] = '\0'; + } else { + strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE)); + } + } pr_debug("Command line is: %s\n", (char*)data); diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 7af0ddec936b..34157eac789d 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -777,3 +777,44 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) return of_node_get(opp->np); } EXPORT_SYMBOL_GPL(dev_pm_opp_get_of_node); + +int of_dev_pm_opp_get_cpu_power(unsigned long *mW, unsigned long *KHz, int cpu) +{ + unsigned long mV, Hz, MHz; + struct device *cpu_dev; + struct dev_pm_opp *opp; + struct device_node *np; + u32 cap; + u64 tmp; + + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) + return -ENODEV; + + np = of_node_get(cpu_dev->of_node); + if (!np) + return -EINVAL; + + if (of_property_read_u32(np, "dynamic-power-coefficient", &cap)) + return -EINVAL; + + Hz = *KHz * 1000; + opp = dev_pm_opp_find_freq_ceil(cpu_dev, &Hz); + if 
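/*
 * Illustrative sketch, not part of the patch: the three command-line
 * policies encoded by the constants above, modeled in userspace.
 * CMDLINE_FORCE keeps only CONFIG_CMDLINE, CMDLINE_EXTEND appends the
 * DT bootargs to it, and CMDLINE_FROM_BOOTLOADER prefers the bootargs
 * and falls back to CONFIG_CMDLINE only when nothing else was set.
 * (Truncation handling is simplified relative to the kernel code.)
 */
#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 128

static void build_cmdline(char *out, const char *incoming,
                          const char *config, const char *dt,
                          int overwrite, int read_dt, int concat)
{
        snprintf(out, COMMAND_LINE_SIZE, "%s", incoming);
        if (overwrite || !out[0])
                snprintf(out, COMMAND_LINE_SIZE, "%s", config);
        if (read_dt && dt && dt[0]) {
                if (concat) {
                        size_t len = strlen(out);

                        snprintf(out + len, COMMAND_LINE_SIZE - len, " %s", dt);
                } else {
                        snprintf(out, COMMAND_LINE_SIZE, "%s", dt);
                }
        }
}

int main(void)
{
        char out[COMMAND_LINE_SIZE];

        /* CMDLINE_EXTEND: overwrite=0, read_dt=1, concat=1 */
        build_cmdline(out, "", "loglevel=7", "console=ttyS0", 0, 1, 1);
        printf("%s\n", out);    /* "loglevel=7 console=ttyS0" */
        return 0;
}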
(IS_ERR(opp)) + return -EINVAL; + + mV = dev_pm_opp_get_voltage(opp) / 1000; + dev_pm_opp_put(opp); + if (!mV) + return -EINVAL; + + MHz = Hz / 1000000; + tmp = (u64)cap * mV * mV * MHz; + do_div(tmp, 1000000000); + + *mW = (unsigned long)tmp; + *KHz = Hz / 1000; + + return 0; +} +EXPORT_SYMBOL_GPL(of_dev_pm_opp_get_cpu_power); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index f2ef896464b3..66b5001abe96 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -190,7 +190,7 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); } -static void __iomem *pci_msix_desc_addr(struct msi_desc *desc) +void __iomem *pci_msix_desc_addr(struct msi_desc *desc) { return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; @@ -294,7 +294,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) } } -void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) +void native_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6e0d1528d471..d3fe892641d0 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -168,6 +168,8 @@ static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); } +void __iomem *pci_msix_desc_addr(struct msi_desc *desc); + void pci_realloc_get_opt(char *); static inline int pci_no_d1d2(struct pci_dev *dev) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 201f9e5ff55c..b9dda363aea6 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -42,6 +42,70 @@ struct pci_domain_busn_res { int domain_nr; }; +#define PCI_IGNORE_MAX 8 + +static u16 devices_ignore_table[PCI_IGNORE_MAX]; +static int devices_ignore_cnt; + +static void parse_ignore_device(char *bdf_str) +{ + int fields; + unsigned int bus; + unsigned int dev; + unsigned int func; + + if (devices_ignore_cnt >= PCI_IGNORE_MAX - 1) + return; + + fields = sscanf(bdf_str, "%x:%x:%x", &bus, &dev, &func); + if (fields != 3) + return; + + devices_ignore_table[devices_ignore_cnt++] = + PCI_DEVID(bus, PCI_DEVFN(dev, func)); +} + +static int __init pci_deivces_ignore(char *str) +{ + int len; + char *start, *end; + char bdf[16]; + + devices_ignore_cnt = 0; + + while ((start = strchr(str, '('))) { + + end = strchr(start, ')'); + if (end == NULL) + break; + + len = end - start - 1; + if (len >= 16) /*invalid string*/ + break; + + memcpy((void *)bdf, (void *)(start+1), len); + bdf[len] = '\0'; + parse_ignore_device(bdf); + str = end + 1; + } + + return 1; +} +__setup("pci_devices_ignore=", pci_deivces_ignore); + +static bool device_on_ignore_list(int bus, int dev, int func) +{ + int i; + + for (i = 0; i < devices_ignore_cnt; i++) + if ((PCI_BUS_NUM(devices_ignore_table[i]) == bus) && + (PCI_SLOT(devices_ignore_table[i]) == dev) && + (PCI_FUNC(devices_ignore_table[i]) == func)) + return true; + + return false; +} + static struct resource *get_pci_domain_busn_res(int domain_nr) { struct pci_domain_busn_res *r; @@ -2442,6 +2506,11 @@ struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) return dev; } + if (device_on_ignore_list(bus->number, + PCI_SLOT(devfn), + PCI_FUNC(devfn))) + return NULL; + dev = pci_scan_device(bus, devfn); if (!dev) return NULL; diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 0c1aa6c314f5..4d0dfd030b07 100644 --- a/drivers/platform/x86/Kconfig 
+++ b/drivers/platform/x86/Kconfig @@ -1229,6 +1229,23 @@ config I2C_MULTI_INSTANTIATE To compile this driver as a module, choose M here: the module will be called i2c-multi-instantiate. +config INTEL_PSTORE_PRAM + tristate "Intel pstore RAM backend driver (PRAM BIOS feature)" + depends on ACPI + depends on PSTORE_RAM + ---help--- + This driver provides a RAM backend for pstore, managed by the BIOS + as the PRAM (Persisted RAM buffer) debug feature. + + The PRAM BIOS feature is configurable through BIOS setup or the + PRAM_Conf EFI variable (GUID ecb54cd9-e5ae-4fdc-a971-e877756068f7). + Accepted values for the variable are 0, 1, 2 and 3 as an ASCII + string; they configure the PRAM feature respectively as + Disabled, 4 MB, 16 MB and 64 MB. + + It is safe to say Y; the driver will not bind if your BIOS doesn't + support this feature. + endif # X86_PLATFORM_DEVICES config PMC_ATOM diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e6d1becf81ce..91fda053d781 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -92,3 +92,4 @@ obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_INTEL_TURBO_MAX_3) += intel_turbo_max_3.o obj-$(CONFIG_INTEL_CHTDC_TI_PWRBTN) += intel_chtdc_ti_pwrbtn.o obj-$(CONFIG_I2C_MULTI_INSTANTIATE) += i2c-multi-instantiate.o +obj-$(CONFIG_INTEL_PSTORE_PRAM) += intel_pstore_pram.o diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index e7edc8c63936..ffe0cac68694 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -606,15 +606,28 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } +static ssize_t intel_ssrambase_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + if (ipcdev.telem_punit_ssram_base > TELEM_PUNIT_SSRAM_OFFSET) + return scnprintf(buf, 64, "%llx\n", + ipcdev.telem_punit_ssram_base - TELEM_PUNIT_SSRAM_OFFSET); + else + return scnprintf(buf, 64, "%x\n", 0); +} static DEVICE_ATTR(simplecmd, S_IWUSR, NULL, intel_pmc_ipc_simple_cmd_store); static DEVICE_ATTR(northpeak, S_IWUSR, NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(ssrambase, S_IRUGO, + intel_ssrambase_show, NULL); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, &dev_attr_simplecmd.attr, + &dev_attr_ssrambase.attr, NULL }; diff --git a/drivers/platform/x86/intel_pstore_pram.c b/drivers/platform/x86/intel_pstore_pram.c new file mode 100644 index 000000000000..8c8b1291545f --- /dev/null +++ b/drivers/platform/x86/intel_pstore_pram.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define SZ_4K 0x00001000 +#define SZ_2M 0x00200000 + +/* PRAM stands for 'Persisted RAM' from BIOS point of view */ +#define ACPI_SIG_PRAM "PRAM" + +/* + * The following parameters match those defined in fs/pstore/ram.c in + * order to keep compatibility between driver interfaces; please refer + * to it for implementation details. 
+ */ +static ulong pram_record_size = SZ_4K; +module_param_named(record_size, pram_record_size, ulong, 0400); +MODULE_PARM_DESC(record_size, "size of each dump done on oops/panic"); + +static ulong pram_console_size = SZ_2M; +module_param_named(console_size, pram_console_size, ulong, 0400); +MODULE_PARM_DESC(console_size, "size of kernel console log"); + +static ulong pram_ftrace_size = 2*SZ_4K; +module_param_named(ftrace_size, pram_ftrace_size, ulong, 0400); +MODULE_PARM_DESC(ftrace_size, "size of ftrace log"); + +static int pram_dump_oops = 1; +module_param_named(dump_oops, pram_dump_oops, int, 0600); +MODULE_PARM_DESC(dump_oops, + "set to 1 to dump oopses, 0 to only dump panics (default 1)"); + +static int pram_ecc; +module_param_named(ecc, pram_ecc, int, 0600); +MODULE_PARM_DESC(ecc, + "if non-zero, the option enables SW ECC support, provided by " + "fs/pstore/ram_core.c, and specifies the ECC buffer size in bytes " + "(1 is a special value, meaning 16 bytes of ECC)"); + +static struct ramoops_platform_data *pram_data; +static struct platform_device *pram_dev; + +struct acpi_table_pram { + struct acpi_table_header header; + u64 addr; + u32 size; +} __packed; + +static int register_pram_dev(unsigned long mem_address, + unsigned long mem_size) +{ + pram_data = kzalloc(sizeof(*pram_data), GFP_KERNEL); + if (!pram_data) { + pr_err("could not allocate pram_data\n"); + return -ENOMEM; + } + + pram_data->mem_address = mem_address; + pram_data->mem_size = mem_size; + pram_data->record_size = pram_record_size; + pram_data->console_size = pram_console_size; + pram_data->ftrace_size = pram_ftrace_size; + pram_data->dump_oops = pram_dump_oops; + /* + * For backwards compatibility with the previous + * fs/pstore/ram_core.c implementation, + * intel_pstore_pram.ecc=1 means 16 bytes of ECC. + */ + pram_data->ecc_info.ecc_size = pram_ecc == 1 ? 
16 : pram_ecc; + + pram_dev = platform_device_register_data(NULL, "ramoops", -1, + pram_data, sizeof(struct ramoops_platform_data)); + if (IS_ERR(pram_dev)) { + pr_err("could not create platform device: %ld\n", + PTR_ERR(pram_dev)); + kfree(pram_data); + return PTR_ERR(pram_dev); + } + + pr_info("registered pram device, addr=0x%lx, size=0x%lx\n", + (unsigned long)pram_data->mem_address, (unsigned long)pram_data->mem_size); + + return 0; +} + +static int __init intel_pram_init(void) +{ + acpi_status status; + struct acpi_table_pram *pramt; + + status = acpi_get_table(ACPI_SIG_PRAM, 0, + (struct acpi_table_header **)&pramt); + if (status == AE_NOT_FOUND) { + pr_debug("PRAM table not found\n"); + return -ENODEV; + } else if (ACPI_FAILURE(status)) { + const char *msg = acpi_format_exception(status); + pr_err("Failed to get PRAM table: %s\n", msg); + return -EINVAL; + } + + if (!pramt->addr || !pramt->size) { + pr_debug("PRAM: bad address (0x%llx) or size (0x%lx)\n", + (unsigned long long)pramt->addr, + (unsigned long)pramt->size); + return -ENODEV; + } + + return register_pram_dev(pramt->addr, pramt->size); +} +postcore_initcall(intel_pram_init); + +static void __exit intel_pram_exit(void) +{ + platform_device_unregister(pram_dev); + kfree(pram_data); +} +module_exit(intel_pram_exit); diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index 6170ed8b6854..4e18ba98987e 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -171,6 +171,9 @@ static ssize_t power_supply_show_property(struct device *dev, ret = sprintf(buf, "%s\n", power_supply_scope_text[value.intval]); break; + case POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT: + ret = sprintf(buf, "%lld\n", value.int64val); + break; case POWER_SUPPLY_PROP_MODEL_NAME ... 
POWER_SUPPLY_PROP_SERIAL_NUMBER: ret = sprintf(buf, "%s\n", value.strval); break; @@ -302,6 +305,12 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(precharge_current), POWER_SUPPLY_ATTR(charge_term_current), POWER_SUPPLY_ATTR(calibrate), + /* Local extensions */ + POWER_SUPPLY_ATTR(usb_hc), + POWER_SUPPLY_ATTR(usb_otg), + POWER_SUPPLY_ATTR(charge_enabled), + /* Local extensions of type int64_t */ + POWER_SUPPLY_ATTR(charge_counter_ext), /* Properties of type `const char *' */ POWER_SUPPLY_ATTR(model_name), POWER_SUPPLY_ATTR(manufacturer), diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index e09fe6ab3572..a3c1982b213a 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -38,6 +38,7 @@ config SCSI_UFSHCD select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS + select RPMB ---help--- This selects the support for UFS devices in Linux, say Y and make sure that you know the name of your UFS host adapter (the card diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index 8d9332bb7d0c..0b221c5a244c 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -570,10 +570,11 @@ static ssize_t _name##_show(struct device *dev, \ struct ufs_hba *hba = dev_get_drvdata(dev); \ int ret; \ int desc_len = QUERY_DESC_MAX_SIZE; \ - u8 *desc_buf; \ + char *desc_buf; \ + \ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ - if (!desc_buf) \ - return -ENOMEM; \ + if (!desc_buf) \ + return -ENOMEM; \ ret = ufshcd_query_descriptor_retry(hba, \ UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ 0, 0, desc_buf, &desc_len); \ @@ -582,14 +583,13 @@ static ssize_t _name##_show(struct device *dev, \ goto out; \ } \ index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ - memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \ - if (ufshcd_read_string_desc(hba, index, desc_buf, \ - QUERY_DESC_MAX_SIZE, true)) { \ - ret = -EINVAL; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ goto out; \ - } \ - ret = snprintf(buf, PAGE_SIZE, "%s\n", \ - desc_buf + QUERY_DESC_HDR_SIZE); \ + ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \ out: \ kfree(desc_buf); \ return ret; \ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 14e5bf7af0bb..b60dfcb5f009 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -614,10 +614,14 @@ struct ufs_dev_info { * * @wmanufacturerid: card details * @model: card model + * @serial_no: serial number + * @serial_no_len: serial number string length */ struct ufs_dev_desc { u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + char *model; + char *serial_no; + size_t serial_no_len; }; /** diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c index ffe6f82182ba..4e79ea065556 100644 --- a/drivers/scsi/ufs/ufshcd-pci.c +++ b/drivers/scsi/ufs/ufshcd-pci.c @@ -67,9 +67,49 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba, return err; } +static int ufs_intel_pwr_change_notify(struct ufs_hba *hba, + enum ufs_notify_change_status notify, + struct ufs_pa_layer_attr *desired_pwr_info, + struct ufs_pa_layer_attr *final_pwr_info) +{ + struct pci_dev *pdev = to_pci_dev(hba->dev); + int ret = 0; + + if (!desired_pwr_info || !final_pwr_info) { + ret = -EINVAL; + goto out; + } + + switch (notify) { + case PRE_CHANGE: + dev_dbg(hba->dev, "PWR change PRE_CHANGE start\n"); + memcpy(final_pwr_info, desired_pwr_info, + sizeof(struct ufs_pa_layer_attr)); + + if 
(pdev->device == 0x9DFA && + (final_pwr_info->pwr_tx == FASTAUTO_MODE || + final_pwr_info->pwr_tx == FAST_MODE || + final_pwr_info->pwr_rx == FASTAUTO_MODE || + final_pwr_info->pwr_rx == FAST_MODE)) { + /* Currently, only RATE A is supported for HS mode */ + final_pwr_info->hs_rate = PA_HS_MODE_A; + } + break; + case POST_CHANGE: + break; + default: + ret = -EINVAL; + break; + } + +out: + return ret; +} + static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = { .name = "intel-pci", .link_startup_notify = ufs_intel_link_startup_notify, + .pwr_change_notify = ufs_intel_pwr_change_notify, }; #ifdef CONFIG_PM_SLEEP @@ -200,6 +240,15 @@ static const struct dev_pm_ops ufshcd_pci_pm_ops = { static const struct pci_device_id ufshcd_pci_tbl[] = { { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x34FA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x34FD), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x38FA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0xA0FA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0xA0FF), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B90), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x4B95), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x43FA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, + { PCI_VDEVICE(INTEL, 0x43FF), (kernel_ulong_t)&ufs_intel_cnl_hba_vops }, { } /* terminate list */ }; diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index c55f38ec391c..3a699a5d7ea8 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -37,11 +37,15 @@ * license terms, and distributes only under these terms. 
*/ +#include #include #include #include #include #include +#include +#include + #include "ufshcd.h" #include "ufs_quirks.h" #include "unipro.h" @@ -234,6 +238,10 @@ static struct ufs_dev_fix ufs_fixups[] = { END_FIX }; +static int max_gear; +static int dflt_hs_rate; +static int dflt_hs_mode; + static void ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); @@ -297,16 +305,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba) scsi_block_requests(hba->host); } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -3120,7 +3118,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size) { int ret; @@ -3188,7 +3186,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); @@ -3206,49 +3204,77 @@ static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); } +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char blank_non_printable(char ch) +{ + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; +} + /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance * @desc_index: descriptor index - * @buf: pointer to buffer where descriptor would be read - * @size: size of buf + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: string size on success. + * -ENOMEM: on allocation failure + * -EINVAL: on a wrong parameter */ -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + char *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. 
err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; + ssize_t ascii_len; int i; - char *buff_ascii; - - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3256,22 +3282,29 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = blank_non_printable(str[i]); + + str[ret++] = '\0'; - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + } else { + str = kzalloc(uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + memcpy(str, uc_str, uc_str->len); + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -3932,9 +3965,15 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) if (hba->max_pwr_info.is_valid) return 0; - pwr_info->pwr_tx = FAST_MODE; - pwr_info->pwr_rx = FAST_MODE; - pwr_info->hs_rate = PA_HS_MODE_B; + if (dflt_hs_mode != FAST_MODE && dflt_hs_mode != FASTAUTO_MODE) + dflt_hs_mode = FAST_MODE; + + if (dflt_hs_rate != PA_HS_MODE_A && dflt_hs_rate != PA_HS_MODE_B) + dflt_hs_rate = PA_HS_MODE_B; + + pwr_info->pwr_tx = dflt_hs_mode; + pwr_info->pwr_rx = dflt_hs_mode; + pwr_info->hs_rate = dflt_hs_rate; /* Get the connected lane count */ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), @@ -3980,6 +4019,12 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) pwr_info->pwr_tx = SLOW_MODE; } + if (max_gear > 0 && + (pwr_info->gear_rx > max_gear || pwr_info->gear_tx > max_gear)) { + pwr_info->gear_rx = max_gear; + pwr_info->gear_tx = max_gear; + } + hba->max_pwr_info.is_valid = true; return 0; } @@ -6179,6 +6224,227 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) kfree(desc_buf); } +#define SEC_PROTOCOL_UFS 0xEC +#define SEC_SPECIFIC_UFS_RPMB 0x001 + +#define SEC_PROTOCOL_CMD_SIZE 12 +#define SEC_PROTOCOL_RETRIES 3 +#define SEC_PROTOCOL_RETRIES_ON_RESET 10 +#define SEC_PROTOCOL_TIMEOUT msecs_to_jiffies(1000) + +static int +ufshcd_rpmb_security_out(struct scsi_device *sdev, u8 region, + void *frames, u32 trans_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_OUT; + cmd[1] = 
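/*
 * Illustrative sketch, not part of the patch: the 12-byte SECURITY
 * PROTOCOL OUT CDB that ufshcd_rpmb_security_out() builds -- security
 * protocol 0xEC (UFS), the RPMB region and the RPMB-specific field in
 * bytes 2-3, and a big-endian transfer length in bytes 6-9. The 0xb5
 * operation code is the SPC SECURITY PROTOCOL OUT opcode.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SEC_CDB_SIZE    12

int main(void)
{
        uint8_t cmd[SEC_CDB_SIZE];
        uint32_t trans_len = 512;       /* one 512-byte RPMB frame */
        int i;

        memset(cmd, 0, sizeof(cmd));
        cmd[0] = 0xb5;          /* SECURITY PROTOCOL OUT */
        cmd[1] = 0xEC;          /* SEC_PROTOCOL_UFS */
        cmd[2] = 0;             /* RPMB region */
        cmd[3] = 0x01;          /* SEC_SPECIFIC_UFS_RPMB */
        cmd[6] = trans_len >> 24;       /* put_unaligned_be32() equivalent */
        cmd[7] = trans_len >> 16;
        cmd[8] = trans_len >> 8;
        cmd[9] = trans_len;

        for (i = 0; i < SEC_CDB_SIZE; i++)
                printf("%02x ", cmd[i]);
        printf("\n");
        return 0;
}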
SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(trans_len, cmd + 6); /* transfer length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, + frames, trans_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security out", &sshdr); + + return ret; +} + +static int +ufshcd_rpmb_security_in(struct scsi_device *sdev, u8 region, + void *frames, u32 alloc_len) +{ + struct scsi_sense_hdr sshdr; + int reset_retries = SEC_PROTOCOL_RETRIES_ON_RESET; + int ret; + u8 cmd[SEC_PROTOCOL_CMD_SIZE]; + + memset(cmd, 0, SEC_PROTOCOL_CMD_SIZE); + cmd[0] = SECURITY_PROTOCOL_IN; + cmd[1] = SEC_PROTOCOL_UFS; + cmd[2] = region; + cmd[3] = SEC_SPECIFIC_UFS_RPMB; + cmd[4] = 0; /* inc_512 bit 7 set to 0 */ + put_unaligned_be32(alloc_len, cmd + 6); /* allocation length */ + +retry: + ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, + frames, alloc_len, &sshdr, + SEC_PROTOCOL_TIMEOUT, SEC_PROTOCOL_RETRIES, + NULL); + + if (ret && scsi_sense_valid(&sshdr) && + sshdr.sense_key == UNIT_ATTENTION && + sshdr.asc == 0x29 && sshdr.ascq == 0x00) + /* + * Device reset might occur several times, + * give it one more chance + */ + if (--reset_retries > 0) + goto retry; + + if (ret) + dev_err(&sdev->sdev_gendev, "%s: failed with err %0x\n", + __func__, ret); + + if (driver_byte(ret) & DRIVER_SENSE) + scsi_print_sense_hdr(sdev, "rpmb: security in", &sshdr); + + return ret; +} + +static int ufshcd_rpmb_cmd_seq(struct device *dev, u8 target, + struct rpmb_cmd *cmds, u32 ncmds) +{ + unsigned long flags; + struct ufs_hba *hba = dev_get_drvdata(dev); + struct scsi_device *sdev; + struct rpmb_cmd *cmd; + u32 len; + u32 i; + int ret; + + spin_lock_irqsave(hba->host->host_lock, flags); + sdev = hba->sdev_ufs_rpmb; + if (sdev) { + ret = scsi_device_get(sdev); + if (!ret && !scsi_device_online(sdev)) { + ret = -ENODEV; + scsi_device_put(sdev); + } + } else { + ret = -ENODEV; + } + spin_unlock_irqrestore(hba->host->host_lock, flags); + if (ret) + return ret; + + for (ret = 0, i = 0; i < ncmds && !ret; i++) { + cmd = &cmds[i]; + len = rpmb_ioc_frames_len_jdec(cmd->nframes); + if (cmd->flags & RPMB_F_WRITE) + ret = ufshcd_rpmb_security_out(sdev, target, + cmd->frames, len); + else + ret = ufshcd_rpmb_security_in(sdev, target, + cmd->frames, len); + } + scsi_device_put(sdev); + return ret; +} + +static int ufshcd_rpmb_get_capacity(struct device *dev, u8 target) +{ + struct ufs_hba *hba = dev_get_drvdata(dev); + __be64 block_count; + int ret; + + ret = ufshcd_read_unit_desc_param(hba, + UFS_UPIU_RPMB_WLUN, + UNIT_DESC_PARAM_LOGICAL_BLK_COUNT, + (u8 *)&block_count, + sizeof(block_count)); + if (ret) + return ret; + + return be64_to_cpu(block_count) * SZ_512 / SZ_128K; +} + +static struct rpmb_ops ufshcd_rpmb_dev_ops = { + .cmd_seq = ufshcd_rpmb_cmd_seq, + .get_capacity = ufshcd_rpmb_get_capacity, + .type = RPMB_TYPE_UFS, + .auth_method = RPMB_HMAC_ALGO_SHA_256, + +}; + +static inline void ufshcd_rpmb_add(struct ufs_hba *hba, + struct ufs_dev_desc *dev_desc) +{ + struct rpmb_dev *rdev; + u8 rpmb_rw_size = 1; + int ret; + + 
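/*
 * Editorial sketch of the registration flow below, as implemented in this
 * patch: the device serial number (read during device-descriptor parsing)
 * is duplicated as the RPMB unique id; on UFS 2.1+ hosts,
 * bRPMBReadWriteSize from the Geometry descriptor caps the number of
 * frames per SECURITY IN/OUT transfer, otherwise a single frame
 * (rpmb_rw_size = 1) is assumed; finally the region is registered with
 * the RPMB subsystem via rpmb_dev_register().
 */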
ufshcd_rpmb_dev_ops.dev_id = kmemdup(dev_desc->serial_no, + dev_desc->serial_no_len, + GFP_KERNEL); + if (ufshcd_rpmb_dev_ops.dev_id) + ufshcd_rpmb_dev_ops.dev_id_len = dev_desc->serial_no_len; + + ret = scsi_device_get(hba->sdev_ufs_rpmb); + if (ret) + goto out_put_dev; + + if (hba->ufs_version >= UFSHCI_VERSION_21) { + ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, + GEOMETRY_DESC_PARAM_RPMB_RW_SIZE, + &rpmb_rw_size, + sizeof(rpmb_rw_size)); + if (ret) + goto out_put_dev; + } + + ufshcd_rpmb_dev_ops.rd_cnt_max = rpmb_rw_size; + ufshcd_rpmb_dev_ops.wr_cnt_max = rpmb_rw_size; + + rdev = rpmb_dev_register(hba->dev, 0, &ufshcd_rpmb_dev_ops); + if (IS_ERR(rdev)) { + dev_warn(hba->dev, "%s: cannot register to rpmb %ld\n", + dev_name(hba->dev), PTR_ERR(rdev)); + goto out_put_dev; + } + + return; + +out_put_dev: + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; +} + +static inline void ufshcd_rpmb_remove(struct ufs_hba *hba) +{ + unsigned long flags; + + if (!hba->sdev_ufs_rpmb) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + + rpmb_dev_unregister_by_device(hba->dev, 0); + scsi_device_put(hba->sdev_ufs_rpmb); + hba->sdev_ufs_rpmb = NULL; + + kfree(ufshcd_rpmb_dev_ops.dev_id); + ufshcd_rpmb_dev_ops.dev_id = NULL; + + spin_unlock_irqrestore(hba->host->host_lock, flags); +} + /** * ufshcd_scsi_add_wlus - Adds required W-LUs * @hba: per-adapter instance @@ -6226,6 +6492,8 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) ret = PTR_ERR(sdev_rpmb); goto remove_sdev_ufs_device; } + hba->sdev_ufs_rpmb = sdev_rpmb; + scsi_device_put(sdev_rpmb); sdev_boot = __scsi_add_device(hba->host, 0, 0, @@ -6247,9 +6515,12 @@ static int ufs_get_device_desc(struct ufs_hba *hba, { int err; size_t buff_len; - u8 model_index; + u8 index; u8 *desc_buf; + if (!dev_desc) + return -EINVAL; + buff_len = max_t(size_t, hba->desc_size.dev_desc, QUERY_DESC_MAX_SIZE + 1); desc_buf = kmalloc(buff_len, GFP_KERNEL); @@ -6272,32 +6543,43 @@ static int ufs_get_device_desc(struct ufs_hba *hba, dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; - model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - /* Zero-pad entire buffer for string termination. */ - memset(desc_buf, 0, buff_len); - - err = ufshcd_read_string_desc(hba, model_index, desc_buf, - QUERY_DESC_MAX_SIZE, true/*ASCII*/); - if (err) { + index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; + err = ufshcd_read_string_desc(hba, index, + &dev_desc->model, SD_ASCII_STD); + if (err < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n", __func__, err); goto out; } - desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); + index = desc_buf[DEVICE_DESC_PARAM_SN]; + err = ufshcd_read_string_desc(hba, index, &dev_desc->serial_no, SD_RAW); + if (err < 0) { + dev_err(hba->dev, "%s: Failed reading Serial No. 
err = %d\n", + __func__, err); + goto out; + } - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value + */ + err = 0; out: kfree(desc_buf); return err; } +static void ufs_put_device_desc(struct ufs_dev_desc *dev_desc) +{ + kfree(dev_desc->model); + dev_desc->model = NULL; + + kfree(dev_desc->serial_no); + dev_desc->serial_no = NULL; +} + static void ufs_fixup_device_setup(struct ufs_hba *hba, struct ufs_dev_desc *dev_desc) { @@ -6306,8 +6588,9 @@ static void ufs_fixup_device_setup(struct ufs_hba *hba, for (f = ufs_fixups; f->quirk; f++) { if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || f->card.wmanufacturerid == UFS_ANY_VENDOR) && - (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || - !strcmp(f->card.model, UFS_ANY_MODEL))) + ((dev_desc->model && + STR_PRFX_EQUAL(f->card.model, dev_desc->model)) || + !strcmp(f->card.model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } } @@ -6590,6 +6873,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) } ufs_fixup_device_setup(hba, &card); + ufshcd_tune_unipro_params(hba); ret = ufshcd_set_vccq_rail_unused(hba, @@ -6638,6 +6922,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (ufshcd_scsi_add_wlus(hba)) goto out; + ufshcd_rpmb_add(hba, &card); + /* Initialize devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { memcpy(&hba->clk_scaling.saved_pwr_info.info, @@ -6659,7 +6945,28 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) if (!hba->is_init_prefetch) hba->is_init_prefetch = true; + { + u32 refclkfreq; + + /* index = 0, selector = 0 */ + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &refclkfreq); + if (ret) + dev_err(hba->dev, "%s: UFS attribute bRefClkFreq %u, error %d\n", __func__, refclkfreq, ret); + else + dev_info(hba->dev, "%s: UFS attribute bRefClkFreq %u\n", __func__, refclkfreq); + if (!ret && refclkfreq != 0) { + refclkfreq = 0; + ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &refclkfreq); + if (ret) + dev_err(hba->dev, "%s: UFS failed to write attribute bRefClkFreq %u, error %d\n", __func__, refclkfreq, ret); + else + dev_info(hba->dev, "%s: UFS wrote attribute bRefClkFreq %u\n", __func__, refclkfreq); + } + } + out: + + ufs_put_device_desc(&card); /* * If we failed to initialize the device or the device is not * present, turn off the power/clocks etc. 
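A usage sketch (editorial, not part of the patch) for the reworked ufshcd_read_string_desc() above: the function now allocates the descriptor buffer itself, returns the string size or a negative errno, and leaves ownership of the buffer with the caller. The helper name and the dev_info() reporting below are illustrative only.

static void example_read_product_name(struct ufs_hba *hba, u8 index)
{
	char *model = NULL;
	int len;

	/* SD_ASCII_STD requests UTF-16 to printable-ASCII conversion */
	len = ufshcd_read_string_desc(hba, index, &model, SD_ASCII_STD);
	if (len < 0) {
		dev_err(hba->dev, "string descriptor read failed: %d\n", len);
		return;
	}

	if (!model)
		return;	/* zero-length descriptor: nothing was allocated */

	dev_info(hba->dev, "product name (%d bytes): %s\n", len, model);
	kfree(model);	/* the caller owns and frees the buffer */
}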
@@ -7859,6 +8166,8 @@ int ufshcd_shutdown(struct ufs_hba *hba) goto out; } + ufshcd_rpmb_remove(hba); + ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM); out: if (ret) @@ -7875,7 +8184,10 @@ EXPORT_SYMBOL(ufshcd_shutdown); */ void ufshcd_remove(struct ufs_hba *hba) { + ufshcd_rpmb_remove(hba); + ufs_sysfs_remove_nodes(hba->dev); + scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -8151,6 +8463,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) } EXPORT_SYMBOL_GPL(ufshcd_init); +module_param(max_gear, int, 0444); +module_param(dflt_hs_rate, int, 0444); +module_param(dflt_hs_mode, int, 0444); + +MODULE_PARM_DESC(max_gear, "Maximum gear: 1, 2, 3 ..."); +MODULE_PARM_DESC(dflt_hs_rate, "Default high speed rate series: 1 (= rate A), 2 (= rate B)"); +MODULE_PARM_DESC(dflt_hs_mode, "Default high speed power mode: 1 (= FAST), 4 (= FASTAUTO)"); + MODULE_AUTHOR("Santosh Yaragnavi "); MODULE_AUTHOR("Vinayak Holikatti "); MODULE_DESCRIPTION("Generic UFS host controller driver Core"); diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 33fdd3f281ae..82b5e7d317f5 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -458,6 +458,7 @@ struct ufs_stats { * @utmrdl_dma_addr: UTMRDL DMA address * @host: Scsi_Host instance of the driver * @dev: device handle + * @sdev_ufs_rpmb: reference to RPMB device W-LU * @lrb: local reference block * @lrb_in_use: lrb in use * @outstanding_tasks: Bits representing outstanding task requests @@ -522,6 +523,7 @@ struct ufs_hba { * "UFS device" W-LU. */ struct scsi_device *sdev_ufs_device; + struct scsi_device *sdev_ufs_rpmb; enum ufs_dev_pwr_mode curr_dev_pwr_mode; enum uic_link_state uic_link_state; @@ -875,14 +877,17 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, u8 param_offset, - u8 *param_read_buf, + void *param_read_buf, u8 param_size); int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + char **buf, bool ascii); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); diff --git a/drivers/sdw/Kconfig b/drivers/sdw/Kconfig new file mode 100644 index 000000000000..7e5a57f1f6d2 --- /dev/null +++ b/drivers/sdw/Kconfig @@ -0,0 +1,19 @@ +menuconfig SDW + tristate "SoundWire bus support" + select CRC8 + depends on X86 + help + The SoundWire interface is typically used for transporting data + related to audio functions. 
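# Editorial note: the two entries below are follow-on options to SDW; both
# the CNL master driver and the Maxim FPGA slave build on top of the core
# sdw_bus module (sdw.o, sdw_bwcalc.o, sdw_utils.o - see the Makefile that
# follows).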
+menuconfig SDW_CNL + tristate "Intel SoundWire master controller support" + depends on SDW && X86 + help + Intel SoundWire master controller driver +menuconfig SDW_MAXIM_SLAVE + bool "SoundWire Slave for the Intel CNL FPGA" + depends on SDW && X86 + help + SoundWire Slave on FPGA platform for Intel CNL IP + Mostly N for all the cases other than CNL Slave FPGA + diff --git a/drivers/sdw/Makefile b/drivers/sdw/Makefile new file mode 100644 index 000000000000..e2ba440f4ef2 --- /dev/null +++ b/drivers/sdw/Makefile @@ -0,0 +1,5 @@ +sdw_bus-objs := sdw.o sdw_bwcalc.o sdw_utils.o + +obj-$(CONFIG_SDW) += sdw_bus.o +obj-$(CONFIG_SDW_CNL) += sdw_cnl.o +obj-$(CONFIG_SDW_MAXIM_SLAVE) += sdw_maxim.o diff --git a/drivers/sdw/sdw.c b/drivers/sdw/sdw.c new file mode 100644 index 000000000000..aefd25d4e393 --- /dev/null +++ b/drivers/sdw/sdw.c @@ -0,0 +1,3459 @@ +/* + * sdw.c - SoundWire Bus driver implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Hardik T Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_priv.h" + +#define sdw_slave_attr_gr NULL +#define sdw_mstr_attr_gr NULL + +#define CREATE_TRACE_POINTS +#include + +/* Global instance handling all the SoundWire buses */ +struct sdw_core sdw_core; + +static void sdw_slave_release(struct device *dev) +{ + kfree(to_sdw_slave(dev)); +} + +static void sdw_mstr_release(struct device *dev) +{ + struct sdw_master *mstr = to_sdw_master(dev); + + complete(&mstr->slv_released); +} + +static struct device_type sdw_slv_type = { + .groups = sdw_slave_attr_gr, + .release = sdw_slave_release, +}; + +static struct device_type sdw_mstr_type = { + .groups = sdw_mstr_attr_gr, + .release = sdw_mstr_release, +}; +/** + * sdw_slave_verify - return parameter as sdw_slv, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slv. + */ +struct sdw_slv *sdw_slave_verify(struct device *dev) +{ + return (dev->type == &sdw_slv_type) + ? to_sdw_slave(dev) + : NULL; +} + +/** + * sdw_mstr_verify - return parameter as sdw_master, or NULL + * @dev: device, probably from some driver model iterator + * + * When traversing the driver model tree, perhaps using driver model + * iterators like @device_for_each_child(), you can't assume very much + * about the nodes you find. Use this function to avoid oopses caused + * by wrongly treating some non-SDW device as an sdw_slv. + */ +struct sdw_master *sdw_mstr_verify(struct device *dev) +{ + return (dev->type == &sdw_mstr_type) + ? 
to_sdw_master(dev) + : NULL; +} + +static const struct sdw_slv_id *sdw_match_slave(const struct sdw_slv_id *id, + const struct sdw_slv *sdw_slv) +{ + while (id->name[0]) { + if (strncmp(sdw_slv->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static const struct sdw_master_id *sdw_match_master( + const struct sdw_master_id *id, + const struct sdw_master *sdw_mstr) +{ + if (!id) + return NULL; + while (id->name[0]) { + if (strncmp(sdw_mstr->name, id->name, SOUNDWIRE_NAME_SIZE) == 0) + return id; + id++; + } + return NULL; +} + +static int sdw_slv_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slv *sdw_slv; + struct sdw_slave_driver *drv = to_sdw_slave_driver(driver); + int ret = 0; + + /* Both master and slave drivers have driver_type as their first + * field, so check it; if this driver is not of slave type there + * is nothing to match. + */ + if (drv->driver_type != SDW_DRIVER_TYPE_SLAVE) + return ret; + + sdw_slv = to_sdw_slave(dev); + + if (drv->id_table) + ret = (sdw_match_slave(drv->id_table, sdw_slv) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_slv->name, driver->name, SOUNDWIRE_NAME_SIZE) + == 0); + if (ret) + sdw_slv->driver = drv; + return ret; +} +static int sdw_mstr_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_master *sdw_mstr; + struct sdw_mstr_driver *drv = to_sdw_mstr_driver(driver); + int ret = 0; + + /* Both master and slave drivers have driver_type as their first + * field, so check it; if this driver is not of master type there + * is nothing to match. + */ + if (drv->driver_type != SDW_DRIVER_TYPE_MASTER) + return ret; + + sdw_mstr = to_sdw_master(dev); + + if (drv->id_table) + ret = (sdw_match_master(drv->id_table, sdw_mstr) != NULL); + + if (driver->name && !ret) + ret = (strncmp(sdw_mstr->name, driver->name, + SOUNDWIRE_NAME_SIZE) == 0); + if (ret) + sdw_mstr->driver = drv; + + return ret; +} + +static int sdw_mstr_probe(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(mstr, sdw_match_master(sdrv->id_table, mstr)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + +static int sdw_slv_probe(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + struct sdw_slv *sdwslv = to_sdw_slave(dev); + int ret = 0; + + if (!sdrv->probe) + return -ENODEV; + ret = dev_pm_domain_attach(dev, true); + if (ret != -EPROBE_DEFER) { + ret = sdrv->probe(sdwslv, sdw_match_slave(sdrv->id_table, + sdwslv)); + if (ret) + dev_pm_domain_detach(dev, true); + } + return ret; +} + + +int sdw_slave_get_bus_params(struct sdw_slv *sdw_slv, + struct sdw_bus_params *params) +{ + struct sdw_bus *bus = NULL, *b; + struct sdw_master *mstr = sdw_slv->mstr; + + list_for_each_entry(b, &sdw_core.bus_list, bus_node) { + if (b->mstr == mstr) { + bus = b; + break; + } + } + if (!bus) + return -EFAULT; + + params->num_rows = bus->row; + params->num_cols = bus->col; + params->bus_clk_freq = bus->clk_freq >> 1; + params->bank = bus->active_bank; + + return 0; +} +EXPORT_SYMBOL(sdw_slave_get_bus_params); + +static int sdw_mstr_remove(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_master(dev)); + else + return -ENODEV; + + 
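/*
 * (editorial note) The dev_pm_domain_detach() below balances the
 * dev_pm_domain_attach() performed at probe time; as with
 * sdw_slv_remove(), it only runs when the driver actually provided a
 * remove() callback.
 */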
dev_pm_domain_detach(dev, true); + return ret; + +} + +static int sdw_slv_remove(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + int ret = 0; + + if (sdrv->remove) + ret = sdrv->remove(to_sdw_slave(dev)); + else + return -ENODEV; + + dev_pm_domain_detach(dev, true); + return ret; +} + +static void sdw_slv_shutdown(struct device *dev) +{ + const struct sdw_slave_driver *sdrv = to_sdw_slave_driver(dev->driver); + + if (sdrv->shutdown) + sdrv->shutdown(to_sdw_slave(dev)); +} + +static void sdw_mstr_shutdown(struct device *dev) +{ + const struct sdw_mstr_driver *sdrv = to_sdw_mstr_driver(dev->driver); + struct sdw_master *mstr = to_sdw_master(dev); + + if (sdrv->shutdown) + sdrv->shutdown(mstr); +} + +static void sdw_shutdown(struct device *dev) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + sdw_slv_shutdown(dev); + else if (sdw_mstr) + sdw_mstr_shutdown(dev); +} + +static int sdw_remove(struct device *dev) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_remove(dev); + else if (sdw_mstr) + return sdw_mstr_remove(dev); + + return 0; +} + +static int sdw_probe(struct device *dev) +{ + + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_probe(dev); + else if (sdw_mstr) + return sdw_mstr_probe(dev); + + return -ENODEV; + +} + +static int sdw_match(struct device *dev, struct device_driver *driver) +{ + struct sdw_slv *sdw_slv; + struct sdw_master *sdw_mstr; + + sdw_slv = sdw_slave_verify(dev); + sdw_mstr = sdw_mstr_verify(dev); + if (sdw_slv) + return sdw_slv_match(dev, driver); + else if (sdw_mstr) + return sdw_mstr_match(dev, driver); + return 0; + +} + +#ifdef CONFIG_PM_SLEEP +static int sdw_legacy_suspend(struct device *dev, pm_message_t mesg) +{ + struct sdw_slv *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->suspend) + return 0; + + return driver->suspend(sdw_slv, mesg); +} + +static int sdw_legacy_resume(struct device *dev) +{ + struct sdw_slv *sdw_slv = NULL; + struct sdw_slave_driver *driver; + + if (dev->type == &sdw_slv_type) + sdw_slv = to_sdw_slave(dev); + + if (!sdw_slv || !dev->driver) + return 0; + + driver = to_sdw_slave_driver(dev->driver); + if (!driver->resume) + return 0; + + return driver->resume(sdw_slv); +} + +static int sdw_pm_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + + if (pm) + return pm_generic_suspend(dev); + else + return sdw_legacy_suspend(dev, PMSG_SUSPEND); +} + +static int sdw_pm_resume(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; + + if (pm) + return pm_generic_resume(dev); + else + return sdw_legacy_resume(dev); +} + +#else +#define sdw_pm_suspend NULL +#define sdw_pm_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +static const struct dev_pm_ops soundwire_pm = { + .suspend = sdw_pm_suspend, + .resume = sdw_pm_resume, +#ifdef CONFIG_PM + .runtime_suspend = pm_generic_runtime_suspend, + .runtime_resume = pm_generic_runtime_resume, +#endif +}; + +struct bus_type sdwint_bus_type = { + .name = "soundwire", + .match = sdw_match, + .probe = sdw_probe, + .remove = sdw_remove, + .shutdown = sdw_shutdown, + .pm = &soundwire_pm, +}; +EXPORT_SYMBOL_GPL(sdwint_bus_type); + +struct device sdw_slv = { + .init_name = "soundwire", +}; + +static struct static_key sdw_trace_msg = STATIC_KEY_INIT_FALSE; + +int sdw_transfer_trace_reg(void) +{ + static_key_slow_inc(&sdw_trace_msg); + + return 0; +} + +void sdw_transfer_trace_unreg(void) +{ + static_key_slow_dec(&sdw_trace_msg); +} + +/** + * sdw_lock_mstr - Get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_lock_mstr(struct sdw_master *mstr) +{ + rt_mutex_lock(&mstr->bus_lock); +} + +/** + * sdw_trylock_mstr - Try to get exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +int sdw_trylock_mstr(struct sdw_master *mstr) +{ + return rt_mutex_trylock(&mstr->bus_lock); +} + + +/** + * sdw_unlock_mstr - Release exclusive access to an SDW bus segment + * @mstr: Target SDW bus segment + */ +void sdw_unlock_mstr(struct sdw_master *mstr) +{ + rt_mutex_unlock(&mstr->bus_lock); +} + + +static int sdw_assign_slv_number(struct sdw_master *mstr, + struct sdw_msg *msg) +{ + int i, j, ret = -1; + + sdw_lock_mstr(mstr); + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned == true) + continue; + mstr->sdw_addr[i].assigned = true; + for (j = 0; j < 6; j++) + mstr->sdw_addr[i].dev_id[j] = msg->buf[j]; + ret = i; + break; + } + sdw_unlock_mstr(mstr); + return ret; +} + +static int sdw_program_slv_address(struct sdw_master *mstr, + u8 slave_addr) +{ + struct sdw_msg msg; + u8 buf[1] = {0}; + int ret; + + buf[0] = slave_addr; + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_DEVNUMBER; + msg.len = 1; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "Program Slave address change\n"); + return ret; + } + return 0; +} + +static int sdw_find_slave(struct sdw_master *mstr, struct sdw_msg + *msg, bool *found) +{ + struct sdw_slv_addr *sdw_addr; + int ret = 0, i, comparison; + *found = false; + + sdw_lock_mstr(mstr); + sdw_addr = mstr->sdw_addr; + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + comparison = memcmp(sdw_addr[i].dev_id, msg->buf, + SDW_NUM_DEV_ID_REGISTERS); + if ((!comparison) && (sdw_addr[i].assigned == true)) { + *found = true; + break; + } + } + sdw_unlock_mstr(mstr); + if (*found == true) + ret = sdw_program_slv_address(mstr, sdw_addr[i].slv_number); + return ret; +} + +static void sdw_free_slv_number(struct sdw_master *mstr, + int slv_number) +{ + int i; + + sdw_lock_mstr(mstr); + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (slv_number == mstr->sdw_addr[i].slv_number) { + mstr->sdw_addr[slv_number].assigned = false; + memset(&mstr->sdw_addr[slv_number].dev_id[0], 0x0, 6); + } + } + sdw_unlock_mstr(mstr); +} + + +int count; +static int sdw_register_slave(struct sdw_master *mstr) +{ + int ret = 0, i, ports; + struct sdw_msg msg; 
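/*
 * (editorial sketch of the enumeration loop below) The six SCP_DevId
 * registers are read from device number 0; any Slave still at address 0
 * responds. Each dev_id read is either matched to an already known Slave
 * (re-attach) or assigned a free logical number, which is then programmed
 * into the Slave's SCP_DevNumber register.
 */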
+ u8 buf[6] = {0}; + struct sdw_slv *sdw_slv; + int slv_number = -1; + bool found = false; + + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.addr = SDW_SCP_DEVID_0; + msg.len = 6; + msg.buf = buf; + msg.slave_addr = 0x0; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + + while ((ret = sdw_slave_transfer(mstr, &msg, 1)) == 1) { + ret = sdw_find_slave(mstr, &msg, &found); + if (found && !ret) { + dev_info(&mstr->dev, "Slave already registered\n"); + continue; + /* Even if registering a slave fails we continue with the + * other slave statuses, but flag the error + */ + } else if (ret) { + dev_err(&mstr->dev, "Re-registering slave failed\n"); + continue; + } + slv_number = sdw_assign_slv_number(mstr, &msg); + if (slv_number <= 0) { + dev_err(&mstr->dev, "Failed to assign slv_number\n"); + ret = -EINVAL; + goto slv_number_assign_fail; + } + sdw_slv = kzalloc(sizeof(struct sdw_slv), GFP_KERNEL); + if (!sdw_slv) { + ret = -ENOMEM; + goto mem_alloc_failed; + } + sdw_slv->mstr = mstr; + sdw_slv->dev.parent = &sdw_slv->mstr->dev; + sdw_slv->dev.bus = &sdwint_bus_type; + sdw_slv->dev.type = &sdw_slv_type; + sdw_slv->slv_addr = &mstr->sdw_addr[slv_number]; + sdw_slv->slv_addr->slave = sdw_slv; + /* We have assigned a new slave number, so it is not present + * until it re-attaches to the bus with this new + * slave address + */ + sdw_slv->slv_addr->status = SDW_SLAVE_STAT_NOT_PRESENT; + for (i = 0; i < 6; i++) + sdw_slv->dev_id[i] = msg.buf[i]; + dev_dbg(&mstr->dev, "SDW slave dev_id found with values\n"); + dev_dbg(&mstr->dev, "dev_id0 to dev_id5: %x:%x:%x:%x:%x:%x\n", + msg.buf[0], msg.buf[1], msg.buf[2], + msg.buf[3], msg.buf[4], msg.buf[5]); + dev_dbg(&mstr->dev, "Slave number assigned is %x\n", slv_number); + /* TODO: Fill the sdw_slv structure from ACPI */ + ports = sdw_slv->sdw_slv_cap.num_of_sdw_ports; + /* Add 1 for port 0 for simplicity */ + ports++; + sdw_slv->port_ready = + kzalloc((sizeof(struct completion) * ports), + GFP_KERNEL); + if (!sdw_slv->port_ready) { + ret = -ENOMEM; + goto port_alloc_mem_failed; + } + for (i = 0; i < ports; i++) + init_completion(&sdw_slv->port_ready[i]); + + dev_set_name(&sdw_slv->dev, "sdw-slave%d-%02x:%02x:%02x:%02x:%02x:%02x", + sdw_master_id(mstr), + sdw_slv->dev_id[0], + sdw_slv->dev_id[1], + sdw_slv->dev_id[2], + sdw_slv->dev_id[3], + sdw_slv->dev_id[4], + sdw_slv->dev_id[5] + mstr->nr); + /* Set name based on dev_id. 
This will be + * compared to load driver + */ + sprintf(sdw_slv->name, "%02x:%02x:%02x:%02x:%02x:%02x", + sdw_slv->dev_id[0], + sdw_slv->dev_id[1], + sdw_slv->dev_id[2], + sdw_slv->dev_id[3], + sdw_slv->dev_id[4], + sdw_slv->dev_id[5] + mstr->nr); + ret = device_register(&sdw_slv->dev); + if (ret) { + dev_err(&mstr->dev, "Register slave failed\n"); + goto reg_slv_failed; + } + ret = sdw_program_slv_address(mstr, slv_number); + if (ret) { + dev_err(&mstr->dev, "Programming slave address failed\n"); + goto program_slv_failed; + } + dev_dbg(&mstr->dev, "Slave registered with bus id %s\n", + dev_name(&sdw_slv->dev)); + sdw_slv->slv_number = slv_number; + mstr->num_slv++; + sdw_lock_mstr(mstr); + list_add_tail(&sdw_slv->node, &mstr->slv_list); + sdw_unlock_mstr(mstr); + + } + count++; + return 0; +program_slv_failed: + device_unregister(&sdw_slv->dev); +port_alloc_mem_failed: +reg_slv_failed: + kfree(sdw_slv); +mem_alloc_failed: + sdw_free_slv_number(mstr, slv_number); +slv_number_assign_fail: + return ret; + +} + +/** + * __sdw_transfer - unlocked flavor of sdw_slave_transfer + * @mstr: Handle to SDW bus + * @msg: One or more messages to execute before STOP is issued to + * terminate the operation; each message begins with a START. + * @num: Number of messages to be executed. + * + * Returns negative errno, else the number of messages executed. + * + * Adapter lock must be held when calling this function. No debug logging + * takes place. mstr->algo->master_xfer existence isn't checked. + */ +int __sdw_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num, + struct sdw_async_xfer_data *async_data) +{ + unsigned long orig_jiffies; + int ret = 0, try, i; + struct sdw_slv_capabilities *slv_cap; + int program_scp_addr_page; + int addr = msg->slave_addr; + + /* sdw_trace_msg gets enabled when tracepoint sdw_slave_transfer gets + * enabled. This is an efficient way of keeping the for-loop from + * being executed when not needed. + */ + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < num; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_read(mstr, &msg[i], i); + else + trace_sdw_write(mstr, &msg[i], i); + } + orig_jiffies = jiffies; + for (i = 0; i < num; i++) { + for (ret = 0, try = 0; try <= mstr->retries; try++) { + if (msg->slave_addr == 0) + /* If we are enumerating slave address 0, + * we dont program scp, it should be set + * default to 0 + */ + program_scp_addr_page = 0; + else if (msg->slave_addr == 15) + /* If we are broadcasting, we need to program + * the SCP address as some slaves will be + * supporting it while some wont be. 
+ * So it should be programmed + */ + program_scp_addr_page = 1; + + else { + slv_cap = + &mstr->sdw_addr[addr].slave->sdw_slv_cap; + program_scp_addr_page = + slv_cap->paging_supported; + } + /* Call async or sync handler based on call */ + if (!async_data) + ret = mstr->driver->mstr_ops->xfer_msg(mstr, + msg, program_scp_addr_page); + /* Async transfer is not mandatory to support + * It requires only if stream is split across the + * masters, where bus driver need to send the commands + * for bank switch individually and wait for them + * to complete out side of the master context + */ + else if (mstr->driver->mstr_ops->xfer_msg_async && + async_data) + ret = mstr->driver->mstr_ops->xfer_msg_async( + mstr, msg, + program_scp_addr_page, + async_data); + else + return -ENOTSUPP; + if (ret != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + mstr->timeout)) + break; + } + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < msg->len; i++) + if (msg[i].flag & SDW_MSG_FLAG_READ) + trace_sdw_reply(mstr, &msg[i], i); + trace_sdw_result(mstr, i, ret); + } + if (!ret) + return i; + return ret; +} +EXPORT_SYMBOL_GPL(__sdw_transfer); + +/* NO PM version of slave transfer. Called from power management APIs + * to avoid dead locks. + */ +static int sdw_slave_transfer_nopm(struct sdw_master *mstr, struct sdw_msg *msg, + int num) +{ + int ret; + + if (mstr->driver->mstr_ops->xfer_msg) { + ret = __sdw_transfer(mstr, msg, num, NULL); + return ret; + } + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; +} + +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data) +{ + int ret; + /* Currently we support only message asynchronously, This is mainly + * used to do bank switch for multiple controllers + */ + if (num != 1) + return -EINVAL; + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + ret = __sdw_transfer(mstr, msg, num, async_data); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} + +/** + * sdw_slave_transfer: Transfer message between slave and mstr on the bus. + * @mstr: mstr master which will transfer the message + * @msg: Array of messages to be transferred. + * @num: Number of messages to be transferred, messages include read and write + * messages, but not the ping messages. + */ +int sdw_slave_transfer(struct sdw_master *mstr, struct sdw_msg *msg, int num) +{ + int ret; + + /* REVISIT the fault reporting model here is weak: + * + * - When we get an error after receiving N bytes from a slave, + * there is no way to report "N". + * + * - When we get a NAK after transmitting N bytes to a slave, + * there is no way to report "N" ... or to let the mstr + * continue executing the rest of this combined message, if + * that's the appropriate response. + * + * - When for example "num" is two and we successfully complete + * the first message but get an error part way through the + * second, it's unclear whether that should be reported as + * one (discarding status on the second message) or errno + * (discarding status on the first one). 
+ */ + if (!(mstr->driver->mstr_ops->xfer_msg)) { + dev_dbg(&mstr->dev, "SDW level transfers not supported\n"); + return -EOPNOTSUPP; + } + pm_runtime_get_sync(&mstr->dev); + if (in_atomic() || irqs_disabled()) { + ret = sdw_trylock_mstr(mstr); + if (!ret) { + /* SDW activity is ongoing. */ + ret = -EAGAIN; + goto out; + } + } else { + sdw_lock_mstr(mstr); + } + ret = __sdw_transfer(mstr, msg, num, NULL); + sdw_unlock_mstr(mstr); +out: + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_slave_transfer); + +static int sdw_handle_dp0_interrupts(struct sdw_master *mstr, + struct sdw_slv *sdw_slv, u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + int impl_def_mask = 0; + u8 rbuf[1] = {0}, wbuf[1] = {0}; + + /* Create message for clearing the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DP0_INTCLEAR; + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DP0_INTSTAT; + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Interrupt status read failed for slave %x\n", sdw_slv->slv_number); + goto out; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %d port 0\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[0]); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_PORT_READY_MASK; + } + if (rd_msg.buf[0] & SDW_DP0_INTMASK_BRA_FAILURE_MASK) { + /* TODO: Handle BRA failure */ + dev_err(&mstr->dev, "BRA failed for slave %d\n", + sdw_slv->slv_number); + wr_msg.buf[0] |= SDW_DP0_INTCLEAR_BRA_FAILURE_MASK; + } + impl_def_mask = SDW_DP0_INTSTAT_IMPDEF1_MASK | + SDW_DP0_INTSTAT_IMPDEF2_MASK | + SDW_DP0_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} + +static int sdw_handle_port_interrupt(struct sdw_master *mstr, + struct sdw_slv *sdw_slv, int port_num, + u8 *status) +{ + int ret = 0; + struct sdw_msg rd_msg, wr_msg; + u8 rbuf[1], wbuf[1]; + int impl_def_mask = 0; + +/* + * Handle the Data port0 interrupt separately since the interrupt + * mask and stat register is different than other DPn registers + */ + if (port_num == 0 && sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return sdw_handle_dp0_interrupts(mstr, sdw_slv, status); + + /* Create message for reading the port interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_DPN_INTCLEAR + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + wr_msg.len = 1; + wr_msg.buf = wbuf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.addr = SDW_DPN_INTSTAT + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + rd_msg.len = 1; + rd_msg.buf = rbuf; + rd_msg.slave_addr = 
sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Port Status read failed for slv %x port %x\n", + sdw_slv->slv_number, port_num); + goto out; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_TEST_FAIL_MASK) { + dev_err(&mstr->dev, "Test fail for slave %x port %x\n", + sdw_slv->slv_number, port_num); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_TEST_FAIL_MASK; + } + if (rd_msg.buf[0] & SDW_DPN_INTSTAT_PORT_READY_MASK) { + complete(&sdw_slv->port_ready[port_num]); + wr_msg.buf[0] |= SDW_DPN_INTCLEAR_PORT_READY_MASK; + } + impl_def_mask = SDW_DPN_INTSTAT_IMPDEF1_MASK | + SDW_DPN_INTSTAT_IMPDEF2_MASK | + SDW_DPN_INTSTAT_IMPDEF3_MASK; + if (rd_msg.buf[0] & impl_def_mask) { + /* TODO: Handle implementation defined mask ready */ + wr_msg.buf[0] |= impl_def_mask; + *status = wr_msg.buf[0]; + } + /* Clear and Ack the interrupt */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + goto out; + } +out: + return ret; + +} +static int sdw_handle_slave_alerts(struct sdw_master *mstr, + struct sdw_slv *sdw_slv) +{ + struct sdw_msg rd_msg[3], wr_msg; + u8 rbuf[3], wbuf[1]; + int i, ret = 0; + int cs_port_mask, cs_port_register, cs_port_start, cs_ports; + struct sdw_impl_def_intr_stat *intr_status; + struct sdw_portn_intr_stat *portn_stat; + u8 port_status[15] = {0}; + u8 control_port_stat = 0; + + + /* Read Instat 1, Instat 2 and Instat 3 registers */ + rd_msg[0].ssp_tag = 0x0; + rd_msg[0].flag = SDW_MSG_FLAG_READ; + rd_msg[0].addr = SDW_SCP_INTSTAT_1; + rd_msg[0].len = 1; + rd_msg[0].buf = &rbuf[0]; + rd_msg[0].slave_addr = sdw_slv->slv_number; + rd_msg[0].addr_page1 = 0x0; + rd_msg[0].addr_page2 = 0x0; + + rd_msg[1].ssp_tag = 0x0; + rd_msg[1].flag = SDW_MSG_FLAG_READ; + rd_msg[1].addr = SDW_SCP_INTSTAT2; + rd_msg[1].len = 1; + rd_msg[1].buf = &rbuf[1]; + rd_msg[1].slave_addr = sdw_slv->slv_number; + rd_msg[1].addr_page1 = 0x0; + rd_msg[1].addr_page2 = 0x0; + + rd_msg[2].ssp_tag = 0x0; + rd_msg[2].flag = SDW_MSG_FLAG_READ; + rd_msg[2].addr = SDW_SCP_INTSTAT3; + rd_msg[2].len = 1; + rd_msg[2].buf = &rbuf[2]; + rd_msg[2].slave_addr = sdw_slv->slv_number; + rd_msg[2].addr_page1 = 0x0; + rd_msg[2].addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.addr = SDW_SCP_INTCLEAR1; + wr_msg.len = 1; + wr_msg.buf = &wbuf[0]; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, rd_msg, 3); + if (ret != 3) { + ret = -EINVAL; + dev_err(&mstr->dev, "Reading of register failed\n"); + goto out; + } + /* First handle parity and bus clash interrupts */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_PARITY_MASK) { + dev_err(&mstr->dev, "Parity error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_PARITY_MASK; + } + /* Handle bus errors */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_BUS_CLASH_MASK) { + dev_err(&mstr->dev, "Bus clash error detected\n"); + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_BUS_CLASH_MASK; + } + /* Handle implementation defined mask */ + if (rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_IMPL_DEF_MASK) { + wr_msg.buf[0] |= SDW_SCP_INTCLEAR1_IMPL_DEF_MASK; + control_port_stat = (rd_msg[0].buf[0] & + SDW_SCP_INTSTAT1_IMPL_DEF_MASK); + } + + /* Handle Cascaded Port interrupts from Instat_1 registers */ + + /* Number of port status bits in this register */ + cs_ports = 4; + /* Port number starts at in this register */ + 
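/*
 * (editorial note on the three scan loops below) Port interrupt status is
 * cascaded across SCP_IntStat1/2/3: IntStat1 carries ports 0-3 starting
 * at bit 3 (mask 0x08), IntStat2 carries ports 4-10 from bit 0, and
 * IntStat3 carries ports 11-14 from bit 0. Each set bit is handed to
 * sdw_handle_port_interrupt() for the matching port.
 */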
cs_port_start = 0; + /* Bit mask for the starting port intr status */ + cs_port_mask = 0x08; + /* Bit mask for the starting port intr status */ + cs_port_register = 0; + + /* Look for cascaded port interrupts, if found handle port + * interrupts. Do this for all the Int_stat registers. + */ + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to SCP3 + */ + if (!(rd_msg[0].buf[0] & SDW_SCP_INTSTAT1_SCP2_CASCADE_MASK)) + goto handle_instat_3_register; + + + cs_ports = 7; + cs_port_start = 4; + cs_port_mask = 0x1; + cs_port_register = 1; + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + + /* + * Handle cascaded interrupts from instat_2 register, + * if no cascaded interrupt from SCP2 cascade move to impl_def intrs + */ +handle_instat_3_register: + if (!(rd_msg[1].buf[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE_MASK)) + goto handle_impl_def_interrupts; + + cs_ports = 4; + cs_port_start = 11; + cs_port_mask = 0x1; + cs_port_register = 2; + + for (i = cs_port_start; i < cs_port_start + cs_ports && + i <= sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + if (rd_msg[cs_port_register].buf[0] & cs_port_mask) { + + ret += sdw_handle_port_interrupt(mstr, + sdw_slv, i, &port_status[i]); + } + cs_port_mask = cs_port_mask << 1; + } + +handle_impl_def_interrupts: + + /* + * If slave has not registered for implementation defined + * interrupts, dont read it. 
+ */ + if (!sdw_slv->driver->handle_impl_def_interrupts) + goto ack_interrupts; + + intr_status = kzalloc(sizeof(*intr_status), GFP_KERNEL); + if (!intr_status) + return -ENOMEM; + + portn_stat = kzalloc((sizeof(*portn_stat)) * + sdw_slv->sdw_slv_cap.num_of_sdw_ports, + GFP_KERNEL); + if (!portn_stat) + return -ENOMEM; + + intr_status->portn_stat = portn_stat; + intr_status->control_port_stat = control_port_stat; + + /* Update the implementation defined status to Slave */ + for (i = 1; i < sdw_slv->sdw_slv_cap.num_of_sdw_ports; i++) { + + intr_status->portn_stat[i].status = port_status[i]; + intr_status->portn_stat[i].num = i; + } + + intr_status->port0_stat = port_status[0]; + intr_status->control_port_stat = wr_msg.buf[0]; + + ret = sdw_slv->driver->handle_impl_def_interrupts(sdw_slv, + intr_status); + if (ret) + dev_err(&mstr->dev, "Implementation defined interrupt handling failed\n"); + + kfree(portn_stat); + kfree(intr_status); + +ack_interrupts: + /* Ack the interrupts */ + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr->dev, "Register transfer failed\n"); + } +out: + return 0; +} + +int sdw_en_intr(struct sdw_slv *sdw_slv, int port_num, int mask) +{ + + struct sdw_msg rd_msg, wr_msg; + u8 buf; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + + rd_msg.addr = wr_msg.addr = SDW_DPN_INTMASK + + (SDW_NUM_DATA_PORT_REGISTERS * port_num); + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + /* Create message for reading the interrupts for DP0 interrupts*/ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DPN_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DPN_INTSTAT_PORT_READY_MASK; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DPn Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_en_scp_intr(struct sdw_slv *sdw_slv, int mask) +{ + struct sdw_msg rd_msg, wr_msg; + u8 buf = 0; + int ret; + struct sdw_master *mstr = sdw_slv->mstr; + u16 reg_addr; + + reg_addr = SDW_SCP_INTMASK1; + + rd_msg.addr = wr_msg.addr = reg_addr; + + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. 
*/ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_SCP_INTMASK1_BUS_CLASH_MASK; + buf |= SDW_SCP_INTMASK1_PARITY_MASK; + + /* Create message for enabling the interrupts */ + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "SCP Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Return if DP0 is not present */ + if (!sdw_slv->sdw_slv_cap.sdw_dp0_supported) + return 0; + + + reg_addr = SDW_DP0_INTMASK; + rd_msg.addr = wr_msg.addr = reg_addr; + mask = sdw_slv->sdw_slv_cap.sdw_dp0_cap->imp_def_intr_mask; + buf = 0; + + /* Create message for reading the interrupt mask */ + /* Create message for reading the interrupt mask */ + rd_msg.ssp_tag = 0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.buf = &buf; + rd_msg.slave_addr = sdw_slv->slv_number; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + ret = sdw_slave_transfer(mstr, &rd_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask read failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + + /* Enable the Slave defined interrupts. */ + buf |= mask; + + /* Set the port ready and Test fail interrupt mask as well */ + buf |= SDW_DP0_INTSTAT_TEST_FAIL_MASK; + buf |= SDW_DP0_INTSTAT_PORT_READY_MASK; + buf |= SDW_DP0_INTSTAT_BRA_FAILURE_MASK; + + wr_msg.ssp_tag = 0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.buf = &buf; + wr_msg.slave_addr = sdw_slv->slv_number; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr, &wr_msg, 1); + if (ret != 1) { + dev_err(&mstr->dev, "DP0 Intr mask write failed for slave %x\n", + sdw_slv->slv_number); + return -EINVAL; + } + return 0; +} + +static int sdw_prog_slv(struct sdw_slv *sdw_slv) +{ + + struct sdw_slv_capabilities *cap; + int ret, i; + struct sdw_slv_dpn_capabilities *dpn_cap; + struct sdw_master *mstr = sdw_slv->mstr; + + if (!sdw_slv->slave_cap_updated) + return 0; + cap = &sdw_slv->sdw_slv_cap; + + /* Enable DP0 and SCP interrupts */ + ret = sdw_en_scp_intr(sdw_slv, cap->scp_impl_def_intr_mask); + + /* Failure should never happen, even if it happens we continue */ + if (ret) + dev_err(&mstr->dev, "SCP program failed\n"); + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + ret = sdw_en_intr(sdw_slv, (i + 1), + dpn_cap->imp_def_intr_mask); + + if (ret) + break; + } + return ret; +} + + +static void sdw_send_slave_status(struct sdw_slv *slave, + enum sdw_slave_status *status) +{ + struct sdw_slave_driver *slv_drv = slave->driver; + + if (slv_drv && slv_drv->update_slv_status) + slv_drv->update_slv_status(slave, status); +} + +static int sdw_wait_for_deprepare(struct sdw_slv *slave) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + struct sdw_master *mstr = slave->mstr; + + /* Create message to read clock stop status, its broadcast message. 
*/ + buf[0] = 0xFF; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + /* + * Read the ClockStopNotFinished bit from the SCP_Stat register + * of particular Slave to make sure that clock stop prepare is done + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Clk stp deprepare failed for slave %d\n", + slave->slv_number); + + return -EINVAL; +} + +static void sdw_prep_slave_for_clk_stp(struct sdw_master *mstr, + struct sdw_slv *slave, + enum sdw_clk_stop_mode clock_stop_mode, + bool prep) +{ + bool wake_en; + struct sdw_slv_capabilities *cap; + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + + cap = &slave->sdw_slv_cap; + + /* Set the wakeup enable based on Slave capability */ + wake_en = !cap->wake_up_unavailable; + + if (prep) { + /* Even if its simplified clock stop prepare, + * setting prepare bit wont harm + */ + buf[0] |= (1 << SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_SHIFT); + buf[0] |= clock_stop_mode << + SDW_SCP_SYSTEMCTRL_CLK_STP_MODE_SHIFT; + buf[0] |= wake_en << SDW_SCP_SYSTEMCTRL_WAKE_UP_EN_SHIFT; + } else + buf[0] = 0; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + /* + * We are calling NOPM version of the transfer API, because + * Master controllers calls this from the suspend handler, + * so if we call the normal transfer API, it tries to resume + * controller, which result in deadlock + */ + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) + WARN_ONCE(1, "Clock Stop prepare failed for slave %d\n", + slave->slv_number); +} + +static int sdw_check_for_prep_bit(struct sdw_slv *slave) +{ + u8 buf[1] = {0}; + struct sdw_msg msg; + int ret; + struct sdw_master *mstr = slave->mstr; + + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = slave->slv_number; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_SYSTEMCTRL; + + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + /* We should continue even if it fails for some Slave */ + if (ret != 1) { + dev_err(&mstr->dev, "SCP_SystemCtrl read failed for Slave %d\n", + slave->slv_number); + return -EINVAL; + + } + return (buf[0] & SDW_SCP_SYSTEMCTRL_CLK_STP_PREP_MASK); + +} + +static int sdw_slv_deprepare_clk_stp1(struct sdw_slv *slave) +{ + struct sdw_slv_capabilities *cap; + int ret; + struct sdw_master *mstr = slave->mstr; + + cap = &slave->sdw_slv_cap; + + /* + * Slave might have enumerated 1st time or from clock stop mode 1 + * 
return if Slave doesn't require deprepare + */ + if (!cap->clk_stp1_deprep_required) + return 0; + + /* + * If Slave requires de-prepare after exiting from Clock Stop + * mode 1, than check for ClockStopPrepare bit in SystemCtrl register + * if its 1, de-prepare Slave from clock stop prepare, else + * return + */ + ret = sdw_check_for_prep_bit(slave); + /* If prepare bit is not set, return without error */ + if (!ret) + return 0; + + /* If error in reading register, return with error */ + if (ret < 0) + return ret; + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + if (ret) { + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + return ret; + } + } + + sdw_prep_slave_for_clk_stp(slave->mstr, slave, + cap->clock_stop1_mode_supported, false); + + /* Make sure NF = 0 for deprepare to complete */ + ret = sdw_wait_for_deprepare(slave); + + /* Return in de-prepare unsuccessful */ + if (ret) + return ret; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + cap->clock_stop1_mode_supported, false); + + if (ret) + dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n", + slave->slv_number); + } + + return ret; +} + +static void handle_slave_status(struct kthread_work *work) +{ + int i, ret = 0; + struct sdw_slv_status *status, *__status__; + struct sdw_bus *bus = + container_of(work, struct sdw_bus, kwork); + struct sdw_master *mstr = bus->mstr; + unsigned long flags; + bool slave_present = 0; + + /* Handle the new attached slaves to the bus. Register new slave + * to the bus. + */ + list_for_each_entry_safe(status, __status__, &bus->status_list, node) { + if (status->status[0] == SDW_SLAVE_STAT_ATTACHED_OK) { + ret += sdw_register_slave(mstr); + if (ret) + /* Even if adding new slave fails, we will + * continue. + */ + dev_err(&mstr->dev, "Registering new slave failed\n"); + } + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_present = false; + if (status->status[i] == SDW_SLAVE_STAT_NOT_PRESENT && + mstr->sdw_addr[i].assigned == true) { + /* Logical address was assigned to slave, but + * now its down, so mark it as not present + */ + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + slave_present = true; + } + + else if (status->status[i] == SDW_SLAVE_STAT_ALERT && + mstr->sdw_addr[i].assigned == true) { + ret = 0; + /* Handle slave alerts */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_ALERT; + ret = sdw_handle_slave_alerts(mstr, + mstr->sdw_addr[i].slave); + if (ret) + dev_err(&mstr->dev, "Handle slave alert failed for Slave %d\n", i); + + slave_present = true; + + + } else if (status->status[i] == + SDW_SLAVE_STAT_ATTACHED_OK && + mstr->sdw_addr[i].assigned == true) { + + sdw_prog_slv(mstr->sdw_addr[i].slave); + + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_ATTACHED_OK; + ret = sdw_slv_deprepare_clk_stp1( + mstr->sdw_addr[i].slave); + + /* + * If depreparing Slave fails, no need to + * reprogram Slave, this should never happen + * in ideal case. 
+ */ + if (ret) + continue; + slave_present = true; + } + + if (!slave_present) + continue; + + sdw_send_slave_status(mstr->sdw_addr[i].slave, + &mstr->sdw_addr[i].status); + } + spin_lock_irqsave(&bus->spinlock, flags); + list_del(&status->node); + spin_unlock_irqrestore(&bus->spinlock, flags); + kfree(status); + } +} + +static int sdw_register_master(struct sdw_master *mstr) +{ + int ret = 0; + int i; + struct sdw_bus *sdw_bus; + + /* Can't register until after driver model init */ + if (unlikely(WARN_ON(!sdwint_bus_type.p))) { + ret = -EAGAIN; + goto bus_init_not_done; + } + /* Sanity checks */ + if (unlikely(mstr->name[0] == '\0')) { + pr_err("sdw-core: Attempt to register an master with no name!\n"); + ret = -EINVAL; + goto mstr_no_name; + } + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) + mstr->sdw_addr[i].slv_number = i; + + rt_mutex_init(&mstr->bus_lock); + INIT_LIST_HEAD(&mstr->slv_list); + INIT_LIST_HEAD(&mstr->mstr_rt_list); + + sdw_bus = kzalloc(sizeof(struct sdw_bus), GFP_KERNEL); + if (!sdw_bus) + goto bus_alloc_failed; + sdw_bus->mstr = mstr; + init_completion(&sdw_bus->async_data.xfer_complete); + + mutex_lock(&sdw_core.core_lock); + list_add_tail(&sdw_bus->bus_node, &sdw_core.bus_list); + mutex_unlock(&sdw_core.core_lock); + + dev_set_name(&mstr->dev, "sdw-%d", mstr->nr); + mstr->dev.bus = &sdwint_bus_type; + mstr->dev.type = &sdw_mstr_type; + + ret = device_register(&mstr->dev); + if (ret) + goto out_list; + kthread_init_worker(&sdw_bus->kworker); + sdw_bus->status_thread = kthread_run(kthread_worker_fn, + &sdw_bus->kworker, "%s", + dev_name(&mstr->dev)); + if (IS_ERR(sdw_bus->status_thread)) { + dev_err(&mstr->dev, "error: failed to create status message task\n"); + ret = PTR_ERR(sdw_bus->status_thread); + goto task_failed; + } + kthread_init_work(&sdw_bus->kwork, handle_slave_status); + INIT_LIST_HEAD(&sdw_bus->status_list); + spin_lock_init(&sdw_bus->spinlock); + ret = sdw_mstr_bw_init(sdw_bus); + if (ret) { + dev_err(&mstr->dev, "error: Failed to init mstr bw\n"); + goto mstr_bw_init_failed; + } + dev_dbg(&mstr->dev, "master [%s] registered\n", mstr->name); + + return 0; + +mstr_bw_init_failed: +task_failed: + device_unregister(&mstr->dev); +out_list: + mutex_lock(&sdw_core.core_lock); + list_del(&sdw_bus->bus_node); + mutex_unlock(&sdw_core.core_lock); + kfree(sdw_bus); +bus_alloc_failed: +mstr_no_name: +bus_init_not_done: + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + return ret; +} + +/** + * sdw_master_update_slv_status: Report the status of slave to the bus driver. + * master calls this function based on the + * interrupt it gets once the slave changes its + * state. + * @mstr: Master handle for which status is reported. + * @status: Array of status of each slave. 
+ */
+int sdw_master_update_slv_status(struct sdw_master *mstr,
+					struct sdw_status *status)
+{
+	struct sdw_bus *bus = NULL, *b;
+	struct sdw_slv_status *slv_status;
+	unsigned long flags;
+
+	list_for_each_entry(b, &sdw_core.bus_list, bus_node) {
+		if (b->mstr == mstr) {
+			bus = b;
+			break;
+		}
+	}
+	/* This master is not registered with the bus driver */
+	if (!bus) {
+		dev_info(&mstr->dev, "Master not registered with bus\n");
+		return 0;
+	}
+	slv_status = kzalloc(sizeof(struct sdw_slv_status), GFP_ATOMIC);
+	if (!slv_status)
+		return -ENOMEM;
+	memcpy(slv_status->status, status, sizeof(struct sdw_status));
+
+	spin_lock_irqsave(&bus->spinlock, flags);
+	list_add_tail(&slv_status->node, &bus->status_list);
+	spin_unlock_irqrestore(&bus->spinlock, flags);
+
+	kthread_queue_work(&bus->kworker, &bus->kwork);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_master_update_slv_status);
+
+/**
+ * sdw_add_master_controller - declare sdw master, use dynamic bus number
+ * @mstr: the master to add
+ * Context: can sleep
+ *
+ * This routine is used to declare an sdw master when its bus number
+ * doesn't matter or when its bus number is specified by a DT alias.
+ * Examples of cases when the bus number doesn't matter: sdw masters
+ * dynamically added by USB links or PCI plugin cards.
+ *
+ * When this returns zero, a new bus number was allocated and stored
+ * in mstr->nr, and the specified master became available for slaves.
+ * Otherwise, a negative errno value is returned.
+ */
+int sdw_add_master_controller(struct sdw_master *mstr)
+{
+	int id;
+
+	mutex_lock(&sdw_core.core_lock);
+
+	id = idr_alloc(&sdw_core.idr, mstr,
+		sdw_core.first_dynamic_bus_num, 0, GFP_KERNEL);
+	mutex_unlock(&sdw_core.core_lock);
+	if (id < 0)
+		return id;
+
+	mstr->nr = id;
+
+	return sdw_register_master(mstr);
+}
+EXPORT_SYMBOL_GPL(sdw_add_master_controller);
+
+static void sdw_unregister_slave(struct sdw_slv *sdw_slv)
+{
+	struct sdw_master *mstr;
+
+	mstr = sdw_slv->mstr;
+	sdw_lock_mstr(mstr);
+	list_del(&sdw_slv->node);
+	sdw_unlock_mstr(mstr);
+	mstr->sdw_addr[sdw_slv->slv_number].assigned = false;
+	memset(mstr->sdw_addr[sdw_slv->slv_number].dev_id, 0x0, 6);
+	device_unregister(&sdw_slv->dev);
+	kfree(sdw_slv);
+}
+
+static int __unregister_slave(struct device *dev, void *dummy)
+{
+	struct sdw_slv *slave = sdw_slave_verify(dev);
+
+	if (slave && strcmp(slave->name, "dummy"))
+		sdw_unregister_slave(slave);
+	return 0;
+}
+
+/**
+ * sdw_del_master_controller - unregister SDW master
+ * @mstr: the master being unregistered
+ * Context: can sleep
+ *
+ * This unregisters an SDW master which was previously registered
+ * by sdw_add_master_controller().
+ */
+void sdw_del_master_controller(struct sdw_master *mstr)
+{
+	struct sdw_master *found;
+
+	/* First make sure that this master was ever added */
+	mutex_lock(&sdw_core.core_lock);
+	found = idr_find(&sdw_core.idr, mstr->nr);
+	mutex_unlock(&sdw_core.core_lock);
+
+	if (found != mstr) {
+		pr_debug("sdw-core: attempting to delete unregistered master [%s]\n", mstr->name);
+		return;
+	}
+	/* Detach any active slaves. This can't fail, thus we do not
+	 * check the returned value.
+ */ + device_for_each_child(&mstr->dev, NULL, __unregister_slave); + + /* device name is gone after device_unregister */ + dev_dbg(&mstr->dev, "mstrter [%s] unregistered\n", mstr->name); + + /* wait until all references to the device are gone + * + * FIXME: This is old code and should ideally be replaced by an + * alternative which results in decoupling the lifetime of the struct + * device from the sdw_master, like spi or netdev do. Any solution + * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled! + */ + init_completion(&mstr->slv_released); + device_unregister(&mstr->dev); + wait_for_completion(&mstr->slv_released); + + /* free bus id */ + mutex_lock(&sdw_core.core_lock); + idr_remove(&sdw_core.idr, mstr->nr); + mutex_unlock(&sdw_core.core_lock); + + /* Clear the device structure in case this mstrter is ever going to be + added again */ + memset(&mstr->dev, 0, sizeof(mstr->dev)); +} +EXPORT_SYMBOL_GPL(sdw_del_master_controller); + +/** + * sdw_slave_xfer_bra_block: Transfer the data block using the BTP/BRA + * protocol. + * @mstr: SoundWire Master Master + * @block: Data block to be transferred. + */ +int sdw_slave_xfer_bra_block(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct sdw_bus *sdw_mstr_bs = NULL; + struct sdw_mstr_driver *ops = NULL; + int ret; + + /* + * This API will be called by slave/codec + * when it needs to xfer firmware to + * its memory or perform bulk read/writes of registers. + */ + + /* + * Acquire core lock + * TODO: Acquire Master lock inside core lock + * similar way done in upstream. currently + * keeping it as core lock + */ + mutex_lock(&sdw_core.core_lock); + + /* Get master data structure */ + list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) { + /* Match master structure pointer */ + if (sdw_mstr_bs->mstr != mstr) + continue; + + break; + } + + /* + * Here assumption is made that complete SDW bandwidth is used + * by BRA. So bus will return -EBUSY if any active stream + * is running on given master. + * TODO: In final implementation extra bandwidth will be always + * allocated for BRA. In that case all the computation of clock, + * frame shape, transport parameters for DP0 will be done + * considering BRA feature. + */ + if (!list_empty(&mstr->mstr_rt_list)) { + + /* + * Currently not allowing BRA when any + * active stream on master, returning -EBUSY + */ + + /* Release lock */ + mutex_unlock(&sdw_core.core_lock); + return -EBUSY; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* + * Check whether Master is supporting bulk transfer. If not, then + * bus will use alternate method of performing BRA request using + * normal register read/write API. + * TODO: Currently if Master is not supporting BRA transfers, bus + * returns error. Bus driver to extend support for normal register + * read/write as alternate method. 
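+	 *
+	 * Such a fallback would roughly reduce to a register-level loop,
+	 * sketched here only in pseudocode (the chunking is hypothetical,
+	 * nothing below is implemented yet):
+	 *
+	 *	for each chunk of the block data:
+	 *		build a struct sdw_msg with SDW_MSG_FLAG_WRITE
+	 *		sdw_slave_transfer(mstr, &msg, 1)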
+	 */
+	if (!ops->mstr_ops->xfer_bulk) {
+		/* Drop the core lock taken above before bailing out */
+		ret = -EINVAL;
+		goto error;
+	}
+
+	/* Data port Programming (ON) */
+	ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, true);
+	if (ret < 0) {
+		dev_err(&mstr->dev, "BRA: Xport parameter config failed ret=%d\n", ret);
+		goto error;
+	}
+
+	/* Bulk transfer */
+	ret = ops->mstr_ops->xfer_bulk(mstr, block);
+	if (ret < 0) {
+		dev_err(&mstr->dev, "BRA: Transfer failed ret=%d\n", ret);
+		goto error;
+	}
+
+	/* Data port Programming (OFF) */
+	ret = sdw_bus_bra_xport_config(sdw_mstr_bs, block, false);
+	if (ret < 0) {
+		dev_err(&mstr->dev, "BRA: Xport parameter de-config failed ret=%d\n", ret);
+		goto error;
+	}
+
+error:
+	/* Release lock */
+	mutex_unlock(&sdw_core.core_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sdw_slave_xfer_bra_block);
+
+/*
+ * An sdw_mstr_driver is bound to sdw_master controller instances
+ * registered on the bus.
+ */
+int __sdw_mstr_driver_register(struct module *owner,
+				struct sdw_mstr_driver *driver)
+{
+	int res;
+
+	/* Can't register until after driver model init */
+	if (unlikely(WARN_ON(!sdwint_bus_type.p)))
+		return -EAGAIN;
+
+	/* add the driver to the list of sdw drivers in the driver core */
+	driver->driver.owner = owner;
+	driver->driver.bus = &sdwint_bus_type;
+
+	/* When registration returns, the driver core
+	 * will have called probe() for all matching-but-unbound slaves.
+	 */
+	res = driver_register(&driver->driver);
+	if (res)
+		return res;
+
+	pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__sdw_mstr_driver_register);
+
+void sdw_mstr_driver_unregister(struct sdw_mstr_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(sdw_mstr_driver_unregister);
+
+void sdw_slave_driver_unregister(struct sdw_slave_driver *driver)
+{
+	driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL_GPL(sdw_slave_driver_unregister);
+
+/*
+ * An sdw_slave_driver is used with one or more sdw_slv (slave) nodes to
+ * access sdw slave chips, on a bus instance associated with some
+ * sdw_master.
+ */
+int __sdw_slave_driver_register(struct module *owner,
+				struct sdw_slave_driver *driver)
+{
+	int res;
+
+	/* Can't register until after driver model init */
+	if (unlikely(WARN_ON(!sdwint_bus_type.p)))
+		return -EAGAIN;
+
+	/* add the driver to the list of sdw drivers in the driver core */
+	driver->driver.owner = owner;
+	driver->driver.bus = &sdwint_bus_type;
+
+	/* When registration returns, the driver core
+	 * will have called probe() for all matching-but-unbound slaves.
+ */ + res = driver_register(&driver->driver); + if (res) + return res; + pr_debug("sdw-core: driver [%s] registered\n", driver->driver.name); + + return 0; +} +EXPORT_SYMBOL_GPL(__sdw_slave_driver_register); + +int sdw_register_slave_capabilities(struct sdw_slv *sdw, + struct sdw_slv_capabilities *cap) +{ + struct sdw_slv_capabilities *slv_cap; + struct sdw_slv_dpn_capabilities *slv_dpn_cap, *dpn_cap; + struct port_audio_mode_properties *prop, *slv_prop; + int i, j; + int ret = 0; + + slv_cap = &sdw->sdw_slv_cap; + + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->wake_up_unavailable = cap->wake_up_unavailable; + slv_cap->test_mode_supported = cap->test_mode_supported; + slv_cap->clock_stop1_mode_supported = cap->clock_stop1_mode_supported; + slv_cap->simplified_clock_stop_prepare = + cap->simplified_clock_stop_prepare; + slv_cap->scp_impl_def_intr_mask = cap->scp_impl_def_intr_mask; + + slv_cap->highphy_capable = cap->highphy_capable; + slv_cap->paging_supported = cap->paging_supported; + slv_cap->bank_delay_support = cap->bank_delay_support; + slv_cap->port_15_read_behavior = cap->port_15_read_behavior; + slv_cap->sdw_dp0_supported = cap->sdw_dp0_supported; + slv_cap->num_of_sdw_ports = cap->num_of_sdw_ports; + slv_cap->sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap->num_of_sdw_ports), GFP_KERNEL); + if (!slv_cap->sdw_dpn_cap) + return -ENOMEM; + + for (i = 0; i < cap->num_of_sdw_ports; i++) { + dpn_cap = &cap->sdw_dpn_cap[i]; + slv_dpn_cap = &slv_cap->sdw_dpn_cap[i]; + slv_dpn_cap->port_direction = dpn_cap->port_direction; + slv_dpn_cap->port_number = dpn_cap->port_number; + slv_dpn_cap->max_word_length = dpn_cap->max_word_length; + slv_dpn_cap->min_word_length = dpn_cap->min_word_length; + slv_dpn_cap->num_word_length = dpn_cap->num_word_length; + if (NULL == dpn_cap->word_length_buffer) + slv_dpn_cap->word_length_buffer = + dpn_cap->word_length_buffer; + else { + slv_dpn_cap->word_length_buffer = + devm_kzalloc(&sdw->dev, + dpn_cap->num_word_length * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->word_length_buffer) + return -ENOMEM; + memcpy(slv_dpn_cap->word_length_buffer, + dpn_cap->word_length_buffer, + dpn_cap->num_word_length * + (sizeof(unsigned int))); + } + slv_dpn_cap->dpn_type = dpn_cap->dpn_type; + slv_dpn_cap->dpn_grouping = dpn_cap->dpn_grouping; + slv_dpn_cap->prepare_ch = dpn_cap->prepare_ch; + slv_dpn_cap->imp_def_intr_mask = dpn_cap->imp_def_intr_mask; + slv_dpn_cap->min_ch_num = dpn_cap->min_ch_num; + slv_dpn_cap->max_ch_num = dpn_cap->max_ch_num; + slv_dpn_cap->num_ch_supported = dpn_cap->num_ch_supported; + if (NULL == slv_dpn_cap->ch_supported) + slv_dpn_cap->ch_supported = dpn_cap->ch_supported; + else { + slv_dpn_cap->ch_supported = + devm_kzalloc(&sdw->dev, + dpn_cap->num_ch_supported * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_dpn_cap->ch_supported) + return -ENOMEM; + memcpy(slv_dpn_cap->ch_supported, + dpn_cap->ch_supported, + dpn_cap->num_ch_supported * + (sizeof(unsigned int))); + } + slv_dpn_cap->port_flow_mode_mask = + dpn_cap->port_flow_mode_mask; + slv_dpn_cap->block_packing_mode_mask = + dpn_cap->block_packing_mode_mask; + slv_dpn_cap->port_encoding_type_mask = + dpn_cap->port_encoding_type_mask; + slv_dpn_cap->num_audio_modes = dpn_cap->num_audio_modes; + + slv_dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + if (!slv_dpn_cap->mode_properties) + return -ENOMEM; + + for (j 
= 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + slv_prop = &slv_dpn_cap->mode_properties[j]; + slv_prop->max_frequency = prop->max_frequency; + slv_prop->min_frequency = prop->min_frequency; + slv_prop->num_freq_configs = prop->num_freq_configs; + if (NULL == slv_prop->freq_supported) + slv_prop->freq_supported = + prop->freq_supported; + else { + slv_prop->freq_supported = + devm_kzalloc(&sdw->dev, + prop->num_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->freq_supported) + return -ENOMEM; + memcpy(slv_prop->freq_supported, + prop->freq_supported, + prop->num_freq_configs * + (sizeof(unsigned int))); + } + slv_prop->glitchless_transitions_mask + = prop->glitchless_transitions_mask; + slv_prop->max_sampling_frequency = + prop->max_sampling_frequency; + slv_prop->min_sampling_frequency = + prop->min_sampling_frequency; + slv_prop->num_sampling_freq_configs = + prop->num_sampling_freq_configs; + if (NULL == prop->sampling_freq_config) + slv_prop->sampling_freq_config = + prop->sampling_freq_config; + else { + slv_prop->sampling_freq_config = + devm_kzalloc(&sdw->dev, + prop->num_sampling_freq_configs * + (sizeof(unsigned int)), GFP_KERNEL); + if (!slv_prop->sampling_freq_config) + return -ENOMEM; + memcpy(slv_prop->sampling_freq_config, + prop->sampling_freq_config, + prop->num_sampling_freq_configs * + (sizeof(unsigned int))); + } + + slv_prop->ch_prepare_behavior = + prop->ch_prepare_behavior; + } + } + ret = sdw_prog_slv(sdw); + if (ret) + return ret; + sdw->slave_cap_updated = true; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_register_slave_capabilities); + +static int sdw_get_stream_tag(char *key, int *stream_tag) +{ + int i; + int ret = -EINVAL; + struct sdw_runtime *sdw_rt; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + /* If stream tag is already allocated return that after incrementing + * reference count. This is only possible if key is provided. 
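+	 * For example (key string purely illustrative), two aggregated
+	 * codecs both asking for a "stereo-out" key would share one tag
+	 * with ref_count bumped to 2, while a NULL key always allocates
+	 * a fresh tag.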
+ */ + mutex_lock(&sdw_core.core_lock); + if (!key) + goto key_check_not_required; + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!(strcmp(stream_tags[i].key, key))) { + stream_tags[i].ref_count++; + *stream_tag = stream_tags[i].stream_tag; + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } +key_check_not_required: + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (!stream_tags[i].ref_count) { + *stream_tag = stream_tags[i].stream_tag; + mutex_init(&stream_tags[i].stream_lock); + sdw_rt = kzalloc(sizeof(struct sdw_runtime), + GFP_KERNEL); + if (!sdw_rt) { + ret = -ENOMEM; + mutex_unlock(&sdw_core.core_lock); + goto out; + } + stream_tags[i].ref_count++; + INIT_LIST_HEAD(&sdw_rt->slv_rt_list); + INIT_LIST_HEAD(&sdw_rt->mstr_rt_list); + sdw_rt->stream_state = SDW_STATE_INIT_STREAM_TAG; + stream_tags[i].sdw_rt = sdw_rt; + if (key) + strlcpy(stream_tags[i].key, key, + SDW_MAX_STREAM_TAG_KEY_SIZE); + mutex_unlock(&sdw_core.core_lock); + return 0; + } + } + mutex_unlock(&sdw_core.core_lock); +out: + return ret; +} + +void sdw_release_stream_tag(int stream_tag) +{ + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + mutex_lock(&sdw_core.core_lock); + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream_tags[i].ref_count--; + if (stream_tags[i].ref_count == 0) { + kfree(stream_tags[i].sdw_rt); + memset(stream_tags[i].key, 0x0, + SDW_MAX_STREAM_TAG_KEY_SIZE); + } + } + } + mutex_unlock(&sdw_core.core_lock); +} +EXPORT_SYMBOL_GPL(sdw_release_stream_tag); + +/** + * sdw_alloc_stream_tag: Assign the stream tag for the unique streams + * between master and slave device. + * Normally master master will request for the + * stream tag for the stream between master + * and slave device. It programs the same stream + * tag to the slave device. Stream tag is unique + * for all the streams between masters and slave + * across SoCs. + * @guid: Group of the device port. All the ports of the device with + * part of same stream will have same guid. + * + * @stream:tag: Stream tag returned by bus driver. 
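+ *
+ * Minimal usage sketch (the key string is illustrative; any identifier
+ * that is stable and unique per stream works):
+ *
+ *	int tag, ret;
+ *
+ *	ret = sdw_alloc_stream_tag("codec0-playback", &tag);
+ *	if (ret)
+ *		return ret;
+ *	... configure stream and ports against "tag" ...
+ *	sdw_release_stream_tag(tag);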
+ */ +int sdw_alloc_stream_tag(char *guid, int *stream_tag) +{ + int ret = 0; + + ret = sdw_get_stream_tag(guid, stream_tag); + if (ret) { + pr_err("Stream tag assignment failed\n"); + goto out; + } + +out: + return ret; +} +EXPORT_SYMBOL_GPL(sdw_alloc_stream_tag); + +static struct sdw_mstr_runtime *sdw_get_mstr_rt(struct sdw_runtime *sdw_rt, + struct sdw_master *mstr) { + + struct sdw_mstr_runtime *mstr_rt; + int ret = 0; + + list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) { + if (mstr_rt->mstr == mstr) + return mstr_rt; + } + + /* Allocate sdw_mstr_runtime structure */ + mstr_rt = kzalloc(sizeof(struct sdw_mstr_runtime), GFP_KERNEL); + if (!mstr_rt) { + ret = -ENOMEM; + goto out; + } + + /* Initialize sdw_mstr_runtime structure */ + INIT_LIST_HEAD(&mstr_rt->port_rt_list); + INIT_LIST_HEAD(&mstr_rt->slv_rt_list); + list_add_tail(&mstr_rt->mstr_sdw_node, &sdw_rt->mstr_rt_list); + list_add_tail(&mstr_rt->mstr_node, &mstr->mstr_rt_list); + mstr_rt->rt_state = SDW_STATE_INIT_RT; + mstr_rt->mstr = mstr; +out: + return mstr_rt; +} + +static struct sdw_slave_runtime *sdw_config_slave_stream( + struct sdw_slv *slave, + struct sdw_stream_config *stream_config, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt; + int ret = 0; + struct sdw_stream_params *str_p; + + slv_rt = kzalloc(sizeof(struct sdw_slave_runtime), GFP_KERNEL); + if (!slv_rt) { + ret = -ENOMEM; + goto out; + } + slv_rt->slave = slave; + str_p = &slv_rt->stream_params; + slv_rt->direction = stream_config->direction; + slv_rt->rt_state = SDW_STATE_CONFIG_RT; + str_p->rate = stream_config->frame_rate; + str_p->channel_count = stream_config->channel_count; + str_p->bps = stream_config->bps; + INIT_LIST_HEAD(&slv_rt->port_rt_list); +out: + return slv_rt; +} + +static void sdw_release_mstr_stream(struct sdw_master *mstr, + struct sdw_runtime *sdw_rt) +{ + struct sdw_mstr_runtime *mstr_rt, *__mstr_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(mstr_rt, __mstr_rt, &sdw_rt->mstr_rt_list, + mstr_sdw_node) { + if (mstr_rt->mstr == mstr) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&mstr_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Master port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &mstr_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&mstr_rt->mstr_sdw_node); + if (mstr_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + list_del(&mstr_rt->mstr_node); + pm_runtime_mark_last_busy(&mstr->dev); + pm_runtime_put_sync_autosuspend(&mstr->dev); + kfree(mstr_rt); + } + } +} + +static void sdw_release_slave_stream(struct sdw_slv *slave, + struct sdw_runtime *sdw_rt) +{ + struct sdw_slave_runtime *slv_rt, *__slv_rt; + struct sdw_port_runtime *port_rt, *__port_rt, *first_port_rt = NULL; + + list_for_each_entry_safe(slv_rt, __slv_rt, &sdw_rt->slv_rt_list, + slave_sdw_node) { + if (slv_rt->slave == slave) { + + /* Get first runtime node from port list */ + first_port_rt = list_first_entry(&slv_rt->port_rt_list, + struct sdw_port_runtime, + port_node); + + /* Release Slave port resources */ + list_for_each_entry_safe(port_rt, __port_rt, + &slv_rt->port_rt_list, port_node) + list_del(&port_rt->port_node); + + kfree(first_port_rt); + list_del(&slv_rt->slave_sdw_node); + if (slv_rt->direction == SDW_DATA_DIR_OUT) + sdw_rt->tx_ref_count--; + else + sdw_rt->rx_ref_count--; + 
pm_runtime_mark_last_busy(&slave->dev); + pm_runtime_put_sync_autosuspend(&slave->dev); + kfree(slv_rt); + } + } +} + +/** + * sdw_release_stream: De-allocates the bandwidth allocated to the + * the stream. This is reference counted, + * so for the last stream count, BW will be de-allocated + * for the stream. Normally this will be called + * as part of hw_free. + * + * @mstr: Master handle + * @slave: SoundWire slave handle. + * @stream_config: Stream configuration for the soundwire audio stream. + * @stream_tag: Unique stream tag identifier across SoC for all soundwire + * busses. + * for each audio stream between slaves. This stream tag + * will be allocated by master driver for every + * stream getting open. + */ +int sdw_release_stream(struct sdw_master *mstr, + struct sdw_slv *slave, + unsigned int stream_tag) +{ + int i; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + break; + } + } + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + if (!slave) + sdw_release_mstr_stream(mstr, sdw_rt); + else + sdw_release_slave_stream(slave, sdw_rt); + return 0; +} +EXPORT_SYMBOL_GPL(sdw_release_stream); + +/** + * sdw_configure_stream: Allocates the B/W onto the soundwire bus + * for transferring the data between slave and master. + * This is configuring the single stream of data. + * This will be called by slave, Slave stream + * configuration should match the master stream + * configuration. Normally slave would call this + * as a part of hw_params. + * + * @mstr: Master handle + * @sdw_slv: SoundWire slave handle. + * @stream_config: Stream configuration for the soundwire audio stream. + * @stream_tag: Unique stream tag identifier across the soundwire bus + * for each audio stream between slaves and master. + * This is something like stream_tag in HDA protocol, but + * here its virtual rather than being embedded into protocol. + * Further same stream tag is valid across masters also + * if some ports of the master is participating in + * stream aggregation. This is input parameters to the + * function. 
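+ *
+ * A minimal slave-side sketch (field values are illustrative):
+ *
+ *	struct sdw_stream_config cfg;
+ *
+ *	cfg.frame_rate = 48000;
+ *	cfg.channel_count = 2;
+ *	cfg.bps = 24;
+ *	cfg.direction = SDW_DATA_DIR_OUT;
+ *	ret = sdw_config_stream(mstr, slave, &cfg, stream_tag);
+ *
+ * The master side makes the same call with slave == NULL and a matching
+ * configuration.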
+ */ +int sdw_config_stream(struct sdw_master *mstr, + struct sdw_slv *slave, + struct sdw_stream_config *stream_config, + unsigned int stream_tag) +{ + int i; + int ret = 0; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_mstr_runtime *mstr_rt = NULL; + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + if (!sdw_rt) { + dev_err(&mstr->dev, "Valid stream tag not found\n"); + ret = -EINVAL; + goto out; + } + if (static_key_false(&sdw_trace_msg)) + trace_sdw_config_stream(mstr, slave, stream_config, + stream_tag); + + mutex_lock(&stream->stream_lock); + + mstr_rt = sdw_get_mstr_rt(sdw_rt, mstr); + if (!mstr_rt) { + dev_err(&mstr->dev, "master runtime configuration failed\n"); + ret = -EINVAL; + goto out; + } + + if (!slave) { + mstr_rt->direction = stream_config->direction; + mstr_rt->rt_state = SDW_STATE_CONFIG_RT; + sdw_rt->xport_state = SDW_STATE_ONLY_XPORT_STREAM; + + mstr_rt->stream_params.rate = stream_config->frame_rate; + mstr_rt->stream_params.channel_count = + stream_config->channel_count; + mstr_rt->stream_params.bps = stream_config->bps; + + } else + slv_rt = sdw_config_slave_stream(slave, + stream_config, sdw_rt); + /* Stream params will be stored based on Tx only, since there can + * be only one Tx and muliple Rx, There can be muliple Tx if + * there is aggregation on Tx. That is handled by adding the channels + * to stream_params for each aggregated Tx slaves + */ + if (!sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + sdw_rt->stream_params.rate = stream_config->frame_rate; + sdw_rt->stream_params.channel_count = + stream_config->channel_count; + sdw_rt->stream_params.bps = stream_config->bps; + sdw_rt->tx_ref_count++; + } + + + /* Normally there will be only one Tx in system, multiple Tx + * can only be there if we support aggregation. In that case + * there may be multiple slave or masters handing different + * channels of same Tx stream. + */ + else if (sdw_rt->tx_ref_count && stream_config->direction == + SDW_DATA_DIR_OUT) { + if (sdw_rt->stream_params.rate != + stream_config->frame_rate) { + dev_err(&mstr->dev, "Frame rate for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + if (sdw_rt->stream_params.bps != stream_config->bps) { + dev_err(&mstr->dev, "bps for aggregated devices not matching\n"); + ret = -EINVAL; + goto free_mem; + } + /* Number of channels gets added, since both devices will + * be supporting different channels. Like one Codec + * supporting L and other supporting R channel. + */ + sdw_rt->stream_params.channel_count += + stream_config->channel_count; + sdw_rt->tx_ref_count++; + } else + sdw_rt->rx_ref_count++; + + sdw_rt->type = stream_config->type; + sdw_rt->stream_state = SDW_STATE_CONFIG_STREAM; + + /* Slaves are added to two list, This is because BW is calculated + * for two masters individually, while Ports are enabled of all + * the aggregated masters and slaves part of the same stream tag + * simultaneously. 
+	 */
+	if (slave) {
+		list_add_tail(&slv_rt->slave_sdw_node, &sdw_rt->slv_rt_list);
+		list_add_tail(&slv_rt->slave_node, &mstr_rt->slv_rt_list);
+	}
+	mutex_unlock(&stream->stream_lock);
+	if (slave)
+		pm_runtime_get_sync(&slave->dev);
+	else
+		pm_runtime_get_sync(&mstr->dev);
+	return ret;
+
+free_mem:
+	mutex_unlock(&stream->stream_lock);
+	kfree(mstr_rt);
+	kfree(slv_rt);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sdw_config_stream);
+
+/**
+ * sdw_chk_slv_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error
+ *
+ * This function checks the slave port capabilities against the given
+ * stream parameters. If any of the parameters is not supported by the
+ * port capabilities, it returns an error.
+ */
+int sdw_chk_slv_dpn_caps(struct sdw_slv_dpn_capabilities *dpn_cap,
+	struct sdw_stream_params *strm_prms)
+{
+	struct port_audio_mode_properties *mode_prop =
+		dpn_cap->mode_properties;
+	int ret = 0, i, value;
+
+	/* Check sampling frequency */
+	if (mode_prop->num_sampling_freq_configs) {
+		for (i = 0; i < mode_prop->num_sampling_freq_configs; i++) {
+			value = mode_prop->sampling_freq_config[i];
+			if (strm_prms->rate == value)
+				break;
+		}
+
+		if (i == mode_prop->num_sampling_freq_configs)
+			return -EINVAL;
+
+	} else {
+		if ((strm_prms->rate < mode_prop->min_sampling_frequency)
+				|| (strm_prms->rate >
+				mode_prop->max_sampling_frequency))
+			return -EINVAL;
+	}
+
+	/* Check word length */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+				|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+			value = dpn_cap->ch_supported[i];
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+				|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_chk_mstr_dpn_caps - Returns 0 on success,
+ *			-EINVAL in case of error
+ *
+ * This function checks the master port capabilities against the given
+ * stream parameters. If any of the parameters is not supported by the
+ * port capabilities, it returns an error.
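+ *
+ * For example, a port advertising num_word_length = 0 with
+ * min_word_length = 16 and max_word_length = 24 accepts bps = 24 via
+ * the range check, while one advertising word_length_buffer = {16, 24}
+ * with num_word_length = 2 accepts it via the list walk.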
+ */
+int sdw_chk_mstr_dpn_caps(struct sdw_mstr_dpn_capabilities *dpn_cap,
+	struct sdw_stream_params *strm_prms)
+{
+	int ret = 0, i, value;
+
+	/* Check word length */
+	if (dpn_cap->num_word_length) {
+		for (i = 0; i < dpn_cap->num_word_length; i++) {
+			value = dpn_cap->word_length_buffer[i];
+			if (strm_prms->bps == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_word_length)
+			return -EINVAL;
+
+	} else {
+		if ((strm_prms->bps < dpn_cap->min_word_length)
+				|| (strm_prms->bps > dpn_cap->max_word_length))
+			return -EINVAL;
+	}
+
+	/* Check number of channels */
+	if (dpn_cap->num_ch_supported) {
+		for (i = 0; i < dpn_cap->num_ch_supported; i++) {
+			value = dpn_cap->ch_supported[i];
+			if (strm_prms->channel_count == value)
+				break;
+		}
+
+		if (i == dpn_cap->num_ch_supported)
+			return -EINVAL;
+
+	} else {
+		if ((strm_prms->channel_count < dpn_cap->min_ch_num)
+				|| (strm_prms->channel_count > dpn_cap->max_ch_num))
+			return -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sdw_mstr_port_configuration(struct sdw_master *mstr,
+	struct sdw_runtime *sdw_rt,
+	struct sdw_port_config *port_config)
+{
+	struct sdw_mstr_runtime *mstr_rt;
+	struct sdw_port_runtime *port_rt;
+	int found = 0;
+	int i;
+	int ret = 0, pn = 0;
+	struct sdw_mstr_dpn_capabilities *dpn_cap =
+		mstr->mstr_capabilities.sdw_dpn_cap;
+
+	list_for_each_entry(mstr_rt, &sdw_rt->mstr_rt_list, mstr_sdw_node) {
+		if (mstr_rt->mstr == mstr) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		dev_err(&mstr->dev, "Master not found for this port\n");
+		return -EINVAL;
+	}
+
+	/* Check capabilities before allocating, so port_rt cannot leak */
+	if (!dpn_cap)
+		return -EINVAL;
+
+	port_rt = kzalloc((sizeof(struct sdw_port_runtime)) *
+			port_config->num_ports, GFP_KERNEL);
+	if (!port_rt)
+		return -ENOMEM;
+
+	/*
+	 * Note: the assumption here is that no configuration is
+	 * received for port 0.
+ */ + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_mstr_dpn_caps(&dpn_cap[pn], + &mstr_rt->stream_params); + if (ret < 0) { + dev_err(&mstr->dev, + "Master capabilities check failed\n"); + return -EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &mstr_rt->port_rt_list); + } + + return ret; +} + +static int sdw_slv_port_configuration(struct sdw_slv *slave, + struct sdw_runtime *sdw_rt, + struct sdw_port_config *port_config) +{ + struct sdw_slave_runtime *slv_rt; + struct sdw_port_runtime *port_rt; + struct sdw_slv_dpn_capabilities *dpn_cap = + slave->sdw_slv_cap.sdw_dpn_cap; + int found = 0, ret = 0; + int i, pn; + + list_for_each_entry(slv_rt, &sdw_rt->slv_rt_list, slave_sdw_node) { + if (slv_rt->slave == slave) { + found = 1; + break; + } + } + if (!found) { + dev_err(&slave->mstr->dev, "Slave not found for this port\n"); + return -EINVAL; + } + + if (!slave->slave_cap_updated) { + dev_err(&slave->mstr->dev, "Slave capabilities not updated\n"); + return -EINVAL; + } + + port_rt = kzalloc((sizeof(struct sdw_port_runtime)) * + port_config->num_ports, GFP_KERNEL); + if (!port_rt) + return -EINVAL; + + for (i = 0; i < port_config->num_ports; i++) { + port_rt[i].channel_mask = port_config->port_cfg[i].ch_mask; + port_rt[i].port_num = pn = port_config->port_cfg[i].port_num; + + /* Perform capability check for master port */ + ret = sdw_chk_slv_dpn_caps(&dpn_cap[pn], + &slv_rt->stream_params); + if (ret < 0) { + dev_err(&slave->mstr->dev, + "Slave capabilities check failed\n"); + return -EINVAL; + } + + list_add_tail(&port_rt[i].port_node, &slv_rt->port_rt_list); + } + + return ret; +} + +/** + * sdw_config_port: Port configuration for the SoundWire. Multiple + * soundWire ports may form single stream. Like two + * ports each transferring/receiving mono channels + * forms single stream with stereo channels. + * There will be single ASoC DAI representing + * the both ports. So stream configuration will be + * stereo, but both of the ports will be configured + * for mono channels, each with different channel + * mask. This is used to program port w.r.t to stream. + * params. So no need to de-configure, since these + * are automatically destroyed once stream gets + * destroyed. + * @mstr: Master handle where the slave is connected. + * @slave: Slave handle. + * @port_config: Port configuration for each port of soundwire slave. + * @stream_tag: Stream tag, where this port is connected. 
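+ *
+ * Sketch of a stereo stream split across two mono ports (values are
+ * illustrative; the struct layout is assumed from its use below):
+ *
+ *	struct sdw_port_cfg pcfg[2] = {
+ *		{ .port_num = 1, .ch_mask = 0x1 },
+ *		{ .port_num = 2, .ch_mask = 0x2 },
+ *	};
+ *	struct sdw_port_config pconfig = {
+ *		.num_ports = 2,
+ *		.port_cfg = pcfg,
+ *	};
+ *
+ *	ret = sdw_config_port(mstr, slave, &pconfig, stream_tag);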
+ * + */ +int sdw_config_port(struct sdw_master *mstr, + struct sdw_slv *slave, + struct sdw_port_config *port_config, + unsigned int stream_tag) +{ + int ret = 0; + int i; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_runtime *sdw_rt = NULL; + struct sdw_stream_tag *stream = NULL; + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tags[i].stream_tag == stream_tag) { + sdw_rt = stream_tags[i].sdw_rt; + stream = &stream_tags[i]; + break; + } + } + + if (!sdw_rt) { + dev_err(&mstr->dev, "Invalid stream tag\n"); + return -EINVAL; + } + + if (static_key_false(&sdw_trace_msg)) { + int i; + + for (i = 0; i < port_config->num_ports; i++) { + trace_sdw_config_port(mstr, slave, + &port_config->port_cfg[i], stream_tag); + } + } + + mutex_lock(&stream->stream_lock); + + if (!slave) + ret = sdw_mstr_port_configuration(mstr, sdw_rt, port_config); + else + ret = sdw_slv_port_configuration(slave, sdw_rt, port_config); + + mutex_unlock(&stream->stream_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(sdw_config_port); + +int sdw_prepare_and_enable(int stream_tag, bool enable) +{ + + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw(&stream_tags[i], enable); + if (ret) + pr_err("Bandwidth allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_prepare_and_enable); + +int sdw_disable_and_unprepare(int stream_tag, bool unprepare) +{ + int i, ret = 0; + struct sdw_stream_tag *stream_tags = sdw_core.stream_tags; + struct sdw_stream_tag *stream = NULL; + + mutex_lock(&sdw_core.core_lock); + + for (i = 0; i < SDW_NUM_STREAM_TAGS; i++) { + if (stream_tag == stream_tags[i].stream_tag) { + stream = &stream_tags[i]; + break; + } + } + if (stream == NULL) { + mutex_unlock(&sdw_core.core_lock); + WARN_ON(1); /* Return from here after unlocking core*/ + return -EINVAL; + } + mutex_lock(&stream->stream_lock); + ret = sdw_bus_calc_bw_dis(&stream_tags[i], unprepare); + if (ret) + pr_err("Bandwidth de-allocation failed\n"); + + mutex_unlock(&stream->stream_lock); + + mutex_unlock(&sdw_core.core_lock); + return ret; +} +EXPORT_SYMBOL_GPL(sdw_disable_and_unprepare); + +int sdw_stop_clock(struct sdw_master *mstr, enum sdw_clk_stop_mode mode) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + int slave_present = 0; + + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned && + mstr->sdw_addr[i].status != + SDW_SLAVE_STAT_NOT_PRESENT) + slave_present = 1; + } + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now + */ + msg.ssp_tag = 1; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.addr = SDW_SCP_CTRL; + msg.len = 1; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + msg.buf = buf; + msg.slave_addr = 15; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + if (ret != 1 && slave_present) { + dev_err(&mstr->dev, "Failed to stop clk\n"); + return -EBUSY; + } + /* If we are entering clock stop mode1, mark all the slaves un-attached. 
+ */ + if (mode == SDW_CLOCK_STOP_MODE_1) { + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned) + mstr->sdw_addr[i].status = + SDW_SLAVE_STAT_NOT_PRESENT; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_stop_clock); + +int sdw_wait_for_slave_enumeration(struct sdw_master *mstr, + struct sdw_slv *slave) +{ + int timeout = 0; + + /* Wait till device gets enumerated. Wait for 2Secs before + * giving up + */ + do { + msleep(100); + timeout++; + } while ((slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) && + timeout < 20); + + if (slave->slv_addr->status == SDW_SLAVE_STAT_NOT_PRESENT) + return -EBUSY; + return 0; +} +EXPORT_SYMBOL_GPL(sdw_wait_for_slave_enumeration); + +static enum sdw_clk_stop_mode sdw_get_clk_stp_mode(struct sdw_slv *slave) +{ + enum sdw_clk_stop_mode clock_stop_mode = SDW_CLOCK_STOP_MODE_0; + struct sdw_slv_capabilities *cap = &slave->sdw_slv_cap; + + if (!slave->driver) + return clock_stop_mode; + /* + * Get the dynamic value of clock stop from Slave driver + * if supported, else use the static value from + * capabilities register. Update the capabilities also + * if we have new dynamic value. + */ + if (slave->driver->get_dyn_clk_stp_mod) { + clock_stop_mode = slave->driver->get_dyn_clk_stp_mod(slave); + + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + cap->clock_stop1_mode_supported = true; + else + cap->clock_stop1_mode_supported = false; + } else + clock_stop_mode = cap->clock_stop1_mode_supported; + + return clock_stop_mode; +} + +/** + * sdw_master_stop_clock: Stop the clock. This function broadcasts the SCP_CTRL + * register with clock_stop_now bit set. + * + * @mstr: Master handle for which clock has to be stopped. + * + * Returns 0 on success, appropriate error code on failure. + */ +int sdw_master_stop_clock(struct sdw_master *mstr) +{ + int ret = 0, i; + struct sdw_msg msg; + u8 buf[1] = {0}; + enum sdw_clk_stop_mode mode; + + /* Send Broadcast message to the SCP_ctrl register with + * clock stop now. If none of the Slaves are attached, then there + * may not be ACK, flag the error about ACK not recevied but + * clock will be still stopped. 
+ */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_WRITE; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_CTRL; + buf[0] |= 0x1 << SDW_SCP_CTRL_CLK_STP_NOW_SHIFT; + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + /* Even if broadcast fails, we stop the clock and flag error */ + if (ret != 1) + dev_err(&mstr->dev, "ClockStopNow Broadcast message failed\n"); + + /* + * Mark all Slaves as un-attached which are entering clock stop + * mode1 + */ + for (i = 1; i <= SOUNDWIRE_MAX_DEVICES; i++) { + + if (!mstr->sdw_addr[i].assigned) + continue; + + /* Get clock stop mode for all Slaves */ + mode = sdw_get_clk_stp_mode(mstr->sdw_addr[i].slave); + if (mode == SDW_CLOCK_STOP_MODE_0) + continue; + + /* If clock stop mode 1, mark Slave as not present */ + mstr->sdw_addr[i].status = SDW_SLAVE_STAT_NOT_PRESENT; + } + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_stop_clock); + +static struct sdw_slv *get_slave_for_prep_deprep(struct sdw_master *mstr, + int *slave_index) +{ + int i; + + for (i = *slave_index; i <= SOUNDWIRE_MAX_DEVICES; i++) { + if (mstr->sdw_addr[i].assigned != true) + continue; + + if (mstr->sdw_addr[i].status == SDW_SLAVE_STAT_NOT_PRESENT) + continue; + + *slave_index = i + 1; + return mstr->sdw_addr[i].slave; + } + return NULL; +} + +/* + * Wait till clock stop prepare/deprepare is finished. Prepare for all + * mode, De-prepare only for the Slaves resuming from clock stop mode 0 + */ +static void sdw_wait_for_clk_prep(struct sdw_master *mstr) +{ + int ret; + struct sdw_msg msg; + u8 buf[1] = {0}; + int timeout = 0; + + /* Create message to read clock stop status, its broadcast message. */ + msg.ssp_tag = 0; + msg.flag = SDW_MSG_FLAG_READ; + msg.len = 1; + msg.buf = &buf[0]; + msg.slave_addr = SDW_SLAVE_BDCAST_ADDR; + msg.addr_page1 = 0x0; + msg.addr_page2 = 0x0; + msg.addr = SDW_SCP_STAT; + buf[0] = 0xFF; + /* + * Once all the Slaves are written with prepare bit, + * we go ahead and broadcast the read message for the + * SCP_STAT register to read the ClockStopNotFinished bit + * Read till we get this a 0. Currently we have timeout of 1sec + * before giving up. Even if its not read as 0 after timeout, + * controller can stop the clock after warning. + */ + do { + /* + * Ideally this should not fail, but even if it fails + * in exceptional situation, we go ahead for clock stop + */ + ret = sdw_slave_transfer_nopm(mstr, &msg, 1); + + if (ret != 1) { + WARN_ONCE(1, "Clock stop status read failed\n"); + break; + } + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + break; + + /* + * TODO: Need to find from spec what is requirement. + * Since we are in suspend we should not sleep for more + * Ideally Slave should be ready to stop clock in less than + * few ms. + * So sleep less and increase loop time. This is not + * harmful, since if Slave is ready loop will terminate. + * + */ + msleep(2); + timeout++; + + } while (timeout != 500); + + if (!(buf[0] & SDW_SCP_STAT_CLK_STP_NF_MASK)) + + dev_info(&mstr->dev, "Clock stop prepare done\n"); + else + WARN_ONCE(1, "Some Slaves prepare un-successful\n"); +} + +/** + * sdw_master_prep_for_clk_stop: Prepare all the Slaves for clock stop. + * Iterate through each of the enumerated Slave. + * Prepare each Slave according to the clock stop + * mode supported by Slave. Use dynamic value from + * Slave callback if registered, else use static values + * from Slave capabilities registered. + * 1. Get clock stop mode for each Slave. + * 2. 
Call pre_prepare callback of each Slave if + * registered. + * 3. Prepare each Slave for clock stop + * 4. Broadcast the Read message to make sure + * all Slaves are prepared for clock stop. + * 5. Call post_prepare callback of each Slave if + * registered. + * + * @mstr: Master handle for which clock state has to be changed. + * + * Returns 0 + */ +int sdw_master_prep_for_clk_stop(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slv *slave = NULL; + int slv_index = 1; + + /* + * Get all the Slaves registered to the master driver for preparing + * for clock stop. Start from Slave with logical address as 1. + */ + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) { + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, true); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre prepare failed for Slave %d\n", + slave->slv_number); + } + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, true); + } + + /* Wait till prepare for all Slaves is finished */ + /* + * We should continue even if the prepare fails. Clock stop + * prepare failure on Slaves, should not impact the broadcasting + * of ClockStopNow. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + true); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. + */ + if (ret) + dev_err(&mstr->dev, "Post prepare failed for Slave %d\n", + slave->slv_number); + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_master_prep_for_clk_stop); + +/** + * sdw_mstr_deprep_after_clk_start: De-prepare all the Slaves + * exiting clock stop mode 0 after clock resumes. Clock + * is already resumed before this. De-prepare all the Slaves + * which were earlier in ClockStop mode0. De-prepare for the + * Slaves which were there in ClockStop mode1 is done after + * they enumerated back. Its not done here as part of master + * getting resumed. + * 1. Get clock stop mode for each Slave its exiting from + * 2. Call pre_prepare callback of each Slave exiting from + * clock stop mode 0. + * 3. De-Prepare each Slave exiting from Clock Stop mode0 + * 4. Broadcast the Read message to make sure + * all Slaves are de-prepared for clock stop. + * 5. 
Call post_prepare callback of each Slave exiting from + * clock stop mode0 + * + * + * @mstr: Master handle + * + * Returns 0 + */ +int sdw_mstr_deprep_after_clk_start(struct sdw_master *mstr) +{ + struct sdw_slv_capabilities *cap; + enum sdw_clk_stop_mode clock_stop_mode; + int ret = 0; + struct sdw_slv *slave = NULL; + /* We are preparing for stop */ + bool stop = false; + int slv_index = 1; + + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + /* Get the clock stop mode from which Slave is exiting */ + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. So nothing to be done + * here. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + /* + * Call the pre clock stop prepare, if Slave requires. + */ + if (slave->driver && slave->driver->pre_clk_stop_prep) + ret = slave->driver->pre_clk_stop_prep(slave, + clock_stop_mode, false); + + /* If it fails we still continue */ + if (ret) + dev_warn(&mstr->dev, "Pre de-prepare failed for Slave %d\n", + slave->slv_number); + + sdw_prep_slave_for_clk_stp(mstr, slave, clock_stop_mode, false); + } + + /* + * Wait till prepare is finished for all the Slaves. + */ + sdw_wait_for_clk_prep(mstr); + + slv_index = 1; + while ((slave = get_slave_for_prep_deprep(mstr, &slv_index)) != NULL) { + + cap = &slave->sdw_slv_cap; + + clock_stop_mode = sdw_get_clk_stp_mode(slave); + + /* + * Slave is exiting from Clock stop mode 1, De-prepare + * is optional based on capability, and it has to be done + * after Slave is enumerated. + */ + if (clock_stop_mode == SDW_CLOCK_STOP_MODE_1) + continue; + + if (slave->driver && slave->driver->post_clk_stop_prep) { + ret = slave->driver->post_clk_stop_prep(slave, + clock_stop_mode, + stop); + /* + * Even if Slave fails we continue with other + * Slaves. This should never happen ideally. 
+			 */
+			if (ret)
+				dev_err(&mstr->dev, "Post de-prepare failed for Slave %d\n",
+					slave->slv_number);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_mstr_deprep_after_clk_start);
+
+struct sdw_master *sdw_get_master(int nr)
+{
+	struct sdw_master *master;
+
+	mutex_lock(&sdw_core.core_lock);
+	master = idr_find(&sdw_core.idr, nr);
+	if (master && !try_module_get(master->owner))
+		master = NULL;
+	mutex_unlock(&sdw_core.core_lock);
+
+	return master;
+}
+EXPORT_SYMBOL_GPL(sdw_get_master);
+
+void sdw_put_master(struct sdw_master *mstr)
+{
+	if (mstr)
+		module_put(mstr->owner);
+}
+EXPORT_SYMBOL_GPL(sdw_put_master);
+
+static void sdw_exit(void)
+{
+	device_unregister(&sdw_slv);
+	bus_unregister(&sdwint_bus_type);
+}
+
+static int sdw_init(void)
+{
+	int retval;
+	int i;
+
+	for (i = 0; i < SDW_NUM_STREAM_TAGS; i++)
+		sdw_core.stream_tags[i].stream_tag = i;
+	mutex_init(&sdw_core.core_lock);
+	INIT_LIST_HEAD(&sdw_core.bus_list);
+	idr_init(&sdw_core.idr);
+
+	retval = bus_register(&sdwint_bus_type);
+	if (retval)
+		return retval;
+
+	retval = device_register(&sdw_slv);
+	if (retval)
+		goto dev_reg_failed;
+
+	retval = sdw_bus_bw_init();
+	if (retval)
+		goto bw_init_failed;
+
+	return 0;
+
+bw_init_failed:
+	device_unregister(&sdw_slv);
+dev_reg_failed:
+	bus_unregister(&sdwint_bus_type);
+	return retval;
+}
+postcore_initcall(sdw_init);
+module_exit(sdw_exit);
+
+MODULE_AUTHOR("Hardik Shah ");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("SoundWire bus driver");
+MODULE_ALIAS("platform:soundwire");
diff --git a/drivers/sdw/sdw_bwcalc.c b/drivers/sdw/sdw_bwcalc.c
new file mode 100644
index 000000000000..7ebb26756f59
--- /dev/null
+++ b/drivers/sdw/sdw_bwcalc.c
@@ -0,0 +1,3097 @@
+/*
+ * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation
+ *
+ * Copyright (C) 2015-2016 Intel Corp
+ * Author: Sanyog Kale
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include
+#include
+#include "sdw_priv.h"
+#include
+#include
+
+#ifndef CONFIG_SND_SOC_SVFPGA /* Original */
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)
+int rows[MAX_NUM_ROWS] = {48, 50, 60, 64, 72, 75, 80, 90,
+		96, 125, 144, 147, 100, 120, 128, 150,
+		160, 180, 192, 200, 240, 250, 256};
+#define SDW_DEFAULT_SSP 50
+#else
+int rows[MAX_NUM_ROWS] = {125, 64, 48, 50, 60, 72, 75, 80, 90,
+		96, 144, 147, 100, 120, 128, 150,
+		160, 180, 192, 200, 240, 250, 256};
+#define SDW_DEFAULT_SSP 24
+#endif /* IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) */
+
+int cols[MAX_NUM_COLS] = {2, 4, 6, 8, 10, 12, 14, 16};
+
+#else
+/* For PDM capture, the frame shape used is 50x10 */
+int rows[MAX_NUM_ROWS] = {50, 100, 48, 60, 64, 72, 75, 80, 90,
+		96, 125, 144, 147, 120, 128, 150,
+		160, 180, 192, 200, 240, 250, 256};
+
+int cols[MAX_NUM_COLS] = {10, 2, 4, 6, 8, 12, 14, 16};
+#define SDW_DEFAULT_SSP 50
+#endif
+
+/*
+ * TBD: Get supported clock frequency from ACPI and store
+ * it in master data structure.
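+ *
+ * Illustrative arithmetic (frequency hypothetical): with a 9.6 MHz bus
+ * clock and a 50x10 frame shape, the frame rate is
+ * 9600000 / (50 * 10) = 19.2 kHz, and each frame carries
+ * 50 * 10 - 48 = 452 data bits alongside the 48 control bits
+ * (see sdw_bus_bw_init() below).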
+ */ +#define MAXCLOCKDIVS 1 +int clock_div[MAXCLOCKDIVS] = {1}; + +struct sdw_num_to_col sdw_num_col_mapping[MAX_NUM_COLS] = { + {0, 2}, {1, 4}, {2, 6}, {3, 8}, {4, 10}, {5, 12}, {6, 14}, {7, 16}, +}; + +struct sdw_num_to_row sdw_num_row_mapping[MAX_NUM_ROWS] = { + {0, 48}, {1, 50}, {2, 60}, {3, 64}, {4, 75}, {5, 80}, {6, 125}, + {7, 147}, {8, 96}, {9, 100}, {10, 120}, {11, 128}, {12, 150}, + {13, 160}, {14, 250}, {16, 192}, {17, 200}, {18, 240}, {19, 256}, + {20, 72}, {21, 144}, {22, 90}, {23, 180}, +}; + +/** + * sdw_bus_bw_init - returns Success + * + * + * This function is called from sdw_init function when bus driver + * gets intitalized. This function performs all the generic + * intializations required for BW control. + */ +int sdw_bus_bw_init(void) +{ + int r, c, rowcolcount = 0; + int control_bits = 48; + + for (c = 0; c < MAX_NUM_COLS; c++) { + + for (r = 0; r < MAX_NUM_ROWS; r++) { + sdw_core.rowcolcomb[rowcolcount].col = cols[c]; + sdw_core.rowcolcomb[rowcolcount].row = rows[r]; + sdw_core.rowcolcomb[rowcolcount].control_bits = + control_bits; + sdw_core.rowcolcomb[rowcolcount].data_bits = + (cols[c] * rows[r]) - control_bits; + rowcolcount++; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_bus_bw_init); + + +/** + * sdw_mstr_bw_init - returns Success + * + * + * This function is called from sdw_register_master function + * for each master controller gets register. This function performs + * all the intializations per master controller required for BW control. + */ +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs) +{ + struct sdw_master_capabilities *sdw_mstr_cap = NULL; + + /* Initialize required parameters in bus structure */ + sdw_bs->bandwidth = 0; + sdw_bs->system_interval = 0; + sdw_bs->frame_freq = 0; + sdw_bs->clk_state = SDW_CLK_STATE_ON; + sdw_mstr_cap = &sdw_bs->mstr->mstr_capabilities; + sdw_bs->clk_freq = (sdw_mstr_cap->base_clk_freq * 2); + + return 0; +} +EXPORT_SYMBOL_GPL(sdw_mstr_bw_init); + + +/** + * sdw_get_col_to_num + * + * Returns column number from the mapping. + */ +int sdw_get_col_to_num(int col) +{ + int i; + + for (i = 0; i < MAX_NUM_COLS; i++) { + if (sdw_num_col_mapping[i].col == col) + return sdw_num_col_mapping[i].num; + } + + return 0; /* Lowest Column number = 2 */ +} + + +/** + * sdw_get_row_to_num + * + * Returns row number from the mapping. + */ +int sdw_get_row_to_num(int row) +{ + int i; + + for (i = 0; i < MAX_NUM_ROWS; i++) { + if (sdw_num_row_mapping[i].row == row) + return sdw_num_row_mapping[i].num; + } + + return 0; /* Lowest Row number = 48 */ +} + +/* + * sdw_lcm - returns LCM of two numbers + * + * + * This function is called BW calculation function to find LCM + * of two numbers. + */ +int sdw_lcm(int num1, int num2) +{ + int max; + + /* maximum value is stored in variable max */ + max = (num1 > num2) ? num1 : num2; + + while (1) { + if (max%num1 == 0 && max%num2 == 0) + break; + ++max; + } + + return max; +} + + +/* + * sdw_cfg_slv_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures slave registers for + * transport and port parameters. 
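+ *
+ * For example (values illustrative), sample_interval = 50 is written to
+ * DPN_SampleCtrl1/2 as (50 - 1) = 0x31 split into low and high bytes,
+ * and hstart = 1, hstop = 6 pack into DPN_HCtrl as
+ * (1 << 4) | 6 = 0x16.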
+ */ +int sdw_cfg_slv_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_slv_params, + struct sdw_port_params *p_slv_params, int slv_number) +{ + struct sdw_msg wr_msg, wr_msg1, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[8] = {0, 0, 0, 0, 0, 0, 0, 0}; + u8 wbuf1[2] = {0, 0}; + u8 rbuf[1] = {0}; + + +#ifdef CONFIG_SND_SOC_SVFPGA + /* + * The below hardcoding is required + * for running PDM capture with SV conora card + * because the transport params of card is not + * same as master parameters. Also not all + * standard registers are valid. + */ + t_slv_params->blockgroupcontrol_valid = false; + t_slv_params->sample_interval = 50; + t_slv_params->offset1 = 0; + t_slv_params->offset2 = 0; + t_slv_params->hstart = 1; + t_slv_params->hstop = 6; + p_slv_params->word_length = 30; +#endif + + /* Program slave alternate bank with all transport parameters */ + /* DPN_BlockCtrl2 */ + wbuf[0] = t_slv_params->blockgroupcontrol; + /* DPN_SampleCtrl1 */ + wbuf[1] = (t_slv_params->sample_interval - 1) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; + wbuf[2] = ((t_slv_params->sample_interval - 1) >> 8) & + SDW_DPN_SAMPLECTRL1_LOW_MASK; /* DPN_SampleCtrl2 */ + wbuf[3] = t_slv_params->offset1; /* DPN_OffsetCtrl1 */ + wbuf[4] = t_slv_params->offset2; /* DPN_OffsetCtrl2 */ + /* DPN_HCtrl */ + wbuf[5] = (t_slv_params->hstop | (t_slv_params->hstart << 4)); + wbuf[6] = t_slv_params->blockpackingmode; /* DPN_BlockCtrl3 */ + wbuf[7] = t_slv_params->lanecontrol; /* DPN_LaneCtrl */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + /* Program slave alternate bank with all port parameters */ + rd_msg.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_number; + + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + wbuf1[0] = (p_slv_params->port_flow_mode | + (p_slv_params->port_data_mode << + SDW_DPN_PORTCTRL_PORTDATAMODE_SHIFT) | + (rbuf[0])); + + wbuf1[1] = (p_slv_params->word_length - 1); + + /* Check whether address computed is correct for both cases */ + wr_msg.addr = ((SDW_DPN_BLOCKCTRL2 + + (1 * (!t_slv_params->blockgroupcontrol_valid)) + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num)); + + wr_msg1.addr = SDW_DPN_PORTCTRL + + (SDW_NUM_DATA_PORT_REGISTERS * t_slv_params->num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; +#ifdef CONFIG_SND_SOC_SVFPGA + wr_msg.len = (5 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#else + wr_msg.len = (7 + (1 * (t_slv_params->blockgroupcontrol_valid))); +#endif + + wr_msg.slave_addr = slv_number; + wr_msg.buf = &wbuf[0 + (1 * (!t_slv_params->blockgroupcontrol_valid))]; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + wr_msg1.ssp_tag = 0x0; + wr_msg1.flag = SDW_MSG_FLAG_WRITE; + wr_msg1.len = 2; + + wr_msg1.slave_addr = slv_number; + wr_msg1.buf = &wbuf1[0]; + wr_msg1.addr_page1 = 0x0; + wr_msg1.addr_page2 = 0x0; + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n"); + goto out; + } + + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, 
"Register transfer failed\n"); + goto out; + } +out: + + return ret; +} + + +/* + * sdw_cfg_mstr_params - returns Success + * -EINVAL - In case of error. + * + * + * This function configures master registers for + * transport and port parameters. + */ +int sdw_cfg_mstr_params(struct sdw_bus *mstr_bs, + struct sdw_transport_params *t_mstr_params, + struct sdw_port_params *p_mstr_params) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + int banktouse, ret = 0; + + /* 1. Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* 2. Set Master Xport Params */ + if (ops->mstr_port_ops->dpn_set_port_transport_params) { + ret = ops->mstr_port_ops->dpn_set_port_transport_params + (mstr_bs->mstr, t_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + /* 3. Set Master Port Params */ + if (ops->mstr_port_ops->dpn_set_port_params) { + ret = ops->mstr_port_ops->dpn_set_port_params + (mstr_bs->mstr, p_mstr_params, banktouse); + if (ret < 0) + return ret; + } + + return 0; +} + +/* + * sdw_cfg_params_mstr_slv - returns Success + * + * This function copies/configure master/slave transport & + * port params. + * + */ +int sdw_cfg_params_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + bool state_check) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_rt, *port_slv_rt; + struct sdw_transport_params *t_params, *t_slv_params; + struct sdw_port_params *p_params, *p_slv_params; + int ret = 0; + + list_for_each_entry(slv_rt, + &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + /* configure transport params based on state */ + if ((state_check) && + (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + continue; + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + /* Fill in port params here */ + port_slv_rt->port_params.num = port_slv_rt->port_num; + port_slv_rt->port_params.word_length = + slv_rt->stream_params.bps; + /* Normal/Isochronous Mode */ + port_slv_rt->port_params.port_flow_mode = 0x0; + /* Normal Mode */ + port_slv_rt->port_params.port_data_mode = 0x0; + t_slv_params = &port_slv_rt->transport_params; + p_slv_params = &port_slv_rt->port_params; + + /* Configure xport & port params for slave */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_slv_params, + p_slv_params, slv_rt->slave->slv_number); + if (ret < 0) + return ret; + + } + } + + if ((state_check) && + (sdw_mstr_bs_rt->rt_state == SDW_STATE_UNPREPARE_RT)) + return 0; + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + /* Transport and port parameters */ + t_params = &port_rt->transport_params; + p_params = &port_rt->port_params; + + + p_params->num = port_rt->port_num; + p_params->word_length = sdw_mstr_bs_rt->stream_params.bps; + p_params->port_flow_mode = 0x0; /* Normal/Isochronous Mode */ + p_params->port_data_mode = 0x0; /* Normal Mode */ + + /* Configure xport params and port params for master */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) + return ret; + + } + + return 0; +} + + +/* + * sdw_cfg_slv_enable_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable slave port channels. 
+ */ +int sdw_cfg_slv_enable_disable(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_msg wr_msg, rd_msg; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN + + (SDW_BANK1_REGISTER_OFFSET * banktouse)) + + (SDW_NUM_DATA_PORT_REGISTERS * + port_slv_strm->port_num)); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + + if (chn_en->is_activate) { + + /* + * 1. slave port enable_ch_pre + * --> callback + * --> no callback available + */ + + /* 2. slave port enable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + /* + * 3. slave port enable post pre + * --> callback + * --> no callback available + */ + slv_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + + } else { + + /* + * 1. slave port enable_ch_unpre + * --> callback + * --> no callback available + */ + + /* 2. slave port disable */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask)); + + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + rbuf[0] = 0; + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * 3. slave port enable post unpre + * --> callback + * --> no callback available + */ + if (!chn_en->is_bank_sw) + slv_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + } +out: + return ret; + +} + + +/* + * sdw_cfg_mstr_activate_disable - returns Success + * -EINVAL - In case of error. + * + * + * This function enable/disable master port channels. 
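+ *
+ * No bus-level register write happens here; the request is packed
+ * into a struct sdw_activate_ch, roughly:
+ *	activate_ch.num = port_num;
+ *	activate_ch.ch_mask = channel_mask;
+ *	activate_ch.activate = true;
+ * and handed to the master driver's dpn_port_activate_ch() callback
+ * along with the bank to program.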
+ */ +int sdw_cfg_mstr_activate_disable(struct sdw_bus *mstr_bs, + struct sdw_mstr_runtime *mstr_rt_strm, + struct sdw_port_runtime *port_mstr_strm, + struct port_chn_en_state *chn_en) +{ + struct sdw_mstr_driver *ops = mstr_bs->mstr->driver; + struct sdw_activate_ch activate_ch; + int banktouse, ret = 0; + + activate_ch.num = port_mstr_strm->port_num; + activate_ch.ch_mask = port_mstr_strm->channel_mask; + activate_ch.activate = chn_en->is_activate; /* Enable/Disable */ + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + if ((chn_en->is_activate) || (chn_en->is_bank_sw)) + banktouse = !banktouse; + + /* 2. Master port enable */ + if (ops->mstr_port_ops->dpn_port_activate_ch) { + ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr, + &activate_ch, banktouse); + if (ret < 0) + return ret; + } + + if (chn_en->is_activate) + mstr_rt_strm->rt_state = SDW_STATE_ENABLE_RT; + else if (!chn_en->is_bank_sw) + mstr_rt_strm->rt_state = SDW_STATE_DISABLE_RT; + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's. + */ +int sdw_en_dis_mstr_slv(struct sdw_bus *sdw_mstr_bs, + struct sdw_runtime *sdw_rt, bool is_act) +{ + struct sdw_slave_runtime *slv_rt_strm = NULL; + struct sdw_port_runtime *port_slv_strm, *port_mstr_strm; + struct sdw_mstr_runtime *mstr_rt_strm = NULL; + struct port_chn_en_state chn_en; + int ret = 0; + + if (is_act) + chn_en.is_bank_sw = true; + else + chn_en.is_bank_sw = false; + + chn_en.is_activate = is_act; + + list_for_each_entry(slv_rt_strm, &sdw_rt->slv_rt_list, slave_sdw_node) { + + if (slv_rt_strm->slave == NULL) + break; + + list_for_each_entry(port_slv_strm, + &slv_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt_strm, + port_slv_strm, &chn_en); + if (ret < 0) + return ret; + + } + + break; + + } + + list_for_each_entry(mstr_rt_strm, + &sdw_rt->mstr_rt_list, mstr_sdw_node) { + + if (mstr_rt_strm->mstr == NULL) + break; + + list_for_each_entry(port_mstr_strm, + &mstr_rt_strm->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, mstr_rt_strm, + port_mstr_strm, &chn_en); + if (ret < 0) + return ret; + + } + + } + + return 0; +} + + +/* + * sdw_en_dis_mstr_slv_state - returns Success + * -EINVAL - In case of error. + * + * + * This function call master/slave enable/disable + * channel API's based on runtime state. + */ +int sdw_en_dis_mstr_slv_state(struct sdw_bus *sdw_mstr_bs, + struct sdw_mstr_runtime *sdw_mstr_bs_rt, + struct port_chn_en_state *chn_en) +{ + struct sdw_slave_runtime *slv_rt = NULL; + struct sdw_port_runtime *port_slv_rt, *port_rt; + int ret = 0; + + list_for_each_entry(slv_rt, &sdw_mstr_bs_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + ret = sdw_cfg_slv_enable_disable + (sdw_mstr_bs, slv_rt, + port_slv_rt, chn_en); + if (ret < 0) + return ret; + + } + } + } + + if (sdw_mstr_bs_rt->rt_state == SDW_STATE_ENABLE_RT) { + + list_for_each_entry(port_rt, + &sdw_mstr_bs_rt->port_rt_list, port_node) { + + ret = sdw_cfg_mstr_activate_disable + (sdw_mstr_bs, sdw_mstr_bs_rt, port_rt, chn_en); + if (ret < 0) + return ret; + + } + } + + return 0; +} + + +/* + * sdw_get_clock_frmshp - returns Success + * -EINVAL - In case of error. 
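+ *
+ * Illustrative numbers (assumed, not from any particular platform):
+ * with base_clk_freq * 2 = 19.2 MHz, clock_div = 4 gives
+ * clock_reqd = 4.8 MHz; a divider is skipped whenever the resulting
+ * frequency is at or below the required bandwidth or is not a
+ * multiple of 3000.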
+ *
+ *
+ * This function computes clock and frame shape based on
+ * clock frequency.
+ */
+int sdw_get_clock_frmshp(struct sdw_bus *sdw_mstr_bs, int *frame_int,
+			struct sdw_mstr_runtime *sdw_mstr_rt)
+{
+	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+	struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = NULL;
+	struct port_audio_mode_properties *mode_prop = NULL;
+	struct sdw_slave_runtime *slv_rt = NULL;
+	struct sdw_port_runtime *port_slv_rt = NULL;
+	int i, j, rc;
+	int clock_reqd = 0, frame_interval = 0, frame_frequency = 0;
+	int sel_row = 0, sel_col = 0, pn = 0;
+	int value;
+	bool clock_ok = false;
+
+	sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities;
+
+	/*
+	 * Find the nearest clock frequency that satisfies the
+	 * bandwidth required by the master
+	 */
+	for (i = 0; i < MAXCLOCKDIVS; i++) {
+
+		/* TBD: Check why 3000 */
+		if ((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i]) <=
+			sdw_mstr_bs->bandwidth) ||
+			((((sdw_mstr_cap->base_clk_freq * 2) / clock_div[i])
+			% 3000) != 0))
+			continue;
+
+		clock_reqd = ((sdw_mstr_cap->base_clk_freq * 2) /
+				clock_div[i]);
+
+		/*
+		 * Check all the slave device capabilities
+		 * here and find whether the given frequency is
+		 * supported by all slaves
+		 */
+		list_for_each_entry(slv_rt, &sdw_mstr_rt->slv_rt_list,
+				slave_node) {
+
+			/* check for valid slave */
+			if (slv_rt->slave == NULL)
+				break;
+
+			/* check clock req for each port */
+			list_for_each_entry(port_slv_rt,
+				&slv_rt->port_rt_list, port_node) {
+
+				pn = port_slv_rt->port_num;
+
+				sdw_slv_dpn_cap =
+					&slv_rt->slave->sdw_slv_cap.sdw_dpn_cap[pn];
+				mode_prop = sdw_slv_dpn_cap->mode_properties;
+
+				/*
+				 * TBD: Indentation to be fixed,
+				 * code refactoring to be considered.
+				 */
+				if (mode_prop->num_freq_configs) {
+					for (j = 0; j <
+						mode_prop->num_freq_configs;
+						j++) {
+						value =
+						mode_prop->freq_supported[j];
+						if (clock_reqd == value) {
+							clock_ok = true;
+							break;
+						}
+					}
+
+					/* No listed frequency matched */
+					if (j == mode_prop->num_freq_configs)
+						clock_ok = false;
+
+				} else {
+					if ((clock_reqd <
+						mode_prop->min_frequency) ||
+						(clock_reqd >
+						mode_prop->max_frequency))
+						clock_ok = false;
+					else
+						clock_ok = true;
+				}
+
+				/* Go for next clock frequency */
+				if (!clock_ok)
+					break;
+			}
+
+			/*
+			 * Don't check the next slave, go for the next
+			 * clock frequency
+			 */
+			if (!clock_ok)
+				break;
+		}
+
+		/* check for next clock divider */
+		if (!clock_ok)
+			continue;
+
+		/* Find frame shape based on bandwidth per controller */
+		for (rc = 0; rc < MAX_NUM_ROW_COLS; rc++) {
+			frame_interval =
+				sdw_core.rowcolcomb[rc].row *
+				sdw_core.rowcolcomb[rc].col;
+			frame_frequency = clock_reqd/frame_interval;
+
+			if ((clock_reqd -
+				(frame_frequency *
+				sdw_core.rowcolcomb[rc].control_bits)) <
+				sdw_mstr_bs->bandwidth)
+				continue;
+
+			break;
+		}
+
+		/* Valid frameshape not found, check for next clock freq */
+		if (rc == MAX_NUM_ROW_COLS)
+			continue;
+
+		sel_row = sdw_core.rowcolcomb[rc].row;
+		sel_col = sdw_core.rowcolcomb[rc].col;
+		sdw_mstr_bs->frame_freq = frame_frequency;
+		sdw_mstr_bs->clk_freq = clock_reqd;
+		sdw_mstr_bs->clk_div = clock_div[i];
+		clock_ok = false;
+		*frame_int = frame_interval;
+		sdw_mstr_bs->col = sel_col;
+		sdw_mstr_bs->row = sel_row;
+
+		return 0;
+	}
+
+	/* No clock frequency matched, return error */
+	if (i == MAXCLOCKDIVS)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * sdw_compute_sys_interval - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function computes the system interval.
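+ *
+ * Worked example with assumed numbers: streams at 48 kHz and 96 kHz
+ * on a 9.6 MHz clock have sample intervals of 200 and 100, so the
+ * stream interval is lcm(200, 100) = 200; with a frame interval of
+ * 50 rows * 10 columns = 500 and div = (base_clk_freq * 2) /
+ * clk_freq = 2, the system interval is 2 * lcm(200, 500) = 2000.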
+ */
+int sdw_compute_sys_interval(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_master_capabilities *sdw_mstr_cap,
+		int frame_interval)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
+	struct sdw_slave_runtime *slv_rt = NULL;
+	struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL;
+	struct sdw_port_runtime *port_rt, *port_slv_rt;
+	int lcmnum1 = 0, lcmnum2 = 0, div = 0, lcm = 0;
+	int sample_interval;
+
+	/*
+	 * Once the bandwidth and frame shape for the bus are known,
+	 * run a loop over all the active streams running
+	 * on the bus and compute the stream interval & sample_interval.
+	 */
+	list_for_each_entry(sdw_mstr_rt,
+			&sdw_mstr->mstr_rt_list, mstr_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		/*
+		 * Calculate sample interval for stream
+		 * running on given master.
+		 */
+		if (sdw_mstr_rt->stream_params.rate)
+			sample_interval = (sdw_mstr_bs->clk_freq/
+					sdw_mstr_rt->stream_params.rate);
+		else
+			return -EINVAL;
+
+		/* Run port loop to assign sample interval per port */
+		list_for_each_entry(port_rt,
+				&sdw_mstr_rt->port_rt_list, port_node) {
+
+			t_params = &port_rt->transport_params;
+
+			/*
+			 * Assign the sample interval to each port's
+			 * transport properties. The assumption is that
+			 * the sample interval per port for a given
+			 * master will be the same.
+			 */
+			t_params->sample_interval = sample_interval;
+		}
+
+		/* Calculate LCM */
+		lcmnum2 = sample_interval;
+		if (!lcmnum1)
+			lcmnum1 = sdw_lcm(lcmnum2, lcmnum2);
+		else
+			lcmnum1 = sdw_lcm(lcmnum1, lcmnum2);
+
+		/* Run loop for slaves per master runtime */
+		list_for_each_entry(slv_rt,
+				&sdw_mstr_rt->slv_rt_list, slave_node) {
+
+			if (slv_rt->slave == NULL)
+				break;
+
+			/* Assign sample interval for each port of slave */
+			list_for_each_entry(port_slv_rt,
+					&slv_rt->port_rt_list, port_node) {
+
+				t_slv_params = &port_slv_rt->transport_params;
+
+				/* Assign sample interval to each port */
+				t_slv_params->sample_interval = sample_interval;
+			}
+		}
+	}
+
+	/*
+	 * If the system interval is already calculated,
+	 * e.g. in pause/resume or underrun scenarios
+	 */
+	if (sdw_mstr_bs->system_interval)
+		return 0;
+
+	/* Assign frame stream interval */
+	sdw_mstr_bs->stream_interval = lcmnum1;
+
+	/* Compute system_interval */
+	if ((sdw_mstr_cap) && (sdw_mstr_bs->clk_freq)) {
+
+		div = ((sdw_mstr_cap->base_clk_freq * 2) /
+				sdw_mstr_bs->clk_freq);
+
+		if ((lcmnum1) && (frame_interval))
+			lcm = sdw_lcm(lcmnum1, frame_interval);
+		else
+			return -EINVAL;
+
+		sdw_mstr_bs->system_interval = (div * lcm);
+	}
+
+	/*
+	 * Something went wrong, maybe the sdw_lcm value is 0;
+	 * return an error accordingly
+	 */
+	if (!sdw_mstr_bs->system_interval)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * sdw_chk_first_node - returns True or false
+ *
+ * This function returns true in case of the first node,
+ * else returns false.
+ */
+bool sdw_chk_first_node(struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_master *sdw_mstr)
+{
+	struct sdw_mstr_runtime *first_rt = NULL;
+
+	first_rt = list_first_entry(&sdw_mstr->mstr_rt_list,
+			struct sdw_mstr_runtime, mstr_node);
+	if (sdw_mstr_rt == first_rt)
+		return true;
+	else
+		return false;
+}
+
+/*
+ * sdw_compute_hstart_hstop - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function computes hstart and hstop for the running
+ * streams per master & slaves.
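+ *
+ * Worked example with assumed numbers: for sel_col = 16 and a group
+ * with full_bw = 200 (9.6 MHz / 48 kHz) and payload_bw = 48
+ * (24 bps * 2 channels), hwidth = ceil(16 * 48 / 200) = 4, so the
+ * group spans hstop = 15 down to hstart = 12 and the next group
+ * starts at hstop = 11.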
+ */
+int sdw_compute_hstart_hstop(struct sdw_bus *sdw_mstr_bs)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_mstr_runtime *sdw_mstr_rt;
+	struct sdw_transport_params *t_params = NULL, *t_slv_params = NULL;
+	struct sdw_slave_runtime *slv_rt = NULL;
+	struct sdw_port_runtime *port_rt, *port_slv_rt;
+	int hstart = 0, hstop = 0;
+	int column_needed = 0;
+	int sel_col = sdw_mstr_bs->col;
+	int group_count = 0, no_of_channels = 0;
+	struct temp_elements *temp, *element;
+	int rates[10];
+	int num, ch_mask, block_offset, i, port_block_offset;
+
+	/* Run loop for all master runtimes for given master */
+	list_for_each_entry(sdw_mstr_rt,
+			&sdw_mstr->mstr_rt_list, mstr_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		/* should not compute any transport params */
+		if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+			continue;
+
+		/* Perform grouping of streams based on stream rate */
+		if (sdw_mstr_rt == list_first_entry(&sdw_mstr->mstr_rt_list,
+				struct sdw_mstr_runtime, mstr_node))
+			rates[group_count++] = sdw_mstr_rt->stream_params.rate;
+		else {
+			num = group_count;
+			for (i = 0; i < num; i++) {
+				if (sdw_mstr_rt->stream_params.rate == rates[i])
+					break;
+			}
+
+			/* New rate, open a new group */
+			if (i == num)
+				rates[group_count++] =
+					sdw_mstr_rt->stream_params.rate;
+		}
+	}
+
+	/* check for number of streams and number of group count */
+	if (group_count == 0)
+		return 0;
+
+	/* Allocate temporary memory holding temp variables */
+	temp = kzalloc((sizeof(struct temp_elements) * group_count),
+			GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	/* Calculate full bandwidth per group */
+	for (i = 0; i < group_count; i++) {
+		element = &temp[i];
+		element->rate = rates[i];
+		element->full_bw = sdw_mstr_bs->clk_freq/element->rate;
+	}
+
+	/* Calculate payload bandwidth per group */
+	list_for_each_entry(sdw_mstr_rt,
+			&sdw_mstr->mstr_rt_list, mstr_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		/* should not compute any transport params */
+		if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+			continue;
+
+		for (i = 0; i < group_count; i++) {
+			element = &temp[i];
+			if (sdw_mstr_rt->stream_params.rate ==
+					element->rate) {
+				element->payload_bw +=
+					sdw_mstr_rt->stream_params.bps *
+					sdw_mstr_rt->stream_params.channel_count;
+				break;
+			}
+		}
+
+		/* The stream rate should match one of the groups */
+		if (i == group_count) {
+			kfree(temp);
+			return -EINVAL;
+		}
+	}
+
+	/* Calculate hwidth per group and total columns needed per master */
+	for (i = 0; i < group_count; i++) {
+		element = &temp[i];
+		element->hwidth =
+			(sel_col * element->payload_bw +
+			element->full_bw - 1)/element->full_bw;
+		column_needed += element->hwidth;
+	}
+
+	/* Columns required should not exceed the selected columns */
+	if (column_needed > sel_col - 1) {
+		kfree(temp);
+		return -EINVAL;
+	}
+
+	/* Compute hstop */
+	hstop = sel_col - 1;
+
+	/* Run loop for all groups to compute transport parameters */
+	for (i = 0; i < group_count; i++) {
+		port_block_offset = block_offset = 1;
+		element = &temp[i];
+
+		/* Find streams associated with each group */
+		list_for_each_entry(sdw_mstr_rt,
+				&sdw_mstr->mstr_rt_list, mstr_node) {
+
+			if (sdw_mstr_rt->mstr == NULL)
+				break;
+
+			/* should not compute any transport params */
+			if (sdw_mstr_rt->rt_state == SDW_STATE_UNPREPARE_RT)
+				continue;
+
+			if (sdw_mstr_rt->stream_params.rate != element->rate)
+				continue;
+
+			/* Compute hstart */
+			sdw_mstr_rt->hstart = hstart =
+				hstop - element->hwidth + 1;
+			sdw_mstr_rt->hstop = hstop;
+
+			/* Assign hstart, hstop, block offset for each port */
+			list_for_each_entry(port_rt,
&sdw_mstr_rt->port_rt_list, port_node) { + + t_params = &port_rt->transport_params; + t_params->num = port_rt->port_num; + t_params->hstart = hstart; + t_params->hstop = hstop; + t_params->offset1 = port_block_offset; + t_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_params->blockgroupcontrol_valid = true; + t_params->blockgroupcontrol = 0x0; + t_params->lanecontrol = 0x0; + /* Copy parameters if first node */ + if (port_rt == list_first_entry + (&sdw_mstr_rt->port_rt_list, + struct sdw_port_runtime, port_node)) { + + sdw_mstr_rt->hstart = hstart; + sdw_mstr_rt->hstop = hstop; + + sdw_mstr_rt->block_offset = + port_block_offset; + + } + + /* Get no. of channels running on curr. port */ + ch_mask = port_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + + port_block_offset += + sdw_mstr_rt->stream_params.bps * + no_of_channels; + } + + /* Compute block offset */ + block_offset += sdw_mstr_rt->stream_params.bps * + sdw_mstr_rt->stream_params.channel_count; + + /* + * Re-assign port_block_offset for next stream + * under same group + */ + port_block_offset = block_offset; + } + + /* Compute hstop for next group */ + hstop = hstop - element->hwidth; + } + + /* Compute transport params for slave */ + + /* Run loop for master runtime streams running on master */ + list_for_each_entry(sdw_mstr_rt, + &sdw_mstr->mstr_rt_list, mstr_node) { + + /* Get block offset from master runtime */ + port_block_offset = sdw_mstr_rt->block_offset; + + /* Run loop for slave per master runtime */ + list_for_each_entry(slv_rt, + &sdw_mstr_rt->slv_rt_list, slave_node) { + + if (slv_rt->slave == NULL) + break; + + if (slv_rt->rt_state == SDW_STATE_UNPREPARE_RT) + continue; + + /* Run loop for each port of slave */ + list_for_each_entry(port_slv_rt, + &slv_rt->port_rt_list, port_node) { + + t_slv_params = &port_slv_rt->transport_params; + t_slv_params->num = port_slv_rt->port_num; + + /* Assign transport parameters */ + t_slv_params->hstart = sdw_mstr_rt->hstart; + t_slv_params->hstop = sdw_mstr_rt->hstop; + t_slv_params->offset1 = port_block_offset; + t_slv_params->offset2 = port_block_offset >> 8; + + /* Only BlockPerPort supported */ + t_slv_params->blockgroupcontrol_valid = true; + t_slv_params->blockgroupcontrol = 0x0; + t_slv_params->lanecontrol = 0x0; + + /* Get no. of channels running on curr. port */ + ch_mask = port_slv_rt->channel_mask; + no_of_channels = (((ch_mask >> 3) & 1) + + ((ch_mask >> 2) & 1) + + ((ch_mask >> 1) & 1) + + (ch_mask & 1)); + + /* Increment block offset for next port/slave */ + port_block_offset += slv_rt->stream_params.bps * + no_of_channels; + } + } + } + + kfree(temp); + + return 0; +} + +/* + * sdw_cfg_frmshp_bnkswtch - returns Success + * -EINVAL - In case of error. + * -ENOMEM - In case of memory alloc failure. + * -EAGAIN - In case of activity ongoing. + * + * + * This function broadcast frameshape on framectrl + * register and performs bank switch. 
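+ *
+ * The broadcast payload packs the encoded column and row counts as
+ * wbuf[0] = numcol | (numrow << 3); e.g. if sdw_get_col_to_num()
+ * returned 2 and sdw_get_row_to_num() returned 8 (illustrative
+ * codes only), the frame control byte would be 0x2 | (0x8 << 3) =
+ * 0x42.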
+ */
+int sdw_cfg_frmshp_bnkswtch(struct sdw_bus *mstr_bs, bool is_wait)
+{
+	struct sdw_msg *wr_msg;
+	int ret = 0;
+	int banktouse, numcol, numrow;
+	u8 *wbuf;
+
+	wr_msg = kzalloc(sizeof(struct sdw_msg), GFP_KERNEL);
+	if (!wr_msg)
+		return -ENOMEM;
+
+	mstr_bs->async_data.msg = wr_msg;
+
+	wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL);
+	if (!wbuf) {
+		kfree(wr_msg);
+		return -ENOMEM;
+	}
+
+	numcol = sdw_get_col_to_num(mstr_bs->col);
+	numrow = sdw_get_row_to_num(mstr_bs->row);
+
+	wbuf[0] = numcol | (numrow << 3);
+	/* Get current bank in use from bus structure */
+	banktouse = mstr_bs->active_bank;
+	banktouse = !banktouse;
+
+	if (banktouse) {
+		wr_msg->addr = (SDW_SCP_FRAMECTRL + SDW_BANK1_REGISTER_OFFSET) +
+			(SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */
+	} else {
+		wr_msg->addr = SDW_SCP_FRAMECTRL +
+			(SDW_NUM_DATA_PORT_REGISTERS * 0); /* Data port 0 */
+	}
+
+	wr_msg->ssp_tag = 0x1;
+	wr_msg->flag = SDW_MSG_FLAG_WRITE;
+	wr_msg->len = 1;
+	wr_msg->slave_addr = 0xF; /* Broadcast address */
+	wr_msg->buf = wbuf;
+	wr_msg->addr_page1 = 0x0;
+	wr_msg->addr_page2 = 0x0;
+
+	if (is_wait) {
+
+		if (in_atomic() || irqs_disabled()) {
+			ret = sdw_trylock_mstr(mstr_bs->mstr);
+			if (!ret) {
+				/* SDW activity is ongoing. */
+				ret = -EAGAIN;
+				goto out;
+			}
+		} else
+			sdw_lock_mstr(mstr_bs->mstr);
+
+		ret = sdw_slave_transfer_async(mstr_bs->mstr, wr_msg,
+				1, &mstr_bs->async_data);
+		if (ret != 1) {
+			ret = -EINVAL;
+			dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n");
+			goto out;
+		}
+
+	} else {
+		ret = sdw_slave_transfer(mstr_bs->mstr, wr_msg, 1);
+		if (ret != 1) {
+			ret = -EINVAL;
+			dev_err(&mstr_bs->mstr->dev, "Register transfer failed\n");
+			goto out;
+		}
+	}
+
+	msleep(100); /* TBD: Remove this */
+
+	/*
+	 * TBD: check whether we need to poll on
+	 * mcp active bank bit to switch bank
+	 */
+	mstr_bs->active_bank = banktouse;
+
+	if (!is_wait) {
+		kfree(mstr_bs->async_data.msg->buf);
+		kfree(mstr_bs->async_data.msg);
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * sdw_cfg_frmshp_bnkswtch_wait - returns Success
+ *	-ETIMEDOUT - In case of timeout
+ *
+ * This function waits on completion of
+ * the bank switch.
+ */
+int sdw_cfg_frmshp_bnkswtch_wait(struct sdw_bus *mstr_bs)
+{
+	unsigned long time_left;
+	struct sdw_master *mstr = mstr_bs->mstr;
+
+	time_left = wait_for_completion_timeout(
+			&mstr_bs->async_data.xfer_complete,
+			msecs_to_jiffies(3000));
+	if (!time_left) {
+		dev_err(&mstr->dev, "Controller timed out\n");
+		sdw_unlock_mstr(mstr);
+		return -ETIMEDOUT;
+	}
+	kfree(mstr_bs->async_data.msg->buf);
+	kfree(mstr_bs->async_data.msg);
+	sdw_unlock_mstr(mstr);
+	return 0;
+}
+
+/*
+ * sdw_config_bs_prms - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function configures the master/slave transport
+ * params, sets the SSP interval and clock
+ * frequency, and enables channels. This API is called
+ * from the sdw_bus_calc_bw & sdw_bus_calc_bw_dis APIs.
+ */
+int sdw_config_bs_prms(struct sdw_bus *sdw_mstr_bs, bool state_check)
+{
+	struct port_chn_en_state chn_en;
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL;
+	struct sdw_mstr_driver *ops;
+	int banktouse, ret = 0;
+
+	list_for_each_entry(sdw_mstr_bs_rt,
+			&sdw_mstr->mstr_rt_list, mstr_node) {
+
+		if (sdw_mstr_bs_rt->mstr == NULL)
+			continue;
+
+		/*
+		 * Configure transport and port params
+		 * for master and slave ports.
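+		 *
+		 * The per-master sequence below is: transport/port
+		 * params, then SSP interval, then clock divider, all
+		 * programmed on the alternate bank, followed by the
+		 * channel enable; the actual bank switch is left to
+		 * the caller.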
+		 */
+		ret = sdw_cfg_params_mstr_slv(sdw_mstr_bs,
+				sdw_mstr_bs_rt, state_check);
+		if (ret < 0) {
+			/* TBD: Undo all the computation */
+			dev_err(&sdw_mstr_bs->mstr->dev,
+					"slave/master config params failed\n");
+			return ret;
+		}
+
+		/* Get master driver ops */
+		ops = sdw_mstr_bs->mstr->driver;
+
+		/* Configure SSP */
+		banktouse = sdw_mstr_bs->active_bank;
+		banktouse = !banktouse;
+
+		/*
+		 * TBD: Currently the SSP interval is hardcoded; the
+		 * computed value should be taken from system_interval
+		 * in the bus data structure.
+		 * Add error check.
+		 */
+		if (ops->mstr_ops->set_ssp_interval)
+			ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr,
+					SDW_DEFAULT_SSP, banktouse);
+
+		/*
+		 * Configure Clock
+		 * TBD: Add error check
+		 */
+		if (ops->mstr_ops->set_clock_freq)
+			ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr,
+					sdw_mstr_bs->clk_div, banktouse);
+
+		/* Enable channels on the alternate bank for running streams */
+		chn_en.is_activate = true;
+		chn_en.is_bank_sw = true;
+		ret = sdw_en_dis_mstr_slv_state
+				(sdw_mstr_bs, sdw_mstr_bs_rt, &chn_en);
+		if (ret < 0) {
+			/* TBD: Undo all the computation */
+			dev_err(&sdw_mstr_bs->mstr->dev,
+					"Channel enable failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * sdw_dis_chan - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function disables channels on the alternate
+ * bank. This API is called from sdw_bus_calc_bw
+ * & sdw_bus_calc_bw_dis when channels on the current
+ * bank are enabled.
+ */
+int sdw_dis_chan(struct sdw_bus *sdw_mstr_bs)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_mstr_runtime *sdw_mstr_bs_rt = NULL;
+	struct port_chn_en_state chn_en;
+	int ret = 0;
+
+	list_for_each_entry(sdw_mstr_bs_rt,
+			&sdw_mstr->mstr_rt_list, mstr_node) {
+
+		if (sdw_mstr_bs_rt->mstr == NULL)
+			continue;
+
+		chn_en.is_activate = false;
+		chn_en.is_bank_sw = true;
+		ret = sdw_en_dis_mstr_slv_state(sdw_mstr_bs,
+				sdw_mstr_bs_rt, &chn_en);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * sdw_cfg_slv_prep_unprep - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function prepares/un-prepares slave ports.
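+ *
+ * The prepare flow below is: pre-prepare callback, set the channel
+ * bits in DPN_PrepareCtrl (e.g. a cleared register written back with
+ * channel_mask 0x3 becomes 0x03), read DPN_PrepareStatus (currently
+ * after a fixed sleep), then the post-prepare callback; slaves with
+ * a simplified Channel Prepare state machine (prepare_ch == 0) skip
+ * the register writes.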
+ */ +int sdw_cfg_slv_prep_unprep(struct sdw_bus *mstr_bs, + struct sdw_slave_runtime *slv_rt_strm, + struct sdw_port_runtime *port_slv_strm, + bool prep) +{ + struct sdw_slave_driver *slv_ops = slv_rt_strm->slave->driver; + struct sdw_slv_capabilities *slv_cap = + &slv_rt_strm->slave->sdw_slv_cap; + struct sdw_slv_dpn_capabilities *sdw_slv_dpn_cap = + slv_cap->sdw_dpn_cap; + + struct sdw_msg wr_msg, rd_msg, rd_msg1; + int ret = 0; + int banktouse; + u8 wbuf[1] = {0}; + u8 rbuf[1] = {0}; + u8 rbuf1[1] = {0}; + + /* Get current bank in use from bus structure*/ + banktouse = mstr_bs->active_bank; + banktouse = !banktouse; + + /* Read SDW_DPN_PREPARECTRL register */ + rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + rd_msg.ssp_tag = 0x0; + rd_msg.flag = SDW_MSG_FLAG_READ; + rd_msg.len = 1; + rd_msg.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg.buf = rbuf; + rd_msg.addr_page1 = 0x0; + rd_msg.addr_page2 = 0x0; + + rd_msg1.ssp_tag = 0x0; + rd_msg1.flag = SDW_MSG_FLAG_READ; + rd_msg1.len = 1; + rd_msg1.slave_addr = slv_rt_strm->slave->slv_number; + rd_msg1.buf = rbuf1; + rd_msg1.addr_page1 = 0x0; + rd_msg1.addr_page2 = 0x0; + + + rd_msg1.addr = SDW_DPN_PREPARESTATUS + + (SDW_NUM_DATA_PORT_REGISTERS * port_slv_strm->port_num); + + wr_msg.ssp_tag = 0x0; + wr_msg.flag = SDW_MSG_FLAG_WRITE; + wr_msg.len = 1; + wr_msg.slave_addr = slv_rt_strm->slave->slv_number; + wr_msg.buf = wbuf; + wr_msg.addr_page1 = 0x0; + wr_msg.addr_page2 = 0x0; + + if (prep) { /* PREPARE */ + + /* + * 1. slave port prepare_ch_pre + * --> callback + * --> handle_pre_port_prepare + */ + if (slv_ops->handle_pre_port_prepare) { + slv_ops->handle_pre_port_prepare(slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, + banktouse); + } + + /* 2. slave port prepare --> to write */ + if (sdw_slv_dpn_cap->prepare_ch) { + + /* NON SIMPLIFIED CM, prepare required */ + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + wbuf[0] = (rbuf[0] | port_slv_strm->channel_mask); + + /* + * TBD: poll for prepare interrupt bit + * before calling post_prepare + * 2. check capabilities if simplified + * CM no need to prepare + */ + ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + + /* + * TBD: check on port ready, + * ideally we should check on prepare + * status for port_ready + */ + + /* wait for completion on port ready*/ + msleep(100); /* TBD: Remove this */ + + ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg1, 1); + if (ret != 1) { + ret = -EINVAL; + dev_err(&mstr_bs->mstr->dev, + "Register transfer failed\n"); + goto out; + } + } + + /* + * 3. slave port post pre + * --> callback + * --> handle_post_port_prepare + */ + if (slv_ops->handle_post_port_prepare) { + slv_ops->handle_post_port_prepare + (slv_rt_strm->slave, + port_slv_strm->port_num, + port_slv_strm->channel_mask, banktouse); + } + + slv_rt_strm->rt_state = SDW_STATE_PREPARE_RT; + + } else { + /* UNPREPARE */ + /* + * 1. 
slave port unprepare_ch_pre
+		 *	--> callback
+		 *	--> handle_pre_port_unprepare
+		 */
+		if (slv_ops->handle_pre_port_unprepare) {
+			slv_ops->handle_pre_port_unprepare(slv_rt_strm->slave,
+					port_slv_strm->port_num,
+					port_slv_strm->channel_mask,
+					banktouse);
+		}
+
+		/* 2. slave port unprepare --> to write */
+		if (sdw_slv_dpn_cap->prepare_ch) {
+
+			/* NON SIMPLIFIED CM, unprepare required */
+
+			/* Read SDW_DPN_PREPARECTRL register */
+			ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+			if (ret != 1) {
+				ret = -EINVAL;
+				dev_err(&mstr_bs->mstr->dev,
+						"Register transfer failed\n");
+				goto out;
+			}
+
+			wbuf[0] = (rbuf[0] & ~(port_slv_strm->channel_mask));
+
+			/*
+			 * TBD: poll for prepare interrupt bit before
+			 * calling post_prepare.
+			 * Does it apply for unprepare as well?
+			 * 2. check capabilities; if simplified CM,
+			 * no need to unprepare
+			 */
+			ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
+			if (ret != 1) {
+				ret = -EINVAL;
+				dev_err(&mstr_bs->mstr->dev,
+						"Register transfer failed\n");
+				goto out;
+			}
+		}
+
+		/*
+		 * 3. slave port post unprepare
+		 *	--> callback
+		 *	--> handle_post_port_unprepare
+		 */
+		if (slv_ops->handle_post_port_unprepare) {
+			slv_ops->handle_post_port_unprepare(slv_rt_strm->slave,
+					port_slv_strm->port_num,
+					port_slv_strm->channel_mask,
+					banktouse);
+		}
+
+		slv_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT;
+	}
+out:
+	return ret;
+}
+
+/*
+ * sdw_cfg_mstr_prep_unprep - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function prepares/un-prepares master ports.
+ */
+int sdw_cfg_mstr_prep_unprep(struct sdw_bus *mstr_bs,
+		struct sdw_mstr_runtime *mstr_rt_strm,
+		struct sdw_port_runtime *port_mstr_strm,
+		bool prep)
+{
+	struct sdw_mstr_driver *ops = mstr_bs->mstr->driver;
+	struct sdw_prepare_ch prep_ch;
+	int ret = 0;
+
+	prep_ch.num = port_mstr_strm->port_num;
+	prep_ch.ch_mask = port_mstr_strm->channel_mask;
+	prep_ch.prepare = prep; /* Prepare/Unprepare */
+
+	/* TBD: Bank configuration */
+
+	/* 1. Master port prepare_ch_pre */
+	if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) {
+		ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre
+				(mstr_bs->mstr, &prep_ch);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 2. Master port prepare */
+	if (ops->mstr_port_ops->dpn_port_prepare_ch) {
+		ret = ops->mstr_port_ops->dpn_port_prepare_ch
+				(mstr_bs->mstr, &prep_ch);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 3. Master port prepare_ch_post */
+	if (ops->mstr_port_ops->dpn_port_prepare_ch_post) {
+		ret = ops->mstr_port_ops->dpn_port_prepare_ch_post
+				(mstr_bs->mstr, &prep_ch);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (prep)
+		mstr_rt_strm->rt_state = SDW_STATE_PREPARE_RT;
+	else
+		mstr_rt_strm->rt_state = SDW_STATE_UNPREPARE_RT;
+
+	return 0;
+}
+
+/*
+ * sdw_prep_unprep_mstr_slv - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function calls the master/slave prepare/unprepare
+ * port configuration APIs; called from the sdw_bus_calc_bw
+ * & sdw_bus_calc_bw_dis APIs.
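+ *
+ * Ordering note, derived from the loops below: all slave ports of
+ * the stream are prepared (or un-prepared) first, then all master
+ * ports, each through their respective per-port helpers.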
+ */
+int sdw_prep_unprep_mstr_slv(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_runtime *sdw_rt, bool is_prep)
+{
+	struct sdw_slave_runtime *slv_rt_strm = NULL;
+	struct sdw_port_runtime *port_slv_strm, *port_mstr_strm;
+	struct sdw_mstr_runtime *mstr_rt_strm = NULL;
+	int ret = 0;
+
+	list_for_each_entry(slv_rt_strm,
+			&sdw_rt->slv_rt_list, slave_sdw_node) {
+
+		if (slv_rt_strm->slave == NULL)
+			break;
+
+		list_for_each_entry(port_slv_strm,
+				&slv_rt_strm->port_rt_list, port_node) {
+
+			ret = sdw_cfg_slv_prep_unprep(sdw_mstr_bs,
+					slv_rt_strm, port_slv_strm, is_prep);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	list_for_each_entry(mstr_rt_strm,
+			&sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+		if (mstr_rt_strm->mstr == NULL)
+			break;
+
+		list_for_each_entry(port_mstr_strm,
+				&mstr_rt_strm->port_rt_list, port_node) {
+
+			ret = sdw_cfg_mstr_prep_unprep(sdw_mstr_bs,
+					mstr_rt_strm, port_mstr_strm, is_prep);
+			if (ret < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+struct sdw_bus *master_to_bus(struct sdw_master *mstr)
+{
+	struct sdw_bus *sdw_mstr_bs = NULL;
+
+	list_for_each_entry(sdw_mstr_bs, &sdw_core.bus_list, bus_node) {
+		/* Match master structure pointer */
+		if (sdw_mstr_bs->mstr != mstr)
+			continue;
+		return sdw_mstr_bs;
+	}
+
+	/* This should never happen, added to suppress warning */
+	WARN_ON(1);
+
+	return NULL;
+}
+
+/*
+ * sdw_chk_strm_prms - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function performs all the required
+ * checks, such as isochronous mode support,
+ * stream rates etc. This API is called
+ * from the sdw_bus_calc_bw API.
+ */
+int sdw_chk_strm_prms(struct sdw_master_capabilities *sdw_mstr_cap,
+		struct sdw_stream_params *mstr_params,
+		struct sdw_stream_params *stream_params)
+{
+	/* Asynchronous mode not supported, return error */
+	if (((sdw_mstr_cap->base_clk_freq * 2) % mstr_params->rate) != 0)
+		return -EINVAL;
+
+	/* Check for sampling frequency */
+	if (stream_params->rate != mstr_params->rate)
+		return -EINVAL;
+
+	return 0;
+}
+
+/*
+ * sdw_compute_bs_prms - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function performs the master/slave transport
+ * params computation. This API is called
+ * from the sdw_bus_calc_bw & sdw_bus_calc_bw_dis APIs.
+ */
+int sdw_compute_bs_prms(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_mstr_runtime *sdw_mstr_rt)
+{
+	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	int ret = 0, frame_interval = 0;
+
+	sdw_mstr_cap = &sdw_mstr->mstr_capabilities;
+
+	ret = sdw_get_clock_frmshp(sdw_mstr_bs, &frame_interval,
+			sdw_mstr_rt);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "clock/frameshape config failed\n");
+		return ret;
+	}
+
+	/*
+	 * TBD: find the right place to run sorting on the
+	 * master rt_list. The sorting should be done based on
+	 * bps from low to high, which means PDM streams
+	 * would be placed before PCM.
+	 */
+
+	/*
+	 * TBD: Should we also perform sorting based on rate
+	 * for the PCM stream check? If yes, then how?
+	 * E.g. by creating two different lists.
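+	 *
+	 * The resulting computation order is: clock/frame shape
+	 * (sdw_get_clock_frmshp), then system interval
+	 * (sdw_compute_sys_interval), then per-stream hstart/hstop
+	 * (sdw_compute_hstart_hstop).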
+	 */
+
+	/* Compute system interval */
+	ret = sdw_compute_sys_interval(sdw_mstr_bs, sdw_mstr_cap,
+			frame_interval);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "compute system interval failed\n");
+		return ret;
+	}
+
+	/* Compute hstart/hstop */
+	ret = sdw_compute_hstart_hstop(sdw_mstr_bs);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "compute hstart/hstop failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * sdw_bs_pre_bnkswtch_post - returns Success
+ *	-EINVAL or ret value - In case of error.
+ *
+ * This API performs one of the following operations
+ * based on the bs_state value:
+ *	pre-activate port
+ *	bank switch operation
+ *	post-activate port
+ *	bankswitch wait operation
+ *	disable channel operation
+ */
+int sdw_bs_pre_bnkswtch_post(struct sdw_runtime *sdw_rt, int bs_state)
+{
+	struct sdw_mstr_runtime *mstr_rt_act = NULL;
+	struct sdw_bus *mstr_bs_act = NULL;
+	struct sdw_master_port_ops *ops;
+	int ret = 0;
+
+	list_for_each_entry(mstr_rt_act, &sdw_rt->mstr_rt_list,
+			mstr_sdw_node) {
+
+		if (mstr_rt_act->mstr == NULL)
+			break;
+
+		/* Get bus structure for master */
+		mstr_bs_act = master_to_bus(mstr_rt_act->mstr);
+		if (!mstr_bs_act)
+			return -EINVAL;
+
+		ops = mstr_bs_act->mstr->driver->mstr_port_ops;
+
+		/*
+		 * Note that currently all the operations
+		 * of pre->bankswitch->post->wait->disable
+		 * are performed sequentially. The switch case
+		 * is kept in order for the code to scale where
+		 * pre->bankswitch->post->wait->disable are
+		 * not sequential and called from different
+		 * instances.
+		 */
+		switch (bs_state) {
+
+		case SDW_UPDATE_BS_PRE:
+			/* Pre activate ports */
+			if (ops->dpn_port_activate_ch_pre) {
+				ret = ops->dpn_port_activate_ch_pre
+						(mstr_bs_act->mstr, NULL, 0);
+				if (ret < 0)
+					return ret;
+			}
+			break;
+		case SDW_UPDATE_BS_BNKSWTCH:
+			/* Configure Frame Shape/Switch Bank */
+			ret = sdw_cfg_frmshp_bnkswtch(mstr_bs_act, true);
+			if (ret < 0)
+				return ret;
+			break;
+		case SDW_UPDATE_BS_POST:
+			/* Post activate ports */
+			if (ops->dpn_port_activate_ch_post) {
+				ret = ops->dpn_port_activate_ch_post
+						(mstr_bs_act->mstr, NULL, 0);
+				if (ret < 0)
+					return ret;
+			}
+			break;
+		case SDW_UPDATE_BS_BNKSWTCH_WAIT:
+			/* Post Bankswitch wait operation */
+			ret = sdw_cfg_frmshp_bnkswtch_wait(mstr_bs_act);
+			if (ret < 0)
+				return ret;
+			break;
+		case SDW_UPDATE_BS_DIS_CHN:
+			/* Disable channel on previous bank */
+			ret = sdw_dis_chan(mstr_bs_act);
+			if (ret < 0)
+				return ret;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * sdw_update_bs_prms - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * Once all the parameters are configured
+ * for the ports, this function performs the bank switch
+ * so that all the newly configured parameters
+ * take effect. This function is called
+ * from the sdw_bus_calc_bw & sdw_bus_calc_bw_dis APIs.
+ * This function also disables all the channels
+ * enabled on the previous bank after the bank switch.
+ */
+int sdw_update_bs_prms(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_runtime *sdw_rt,
+		int last_node)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	int ret = 0;
+
+	/*
+	 * Optimization scope.
+	 * Check whether we can assign a function pointer when the
+	 * link sync value is 1, and call that function
+	 * if it is not NULL.
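+	 *
+	 * On an aggregated link (link_sync_mask set) the sequence is
+	 * pre-activate -> bank switch -> post-activate -> wait ->
+	 * disable old bank, fanned out over all masters of the
+	 * stream; on a single link it collapses to bank switch ->
+	 * disable old bank.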
+	 */
+	if ((last_node) && (sdw_mstr->link_sync_mask)) {
+
+		/* Perform pre-activate ports */
+		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_PRE);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Pre-activate port failed\n");
+			return ret;
+		}
+
+		/* Perform bankswitch operation */
+		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_BNKSWTCH);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Bank Switch operation failed\n");
+			return ret;
+		}
+
+		/* Perform post-activate ports */
+		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_POST);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Post-activate port failed\n");
+			return ret;
+		}
+
+		/* Perform bankswitch post wait operation */
+		ret = sdw_bs_pre_bnkswtch_post(sdw_rt,
+				SDW_UPDATE_BS_BNKSWTCH_WAIT);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "BnkSwtch wait op failed\n");
+			return ret;
+		}
+
+		/* Disable channels on previous bank */
+		ret = sdw_bs_pre_bnkswtch_post(sdw_rt, SDW_UPDATE_BS_DIS_CHN);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Channel disable failed\n");
+			return ret;
+		}
+	}
+
+	if (!sdw_mstr->link_sync_mask) {
+
+		/* Configure Frame Shape/Switch Bank */
+		ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false);
+		if (ret < 0) {
+			/* TBD: Undo all the computation */
+			dev_err(&sdw_mstr->dev, "bank switch failed\n");
+			return ret;
+		}
+
+		/* Disable all channels enabled on previous bank */
+		ret = sdw_dis_chan(sdw_mstr_bs);
+		if (ret < 0) {
+			/* TBD: Undo all the computation */
+			dev_err(&sdw_mstr->dev, "Channel disable failed\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_chk_last_node - returns True or false
+ *
+ * This function returns true in case of the last node,
+ * else returns false.
+ */
+bool sdw_chk_last_node(struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt)
+{
+	struct sdw_mstr_runtime *last_rt = NULL;
+
+	last_rt = list_last_entry(&sdw_rt->mstr_rt_list,
+			struct sdw_mstr_runtime, mstr_sdw_node);
+	if (sdw_mstr_rt == last_rt)
+		return true;
+	else
+		return false;
+}
+
+/**
+ * sdw_unprepare_op - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to unprepare ports and recomputes the
+ * bus parameters.
+ */
+int sdw_unprepare_op(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_stream_params *mstr_params;
+	bool last_node = false;
+	int ret = 0;
+
+	last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+	mstr_params = &sdw_mstr_rt->stream_params;
+
+	/* 1. Un-prepare master and slave ports */
+	ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs,
+			sdw_rt, false);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Ch unprep failed\n");
+		return ret;
+	}
+
+	/* change stream state to unprepare */
+	if (last_node)
+		sdw_rt->stream_state =
+			SDW_STATE_UNPREPARE_STREAM;
+
+	/*
+	 * Calculate new bandwidth, frame size
+	 * and total BW required for master controller
+	 */
+	sdw_mstr_rt->stream_bw = mstr_params->rate *
+		mstr_params->channel_count * mstr_params->bps;
+	sdw_mstr_bs->bandwidth -= sdw_mstr_rt->stream_bw;
+
+	/* Something went wrong in bandwidth calculation */
+	if (sdw_mstr_bs->bandwidth < 0) {
+		dev_err(&sdw_mstr->dev, "BW calculation failed\n");
+		return -EINVAL;
+	}
+
+	if (!sdw_mstr_bs->bandwidth) {
+		/*
+		 * Last stream on master should
+		 * return successfully
+		 */
+		sdw_mstr_bs->system_interval = 0;
+		sdw_mstr_bs->stream_interval = 0;
+		sdw_mstr_bs->frame_freq = 0;
+		sdw_mstr_bs->row = 0;
+		sdw_mstr_bs->col = 0;
+		return 0;
+	}
+
+	/* Compute transport params */
+	ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Params computation failed\n");
+		return -EINVAL;
+	}
+
+	/* Configure bus params */
+	ret = sdw_config_bs_prms(sdw_mstr_bs, true);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "xport params config failed\n");
+		return ret;
+	}
+
+	/*
+	 * Perform SDW bus update
+	 * For Aggregation flow:
+	 *	Pre -> Bankswitch -> Post -> Disable channel
+	 * For normal flow:
+	 *	Bankswitch -> Disable channel
+	 */
+	ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+
+	return ret;
+}
+
+/**
+ * sdw_disable_op - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to disable ports.
+ */
+int sdw_disable_op(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+	struct sdw_stream_params *mstr_params;
+	bool last_node = false;
+	int ret = 0;
+
+	last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+	sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities;
+	mstr_params = &sdw_mstr_rt->stream_params;
+
+	/* Disable the ports of the stream being freed */
+	ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, false);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Ch dis failed\n");
+		return ret;
+	}
+
+	/* Change stream state to disable */
+	if (last_node)
+		sdw_rt->stream_state = SDW_STATE_DISABLE_STREAM;
+
+	ret = sdw_config_bs_prms(sdw_mstr_bs, false);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "xport params config failed\n");
+		return ret;
+	}
+
+	/*
+	 * Perform SDW bus update
+	 * For Aggregation flow:
+	 *	Pre -> Bankswitch -> Post -> Disable channel
+	 * For normal flow:
+	 *	Bankswitch -> Disable channel
+	 */
+	ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+
+	return ret;
+}
+
+/**
+ * sdw_enable_op - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to enable ports.
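+ *
+ * The enable sequence is: re-program the bus params on the
+ * alternate bank (sdw_config_bs_prms), enable the new master/slave
+ * channels (sdw_en_dis_mstr_slv), then switch banks via
+ * sdw_update_bs_prms so the new configuration takes effect.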
+ */
+int sdw_enable_op(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt)
+{
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	bool last_node = false;
+	int ret = 0;
+
+	last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+
+	ret = sdw_config_bs_prms(sdw_mstr_bs, false);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "xport params config failed\n");
+		return ret;
+	}
+
+	/* Enable new ports for master and slave */
+	ret = sdw_en_dis_mstr_slv(sdw_mstr_bs, sdw_rt, true);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Channel enable failed\n");
+		return ret;
+	}
+
+	/* change stream state to enable */
+	if (last_node)
+		sdw_rt->stream_state = SDW_STATE_ENABLE_STREAM;
+
+	/*
+	 * Perform SDW bus update
+	 * For Aggregation flow:
+	 *	Pre -> Bankswitch -> Post -> Disable channel
+	 * For normal flow:
+	 *	Bankswitch -> Disable channel
+	 */
+	ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+
+	return ret;
+}
+
+/**
+ * sdw_prepare_op - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * This function performs all operations required
+ * to prepare ports and computes the
+ * bus parameters.
+ */
+int sdw_prepare_op(struct sdw_bus *sdw_mstr_bs,
+		struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt)
+{
+	struct sdw_stream_params *stream_params = &sdw_rt->stream_params;
+	struct sdw_master *sdw_mstr = sdw_mstr_bs->mstr;
+	struct sdw_master_capabilities *sdw_mstr_cap = NULL;
+	struct sdw_stream_params *mstr_params;
+	bool last_node = false;
+	int ret = 0;
+
+	last_node = sdw_chk_last_node(sdw_mstr_rt, sdw_rt);
+	sdw_mstr_cap = &sdw_mstr_bs->mstr->mstr_capabilities;
+	mstr_params = &sdw_mstr_rt->stream_params;
+
+	/*
+	 * Check all the stream parameters received;
+	 * check for isochronous mode, sample rate etc.
+	 */
+	ret = sdw_chk_strm_prms(sdw_mstr_cap, mstr_params,
+			stream_params);
+	if (ret < 0) {
+		dev_err(&sdw_mstr->dev, "Stream param check failed\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Calculate stream bandwidth, frame size and
+	 * total BW required for master controller
+	 */
+	sdw_mstr_rt->stream_bw = mstr_params->rate *
+		mstr_params->channel_count * mstr_params->bps;
+	sdw_mstr_bs->bandwidth += sdw_mstr_rt->stream_bw;
+
+	/* Compute transport params */
+	ret = sdw_compute_bs_prms(sdw_mstr_bs, sdw_mstr_rt);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Params computation failed\n");
+		return -EINVAL;
+	}
+
+	/* Configure bus parameters */
+	ret = sdw_config_bs_prms(sdw_mstr_bs, true);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "xport param config failed\n");
+		return ret;
+	}
+
+	/*
+	 * Perform SDW bus update
+	 * For Aggregation flow:
+	 *	Pre -> Bankswitch -> Post -> Disable channel
+	 * For normal flow:
+	 *	Bankswitch -> Disable channel
+	 */
+	ret = sdw_update_bs_prms(sdw_mstr_bs, sdw_rt, last_node);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Bus update failed\n");
+		return ret;
+	}
+
+	/* Prepare new ports for master and slave */
+	ret = sdw_prep_unprep_mstr_slv(sdw_mstr_bs, sdw_rt, true);
+	if (ret < 0) {
+		/* TBD: Undo all the computation */
+		dev_err(&sdw_mstr->dev, "Channel prepare failed\n");
+		return ret;
+	}
+
+	/* change stream state to prepare */
+	if (last_node)
+		sdw_rt->stream_state = SDW_STATE_PREPARE_STREAM;
+
+	return ret;
+}
+
+/**
+ * sdw_pre_en_dis_unprep_op - returns Success
+ *	-EINVAL - In case of error.
+ *
+ * This function is called by sdw_bus_calc_bw
+ * and sdw_bus_calc_bw_dis to prepare, enable,
+ * unprepare and disable ports.
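+ * Based on the state value, the corresponding helper is invoked:
+ *	SDW_STATE_PREPARE_STREAM   -> sdw_prepare_op()
+ *	SDW_STATE_ENABLE_STREAM    -> sdw_enable_op()
+ *	SDW_STATE_DISABLE_STREAM   -> sdw_disable_op()
+ *	SDW_STATE_UNPREPARE_STREAM -> sdw_unprepare_op()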
+ */
+int sdw_pre_en_dis_unprep_op(struct sdw_mstr_runtime *sdw_mstr_rt,
+		struct sdw_runtime *sdw_rt, int state)
+{
+	struct sdw_master *sdw_mstr = NULL;
+	struct sdw_bus *sdw_mstr_bs = NULL;
+	int ret = 0;
+
+	/* Get bus structure for master */
+	sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+	if (!sdw_mstr_bs)
+		return -EINVAL;
+
+	sdw_mstr = sdw_mstr_bs->mstr;
+
+	/*
+	 * All required data structures are available,
+	 * let's calculate BW for the master controller
+	 */
+	switch (state) {
+
+	case SDW_STATE_PREPARE_STREAM: /* Prepare */
+		ret = sdw_prepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+		break;
+	case SDW_STATE_ENABLE_STREAM: /* Enable */
+		ret = sdw_enable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+		break;
+	case SDW_STATE_DISABLE_STREAM: /* Disable */
+		ret = sdw_disable_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+		break;
+	case SDW_STATE_UNPREPARE_STREAM: /* UnPrepare */
+		ret = sdw_unprepare_op(sdw_mstr_bs, sdw_mstr_rt, sdw_rt);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * sdw_bus_calc_bw - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function is called from sdw_prepare_and_enable
+ * whenever a new stream is processed. Based on the
+ * stream associated with the controller, it calculates the
+ * required bandwidth, clock and frame shape, computes
+ * all transport params for a given port, enables the channels
+ * & performs the bank switch.
+ */
+int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable)
+{
+	struct sdw_runtime *sdw_rt = stream_tag->sdw_rt;
+	struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
+	struct sdw_bus *sdw_mstr_bs = NULL;
+	struct sdw_master *sdw_mstr = NULL;
+	int ret = 0;
+
+	/*
+	 * TBD: check whether mstr_rt is in the configured state or not.
+	 * If yes, then configure masters as well.
+	 * If no, then do not configure/enable master related parameters.
+	 */
+
+	/* BW calculation for the active master controller of the stream tag */
+	list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list,
+			mstr_sdw_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		if ((sdw_rt->stream_state != SDW_STATE_CONFIG_STREAM) &&
+			(sdw_rt->stream_state != SDW_STATE_UNPREPARE_STREAM))
+			goto enable_stream;
+
+		/* Get bus structure for master */
+		sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+		if (!sdw_mstr_bs)
+			return -EINVAL;
+
+		sdw_mstr = sdw_mstr_bs->mstr;
+		ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+				SDW_STATE_PREPARE_STREAM);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Prepare Operation failed\n");
+			return -EINVAL;
+		}
+	}
+
+enable_stream:
+
+	list_for_each_entry(sdw_mstr_rt, &sdw_rt->mstr_rt_list,
+			mstr_sdw_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		if ((!enable) ||
+			(sdw_rt->stream_state != SDW_STATE_PREPARE_STREAM))
+			return 0;
+
+		sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+		if (!sdw_mstr_bs)
+			return -EINVAL;
+
+		sdw_mstr = sdw_mstr_bs->mstr;
+
+		ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+				SDW_STATE_ENABLE_STREAM);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Enable Operation failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_bus_calc_bw);
+
+/**
+ * sdw_bus_calc_bw_dis - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function is called from sdw_disable_and_unprepare
+ * whenever a stream is ended.
+ * It disables and un-prepares the ports/channels of the
+ * associated stream, then recomputes the required bandwidth,
+ * clock and frame shape and the transport params of each
+ * remaining port, enables the channels & performs the bank
+ * switch for the streams remaining on the given controller.
+ */
+int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare)
+{
+	struct sdw_runtime *sdw_rt = stream_tag->sdw_rt;
+	struct sdw_mstr_runtime *sdw_mstr_rt = NULL;
+	struct sdw_bus *sdw_mstr_bs = NULL;
+	struct sdw_master *sdw_mstr = NULL;
+	int ret = 0;
+
+	/* BW calculation for the active master controller of the stream tag */
+	list_for_each_entry(sdw_mstr_rt,
+			&sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		if (sdw_rt->stream_state != SDW_STATE_ENABLE_STREAM)
+			goto unprepare_stream;
+
+		/* Get bus structure for master */
+		sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+		if (!sdw_mstr_bs)
+			return -EINVAL;
+
+		sdw_mstr = sdw_mstr_bs->mstr;
+		ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+				SDW_STATE_DISABLE_STREAM);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Disable Operation failed\n");
+			return -EINVAL;
+		}
+	}
+
+unprepare_stream:
+	list_for_each_entry(sdw_mstr_rt,
+			&sdw_rt->mstr_rt_list, mstr_sdw_node) {
+
+		if (sdw_mstr_rt->mstr == NULL)
+			break;
+
+		if ((!unprepare) ||
+			(sdw_rt->stream_state != SDW_STATE_DISABLE_STREAM))
+			return 0;
+
+		sdw_mstr_bs = master_to_bus(sdw_mstr_rt->mstr);
+		if (!sdw_mstr_bs)
+			return -EINVAL;
+
+		sdw_mstr = sdw_mstr_bs->mstr;
+		ret = sdw_pre_en_dis_unprep_op(sdw_mstr_rt, sdw_rt,
+				SDW_STATE_UNPREPARE_STREAM);
+		if (ret < 0) {
+			dev_err(&sdw_mstr->dev, "Unprepare Operation failed\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sdw_bus_calc_bw_dis);
+
+/*
+ * sdw_slv_dp0_en_dis - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function enables/disables Slave DP0 channels.
+ */
+int sdw_slv_dp0_en_dis(struct sdw_bus *mstr_bs,
+		bool is_enable, u8 slv_number)
+{
+	struct sdw_msg wr_msg, rd_msg;
+	int ret = 0;
+	int banktouse;
+	u8 wbuf[1] = {0};
+	u8 rbuf[1] = {0};
+
+	/* Get current bank in use from bus structure */
+	banktouse = mstr_bs->active_bank;
+	banktouse = !banktouse;
+
+	rd_msg.addr = wr_msg.addr = ((SDW_DPN_CHANNELEN +
+			(SDW_BANK1_REGISTER_OFFSET * banktouse)) +
+			(SDW_NUM_DATA_PORT_REGISTERS * 0x0));
+	rd_msg.ssp_tag = 0x0;
+	rd_msg.flag = SDW_MSG_FLAG_READ;
+	rd_msg.len = 1;
+	rd_msg.slave_addr = slv_number;
+	rd_msg.buf = rbuf;
+	rd_msg.addr_page1 = 0x0;
+	rd_msg.addr_page2 = 0x0;
+
+	wr_msg.ssp_tag = 0x0;
+	wr_msg.flag = SDW_MSG_FLAG_WRITE;
+	wr_msg.len = 1;
+	wr_msg.slave_addr = slv_number;
+	wr_msg.buf = wbuf;
+	wr_msg.addr_page1 = 0x0;
+	wr_msg.addr_page2 = 0x0;
+
+	ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+	if (ret != 1) {
+		ret = -EINVAL;
+		dev_err(&mstr_bs->mstr->dev,
+				"Register transfer failed\n");
+		goto out;
+	}
+
+	if (is_enable)
+		wbuf[0] = (rbuf[0] | 0x1);
+	else
+		wbuf[0] = (rbuf[0] & ~(0x1));
+
+	ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
+	if (ret != 1) {
+		ret = -EINVAL;
+		dev_err(&mstr_bs->mstr->dev,
+				"Register transfer failed\n");
+		goto out;
+	}
+
+	rbuf[0] = 0;
+	/* This is just a status read, can be removed later */
+	ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+	if (ret != 1) {
+		ret = -EINVAL;
+		dev_err(&mstr_bs->mstr->dev,
+				"Register transfer failed\n");
+		goto out;
+	}
+
+out:
+	return ret;
+}
+
+/*
+ * sdw_mstr_dp0_act_dis - returns Success
+ *	-EINVAL - In case of error.
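+ *
+ * DP0 is addressed as data port 0, so no port stride term applies,
+ * and a fixed channel mask of 0x1 is used.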
+ *
+ *
+ * This function enables/disables Master DP0 channels.
+ */
+int sdw_mstr_dp0_act_dis(struct sdw_bus *mstr_bs, bool is_enable)
+{
+	struct sdw_mstr_driver *ops = mstr_bs->mstr->driver;
+	struct sdw_activate_ch activate_ch;
+	int banktouse, ret = 0;
+
+	activate_ch.num = 0;
+	activate_ch.ch_mask = 0x1;
+	activate_ch.activate = is_enable; /* Enable/Disable */
+
+	/* Get current bank in use from bus structure */
+	banktouse = mstr_bs->active_bank;
+	banktouse = !banktouse;
+
+	/* 1. Master port enable_ch_pre */
+	if (ops->mstr_port_ops->dpn_port_activate_ch_pre) {
+		ret = ops->mstr_port_ops->dpn_port_activate_ch_pre
+				(mstr_bs->mstr, &activate_ch, banktouse);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 2. Master port enable */
+	if (ops->mstr_port_ops->dpn_port_activate_ch) {
+		ret = ops->mstr_port_ops->dpn_port_activate_ch(mstr_bs->mstr,
+				&activate_ch, banktouse);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 3. Master port enable_ch_post */
+	if (ops->mstr_port_ops->dpn_port_activate_ch_post) {
+		ret = ops->mstr_port_ops->dpn_port_activate_ch_post
+				(mstr_bs->mstr, &activate_ch, banktouse);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * sdw_slv_dp0_prep_unprep - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function prepares/un-prepares Slave DP0.
+ */
+int sdw_slv_dp0_prep_unprep(struct sdw_bus *mstr_bs,
+		u8 slv_number, bool prepare)
+{
+	struct sdw_msg wr_msg, rd_msg;
+	int ret = 0;
+	int banktouse;
+	u8 wbuf[1] = {0};
+	u8 rbuf[1] = {0};
+
+	/* Get current bank in use from bus structure */
+	banktouse = mstr_bs->active_bank;
+	banktouse = !banktouse;
+
+	/* Read SDW_DPN_PREPARECTRL register */
+	rd_msg.addr = wr_msg.addr = SDW_DPN_PREPARECTRL +
+		(SDW_NUM_DATA_PORT_REGISTERS * 0x0);
+	rd_msg.ssp_tag = 0x0;
+	rd_msg.flag = SDW_MSG_FLAG_READ;
+	rd_msg.len = 1;
+	rd_msg.slave_addr = slv_number;
+	rd_msg.buf = rbuf;
+	rd_msg.addr_page1 = 0x0;
+	rd_msg.addr_page2 = 0x0;
+
+	wr_msg.ssp_tag = 0x0;
+	wr_msg.flag = SDW_MSG_FLAG_WRITE;
+	wr_msg.len = 1;
+	wr_msg.slave_addr = slv_number;
+	wr_msg.buf = wbuf;
+	wr_msg.addr_page1 = 0x0;
+	wr_msg.addr_page2 = 0x0;
+
+	ret = sdw_slave_transfer(mstr_bs->mstr, &rd_msg, 1);
+	if (ret != 1) {
+		ret = -EINVAL;
+		dev_err(&mstr_bs->mstr->dev,
+				"Register transfer failed\n");
+		goto out;
+	}
+
+	if (prepare)
+		wbuf[0] = (rbuf[0] | 0x1);
+	else
+		wbuf[0] = (rbuf[0] & ~(0x1));
+
+	/*
+	 * TBD: poll for prepare interrupt bit
+	 * before calling post_prepare.
+	 * 2. check capabilities; if simplified
+	 * CM, no need to prepare
+	 */
+	ret = sdw_slave_transfer(mstr_bs->mstr, &wr_msg, 1);
+	if (ret != 1) {
+		ret = -EINVAL;
+		dev_err(&mstr_bs->mstr->dev,
+				"Register transfer failed\n");
+		goto out;
+	}
+
+	/*
+	 * Sleep for 100ms.
+	 * TODO: check on prepare status for port_ready
+	 */
+	msleep(100);
+
+out:
+	return ret;
+}
+
+/*
+ * sdw_mstr_dp0_prep_unprep - returns Success
+ *	-EINVAL - In case of error.
+ *
+ *
+ * This function prepares/un-prepares Master DP0.
+ */
+int sdw_mstr_dp0_prep_unprep(struct sdw_bus *mstr_bs,
+		bool prep)
+{
+	struct sdw_mstr_driver *ops = mstr_bs->mstr->driver;
+	struct sdw_prepare_ch prep_ch;
+	int ret = 0;
+
+	prep_ch.num = 0x0;
+	prep_ch.ch_mask = 0x1;
+	prep_ch.prepare = prep; /* Prepare/Unprepare */
+
+	/* 1. Master port prepare_ch_pre */
+	if (ops->mstr_port_ops->dpn_port_prepare_ch_pre) {
+		ret = ops->mstr_port_ops->dpn_port_prepare_ch_pre
+				(mstr_bs->mstr, &prep_ch);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* 2.
Master port prepare */ + if (ops->mstr_port_ops->dpn_port_prepare_ch) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + /* 3. Master port prepare_ch_post */ + if (ops->mstr_port_ops->dpn_port_prepare_ch_post) { + ret = ops->mstr_port_ops->dpn_port_prepare_ch_post + (mstr_bs->mstr, &prep_ch); + if (ret < 0) + return ret; + } + + return 0; +} + +static int sdw_bra_config_ops(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + struct sdw_mstr_driver *ops; + int ret, banktouse; + + /* configure Master transport params */ + ret = sdw_cfg_mstr_params(sdw_mstr_bs, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master xport params config failed\n"); + return ret; + } + + /* configure Slave transport params */ + ret = sdw_cfg_slv_params(sdw_mstr_bs, t_params, + p_params, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave xport params config failed\n"); + return ret; + } + + /* Get master driver ops */ + ops = sdw_mstr_bs->mstr->driver; + + /* Configure SSP */ + banktouse = sdw_mstr_bs->active_bank; + banktouse = !banktouse; + + if (ops->mstr_ops->set_ssp_interval) { + ret = ops->mstr_ops->set_ssp_interval(sdw_mstr_bs->mstr, + 24, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: SSP interval config failed\n"); + return ret; + } + } + + /* Configure Clock */ + if (ops->mstr_ops->set_clock_freq) { + ret = ops->mstr_ops->set_clock_freq(sdw_mstr_bs->mstr, + sdw_mstr_bs->clk_div, banktouse); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Clock config failed\n"); + return ret; + } + } + + return 0; +} + +static int sdw_bra_xport_config_enable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, + struct sdw_transport_params *t_params, + struct sdw_port_params *p_params) +{ + int ret; + + /* Prepare sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + * 3. Try to use existing prep_unprep API both for master + * and slave. 
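+	 * As a rough sketch, option 2 could look like the following
+	 * (the capability-lookup helper and field names here are
+	 * hypothetical, not part of this patch):
+	 *
+	 *	dp0_cap = sdw_get_slave_dp0_capabilities(block->slave_addr);
+	 *	if (!dp0_cap || dp0_cap->prepare_required)
+	 *		ret = sdw_slv_dp0_prep_unprep(sdw_mstr_bs,
+	 *				block->slave_addr, true);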
+ */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, true); + + /* Prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master prepare failed\n"); + return ret; + } + + /* Enable sequence */ + ret = sdw_bra_config_ops(sdw_mstr_bs, block, t_params, p_params); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: config operation failed\n"); + return ret; + } + + /* Enable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, true, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 enable failed\n"); + return ret; + } + + /* Enable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, true); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 enable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + return 0; +} + +static int sdw_bra_xport_config_disable(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block) +{ + int ret; + + /* Disable DP0 channel (Slave) */ + ret = sdw_slv_dp0_en_dis(sdw_mstr_bs, false, block->slave_addr); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Slave DP0 disable failed\n"); + return ret; + } + + /* Disable DP0 channel (Master) */ + ret = sdw_mstr_dp0_act_dis(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master DP0 disable failed\n"); + return ret; + } + + /* Bank Switch */ + ret = sdw_cfg_frmshp_bnkswtch(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: bank switch failed\n"); + return ret; + } + + /* + * TODO: There may be some slave which doesn't support + * de-prepare for DP0. We have two options here. + * 1. Just call prepare and ignore error from those + * codec who doesn't support de-prepare for DP0. + * 2. Get slave capabilities and based on prepare DP0 + * support, Program Slave prepare register. + * Currently going with approach 1, not checking return + * value. + */ + sdw_slv_dp0_prep_unprep(sdw_mstr_bs, block->slave_addr, false); + + /* De-prepare Master port */ + ret = sdw_mstr_dp0_prep_unprep(sdw_mstr_bs, false); + if (ret < 0) { + dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Master de-prepare failed\n"); + return ret; + } + + return 0; +} + +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable) +{ + struct sdw_transport_params t_params; + struct sdw_port_params p_params; + int ret; + + /* TODO: + * compute transport parameters based on current clock and + * frameshape. need to check how algorithm should be designed + * for BRA for computing clock, frameshape, SSP and transport params. 
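+	 * For reference, the hardcoded values below work out as follows:
+	 * row 80 x col 10 = 800 bits per frame, so assuming a 9.6 Mbps
+	 * bit rate the bus runs 9600000 / 800 = 12000 frames per second,
+	 * and DP0 with hstart 7 / hstop 9 occupies 3 columns of each
+	 * frame for BRA payload.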
+	 */
+
+	/* Transport Parameters */
+	t_params.num = 0x0; /* DP 0 */
+	t_params.blockpackingmode = 0x0;
+	t_params.blockgroupcontrol_valid = false;
+	t_params.blockgroupcontrol = 0x0;
+	t_params.lanecontrol = 0;
+	t_params.sample_interval = 10;
+
+	t_params.hstart = 7;
+	t_params.hstop = 9;
+	t_params.offset1 = 0;
+	t_params.offset2 = 0;
+
+	/* Port Parameters */
+	p_params.num = 0x0; /* DP 0 */
+
+	/* Isochronous Mode */
+	p_params.port_flow_mode = 0x0;
+
+	/* Normal Mode */
+	p_params.port_data_mode = 0x0;
+
+	/* Word length */
+	p_params.word_length = 3;
+
+	/* Frameshape and clock params */
+	sdw_mstr_bs->clk_div = 1;
+	sdw_mstr_bs->col = 10;
+	sdw_mstr_bs->row = 80;
+
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA)
+	/* 9.6 MHz; keep this as integer math, no floating point in kernel */
+	sdw_mstr_bs->bandwidth = 9600 * 1000;
+#else
+	sdw_mstr_bs->bandwidth = 12 * 1000 * 1000;
+#endif
+
+	if (enable) {
+		ret = sdw_bra_xport_config_enable(sdw_mstr_bs, block,
+				&t_params, &p_params);
+		if (ret < 0) {
+			dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params config failed\n");
+			return ret;
+		}
+
+	} else {
+		ret = sdw_bra_xport_config_disable(sdw_mstr_bs, block);
+		if (ret < 0) {
+			dev_err(&sdw_mstr_bs->mstr->dev, "BRA: Xport params de-config failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
diff --git a/drivers/sdw/sdw_cnl.c b/drivers/sdw/sdw_cnl.c
new file mode 100644
index 000000000000..c754edbe6564
--- /dev/null
+++ b/drivers/sdw/sdw_cnl.c
@@ -0,0 +1,2535 @@
+/*
+ * sdw_cnl.c - Intel SoundWire master controller driver implementation.
+ *
+ * Copyright (C) 2015-2016 Intel Corp
+ * Author: Hardik T Shah
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sdw_cnl_priv.h" + +static inline int cnl_sdw_reg_readl(void __iomem *base, int offset) +{ + int value; + + value = readl(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writel(void __iomem *base, int offset, int value) +{ + writel(value, base + offset); +} + +static inline u16 cnl_sdw_reg_readw(void __iomem *base, int offset) +{ + int value; + + value = readw(base + offset); + return value; +} + +static inline void cnl_sdw_reg_writew(void __iomem *base, int offset, u16 value) +{ + writew(value, base + offset); +} + +static inline int cnl_sdw_port_reg_readl(void __iomem *base, int offset, + int port_num) +{ + return cnl_sdw_reg_readl(base, offset + port_num * 128); +} + +static inline void cnl_sdw_port_reg_writel(u32 __iomem *base, int offset, + int port_num, int value) +{ + return cnl_sdw_reg_writel(base, offset + port_num * 128, value); +} + +struct cnl_sdw_async_msg { + struct completion *async_xfer_complete; + struct sdw_msg *msg; + int length; +}; + +struct cnl_sdw { + struct cnl_sdw_data data; + struct sdw_master *mstr; + irqreturn_t (*thread)(int irq, void *context); + void *thread_context; + struct completion tx_complete; + struct cnl_sdw_port port[CNL_SDW_MAX_PORTS]; + int num_pcm_streams; + struct cnl_sdw_pdi_stream *pcm_streams; + int num_in_pcm_streams; + struct cnl_sdw_pdi_stream *in_pcm_streams; + int num_out_pcm_streams; + struct cnl_sdw_pdi_stream *out_pcm_streams; + int num_pdm_streams; + struct cnl_sdw_pdi_stream *pdm_streams; + int num_in_pdm_streams; + struct cnl_sdw_pdi_stream *in_pdm_streams; + int num_out_pdm_streams; + struct cnl_sdw_pdi_stream *out_pdm_streams; + struct mutex stream_lock; + spinlock_t ctrl_lock; + struct cnl_sdw_async_msg async_msg; + u32 response_buf[0x80]; + bool sdw_link_status; + +}; + +static int sdw_power_up_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Try 10 times before timing out */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = (CNL_LCTL_SPA_MASK << (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control |= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (link_control & cpa_mask) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Up\n", + data->inst_id); + sdw->sdw_link_status = 1; + return 0; + } + dev_err(&mstr->dev, "Failed to Power Up the SDW ctrl %d\n", + data->inst_id); + return -EIO; +} + +static void sdw_power_down_link(struct cnl_sdw *sdw) +{ + volatile int link_control; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + /* Retry 10 times before giving up */ + int timeout = 10; + int spa_mask, cpa_mask; + + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + spa_mask = ~(CNL_LCTL_SPA_MASK 
<< (data->inst_id + CNL_LCTL_SPA_SHIFT)); + cpa_mask = (CNL_LCTL_CPA_MASK << (data->inst_id + CNL_LCTL_CPA_SHIFT)); + link_control &= spa_mask; + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_LCTL, link_control); + do { + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) + break; + timeout--; + /* Wait for 20ms before each retry */ + msleep(20); + } while (timeout != 0); + /* Read once again to confirm */ + link_control = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_LCTL); + if (!(link_control & cpa_mask)) { + dev_info(&mstr->dev, "SoundWire ctrl %d Powered Down\n", + data->inst_id); + sdw->sdw_link_status = 0; + return; + } + dev_err(&mstr->dev, "Failed to Power Down the SDW ctrl %d\n", + data->inst_id); +} + +static void sdw_init_phyctrl(struct cnl_sdw *sdw) +{ + /* TODO: Initialize based on hardware requirement */ + +} + +static void sdw_switch_to_mip(struct cnl_sdw *sdw) +{ + u16 ioctl; + u16 act = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + int act_offset = SDW_CNL_CTMCTL + (data->inst_id * + SDW_CNL_CTMCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + + ioctl &= ~(CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT); + ioctl &= ~(CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT); + + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + act |= 0x1 << CNL_CTMCTL_DOAIS_SHIFT; + act |= CNL_CTMCTL_DACTQE_MASK << CNL_CTMCTL_DACTQE_SHIFT; + act |= CNL_CTMCTL_DODS_MASK << CNL_CTMCTL_DODS_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, act_offset, act); +} + +static void sdw_switch_to_glue(struct cnl_sdw *sdw) +{ + u16 ioctl; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + ioctl = cnl_sdw_reg_readw(data->sdw_shim, ioctl_offset); + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + ioctl |= CNL_IOCTL_COE_MASK << CNL_IOCTL_COE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl &= ~(CNL_IOCTL_MIF_MASK << CNL_IOCTL_MIF_SHIFT); + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static void sdw_init_shim(struct cnl_sdw *sdw) +{ + u16 ioctl = 0; + struct cnl_sdw_data *data = &sdw->data; + int ioctl_offset = SDW_CNL_IOCTL + (data->inst_id * + SDW_CNL_IOCTL_REG_OFFSET); + + + ioctl |= CNL_IOCTL_BKE_MASK << CNL_IOCTL_BKE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_WPDD_MASK << CNL_IOCTL_WPDD_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DO_MASK << CNL_IOCTL_DO_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); + + ioctl |= CNL_IOCTL_DOE_MASK << CNL_IOCTL_DOE_SHIFT; + cnl_sdw_reg_writew(data->sdw_shim, ioctl_offset, ioctl); +} + +static int sdw_config_update(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + struct sdw_master *mstr = sdw->mstr; + int sync_reg, syncgo_mask; + volatile int config_update = 0; + volatile int sync_update = 0; + /* Try 10 times before giving up on configuration update */ + int timeout = 10; + int config_updated = 0; + + config_update |= 
MCP_CONFIGUPDATE_CONFIGUPDATE_MASK << + MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT; + /* Bit is self-cleared when configuration gets updated. */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIGUPDATE, + config_update); + + /* + * Set SYNCGO bit for Master(s) running in aggregated mode + * (MMModeEN = 1). This action causes all gSyncs of all Master IPs + * to be unmasked and asserted at the currently active gSync rate. + * The initialization-pending Master IP SoundWire bus clock will + * start up synchronizing to gSync, leading to bus reset entry, + * subsequent exit, and 1st Frame generation aligning to gSync. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + syncgo_mask = (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT); + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, + SDW_CNL_SYNC); + if ((sync_update & syncgo_mask) == 0) + break; + + msleep(20); + timeout--; + + } while (timeout); + + if ((sync_update & syncgo_mask) != 0) { + dev_err(&mstr->dev, "Failed to set sync go\n"); + return -EIO; + } + + /* Reset timeout */ + timeout = 10; + } + + /* Wait for config update bit to be self cleared */ + do { + config_update = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONFIGUPDATE); + if ((config_update & + MCP_CONFIGUPDATE_CONFIGUPDATE_MASK) == 0) { + config_updated = 1; + break; + } + timeout--; + /* Wait for 20ms between each try */ + msleep(20); + + } while (timeout != 0); + if (!config_updated) { + dev_err(&mstr->dev, "SoundWire update failed\n"); + return -EIO; + } + return 0; +} + +static void sdw_enable_interrupt(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int int_mask = 0; + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK0, + MCP_SLAVEINTMASK0_MASK); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTMASK1, + MCP_SLAVEINTMASK1_MASK); + /* Enable slave interrupt mask */ + int_mask |= MCP_INTMASK_SLAVERESERVED_MASK << + MCP_INTMASK_SLAVERESERVED_SHIFT; + int_mask |= MCP_INTMASK_SLAVEALERT_MASK << + MCP_INTMASK_SLAVEALERT_SHIFT; + int_mask |= MCP_INTMASK_SLAVEATTACHED_MASK << + MCP_INTMASK_SLAVEATTACHED_SHIFT; + int_mask |= MCP_INTMASK_SLAVENOTATTACHED_MASK << + MCP_INTMASK_SLAVENOTATTACHED_SHIFT; + int_mask |= MCP_INTMASK_CONTROLBUSCLASH_MASK << + MCP_INTMASK_CONTROLBUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_DATABUSCLASH_MASK << + MCP_INTMASK_DATABUSCLASH_SHIFT; + int_mask |= MCP_INTMASK_RXWL_MASK << + MCP_INTMASK_RXWL_SHIFT; + int_mask |= MCP_INTMASK_IRQEN_MASK << + MCP_INTMASK_IRQEN_SHIFT; + int_mask |= MCP_INTMASK_DPPDIINT_MASK << + MCP_INTMASK_DPPDIINT_SHIFT; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTMASK, int_mask); +} + +static int sdw_pcm_pdi_init(struct cnl_sdw *sdw) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pcm_cap; + int pcm_cap_offset = SDW_CNL_PCMSCAP + (data->inst_id * + SDW_CNL_PCMSCAP_REG_OFFSET); + int ch_cnt_offset; + int i; + + pcm_cap = cnl_sdw_reg_readw(data->sdw_shim, pcm_cap_offset); + sdw->num_pcm_streams = (pcm_cap >> CNL_PCMSCAP_BSS_SHIFT) & + CNL_PCMSCAP_BSS_MASK; + dev_info(&mstr->dev, "Number of Bidirectional PCM stream = %d\n", + sdw->num_pcm_streams); + sdw->pcm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pcm_streams * sizeof(struct cnl_sdw_pdi_stream), + 
GFP_KERNEL); + if (!sdw->pcm_streams) + return -ENOMEM; + /* Two of the PCM streams are reserved for bulk transfers */ + sdw->pcm_streams -= SDW_CNL_PCM_PDI_NUM_OFFSET; + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < sdw->num_pcm_streams; i++) { + ch_cnt_offset = SDW_CNL_PCMSCHC + + (data->inst_id * SDW_CNL_PCMSCHC_REG_OFFSET) + + ((i + SDW_CNL_PCM_PDI_NUM_OFFSET) * 0x2); + + sdw->pcm_streams[i].ch_cnt = cnl_sdw_reg_readw(data->sdw_shim, + ch_cnt_offset); + /* Zero based value in register */ + sdw->pcm_streams[i].ch_cnt++; + sdw->pcm_streams[i].pdi_num = i; + sdw->pcm_streams[i].allocated = false; + dev_info(&mstr->dev, "CH Count for stream %d is %d\n", + i, sdw->pcm_streams[i].ch_cnt); + } + return 0; +} + +static int sdw_pdm_pdi_init(struct cnl_sdw *sdw) +{ + int i; + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int pdm_cap, pdm_ch_count, total_pdm_streams; + int pdm_cap_offset = SDW_CNL_PDMSCAP + + (data->inst_id * SDW_CNL_PDMSCAP_REG_OFFSET); + pdm_cap = cnl_sdw_reg_readw(data->sdw_shim, pdm_cap_offset); + sdw->num_pdm_streams = (pdm_cap >> CNL_PDMSCAP_BSS_SHIFT) & + CNL_PDMSCAP_BSS_MASK; + + sdw->pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->pdm_streams) + return -ENOMEM; + + sdw->num_in_pdm_streams = (pdm_cap >> CNL_PDMSCAP_ISS_SHIFT) & + CNL_PDMSCAP_ISS_MASK; + + sdw->in_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_in_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + + if (!sdw->in_pdm_streams) + return -ENOMEM; + + sdw->num_out_pdm_streams = (pdm_cap >> CNL_PDMSCAP_OSS_SHIFT) & + CNL_PDMSCAP_OSS_MASK; + /* Zero based value in register */ + sdw->out_pdm_streams = devm_kzalloc(&mstr->dev, + sdw->num_out_pdm_streams * sizeof(struct cnl_sdw_pdi_stream), + GFP_KERNEL); + if (!sdw->out_pdm_streams) + return -ENOMEM; + + total_pdm_streams = sdw->num_pdm_streams + + sdw->num_in_pdm_streams + + sdw->num_out_pdm_streams; + + pdm_ch_count = (pdm_cap >> CNL_PDMSCAP_CPSS_SHIFT) & + CNL_PDMSCAP_CPSS_MASK; + for (i = 0; i < sdw->num_pdm_streams; i++) { + sdw->pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_in_pdm_streams; i++) { + sdw->in_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->in_pdm_streams[i].pdi_num = i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->in_pdm_streams[i].allocated = false; + } + for (i = 0; i < sdw->num_out_pdm_streams; i++) { + sdw->out_pdm_streams[i].ch_cnt = pdm_ch_count; + sdw->out_pdm_streams[i].pdi_num = + i + SDW_CNL_PDM_PDI_NUM_OFFSET; + sdw->out_pdm_streams[i].allocated = false; + } + return 0; +} + +static int sdw_port_pdi_init(struct cnl_sdw *sdw) +{ + int i, ret = 0; + + for (i = 0; i < CNL_SDW_MAX_PORTS; i++) { + sdw->port[i].port_num = i; + sdw->port[i].allocated = false; + } + ret = sdw_pcm_pdi_init(sdw); + if (ret) + return ret; + ret = sdw_pdm_pdi_init(sdw); + + return ret; +} + +static int sdw_init(struct cnl_sdw *sdw, bool is_first_init) +{ + struct sdw_master *mstr = sdw->mstr; + struct cnl_sdw_data *data = &sdw->data; + int mcp_config, mcp_control, sync_reg, mcp_clockctrl; + volatile int sync_update = 0; + int timeout = 10; /* Try 10 times before timing out */ + int ret = 0, mask; + + /* Power up the link controller */ + ret = sdw_power_up_link(sdw); + if (ret) + return ret; + + /* Initialize the IO control registers */ + sdw_init_shim(sdw); + + /* Switch the ownership to Master IP from glue logic */ + 
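+	/*
+	 * While the glue logic owns the link (BKE/COE set), the shim
+	 * drives the pads; handing ownership to the Master IP clears
+	 * those overrides and sets MIF instead (see sdw_switch_to_mip()
+	 * and sdw_switch_to_glue() above).
+	 */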
sdw_switch_to_mip(sdw); + + /* write to MCP Control register to enable block wakeup */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + mask = (MCP_CONTROL_BLOCKWAKEUP_MASK << + MCP_CONTROL_BLOCKWAKEUP_SHIFT); + mcp_control &= ~mask; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + do { + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) + break; + + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + + /* Write the MCP Control register to exit from clock stop */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + mask = (MCP_CONTROL_CLOCKSTOPCLEAR_MASK << + MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT); + mcp_control |= mask; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + /* Reset timeout */ + timeout = 10; + + /* Wait for clock stop exit bit to be self cleared */ + do { + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + + /* Read once again to confirm */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONTROL); + if (!(mcp_control & mask)) { + dev_dbg(&sdw->mstr->dev, "SDW ctrl %d exit clock stop success\n", + data->inst_id); + } else { + dev_err(&sdw->mstr->dev, + "Failed exit from clock stop SDW ctrl %d\n", + data->inst_id); + return -EIO; + } + + /* Set SyncPRD period */ + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (SDW_CNL_DEFAULT_SYNC_PERIOD << CNL_SYNC_SYNCPRD_SHIFT); + + /* Set SyncPU bit */ + sync_reg |= (0x1 << CNL_SYNC_SYNCCPU_SHIFT); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + + /* Reset timeout */ + timeout = 10; + + do { + sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) == 0) + break; + timeout--; + /* Wait 20ms before each time */ + msleep(20); + } while (timeout != 0); + if ((sync_update & CNL_SYNC_SYNCCPU_MASK) != 0) { + dev_err(&mstr->dev, "Fail to set sync period\n"); + return -EINVAL; + } + + /* + * Set CMDSYNC bit based on Master ID + * Note that this bit is set only for the Master which will be + * running in aggregated mode (MMModeEN = 1). By doing + * this the gSync to Master IP to be masked inactive. + * Note that this is done in order to overcome hardware bug related + * to mis-alignment of gSync and frame. + */ + if (mstr->link_sync_mask) { + + sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC); + sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT)); + cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg); + } + + /* Set clock divider to default value in default bank */ + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + mcp_clockctrl |= SDW_CNL_DEFAULT_CLK_DIVIDER; + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CLOCKCTRL0, + mcp_clockctrl); + + /* Set the Frame shape init to default value */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FRAMESHAPEINIT, + SDW_CNL_DEFAULT_FRAME_SHAPE); + + + /* Set the SSP interval to default value for both banks */ + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL0, + SDW_CNL_DEFAULT_SSP_INTERVAL); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SSPCTRL1, + SDW_CNL_DEFAULT_SSP_INTERVAL); + + /* Set command acceptance mode. 
This is required because when + * Master broadcasts the clock_stop command to slaves, slaves + * might be already suspended, so this return NO ACK, in that + * case also master should go to clock stop mode. + */ + mcp_control = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CONTROL); + mcp_control |= (MCP_CONTROL_CMDACCEPTMODE_MASK << + MCP_CONTROL_CMDACCEPTMODE_SHIFT); + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control); + + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + /* Set Max cmd retry to 15 times */ + mcp_config |= (CNL_SDW_MAX_CMD_RETRIES << + MCP_CONFIG_MAXCMDRETRY_SHIFT); + + /* Set Ping request to ping delay to 15 frames. + * Spec supports 32 max frames + */ + mcp_config |= (CNL_SDW_MAX_PREQ_DELAY << + MCP_CONFIG_MAXPREQDELAY_SHIFT); + + /* If master is synchronized to some other master set Multimode */ + if (mstr->link_sync_mask) { + mcp_config |= (MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config |= (MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } else { + mcp_config &= ~(MCP_CONFIG_MMMODEEN_MASK << + MCP_CONFIG_MMMODEEN_SHIFT); + mcp_config &= ~(MCP_CONFIG_SSPMODE_MASK << + MCP_CONFIG_SSPMODE_SHIFT); + } + + /* Disable automatic bus release */ + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + /* Disable sniffer mode now */ + mcp_config &= ~(MCP_CONFIG_SNIFFEREN_MASK << + MCP_CONFIG_SNIFFEREN_SHIFT); + + /* Set the command mode for Tx and Rx command */ + mcp_config &= ~(MCP_CONFIG_CMDMODE_MASK << + MCP_CONFIG_CMDMODE_SHIFT); + + /* Set operation mode to normal */ + mcp_config &= ~(MCP_CONFIG_OPERATIONMODE_MASK << + MCP_CONFIG_OPERATIONMODE_SHIFT); + mcp_config |= ((MCP_CONFIG_OPERATIONMODE_NORMAL & + MCP_CONFIG_OPERATIONMODE_MASK) << + MCP_CONFIG_OPERATIONMODE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + + /* Initialize the phy control registers. */ + sdw_init_phyctrl(sdw); + + if (is_first_init) { + /* Initlaize the ports */ + ret = sdw_port_pdi_init(sdw); + if (ret) { + dev_err(&mstr->dev, "SoundWire controller init failed %d\n", + data->inst_id); + sdw_power_down_link(sdw); + return ret; + } + } + + /* Lastly enable interrupts */ + sdw_enable_interrupt(sdw); + + /* Update soundwire configuration */ + return sdw_config_update(sdw); +} + +static int sdw_alloc_pcm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, + enum sdw_data_direction direction) +{ + int num_pcm_streams, pdi_ch_map = 0, stream_id; + struct cnl_sdw_pdi_stream *stream, *pdi_stream; + unsigned int i; + unsigned int ch_map_offset, port_ctrl_offset, pdi_config_offset; + struct sdw_master *mstr = sdw->mstr; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + unsigned int stream_config; + + /* Currently PCM supports only bi-directional streams only */ + num_pcm_streams = sdw->num_pcm_streams; + stream = sdw->pcm_streams; + + mutex_lock(&sdw->stream_lock); + for (i = SDW_CNL_PCM_PDI_NUM_OFFSET; i < num_pcm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) { + dev_err(&mstr->dev, "Unable to allocate stream for PCM\n"); + return -EINVAL; + } + pdi_stream = port->pdi_stream; + /* We didnt get enough PDI streams, so free the allocated + * PDI streams. 
Free the port as well and return with error + */ + pdi_stream->l_ch_num = 0; + pdi_stream->h_ch_num = ch_cnt - 1; + ch_map_offset = SDW_CNL_PCMSCHM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr) + + (SDW_PCM_STRM_START_INDEX * pdi_stream->pdi_num); + if (port->direction == SDW_DATA_DIR_IN) + pdi_ch_map |= (CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + else + pdi_ch_map &= ~(CNL_PCMSYCM_DIR_MASK << CNL_PCMSYCM_DIR_SHIFT); + /* TODO: Remove this hardcoding */ + stream_id = mstr->nr * 16 + pdi_stream->pdi_num + 5; + pdi_stream->sdw_pdi_num = stream_id; + pdi_ch_map |= (stream_id & CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + pdi_ch_map |= (pdi_stream->l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + pdi_ch_map |= (0xF & CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + cnl_sdw_reg_writew(sdw->data.sdw_shim, ch_map_offset, + pdi_ch_map); + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (pdi_stream->pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET)); + stream_config |= (CNL_STRMZCFG_DMAT_VAL & CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + stream_config |= ((ch_cnt - 1) & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + cnl_sdw_reg_writel(sdw->data.alh_base, + (pdi_stream->sdw_pdi_num * ALH_CNL_STRMZCFG_OFFSET), + stream_config); + return 0; +} + +static int sdw_alloc_pdm_stream(struct cnl_sdw *sdw, + struct cnl_sdw_port *port, int ch_cnt, int direction) +{ + int num_pdm_streams; + struct cnl_sdw_pdi_stream *stream; + int i; + unsigned int port_ctrl_offset, pdi_config_offset; + unsigned int port_ctrl = 0, pdi_config = 0, channel_mask; + + /* Currently PDM supports either Input or Output Streams */ + if (direction == SDW_DATA_DIR_IN) { + num_pdm_streams = sdw->num_in_pdm_streams; + stream = sdw->in_pdm_streams; + } else { + num_pdm_streams = sdw->num_out_pdm_streams; + stream = sdw->out_pdm_streams; + } + mutex_lock(&sdw->stream_lock); + for (i = 0; i < num_pdm_streams; i++) { + if (stream[i].allocated == false) { + stream[i].allocated = true; + stream[i].port_num = port->port_num; + port->pdi_stream = &stream[i]; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port->pdi_stream) + return -EINVAL; + /* If direction is input, port is sink port*/ + if (direction == SDW_DATA_DIR_IN) + port_ctrl |= (PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + else + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl_offset = SDW_CNL_PORTCTRL + (port->port_num * + SDW_CNL_PORT_REG_OFFSET); + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, port_ctrl); + + pdi_config |= ((port->port_num & PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + channel_mask = (1 << ch_cnt) - 
1; + pdi_config |= (channel_mask << PDINCONFIG_CHANNEL_MASK_SHIFT); + /* TODO: Remove below hardcodings */ + pdi_config_offset = (SDW_CNL_PDINCONFIG0 + (stream[i].pdi_num * 16)); + cnl_sdw_reg_writel(sdw->data.sdw_regs, pdi_config_offset, pdi_config); + + return 0; +} + +struct cnl_sdw_port *cnl_sdw_alloc_port(struct sdw_master *mstr, int ch_count, + enum sdw_data_direction direction, + enum cnl_sdw_pdi_stream_type stream_type) +{ + struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + int i, ret = 0; + struct num_pdi_streams; + + sdw = sdw_master_get_drvdata(mstr); + + mutex_lock(&sdw->stream_lock); + for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].allocated == false) { + port = &sdw->port[i]; + port->allocated = true; + port->direction = direction; + port->ch_cnt = ch_count; + break; + } + } + mutex_unlock(&sdw->stream_lock); + if (!port) { + dev_err(&mstr->dev, "Unable to allocate port\n"); + return NULL; + } + port->pdi_stream = NULL; + if (stream_type == CNL_SDW_PDI_TYPE_PDM) + ret = sdw_alloc_pdm_stream(sdw, port, ch_count, direction); + else + ret = sdw_alloc_pcm_stream(sdw, port, ch_count, direction); + if (!ret) + return port; + + dev_err(&mstr->dev, "Unable to allocate stream\n"); + mutex_lock(&sdw->stream_lock); + port->allocated = false; + mutex_unlock(&sdw->stream_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(cnl_sdw_alloc_port); + +void cnl_sdw_free_port(struct sdw_master *mstr, int port_num) +{ + int i; + struct cnl_sdw *sdw; + struct cnl_sdw_port *port = NULL; + + sdw = sdw_master_get_drvdata(mstr); + for (i = 1; i < CNL_SDW_MAX_PORTS; i++) { + if (sdw->port[i].port_num == port_num) { + port = &sdw->port[i]; + break; + } + } + if (!port) + return; + mutex_lock(&sdw->stream_lock); + port->pdi_stream->allocated = false; + port->pdi_stream = NULL; + port->allocated = false; + mutex_unlock(&sdw->stream_lock); +} +EXPORT_SYMBOL_GPL(cnl_sdw_free_port); + +static int cnl_sdw_update_slave_status(struct cnl_sdw *sdw, int slave_intstat0, + int slave_intstat1) +{ + int i; + struct sdw_status slave_status; + u64 slaves_stat, slave_stat; + int ret = 0; + + memset(&slave_status, 0x0, sizeof(slave_status)); + slaves_stat = (u64) slave_intstat1 << + SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT; + slaves_stat |= slave_intstat0; + for (i = 0; i <= SOUNDWIRE_MAX_DEVICES; i++) { + slave_stat = slaves_stat >> (i * SDW_CNL_SLAVE_STATUS_BITS); + if (slave_stat & MCP_SLAVEINTSTAT_NOT_PRESENT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_NOT_PRESENT; + else if (slave_stat & MCP_SLAVEINTSTAT_ATTACHED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ATTACHED_OK; + else if (slave_stat & MCP_SLAVEINTSTAT_ALERT_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_ALERT; + else if (slave_stat & MCP_SLAVEINTSTAT_RESERVED_MASK) + slave_status.status[i] = SDW_SLAVE_STAT_RESERVED; + } + ret = sdw_master_update_slv_status(sdw->mstr, &slave_status); + return ret; +} + +static void cnl_sdw_read_response(struct cnl_sdw *sdw) +{ + struct cnl_sdw_data *data = &sdw->data; + int num_res = 0, i; + u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE; + + num_res = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_FIFOSTAT); + num_res &= MCP_RX_FIFO_AVAIL_MASK; + for (i = 0; i < num_res; i++) { + sdw->response_buf[i] = cnl_sdw_reg_readl(data->sdw_regs, + cmd_base); + cmd_base += SDW_CNL_CMD_WORD_LEN; + } +} + +static enum sdw_command_response sdw_fill_message_response( + struct sdw_master *mstr, + struct sdw_msg *msg, + int count, int offset) +{ + int i, j; + int no_ack = 0, nack = 0; + struct cnl_sdw *sdw = 
sdw_master_get_drvdata(mstr); + + for (i = 0; i < count; i++) { + if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) { + no_ack = 1; + dev_err(&mstr->dev, "Ack not recevied\n"); + if ((MCP_RESPONSE_NACK_MASK & + sdw->response_buf[i])) { + nack = 1; + dev_err(&mstr->dev, "NACK recevied\n"); + } + } + break; + } + if (nack) { + dev_err(&mstr->dev, "Nack detected for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } else if (no_ack) { + dev_err(&mstr->dev, "Command ignored for slave %d\n", msg->slave_addr); + msg->len = 0; + return -EREMOTEIO; + } + if (msg->flag == SDW_MSG_FLAG_WRITE) + return 0; + /* Response and Command has same base address */ + for (j = 0; j < count; j++) + msg->buf[j + offset] = + (sdw->response_buf[j] >> MCP_RESPONSE_RDATA_SHIFT); + return 0; +} + + +irqreturn_t cnl_sdw_irq_handler(int irq, void *context) +{ + struct cnl_sdw *sdw = context; + volatile int int_status, status, wake_sts; + + struct cnl_sdw_data *data = &sdw->data; + volatile int slave_intstat0 = 0, slave_intstat1 = 0; + struct sdw_master *mstr = sdw->mstr; + + /* + * Return if IP is in power down state. Interrupt can still come + * since its shared irq. + */ + if (!sdw->sdw_link_status) + return IRQ_NONE; + + int_status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_INTSTAT); + status = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_STAT); + slave_intstat0 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT0); + slave_intstat1 = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_SLAVEINTSTAT1); + wake_sts = cnl_sdw_reg_readw(data->sdw_shim, + SDW_CNL_SNDWWAKESTS_REG_OFFSET); + cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET, + wake_sts); + + if (!(int_status & (MCP_INTSTAT_IRQ_MASK << MCP_INTSTAT_IRQ_SHIFT))) + return IRQ_NONE; + + if (int_status & (MCP_INTSTAT_RXWL_MASK << MCP_INTSTAT_RXWL_SHIFT)) { + cnl_sdw_read_response(sdw); + if (sdw->async_msg.async_xfer_complete) { + sdw_fill_message_response(mstr, sdw->async_msg.msg, + sdw->async_msg.length, 0); + complete(sdw->async_msg.async_xfer_complete); + sdw->async_msg.async_xfer_complete = NULL; + sdw->async_msg.msg = NULL; + } else + complete(&sdw->tx_complete); + } + if (int_status & (MCP_INTSTAT_CONTROLBUSCLASH_MASK << + MCP_INTSTAT_CONTROLBUSCLASH_SHIFT)) { + /* Some slave is behaving badly, where its driving + * data line during control word bits. + */ + dev_err_ratelimited(&mstr->dev, "Bus clash detected for control word\n"); + WARN_ONCE(1, "Bus clash detected for control word\n"); + } + if (int_status & (MCP_INTSTAT_DATABUSCLASH_MASK << + MCP_INTSTAT_DATABUSCLASH_SHIFT)) { + /* More than 1 slave is trying to drive bus. There is + * some problem with ownership of bus data bits, + * or either of the + * slave is behaving badly. 
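+		 * Either way the condition is only logged (rate-limited)
+		 * below; no recovery is attempted here.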
+		 */
+		dev_err_ratelimited(&mstr->dev, "Bus clash detected for data word\n");
+		WARN_ONCE(1, "Bus clash detected for data word\n");
+	}
+
+	if (int_status & (MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK <<
+			MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT)) {
+		dev_info(&mstr->dev, "Slave status change\n");
+		cnl_sdw_update_slave_status(sdw, slave_intstat0,
+				slave_intstat1);
+	}
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT0,
+			slave_intstat0);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_SLAVEINTSTAT1,
+			slave_intstat1);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_INTSTAT, int_status);
+	return IRQ_HANDLED;
+}
+
+static enum sdw_command_response cnl_program_scp_addr(struct sdw_master *mstr,
+			struct sdw_msg *msg)
+{
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+	struct cnl_sdw_data *data = &sdw->data;
+	u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE;
+	u32 cmd_data[2] = {0, 0};
+	unsigned long time_left;
+	int no_ack = 0, nack = 0;
+	int i;
+
+	/* Since we are programming 2 commands, program the
+	 * RX watermark level at 2
+	 */
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, 2);
+	/* Program device address */
+	cmd_data[0] |= (msg->slave_addr & MCP_COMMAND_DEV_ADDR_MASK) <<
+			MCP_COMMAND_DEV_ADDR_SHIFT;
+	/* Write command to program the scp_addr1 register */
+	cmd_data[0] |= (0x3 << MCP_COMMAND_COMMAND_SHIFT);
+	cmd_data[1] = cmd_data[0];
+	/* scp_addr1 register address */
+	cmd_data[0] |= (SDW_SCP_ADDRPAGE1 << MCP_COMMAND_REG_ADDR_L_SHIFT);
+	cmd_data[1] |= (SDW_SCP_ADDRPAGE2 << MCP_COMMAND_REG_ADDR_L_SHIFT);
+	cmd_data[0] |= msg->addr_page1;
+	cmd_data[1] |= msg->addr_page2;
+
+	cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[0]);
+	cmd_base += SDW_CNL_CMD_WORD_LEN;
+	cnl_sdw_reg_writel(data->sdw_regs, cmd_base, cmd_data[1]);
+
+	time_left = wait_for_completion_timeout(&sdw->tx_complete,
+			3000);
+	if (!time_left) {
+		dev_err(&mstr->dev, "Controller timed out\n");
+		msg->len = 0;
+		return -ETIMEDOUT;
+	}
+
+	for (i = 0; i < CNL_SDW_SCP_ADDR_REGS; i++) {
+		if (!(MCP_RESPONSE_ACK_MASK & sdw->response_buf[i])) {
+			no_ack = 1;
+			dev_err(&mstr->dev, "Ack not received\n");
+			if ((MCP_RESPONSE_NACK_MASK & sdw->response_buf[i])) {
+				nack = 1;
+				dev_err(&mstr->dev, "NACK received\n");
+			}
+		}
+	}
+	/* We don't return an error if NACK or no ACK is detected for the
+	 * broadcast address, because some Slaves might support the SCP
+	 * addrpage while others may not. This is not strictly correct,
+	 * since we won't be able to tell whether a NACK was caused by a
+	 * Slave not supporting SCP_addrpage or whether it is a genuine
+	 * NACK due to bus errors. We are also not sure what Slaves will
+	 * report, NACK or no ACK, for the scp_addrpage programming if
+	 * they don't support it. The spec is not clear about this.
+	 * This needs to be thought through.
+	 */
+	if (nack && (msg->slave_addr != 15)) {
+		dev_err(&mstr->dev, "SCP_addrpage write NACKed for slave %d\n", msg->slave_addr);
+		return -EREMOTEIO;
+	} else if (no_ack && (msg->slave_addr != 15)) {
+		dev_err(&mstr->dev, "SCP_addrpage write ignored for slave %d\n", msg->slave_addr);
+		return -EREMOTEIO;
+	} else
+		return 0;
+}
+
+static enum sdw_command_response sdw_xfer_msg(struct sdw_master *mstr,
+	struct sdw_msg *msg, int cmd, int offset, int count, bool async)
+{
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+	struct cnl_sdw_data *data = &sdw->data;
+	int j;
+	u32 cmd_base = SDW_CNL_MCP_COMMAND_BASE;
+	u32 cmd_data = 0;
+	unsigned long time_left;
+	u16 addr = msg->addr;
+
+	/* Program the watermark level up to the number of commands */
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_FIFOLEVEL, count);
+
+	cmd_base = SDW_CNL_MCP_COMMAND_BASE;
+	for (j = 0; j < count; j++) {
+		/* Program device address */
+		cmd_data = 0;
+		cmd_data |= (msg->slave_addr &
+				MCP_COMMAND_DEV_ADDR_MASK) <<
+				MCP_COMMAND_DEV_ADDR_SHIFT;
+		/* Program read/write command */
+		cmd_data |= (cmd << MCP_COMMAND_COMMAND_SHIFT);
+		/* Program the incrementing register address */
+		cmd_data |= (addr++ << MCP_COMMAND_REG_ADDR_L_SHIFT);
+		/* Program the data in case of a write command */
+		if (msg->flag == SDW_MSG_FLAG_WRITE)
+			cmd_data |= msg->buf[j + offset];
+
+		cmd_data |= ((msg->ssp_tag &
+				MCP_COMMAND_SSP_TAG_MASK) <<
+				MCP_COMMAND_SSP_TAG_SHIFT);
+		cnl_sdw_reg_writel(data->sdw_regs,
+				cmd_base, cmd_data);
+		cmd_base += SDW_CNL_CMD_WORD_LEN;
+	}
+
+	/* If async, don't wait for completion */
+	if (async)
+		return 0;
+	/* Wait 3 seconds before timing out */
+	time_left = wait_for_completion_timeout(&sdw->tx_complete, 3 * HZ);
+	if (!time_left) {
+		dev_err(&mstr->dev, "Controller timed out\n");
+		msg->len = 0;
+		return -ETIMEDOUT;
+	}
+	return sdw_fill_message_response(mstr, msg, count, offset);
+}
+
+static enum sdw_command_response cnl_sdw_xfer_msg_async(struct sdw_master *mstr,
+	struct sdw_msg *msg, bool program_scp_addr_page,
+	struct sdw_async_xfer_data *data)
+{
+	int ret = 0, cmd;
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+
+	/* Only 1 message can be handled in async fashion. This is used
+	 * only for bank switching, where during aggregation it is
+	 * required to synchronously switch the bank on more than 1
+	 * controller.
+	 */
+	if (msg->len > 1) {
+		ret = -EINVAL;
+		goto error;
+	}
+	/* If scp addr programming fails, goto error */
+	if (program_scp_addr_page)
+		ret = cnl_program_scp_addr(mstr, msg);
+	if (ret)
+		goto error;
+
+	switch (msg->flag) {
+	case SDW_MSG_FLAG_READ:
+		cmd = 0x2;
+		break;
+	case SDW_MSG_FLAG_WRITE:
+		cmd = 0x3;
+		break;
+	default:
+		dev_err(&mstr->dev, "Command not supported\n");
+		return -EINVAL;
+	}
+	sdw->async_msg.async_xfer_complete = &data->xfer_complete;
+	sdw->async_msg.msg = msg;
+	sdw->async_msg.length = msg->len;
+	/* Don't wait for the reply here; the calling function waits for it.
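+	 * The reply is delivered from cnl_sdw_irq_handler(): on the RX
+	 * watermark interrupt it fills in the message response and
+	 * completes data->xfer_complete.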
*/ + ret = sdw_xfer_msg(mstr, msg, cmd, 0, msg->len, true); + return ret; +error: + msg->len = 0; + complete(&data->xfer_complete); + return -EINVAL; + +} + +static enum sdw_command_response cnl_sdw_xfer_msg(struct sdw_master *mstr, + struct sdw_msg *msg, bool program_scp_addr_page) +{ + int i, ret = 0, cmd; + + if (program_scp_addr_page) + ret = cnl_program_scp_addr(mstr, msg); + + if (ret) { + msg->len = 0; + return ret; + } + + switch (msg->flag) { + case SDW_MSG_FLAG_READ: + cmd = 0x2; + break; + case SDW_MSG_FLAG_WRITE: + cmd = 0x3; + break; + default: + dev_err(&mstr->dev, "Command not supported\n"); + return -EINVAL; + } + for (i = 0; i < msg->len / SDW_CNL_MCP_COMMAND_LENGTH; i++) { + ret = sdw_xfer_msg(mstr, msg, + cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + break; + } + if (!(msg->len % SDW_CNL_MCP_COMMAND_LENGTH)) + return ret; + ret = sdw_xfer_msg(mstr, msg, cmd, i * SDW_CNL_MCP_COMMAND_LENGTH, + msg->len % SDW_CNL_MCP_COMMAND_LENGTH, false); + if (ret < 0) + return -EINVAL; + return ret; +} + +static void cnl_sdw_bra_prep_crc(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int addr = addr_offset; + + txdata_buf[addr++] = sdw_bus_compute_crc8((block->values + data_offset), + block->num_bytes); + txdata_buf[addr++] = 0x0; + txdata_buf[addr++] = 0x0; + txdata_buf[addr] |= ((0x2 & SDW_BRA_SOP_EOP_PDI_MASK) + << SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_prep_data(u8 *txdata_buf, + struct sdw_bra_block *block, int data_offset, int addr_offset) +{ + + int i; + int addr = addr_offset; + + for (i = 0; i < block->num_bytes; i += 2) { + + txdata_buf[addr++] = block->values[i + data_offset]; + if ((block->num_bytes - 1) - i) + txdata_buf[addr++] = block->values[i + data_offset + 1]; + else + txdata_buf[addr++] = 0; + + txdata_buf[addr++] = 0; + txdata_buf[addr++] = 0; + } +} + +static void cnl_sdw_bra_prep_hdr(u8 *txdata_buf, + struct sdw_bra_block *block, int rolling_id, int offset) +{ + + u8 tmp_hdr[6] = {0, 0, 0, 0, 0, 0}; + u8 temp = 0x0; + + /* + * 6 bytes header + * 1st byte: b11001010 + * b11: Header is active + * b0010: Device number 2 is selected + * b1: Write operation + * b0: MSB of BRA_NumBytes is 0 + * 2nd byte: LSB of number of bytes + * 3rd byte to 6th byte: Slave register offset + */ + temp |= (SDW_BRA_HDR_ACTIVE & SDW_BRA_HDR_ACTIVE_MASK) << + SDW_BRA_HDR_ACTIVE_SHIFT; + temp |= (block->slave_addr & SDW_BRA_HDR_SLV_ADDR_MASK) << + SDW_BRA_HDR_SLV_ADDR_SHIFT; + temp |= (block->cmd & SDW_BRA_HDR_RD_WR_MASK) << + SDW_BRA_HDR_RD_WR_SHIFT; + + if (block->num_bytes > SDW_BRA_HDR_MSB_BYTE_CHK) + temp |= (SDW_BRA_HDR_MSB_BYTE_SET & SDW_BRA_HDR_MSB_BYTE_MASK); + else + temp |= (SDW_BRA_HDR_MSB_BYTE_UNSET & + SDW_BRA_HDR_MSB_BYTE_MASK); + + txdata_buf[offset + 0] = tmp_hdr[0] = temp; + txdata_buf[offset + 1] = tmp_hdr[1] = block->num_bytes; + txdata_buf[offset + 3] |= ((SDW_BRA_SOP_EOP_PDI_STRT_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); + + txdata_buf[offset + 3] |= ((rolling_id & SDW_BRA_ROLLINGID_PDI_MASK) + << SDW_BRA_ROLLINGID_PDI_SHIFT); + + txdata_buf[offset + 4] = tmp_hdr[2] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK24) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT24); + + txdata_buf[offset + 5] = tmp_hdr[3] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK16) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT16); + + txdata_buf[offset + 8] = tmp_hdr[4] = ((block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK8) + >> SDW_BRA_HDR_SLV_REG_OFF_SHIFT8); + + 
txdata_buf[offset + 9] = tmp_hdr[5] = (block->reg_offset & + SDW_BRA_HDR_SLV_REG_OFF_MASK0); + + /* CRC check */ + txdata_buf[offset + 0xc] = sdw_bus_compute_crc8(tmp_hdr, + SDW_BRA_HEADER_SIZE); + + if (!block->cmd) + txdata_buf[offset + 0xf] = ((SDW_BRA_SOP_EOP_PDI_END_VALUE & + SDW_BRA_SOP_EOP_PDI_MASK) << + SDW_BRA_SOP_EOP_PDI_SHIFT); +} + +static void cnl_sdw_bra_pdi_tx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + struct cnl_sdw_pdi_stream tx_pdi_stream; + unsigned int tx_ch_map_offset, port_ctrl_offset, tx_pdi_config_offset; + unsigned int port_ctrl = 0, tx_pdi_config = 0, tx_stream_config; + int tx_pdi_ch_map = 0; + + if (enable) { + /* DP0 PORT CTRL REG */ + port_ctrl_offset = SDW_CNL_PORTCTRL + (SDW_BRA_PORT_ID * + SDW_CNL_PORT_REG_OFFSET); + + port_ctrl &= ~(PORTCTRL_PORT_DIRECTION_MASK << + PORTCTRL_PORT_DIRECTION_SHIFT); + + port_ctrl |= ((SDW_BRA_BULK_ENABLE & SDW_BRA_BLK_EN_MASK) << + SDW_BRA_BLK_EN_SHIFT); + + port_ctrl |= ((SDW_BRA_BPT_PAYLOAD_TYPE & + SDW_BRA_BPT_PYLD_TY_MASK) << + SDW_BRA_BPT_PYLD_TY_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, port_ctrl_offset, + port_ctrl); + + /* PDI0 Programming */ + tx_pdi_stream.l_ch_num = 0; + tx_pdi_stream.h_ch_num = 0xF; + tx_pdi_stream.pdi_num = SDW_BRA_PDI_TX_ID; + /* TODO: Remove hardcoding */ + tx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + tx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS2CM SHIM REG */ + tx_ch_map_offset = SDW_CNL_CTLS2CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + tx_pdi_ch_map |= (tx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + tx_pdi_ch_map |= (tx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, tx_ch_map_offset, + tx_pdi_ch_map); + + /* TX PDI0 CONFIG REG BANK 0 */ + tx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (tx_pdi_stream.pdi_num * 16)); + + tx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + tx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + tx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + tx_pdi_config_offset, tx_pdi_config); + + /* ALH STRMzCFG REG */ + tx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + tx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + tx_stream_config |= (0x0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (tx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + tx_stream_config); + + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
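+		 * A minimal sketch of that workaround, reusing the
+		 * register helpers above (with the PDI config offset
+		 * computed as in the enable path):
+		 *
+		 *	cfg = cnl_sdw_reg_readl(sdw->data.sdw_regs,
+		 *			tx_pdi_config_offset);
+		 *	cfg |= (SDW_BRA_SOFT_RESET <<
+		 *			PDINCONFIG_PORT_SOFT_RESET_SHIFT);
+		 *	cnl_sdw_reg_writel(sdw->data.sdw_regs,
+		 *			tx_pdi_config_offset, cfg);
+		 *	cnl_sdw_reg_writel(sdw->data.sdw_regs,
+		 *			tx_pdi_config_offset, cfg);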
+ */ + + } +} + +static void cnl_sdw_bra_pdi_rx_config(struct sdw_master *mstr, + struct cnl_sdw *sdw, bool enable) +{ + + struct cnl_sdw_pdi_stream rx_pdi_stream; + unsigned int rx_ch_map_offset, rx_pdi_config_offset, rx_stream_config; + unsigned int rx_pdi_config = 0; + int rx_pdi_ch_map = 0; + + if (enable) { + + /* RX PDI1 Configuration */ + rx_pdi_stream.l_ch_num = 0; + rx_pdi_stream.h_ch_num = 0xF; + rx_pdi_stream.pdi_num = SDW_BRA_PDI_RX_ID; + rx_pdi_stream.sdw_pdi_num = mstr->nr * 16 + + rx_pdi_stream.pdi_num + 3; + + /* SNDWxPCMS3CM SHIM REG */ + rx_ch_map_offset = SDW_CNL_CTLS3CM + + (SDW_CNL_PCMSCHM_REG_OFFSET * mstr->nr); + + rx_pdi_ch_map |= (rx_pdi_stream.sdw_pdi_num & + CNL_PCMSYCM_STREAM_MASK) << + CNL_PCMSYCM_STREAM_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.l_ch_num & + CNL_PCMSYCM_LCHAN_MASK) << + CNL_PCMSYCM_LCHAN_SHIFT; + + rx_pdi_ch_map |= (rx_pdi_stream.h_ch_num & + CNL_PCMSYCM_HCHAN_MASK) << + CNL_PCMSYCM_HCHAN_SHIFT; + + cnl_sdw_reg_writew(sdw->data.sdw_shim, rx_ch_map_offset, + rx_pdi_ch_map); + + /* RX PDI1 CONFIG REG */ + rx_pdi_config_offset = (SDW_CNL_PDINCONFIG0 + + (rx_pdi_stream.pdi_num * 16)); + + rx_pdi_config |= ((SDW_BRA_PORT_ID & + PDINCONFIG_PORT_NUMBER_MASK) << + PDINCONFIG_PORT_NUMBER_SHIFT); + + rx_pdi_config |= (SDW_BRA_CHN_MASK << + PDINCONFIG_CHANNEL_MASK_SHIFT); + + rx_pdi_config |= (SDW_BRA_SOFT_RESET << + PDINCONFIG_PORT_SOFT_RESET_SHIFT); + + cnl_sdw_reg_writel(sdw->data.sdw_regs, + rx_pdi_config_offset, rx_pdi_config); + + + /* ALH STRMzCFG REG */ + rx_stream_config = cnl_sdw_reg_readl(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET)); + + rx_stream_config |= (CNL_STRMZCFG_DMAT_VAL & + CNL_STRMZCFG_DMAT_MASK) << + CNL_STRMZCFG_DMAT_SHIFT; + + rx_stream_config |= (0 & CNL_STRMZCFG_CHAN_MASK) << + CNL_STRMZCFG_CHAN_SHIFT; + + cnl_sdw_reg_writel(sdw->data.alh_base, + (rx_pdi_stream.sdw_pdi_num * + ALH_CNL_STRMZCFG_OFFSET), + rx_stream_config); + + } else { + + /* + * TODO: There is official workaround which needs to be + * performed for PDI config register. The workaround + * is to perform SoftRst twice in order to clear + * PDI fifo contents. 
+ */ + + } +} + +static void cnl_sdw_bra_pdi_config(struct sdw_master *mstr, bool enable) +{ + struct cnl_sdw *sdw; + + /* Get driver data for master */ + sdw = sdw_master_get_drvdata(mstr); + + /* PDI0 configuration */ + cnl_sdw_bra_pdi_tx_config(mstr, sdw, enable); + + /* PDI1 configuration */ + cnl_sdw_bra_pdi_rx_config(mstr, sdw, enable); +} + +static int cnl_sdw_bra_verify_footer(u8 *rx_buf, int offset) +{ + int ret = 0; + u8 ftr_response; + u8 ack_nack = 0; + u8 ftr_result = 0; + + ftr_response = rx_buf[offset]; + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((ftr_response >> SDW_BRA_FTR_RESP_ACK_SHIFT) & + SDW_BRA_FTR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA Packet Ignored\n"); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK\n"); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Reserved\n"); + return -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort\n"); + return -EINVAL; + } + + /* + * BRA footer result check + * Writes: + * 0 -> Good. Target accepted write payload + * 1 -> Bad. Target did not accept write payload + * Reads: + * 0 -> Good. Target completed read operation successfully + * 1 -> Bad. Target failed to complete read operation successfully + */ + ftr_result = (ftr_response >> SDW_BRA_FTR_RESP_RES_SHIFT) & + SDW_BRA_FTR_RESP_RES_MASK; + if (ftr_result == SDW_BRA_FTR_RESULT_BAD) { + pr_info("BRA: Read/Write operation failed on target side\n"); + /* Error scenario */ + return -EINVAL; + } + + pr_info("BRA: Read/Write operation complete on target side\n"); + + return ret; +} + +static int cnl_sdw_bra_verify_hdr(u8 *rx_buf, int offset, bool *chk_footer, + int roll_id) +{ + int ret = 0; + u8 hdr_response, rolling_id; + u8 ack_nack = 0; + u8 not_ready = 0; + + /* Match rolling ID */ + hdr_response = rx_buf[offset]; + rolling_id = rx_buf[offset + SDW_BRA_ROLLINGID_PDI_INDX]; + + rolling_id = (rolling_id & SDW_BRA_ROLLINGID_PDI_MASK); + if (roll_id != rolling_id) { + pr_info("BRA: Rolling ID doesn't match, returning error\n"); + return -EINVAL; + } + + /* + * ACK/NACK check + * NACK+ACK value from target: + * 00 -> Ignored + * 01 -> OK + * 10 -> Failed (Header CRC check failed) + * 11 -> Reserved + * NACK+ACK values at Target or initiator + * 00 -> Ignored + * 01 -> OK + * 10 -> Abort (Header cannot be trusted) + * 11 -> Abort (Header cannot be trusted) + */ + ack_nack = ((hdr_response >> SDW_BRA_HDR_RESP_ACK_SHIFT) & + SDW_BRA_HDR_RESP_ACK_MASK); + if (ack_nack == SDW_BRA_ACK_NAK_IGNORED) { + pr_info("BRA: Packet Ignored rolling_id:%d\n", rolling_id); + ret = -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_OK) + pr_info("BRA: Packet OK rolling_id:%d\n", rolling_id); + else if (ack_nack == SDW_BRA_ACK_NAK_FAILED_ABORT) { + pr_info("BRA: Packet Failed/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } else if (ack_nack == SDW_BRA_ACK_NAK_RSVD_ABORT) { + pr_info("BRA: Packet Reserved/Abort rolling_id:%d\n", rolling_id); + return -EINVAL; + } + + /* BRA not ready check */ + not_ready = (hdr_response >> SDW_BRA_HDR_RESP_NRDY_SHIFT) & + SDW_BRA_HDR_RESP_NRDY_MASK; + if (not_ready == SDW_BRA_TARGET_NOT_READY) { + pr_info("BRA: Target not 
ready for read/write operation rolling_id:%d\n",
+				rolling_id);
+		*chk_footer = false;
+		return -EBUSY;
+	}
+
+	pr_info("BRA: Target ready for read/write operation rolling_id:%d\n", rolling_id);
+	return ret;
+}
+
+static void cnl_sdw_bra_remove_data_padding(u8 *src_buf, u8 *dst_buf,
+			u8 size)
+{
+	int i;
+
+	for (i = 0; i < size / 2; i++) {
+		/* Copy the two payload bytes and skip the two pad bytes */
+		*dst_buf++ = *src_buf++;
+		*dst_buf++ = *src_buf++;
+		src_buf++;
+		src_buf++;
+	}
+}
+
+static int cnl_sdw_bra_check_data(struct sdw_master *mstr,
+		struct sdw_bra_block *block, struct bra_info *info)
+{
+	int offset = 0, rolling_id = 0, tmp_offset = 0;
+	int rx_crc_comp = 0, rx_crc_rvd = 0;
+	int i, ret;
+	bool chk_footer = true;
+	int rx_buf_size = info->rx_block_size;
+	u8 *rx_buf = info->rx_ptr;
+	u8 *tmp_buf = NULL;
+
+	/* TODO: Remove below hex dump print */
+	print_hex_dump(KERN_DEBUG, "BRA RX DATA:", DUMP_PREFIX_OFFSET, 8, 4,
+			rx_buf, rx_buf_size, false);
+
+	/* Allocate a temporary buffer in case of a read request */
+	if (!block->cmd) {
+		tmp_buf = kzalloc(block->num_bytes, GFP_KERNEL);
+		if (!tmp_buf) {
+			ret = -ENOMEM;
+			goto error;
+		}
+	}
+
+	/*
+	 * TODO: The response header and footer carry no indication of
+	 * whether a packet was a read or a write, so the controller
+	 * needs to keep the transmitted packet information around in
+	 * order to verify the RX packets. Also, the current error
+	 * handling is coarse: if any packet response is not a success,
+	 * the whole transfer is reported to the Slave as failed.
+	 */
+
+	/*
+	 * Verification of the response packets for one known,
+	 * hardcoded configuration. This needs to be extended
+	 * once the dynamic algorithm is integrated.
+	 */
+
+	/* 2 valid read responses */
+	for (i = 0; i < info->valid_packets; i++) {
+
+		pr_info("BRA: Verifying packet number:%d with rolling id:%d\n",
+				info->packet_info[i].packet_num,
+				rolling_id);
+		chk_footer = true;
+		ret = cnl_sdw_bra_verify_hdr(rx_buf, offset, &chk_footer,
+				rolling_id);
+		if (ret < 0) {
+			dev_err(&mstr->dev, "BRA: Header verification failed for packet number:%d\n",
+					info->packet_info[i].packet_num);
+			goto error;
+		}
+
+		/* Increment offset past the header response */
+		offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI;
+
+		if (!block->cmd) {
+
+			/* Remove PDI padding from the data */
+			cnl_sdw_bra_remove_data_padding(&rx_buf[offset],
+					&tmp_buf[tmp_offset],
+					info->packet_info[i].num_data_bytes);
+
+			/* Increment offset past the consumed data */
+			offset = offset +
+				(info->packet_info[i].num_data_bytes * 2);
+
+			rx_crc_comp = sdw_bus_compute_crc8(&tmp_buf[tmp_offset],
+					info->packet_info[i].num_data_bytes);
+
+			/* Match the data CRC */
+			rx_crc_rvd = rx_buf[offset];
+			if (rx_crc_comp != rx_crc_rvd) {
+				ret = -EINVAL;
+				dev_err(&mstr->dev, "BRA: Data CRC doesn't match for packet number:%d\n",
+						info->packet_info[i].packet_num);
+				goto error;
+			}
+
+			/* Advance the destination buffer past the copied data */
+			tmp_offset = tmp_offset +
+				info->packet_info[i].num_data_bytes;
+
+			/* Increment offset past the CRC */
+			offset = offset + SDW_BRA_DATA_CRC_SIZE_PDI;
+		}
+
+		if (chk_footer) {
+			ret = cnl_sdw_bra_verify_footer(rx_buf, offset);
+			if (ret < 0) {
+				ret = -EINVAL;
+				dev_err(&mstr->dev, "BRA: Footer verification failed for packet number:%d\n",
+						info->packet_info[i].packet_num);
+				goto error;
+			}
+		}
+
+		/* Increment offset past the footer response */
+		offset = offset + SDW_BRA_HEADER_RESP_SIZE_PDI;
+
+		/* Increment the rolling id for the next packet */
+		rolling_id++;
+		if (rolling_id > 0xF)
+			rolling_id = 0;
+	}
+
+	/*
+	 * No need to check the dummy responses from the codec.
+	 * The assumption made here is that dummy
packets are + * added in 1ms buffer only after valid packets. + */ + + /* Copy data to codec buffer in case of read request */ + if (!block->cmd) + memcpy(block->values, tmp_buf, block->num_bytes); + +error: + /* Free up temp buffer allocated in case of read request */ + if (!block->cmd) + kfree(tmp_buf); + + /* Free up buffer allocated in cnl_sdw_bra_data_ops */ + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_bra_data_ops(struct sdw_master *mstr, + struct sdw_bra_block *block, struct bra_info *info) +{ + + struct sdw_bra_block tmp_block; + int i; + int tx_buf_size = 384, rx_buf_size = 1152; + u8 *tx_buf = NULL, *rx_buf = NULL; + int rolling_id = 0, total_bytes = 0, offset = 0, reg_offset = 0; + int dummy_read = 0x0000; + int ret; + + /* + * TODO: Run an algorithm here to identify the buffer size + * for TX and RX buffers + number of dummy packets (read + * or write) to be added for to align buffers. + */ + + info->tx_block_size = tx_buf_size; + info->tx_ptr = tx_buf = kzalloc(tx_buf_size, GFP_KERNEL); + if (!tx_buf) { + ret = -ENOMEM; + goto error; + } + + info->rx_block_size = rx_buf_size; + info->rx_ptr = rx_buf = kzalloc(rx_buf_size, GFP_KERNEL); + if (!rx_buf) { + ret = -ENOMEM; + goto error; + } + + /* Fill valid packets transferred per millisecond buffer */ + info->valid_packets = 2; + info->packet_info = kcalloc(info->valid_packets, + sizeof(*info->packet_info), + GFP_KERNEL); + if (!info->packet_info) { + ret = -ENOMEM; + goto error; + } + + /* + * Below code performs packet preparation for one known + * configuration. + * 1. 2 Valid Read request with 18 bytes each. + * 2. 22 dummy read packets with 18 bytes each. + */ + for (i = 0; i < info->valid_packets; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = block->cmd; /* Read Request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = block->reg_offset + reg_offset; + tmp_block.values = NULL; + reg_offset += tmp_block.num_bytes; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + /* Total Header size: Header + Header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + if (block->cmd) { + /* + * PDI data preparation in case of write request + * Assumption made here is data size from codec will + * be always an even number. 
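+			 * On the PDI, each pair of payload bytes is
+			 * followed by two padding bytes (the inverse of
+			 * cnl_sdw_bra_remove_data_padding() above), e.g.
+			 * payload d0 d1 d2 d3 goes on the wire as:
+			 *     d0 d1 xx xx d2 d3 xx xx
+			 * which is why the offset below advances by
+			 * num_bytes * 2.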
+ */ + cnl_sdw_bra_prep_data(tx_buf, &tmp_block, + total_bytes, offset); + offset += tmp_block.num_bytes * 2; + + /* Data CRC */ + cnl_sdw_bra_prep_crc(tx_buf, &tmp_block, + total_bytes, offset); + offset += SDW_BRA_DATA_CRC_SIZE_PDI; + } + + total_bytes += tmp_block.num_bytes; + rolling_id++; + + /* Fill packet info data structure */ + info->packet_info[i].packet_num = i + 1; + info->packet_info[i].num_data_bytes = tmp_block.num_bytes; + } + + /* Prepare dummy packets */ + for (i = 0; i < 22; i++) { + tmp_block.slave_addr = block->slave_addr; + tmp_block.cmd = 0; /* Read request */ + tmp_block.num_bytes = 18; + tmp_block.reg_offset = dummy_read++; + tmp_block.values = NULL; + + cnl_sdw_bra_prep_hdr(tx_buf, &tmp_block, rolling_id, offset); + + /* Total Header size: RD header + RD header CRC size on PDI */ + offset += SDW_BRA_HEADER_TOTAL_SZ_PDI; + + total_bytes += tmp_block.num_bytes; + rolling_id++; + } + + /* TODO: Remove below hex dump print */ + print_hex_dump(KERN_DEBUG, "BRA PDI VALID TX DATA:", + DUMP_PREFIX_OFFSET, 8, 4, tx_buf, tx_buf_size, false); + + return 0; + +error: + kfree(info->tx_ptr); + kfree(info->rx_ptr); + kfree(info->packet_info); + + return ret; +} + +static int cnl_sdw_xfer_bulk(struct sdw_master *mstr, + struct sdw_bra_block *block) +{ + struct cnl_sdw *sdw = sdw_master_get_platdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + struct cnl_bra_operation *ops = data->bra_data->bra_ops; + struct bra_info info; + int ret; + + /* + * 1. PDI Configuration + * 2. Prepare BRA packets including CRC calculation. + * 3. Configure TX and RX DMA in one shot mode. + * 4. Configure TX and RX Pipeline. + * 5. Run TX and RX DMA. + * 6. Run TX and RX pipelines. + * 7. Wait on completion for RX buffer. + * 8. Match TX and RX buffer packets and check for errors. 
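+	 *
+	 * Note: on success of cnl_sdw_bra_data_ops() in step 2, the
+	 * TX/RX buffers and packet_info it allocates are freed only
+	 * inside cnl_sdw_bra_check_data() in step 8, so error paths
+	 * that bail out between those two steps currently leak them.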
+ */ + + /* Memset bra_info data structure */ + memset(&info, 0x0, sizeof(info)); + + /* Fill master number in bra info data structure */ + info.mstr_num = mstr->nr; + + /* Prepare TX buffer */ + ret = cnl_sdw_bra_data_ops(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Request packet(s) creation failed\n"); + goto out; + } + + /* Pipeline Setup (ON) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline setup failed\n"); + goto out; + } + + /* PDI Configuration (ON) */ + cnl_sdw_bra_pdi_config(mstr, true); + + /* Trigger START host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, true, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline start failed\n"); + goto out; + } + + /* Trigger STOP host DMA and pipeline */ + ret = ops->bra_platform_xfer(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline stop failed\n"); + goto out; + } + + /* Pipeline Setup (OFF) */ + ret = ops->bra_platform_setup(data->bra_data->drv_data, false, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Pipeline de-setup failed\n"); + goto out; + } + + /* Verify RX buffer */ + ret = cnl_sdw_bra_check_data(mstr, block, &info); + if (ret < 0) { + dev_err(&mstr->dev, "BRA: Response packet(s) incorrect\n"); + goto out; + } + + /* PDI Configuration (OFF) */ + cnl_sdw_bra_pdi_config(mstr, false); + +out: + return ret; +} + +static int cnl_sdw_mon_handover(struct sdw_master *mstr, + bool enable) +{ + int mcp_config; + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + mcp_config = cnl_sdw_reg_readl(data->sdw_regs, SDW_CNL_MCP_CONFIG); + if (enable) + mcp_config |= MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT; + else + mcp_config &= ~(MCP_CONFIG_BRELENABLE_MASK << + MCP_CONFIG_BRELENABLE_SHIFT); + + cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONFIG, mcp_config); + return 0; +} + +static int cnl_sdw_set_ssp_interval(struct sdw_master *mstr, + int ssp_interval, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int sspctrl_offset, check; + + if (bank) + sspctrl_offset = SDW_CNL_MCP_SSPCTRL1; + else + sspctrl_offset = SDW_CNL_MCP_SSPCTRL0; + + cnl_sdw_reg_writel(data->sdw_regs, sspctrl_offset, ssp_interval); + + check = cnl_sdw_reg_readl(data->sdw_regs, sspctrl_offset); + + return 0; +} + +static int cnl_sdw_set_clock_freq(struct sdw_master *mstr, + int cur_clk_div, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int mcp_clockctrl_offset, mcp_clockctrl; + + + /* TODO: Retrieve divider value or get value directly from calling + * function + */ + int divider = (cur_clk_div - 1); + + if (bank) { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL1; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL1); + + } else { + mcp_clockctrl_offset = SDW_CNL_MCP_CLOCKCTRL0; + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_CLOCKCTRL0); + } + + mcp_clockctrl |= divider; + + /* Write value here */ + cnl_sdw_reg_writel(data->sdw_regs, mcp_clockctrl_offset, + mcp_clockctrl); + + mcp_clockctrl = cnl_sdw_reg_readl(data->sdw_regs, + mcp_clockctrl_offset); + return 0; +} + +static int cnl_sdw_set_port_params(struct sdw_master *mstr, + struct sdw_port_params *params, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = 
&sdw->data; + int dpn_config = 0, dpn_config_offset; + + if (bank) + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + else + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_config |= (((params->word_length - 1) & DPN_CONFIG_WL_MASK) << + DPN_CONFIG_WL_SHIFT); + dpn_config |= ((params->port_flow_mode & DPN_CONFIG_PF_MODE_MASK) << + DPN_CONFIG_PF_MODE_SHIFT); + dpn_config |= ((params->port_data_mode & DPN_CONFIG_PD_MODE_MASK) << + DPN_CONFIG_PD_MODE_SHIFT); + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + return 0; +} + +static int cnl_sdw_set_port_transport_params(struct sdw_master *mstr, + struct sdw_transport_params *params, int bank) +{ +struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + + int dpn_config = 0, dpn_config_offset; + int dpn_samplectrl_offset; + int dpn_offsetctrl = 0, dpn_offsetctrl_offset; + int dpn_hctrl = 0, dpn_hctrl_offset; + + if (bank) { + dpn_config_offset = SDW_CNL_DPN_CONFIG1; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL1; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL1; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL1; + } else { + dpn_config_offset = SDW_CNL_DPN_CONFIG0; + dpn_samplectrl_offset = SDW_CNL_DPN_SAMPLECTRL0; + dpn_hctrl_offset = SDW_CNL_DPN_HCTRL0; + dpn_offsetctrl_offset = SDW_CNL_DPN_OFFSETCTRL0; + } + dpn_config = cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + dpn_config |= ((params->blockgroupcontrol & DPN_CONFIG_BGC_MASK) << + DPN_CONFIG_BGC_SHIFT); + dpn_config |= ((params->blockpackingmode & DPN_CONFIG_BPM_MASK) << + DPN_CONFIG_BPM_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_config_offset, params->num, dpn_config); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_config_offset, params->num); + + dpn_offsetctrl |= ((params->offset1 & DPN_OFFSETCTRL0_OF1_MASK) << + DPN_OFFSETCTRL0_OF1_SHIFT); + + dpn_offsetctrl |= ((params->offset2 & DPN_OFFSETCTRL0_OF2_MASK) << + DPN_OFFSETCTRL0_OF2_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_offsetctrl_offset, params->num, dpn_offsetctrl); + + + dpn_hctrl |= ((params->hstart & DPN_HCTRL_HSTART_MASK) << + DPN_HCTRL_HSTART_SHIFT); + dpn_hctrl |= ((params->hstop & DPN_HCTRL_HSTOP_MASK) << + DPN_HCTRL_HSTOP_SHIFT); + dpn_hctrl |= ((params->lanecontrol & DPN_HCTRL_LCONTROL_MASK) << + DPN_HCTRL_LCONTROL_SHIFT); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_hctrl_offset, params->num, dpn_hctrl); + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_samplectrl_offset, params->num, + (params->sample_interval - 1)); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_hctrl_offset, params->num); + + cnl_sdw_port_reg_readl(data->sdw_regs, + dpn_samplectrl_offset, params->num); + + return 0; +} + +static int cnl_sdw_port_activate_ch(struct sdw_master *mstr, + struct sdw_activate_ch *activate_ch, int bank) +{ + struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr); + struct cnl_sdw_data *data = &sdw->data; + int dpn_channelen_offset; + int ch_mask; + + if (bank) + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN1; + else + dpn_channelen_offset = SDW_CNL_DPN_CHANNELEN0; + + if (activate_ch->activate) + ch_mask = activate_ch->ch_mask; + else + ch_mask = 0; + + cnl_sdw_port_reg_writel(data->sdw_regs, + dpn_channelen_offset, activate_ch->num, + ch_mask); + + return 0; +} + +static int cnl_sdw_port_activate_ch_pre(struct sdw_master *mstr, 
+		struct sdw_activate_ch *activate_ch, int bank)
+{
+	int sync_reg;
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+	struct cnl_sdw_data *data = &sdw->data;
+
+	if (mstr->link_sync_mask) {
+		/* Check if this link is synchronized with some other link */
+		sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC);
+		/* If this link is synchronized with another link, make
+		 * sure the command doesn't go out until sync go is
+		 * applied.
+		 */
+		sync_reg |= (1 << (data->inst_id + CNL_SYNC_CMDSYNC_SHIFT));
+		cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg);
+	}
+
+	return 0;
+}
+
+static int cnl_sdw_port_activate_ch_post(struct sdw_master *mstr,
+		struct sdw_activate_ch *activate_ch, int bank)
+{
+	int sync_reg;
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+	struct cnl_sdw_data *data = &sdw->data;
+	volatile int sync_update = 0;
+	int timeout = 10;
+
+	sync_reg = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC);
+	/* If waiting for synchronization, set the go bit; else return */
+	if (!(sync_reg & SDW_CMDSYNC_SET_MASK))
+		return 0;
+	sync_reg |= (CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT);
+	cnl_sdw_reg_writel(data->sdw_shim, SDW_CNL_SYNC, sync_reg);
+
+	/* Wait for the hardware to clear SYNCGO, up to 200 ms */
+	do {
+		sync_update = cnl_sdw_reg_readl(data->sdw_shim, SDW_CNL_SYNC);
+		if ((sync_update &
+			(CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) == 0)
+			break;
+		msleep(20);
+		timeout--;
+	} while (timeout);
+
+	if ((sync_update &
+		(CNL_SYNC_SYNCGO_MASK << CNL_SYNC_SYNCGO_SHIFT)) != 0) {
+		dev_err(&mstr->dev, "Failed to set sync go\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int cnl_sdw_probe(struct sdw_master *mstr,
+			const struct sdw_master_id *sdw_id)
+{
+	struct cnl_sdw *sdw;
+	int ret = 0;
+	struct cnl_sdw_data *data = mstr->dev.platform_data;
+
+	sdw = devm_kzalloc(&mstr->dev, sizeof(*sdw), GFP_KERNEL);
+	if (!sdw)
+		return -ENOMEM;
+
+	dev_info(&mstr->dev,
+		"Controller Resources ctrl_base = %p shim=%p irq=%d inst_id=%d\n",
+		data->sdw_regs, data->sdw_shim, data->irq, data->inst_id);
+	sdw->data.sdw_regs = data->sdw_regs;
+	sdw->data.sdw_shim = data->sdw_shim;
+	sdw->data.irq = data->irq;
+	sdw->data.inst_id = data->inst_id;
+	sdw->data.alh_base = data->alh_base;
+	sdw->mstr = mstr;
+	spin_lock_init(&sdw->ctrl_lock);
+	sdw_master_set_drvdata(mstr, sdw);
+	init_completion(&sdw->tx_complete);
+	mutex_init(&sdw->stream_lock);
+	ret = sdw_init(sdw, true);
+	if (ret) {
+		dev_err(&mstr->dev, "SoundWire controller init failed %d\n",
+			data->inst_id);
+		return ret;
+	}
+	ret = devm_request_irq(&mstr->dev,
+		sdw->data.irq, cnl_sdw_irq_handler, IRQF_SHARED, "SDW", sdw);
+	if (ret) {
+		dev_err(&mstr->dev, "unable to grab IRQ %d, disabling device\n",
+			sdw->data.irq);
+		sdw_power_down_link(sdw);
+		return ret;
+	}
+	pm_runtime_set_autosuspend_delay(&mstr->dev, 3000);
+	pm_runtime_use_autosuspend(&mstr->dev);
+	pm_runtime_enable(&mstr->dev);
+	/* Resume the device; since it is already ON, this simply marks
+	 * it active and returns.
+	 */
+	pm_runtime_get_sync(&mstr->dev);
+	pm_runtime_mark_last_busy(&mstr->dev);
+	/* Suspend the device after 3 seconds, by which time all the
+	 * Slaves should have enumerated. The initial clock frequency is
+	 * 9.6 MHz and the frame shape is 48x2, so there are 200000
+	 * frames per second and at least 600000 frames before the
+	 * device suspends. The SoundWire spec says a Slave should
+	 * attach to the bus within 4096 error-free frames after reset,
+	 * so this should be enough to make sure every Slave gets
+	 * attached to the bus.
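+	 * (SoundWire clocks data on both edges, so 9.6 MHz gives
+	 * 19.2 Mbit/s; a 48x2 frame is 96 bits, hence
+	 * 19200000 / 96 = 200000 frames/s, and 3 s is 600000 frames.)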
+	 */
+	pm_runtime_put_sync_autosuspend(&mstr->dev);
+	return ret;
+}
+
+static int cnl_sdw_remove(struct sdw_master *mstr)
+{
+	struct cnl_sdw *sdw = sdw_master_get_drvdata(mstr);
+
+	sdw_power_down_link(sdw);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int cnl_sdw_runtime_suspend(struct device *dev)
+{
+	volatile int mcp_stat;
+	int mcp_control;
+	int timeout = 0;
+	int ret = 0;
+
+	struct cnl_sdw *sdw = dev_get_drvdata(dev);
+	struct cnl_sdw_data *data = &sdw->data;
+
+	/* If the clock is already stopped, there is nothing to do */
+	mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_STAT);
+	if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+			MCP_STAT_CLOCKSTOPPED_SHIFT)) {
+		dev_info(dev, "Clock is already stopped\n");
+		return 0;
+	}
+
+	/* Write the MCP Control register to prevent block wakeup */
+	mcp_control = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_CONTROL);
+	mcp_control |= (MCP_CONTROL_BLOCKWAKEUP_MASK <<
+			MCP_CONTROL_BLOCKWAKEUP_SHIFT);
+	cnl_sdw_reg_writel(data->sdw_regs, SDW_CNL_MCP_CONTROL, mcp_control);
+
+	/* Prepare all the Slaves for clock stop */
+	ret = sdw_master_prep_for_clk_stop(sdw->mstr);
+	if (ret)
+		return ret;
+
+	/* Call bus function to broadcast the clock stop now */
+	ret = sdw_master_stop_clock(sdw->mstr);
+	if (ret)
+		return ret;
+
+	/* Wait for the clock to stop, for at most 1 second */
+	while (timeout != 10) {
+		mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+				SDW_CNL_MCP_STAT);
+		if (mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+				MCP_STAT_CLOCKSTOPPED_SHIFT))
+			break;
+		msleep(100);
+		timeout++;
+	}
+	mcp_stat = cnl_sdw_reg_readl(data->sdw_regs,
+			SDW_CNL_MCP_STAT);
+	if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK <<
+			MCP_STAT_CLOCKSTOPPED_SHIFT))) {
+		dev_err(dev, "Clock Stop failed\n");
+		ret = -EBUSY;
+		goto out;
+	}
+	/* Switch control from master IP to glue */
+	sdw_switch_to_glue(sdw);
+
+	sdw_power_down_link(sdw);
+
+	/* Enable the wakeup */
+	cnl_sdw_reg_writew(data->sdw_shim,
+			SDW_CNL_SNDWWAKEEN_REG_OFFSET,
+			(0x1 << data->inst_id));
+out:
+	return ret;
+}
+
+static int cnl_sdw_clock_stop_exit(struct cnl_sdw *sdw)
+{
+	u16 wake_en, wake_sts;
+	int ret;
+	struct cnl_sdw_data *data = &sdw->data;
+
+	/* Disable the wake up interrupt */
+	wake_en = cnl_sdw_reg_readw(data->sdw_shim,
+			SDW_CNL_SNDWWAKEEN_REG_OFFSET);
+	wake_en &= ~(0x1 << data->inst_id);
+	cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKEEN_REG_OFFSET,
+			wake_en);
+
+	/* Clear the wake status. It will be set if a Slave-requested
+	 * wakeup happened, and clear if the wake was Master-requested;
+	 * clearing it again does no harm in either case.
+	 */
+	wake_sts = cnl_sdw_reg_readw(data->sdw_shim,
+			SDW_CNL_SNDWWAKESTS_REG_OFFSET);
+	wake_sts |= (0x1 << data->inst_id);
+	cnl_sdw_reg_writew(data->sdw_shim, SDW_CNL_SNDWWAKESTS_REG_OFFSET,
+			wake_sts);
+	ret = sdw_init(sdw, false);
+	if (ret < 0) {
+		pr_err("sdw_init fail: %d\n", ret);
+		return ret;
+	}
+
+	dev_info(&sdw->mstr->dev, "Exit from clock stop successful\n");
+	return 0;
+}
+
+static int cnl_sdw_runtime_resume(struct device *dev)
+{
+	struct cnl_sdw *sdw = dev_get_drvdata(dev);
+	struct cnl_sdw_data *data = &sdw->data;
+	volatile int mcp_stat;
+	struct sdw_master *mstr;
+	int ret = 0;
+
+	mstr = sdw->mstr;
+	/*
+	 * If already resumed, do nothing. This can happen because of
+	 * wakeup enable.
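+	 * For example, a Slave-initiated wake (signalled through the
+	 * SNDWWAKEEN/SNDWWAKESTS shim registers above) may already have
+	 * restarted the clock before this callback runs.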
+ */ + mcp_stat = cnl_sdw_reg_readl(data->sdw_regs, + SDW_CNL_MCP_STAT); + if (!(mcp_stat & (MCP_STAT_CLOCKSTOPPED_MASK << + MCP_STAT_CLOCKSTOPPED_SHIFT))) { + dev_info(dev, "Clock is already running\n"); + return 0; + } + dev_info(dev, "%s %d Clock is stopped\n", __func__, __LINE__); + + ret = cnl_sdw_clock_stop_exit(sdw); + if (ret) + return ret; + dev_info(&mstr->dev, "Exit from clock stop successful\n"); + + /* Prepare all the slaves to comeout of clock stop */ + ret = sdw_mstr_deprep_after_clk_start(sdw->mstr); + if (ret) + return ret; + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int cnl_sdw_sleep_resume(struct device *dev) +{ + return cnl_sdw_runtime_resume(dev); +} +static int cnl_sdw_sleep_suspend(struct device *dev) +{ + return cnl_sdw_runtime_suspend(dev); +} +#else +#define cnl_sdw_sleep_suspend NULL +#define cnl_sdw_sleep_resume NULL +#endif /* CONFIG_PM_SLEEP */ +#else +#define cnl_sdw_runtime_suspend NULL +#define cnl_sdw_runtime_resume NULL +#endif /* CONFIG_PM */ + + +static const struct dev_pm_ops cnl_sdw_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cnl_sdw_sleep_suspend, cnl_sdw_sleep_resume) + SET_RUNTIME_PM_OPS(cnl_sdw_runtime_suspend, + cnl_sdw_runtime_resume, NULL) +}; + +static struct sdw_master_ops cnl_sdw_master_ops = { + .xfer_msg_async = cnl_sdw_xfer_msg_async, + .xfer_msg = cnl_sdw_xfer_msg, + .xfer_bulk = cnl_sdw_xfer_bulk, + .monitor_handover = cnl_sdw_mon_handover, + .set_ssp_interval = cnl_sdw_set_ssp_interval, + .set_clock_freq = cnl_sdw_set_clock_freq, + .set_frame_shape = NULL, +}; + +static struct sdw_master_port_ops cnl_sdw_master_port_ops = { + .dpn_set_port_params = cnl_sdw_set_port_params, + .dpn_set_port_transport_params = cnl_sdw_set_port_transport_params, + .dpn_port_activate_ch = cnl_sdw_port_activate_ch, + .dpn_port_activate_ch_pre = cnl_sdw_port_activate_ch_pre, + .dpn_port_activate_ch_post = cnl_sdw_port_activate_ch_post, + .dpn_port_prepare_ch = NULL, + .dpn_port_prepare_ch_pre = NULL, + .dpn_port_prepare_ch_post = NULL, + +}; + +static struct sdw_mstr_driver cnl_sdw_mstr_driver = { + .driver_type = SDW_DRIVER_TYPE_MASTER, + .driver = { + .name = "cnl_sdw_mstr", + .pm = &cnl_sdw_pm_ops, + }, + .probe = cnl_sdw_probe, + .remove = cnl_sdw_remove, + .mstr_ops = &cnl_sdw_master_ops, + .mstr_port_ops = &cnl_sdw_master_port_ops, +}; + +static int __init cnl_sdw_init(void) +{ + return sdw_mstr_driver_register(&cnl_sdw_mstr_driver); +} +module_init(cnl_sdw_init); + +static void cnl_sdw_exit(void) +{ + sdw_mstr_driver_unregister(&cnl_sdw_mstr_driver); +} +module_exit(cnl_sdw_exit); + +MODULE_DESCRIPTION("Intel SoundWire Master Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Hardik Shah "); diff --git a/drivers/sdw/sdw_cnl_priv.h b/drivers/sdw/sdw_cnl_priv.h new file mode 100644 index 000000000000..b7f44e1f9d6f --- /dev/null +++ b/drivers/sdw/sdw_cnl_priv.h @@ -0,0 +1,385 @@ +/* + * sdw_cnl_priv.h - Private definition for intel master controller driver. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_CNL_PRIV_H +#define _LINUX_SDW_CNL_PRIV_H + +#define SDW_CNL_PM_TIMEOUT 3000 /* ms */ +#define SDW_CNL_SLAVES_STAT_UPPER_DWORD_SHIFT 32 +#define SDW_CNL_SLAVE_STATUS_BITS 4 +#define SDW_CNL_CMD_WORD_LEN 4 +#define SDW_CNL_DEFAULT_SSP_INTERVAL 0x18 +#define SDW_CNL_DEFAULT_CLK_DIVIDER 0 + +#if IS_ENABLED(CONFIG_SND_SOC_INTEL_CNL_FPGA) +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x257F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x48 +#else +#define SDW_CNL_DEFAULT_SYNC_PERIOD 0x176F +#define SDW_CNL_DEFAULT_FRAME_SHAPE 0x30 +#endif + +#define SDW_CNL_PORT_REG_OFFSET 0x80 +#define CNL_SDW_SCP_ADDR_REGS 0x2 +#define SDW_CNL_PCM_PDI_NUM_OFFSET 0x2 +#define SDW_CNL_PDM_PDI_NUM_OFFSET 0x6 + +#define SDW_CNL_CTMCTL_REG_OFFSET 0x60 +#define SDW_CNL_IOCTL_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHC_REG_OFFSET 0x60 +#define SDW_CNL_PDMSCAP_REG_OFFSET 0x60 +#define SDW_CNL_PCMSCHM_REG_OFFSET 0x60 +#define SDW_CNL_SNDWWAKEEN_REG_OFFSET 0x190 +#define SDW_CNL_SNDWWAKESTS_REG_OFFSET 0x192 + + +#define SDW_CNL_MCP_CONFIG 0x0 +#define MCP_CONFIG_BRELENABLE_MASK 0x1 +#define MCP_CONFIG_BRELENABLE_SHIFT 0x6 +#define MCP_CONFIG_MAXCMDRETRY_SHIFT 24 +#define MCP_CONFIG_MAXCMDRETRY_MASK 0xF +#define MCP_CONFIG_MAXPREQDELAY_SHIFT 16 +#define MCP_CONFIG_MAXPREQDELAY_MASK 0x1F +#define MCP_CONFIG_MMMODEEN_SHIFT 0x7 +#define MCP_CONFIG_MMMODEEN_MASK 0x1 +#define MCP_CONFIG_SNIFFEREN_SHIFT 0x5 +#define MCP_CONFIG_SNIFFEREN_MASK 0x1 +#define MCP_CONFIG_SSPMODE_SHIFT 0x4 +#define MCP_CONFIG_SSPMODE_MASK 0x1 +#define MCP_CONFIG_CMDMODE_SHIFT 0x3 +#define MCP_CONFIG_CMDMODE_MASK 0x1 + +#define MCP_CONFIG_OPERATIONMODE_MASK 0x7 +#define MCP_CONFIG_OPERATIONMODE_SHIFT 0x0 +#define MCP_CONFIG_OPERATIONMODE_NORMAL 0x0 + +#define SDW_CNL_MCP_CONTROL 0x4 +#define MCP_CONTROL_RESETDELAY_SHIFT 0x8 +#define MCP_CONTROL_CMDRST_SHIFT 0x7 +#define MCP_CONTROL_CMDRST_MASK 0x1 +#define MCP_CONTROL_SOFTRST_SHIFT 0x6 +#define MCP_CONTROL_SOFTCTRLBUSRST_SHIFT 0x5 +#define MCP_CONTROL_HARDCTRLBUSRST_MASK 0x1 +#define MCP_CONTROL_HARDCTRLBUSRST_SHIFT 0x4 +#define MCP_CONTROL_CLOCKPAUSEREQ_SHIFT 0x3 +#define MCP_CONTROL_CLOCKSTOPCLEAR_SHIFT 0x2 +#define MCP_CONTROL_CLOCKSTOPCLEAR_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_MASK 0x1 +#define MCP_CONTROL_CMDACCEPTMODE_SHIFT 0x1 +#define MCP_CONTROL_BLOCKWAKEUP_SHIFT 0x0 +#define MCP_CONTROL_BLOCKWAKEUP_MASK 0x1 + + +#define MCP_SLAVEINTMASK0_MASK 0xFFFFFFFF +#define MCP_SLAVEINTMASK1_MASK 0x0000FFFF + +#define SDW_CNL_MCP_CMDCTRL 0x8 +#define SDW_CNL_MCP_SSPSTAT 0xC +#define SDW_CNL_MCP_FRAMESHAPE 0x10 +#define SDW_CNL_MCP_FRAMESHAPEINIT 0x14 +#define SDW_CNL_MCP_CONFIGUPDATE 0x18 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_SHIFT 0x0 +#define MCP_CONFIGUPDATE_CONFIGUPDATE_MASK 0x1 + +#define SDW_CNL_MCP_PHYCTRL 0x1C +#define SDW_CNL_MCP_SSPCTRL0 0x20 +#define SDW_CNL_MCP_SSPCTRL1 0x28 +#define SDW_CNL_MCP_CLOCKCTRL0 0x30 +#define SDW_CNL_MCP_CLOCKCTRL1 0x38 +#define SDW_CNL_MCP_STAT 0x40 +#define SDW_CNL_MCP_INTSTAT 0x44 +#define MCP_INTSTAT_IRQ_SHIFT 31 +#define MCP_INTSTAT_IRQ_MASK 1 +#define MCP_INTSTAT_WAKEUP_SHIFT 16 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT 12 +#define MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK 0xF +#define MCP_INTSTAT_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTSTAT_SLAVEATTACHED_SHIFT 13 +#define MCP_INTSTAT_SLAVEALERT_SHIFT 14 +#define MCP_INTSTAT_SLAVERESERVED_SHIFT 15 + 
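+
+/*
+ * The MASK values in this header are field-width masks, not pre-shifted:
+ * fields are written as (val & FIELD_MASK) << FIELD_SHIFT and read back
+ * as (reg >> FIELD_SHIFT) & FIELD_MASK. For example, decoding the
+ * slave-status-changed nibble of MCP_INTSTAT (illustrative sketch, the
+ * variable names are not part of the driver):
+ *
+ *	u32 stat = cnl_sdw_reg_readl(regs, SDW_CNL_MCP_INTSTAT);
+ *	u32 changed = (stat >> MCP_INTSTAT_SLAVE_STATUS_CHANGED_SHIFT) &
+ *			MCP_INTSTAT_SLAVE_STATUS_CHANGED_MASK;
+ */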
+#define MCP_INTSTAT_DPPDIINT_SHIFT 11 +#define MCP_INTSTAT_DPPDIINTMASK 0x1 +#define MCP_INTSTAT_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTSTAT_CONTROLBUSCLASH_MASK 0x1 +#define MCP_INTSTAT_DATABUSCLASH_SHIFT 9 +#define MCP_INTSTAT_DATABUSCLASH_MASK 0x1 +#define MCP_INTSTAT_CMDERR_SHIFT 7 +#define MCP_INTSTAT_CMDERR_MASK 0x1 +#define MCP_INTSTAT_TXE_SHIFT 1 +#define MCP_INTSTAT_TXE_MASK 0x1 +#define MCP_INTSTAT_RXWL_SHIFT 2 +#define MCP_INTSTAT_RXWL_MASK 1 + +#define SDW_CNL_MCP_INTMASK 0x48 +#define MCP_INTMASK_IRQEN_SHIFT 31 +#define MCP_INTMASK_IRQEN_MASK 0x1 +#define MCP_INTMASK_WAKEUP_SHIFT 16 +#define MCP_INTMASK_WAKEUP_MASK 0x1 +#define MCP_INTMASK_SLAVERESERVED_SHIFT 15 +#define MCP_INTMASK_SLAVERESERVED_MASK 0x1 +#define MCP_INTMASK_SLAVEALERT_SHIFT 14 +#define MCP_INTMASK_SLAVEALERT_MASK 0x1 +#define MCP_INTMASK_SLAVEATTACHED_SHIFT 13 +#define MCP_INTMASK_SLAVEATTACHED_MASK 0x1 +#define MCP_INTMASK_SLAVENOTATTACHED_SHIFT 12 +#define MCP_INTMASK_SLAVENOTATTACHED_MASK 0x1 +#define MCP_INTMASK_DPPDIINT_SHIFT 11 +#define MCP_INTMASK_DPPDIINT_MASK 0x1 +#define MCP_INTMASK_CONTROLBUSCLASH_SHIFT 10 +#define MCP_INTMASK_CONTROLBUSCLASH_MASK 1 +#define MCP_INTMASK_DATABUSCLASH_SHIFT 9 +#define MCP_INTMASK_DATABUSCLASH_MASK 1 +#define MCP_INTMASK_CMDERR_SHIFT 7 +#define MCP_INTMASK_CMDERR_MASK 0x1 +#define MCP_INTMASK_TXE_SHIFT 1 +#define MCP_INTMASK_TXE_MASK 0x1 +#define MCP_INTMASK_RXWL_SHIFT 2 +#define MCP_INTMASK_RXWL_MASK 0x1 + +#define SDW_CNL_MCP_INTSET 0x4C +#define SDW_CNL_MCP_STAT 0x40 +#define MCP_STAT_ACTIVE_BANK_MASK 0x1 +#define MCP_STAT_ACTIVE_BANK_SHIT 20 +#define MCP_STAT_CLOCKSTOPPED_MASK 0x1 +#define MCP_STAT_CLOCKSTOPPED_SHIFT 16 + +#define SDW_CNL_MCP_SLAVESTAT 0x50 +#define MCP_SLAVESTAT_MASK 0x3 + +#define SDW_CNL_MCP_SLAVEINTSTAT0 0x54 +#define MCP_SLAVEINTSTAT_NOT_PRESENT_MASK 0x1 +#define MCP_SLAVEINTSTAT_ATTACHED_MASK 0x2 +#define MCP_SLAVEINTSTAT_ALERT_MASK 0x4 +#define MCP_SLAVEINTSTAT_RESERVED_MASK 0x8 + +#define SDW_CNL_MCP_SLAVEINTSTAT1 0x58 +#define SDW_CNL_MCP_SLAVEINTMASK0 0x5C +#define SDW_CNL_MCP_SLAVEINTMASK1 0x60 +#define SDW_CNL_MCP_PORTINTSTAT 0x64 +#define SDW_CNL_MCP_PDISTAT 0x6C + +#define SDW_CNL_MCP_FIFOLEVEL 0x78 +#define SDW_CNL_MCP_FIFOSTAT 0x7C +#define MCP_RX_FIFO_AVAIL_MASK 0x3F +#define SDW_CNL_MCP_COMMAND_BASE 0x80 +#define SDW_CNL_MCP_RESPONSE_BASE 0x80 +#define SDW_CNL_MCP_COMMAND_LENGTH 0x20 + +#define MCP_COMMAND_SSP_TAG_MASK 0x1 +#define MCP_COMMAND_SSP_TAG_SHIFT 31 +#define MCP_COMMAND_COMMAND_MASK 0x7 +#define MCP_COMMAND_COMMAND_SHIFT 28 +#define MCP_COMMAND_DEV_ADDR_MASK 0xF +#define MCP_COMMAND_DEV_ADDR_SHIFT 24 +#define MCP_COMMAND_REG_ADDR_H_MASK 0x7 +#define MCP_COMMAND_REG_ADDR_H_SHIFT 16 +#define MCP_COMMAND_REG_ADDR_L_MASK 0xFF +#define MCP_COMMAND_REG_ADDR_L_SHIFT 8 +#define MCP_COMMAND_REG_DATA_MASK 0xFF +#define MCP_COMMAND_REG_DATA_SHIFT 0x0 + +#define MCP_RESPONSE_RDATA_MASK 0xFF +#define MCP_RESPONSE_RDATA_SHIFT 8 +#define MCP_RESPONSE_ACK_MASK 0x1 +#define MCP_RESPONSE_ACK_SHIFT 0 +#define MCP_RESPONSE_NACK_MASK 0x2 + +#define SDW_CNL_DPN_CONFIG0 0x100 +#define SDW_CNL_DPN_CHANNELEN0 0x104 +#define SDW_CNL_DPN_SAMPLECTRL0 0x108 +#define SDW_CNL_DPN_OFFSETCTRL0 0x10C +#define SDW_CNL_DPN_HCTRL0 0x110 +#define SDW_CNL_DPN_ASYNCCTRL0 0x114 + +#define SDW_CNL_DPN_CONFIG1 0x118 +#define SDW_CNL_DPN_CHANNELEN1 0x11C +#define SDW_CNL_DPN_SAMPLECTRL1 0x120 +#define SDW_CNL_DPN_OFFSETCTRL1 0x124 +#define SDW_CNL_DPN_HCTRL1 0x128 + +#define SDW_CNL_PORTCTRL 0x130 +#define PORTCTRL_PORT_DIRECTION_SHIFT 0x7 +#define 
PORTCTRL_PORT_DIRECTION_MASK 0x1 +#define PORTCTRL_BANK_INVERT_SHIFT 0x8 +#define PORTCTRL_BANK_INVERT_MASK 0x1 + +#define SDW_CNL_PDINCONFIG0 0x1100 +#define SDW_CNL_PDINCONFIG1 0x1108 +#define PDINCONFIG_CHANNEL_MASK_SHIFT 0x8 +#define PDINCONFIG_CHANNEL_MASK_MASK 0xFF +#define PDINCONFIG_PORT_NUMBER_SHIFT 0x0 +#define PDINCONFIG_PORT_NUMBER_MASK 0x1F +#define PDINCONFIG_PORT_SOFT_RESET_SHIFT 0x18 +#define PDINCONFIG_PORT_SOFT_RESET 0x1F + +#define DPN_CONFIG_WL_SHIFT 0x8 +#define DPN_CONFIG_WL_MASK 0x1F +#define DPN_CONFIG_PF_MODE_SHIFT 0x0 +#define DPN_CONFIG_PF_MODE_MASK 0x3 +#define DPN_CONFIG_PD_MODE_SHIFT 0x2 +#define DPN_CONFIG_PD_MODE_MASK 0x3 +#define DPN_CONFIG_BPM_MASK 0x1 +#define DPN_CONFIG_BPM_SHIFT 0x12 +#define DPN_CONFIG_BGC_MASK 0x3 +#define DPN_CONFIG_BGC_SHIFT 0x10 + +#define DPN_SAMPLECTRL_SI_MASK 0xFFFF +#define DPN_SAMPLECTRL_SI_SHIFT 0x0 + +#define DPN_OFFSETCTRL0_OF1_MASK 0xFF +#define DPN_OFFSETCTRL0_OF1_SHIFT 0x0 +#define DPN_OFFSETCTRL0_OF2_MASK 0xFF +#define DPN_OFFSETCTRL0_OF2_SHIFT 0x8 + +#define DPN_HCTRL_HSTOP_MASK 0xF +#define DPN_HCTRL_HSTOP_SHIFT 0x0 +#define DPN_HCTRL_HSTART_MASK 0xF +#define DPN_HCTRL_HSTART_SHIFT 0x4 +#define DPN_HCTRL_LCONTROL_MASK 0x7 +#define DPN_HCTRL_LCONTROL_SHIFT 0x8 + +/* SoundWire Shim registers */ +#define SDW_CNL_LCAP 0x0 +#define SDW_CNL_LCTL 0x4 +#define CNL_LCTL_CPA_SHIFT 8 +#define CNL_LCTL_SPA_SHIFT 0 +#define CNL_LCTL_CPA_MASK 0x1 +#define CNL_LCTL_SPA_MASK 0x1 + +#define SDW_CMDSYNC_SET_MASK 0xF0000 +#define SDW_CNL_IPPTR 0x8 +#define SDW_CNL_SYNC 0xC +#define CNL_SYNC_CMDSYNC_MASK 0x1 +#define CNL_SYNC_CMDSYNC_SHIFT 16 +#define CNL_SYNC_SYNCGO_MASK 0x1 +#define CNL_SYNC_SYNCGO_SHIFT 0x18 +#define CNL_SYNC_SYNCPRD_MASK 0x7FFF +#define CNL_SYNC_SYNCPRD_SHIFT 0x0 +#define CNL_SYNC_SYNCCPU_MASK 0x8000 +#define CNL_SYNC_SYNCCPU_SHIFT 0xF + +#define SDW_CNL_CTLSCAP 0x10 +#define SDW_CNL_CTLS0CM 0x12 +#define SDW_CNL_CTLS1CM 0x14 +#define SDW_CNL_CTLS2CM 0x16 +#define SDW_CNL_CTLS3CM 0x18 + +#define SDW_CNL_PCMSCAP 0x20 +#define CNL_PCMSCAP_BSS_SHIFT 8 +#define CNL_PCMSCAP_BSS_MASK 0x1F +#define CNL_PCMSCAP_OSS_SHIFT 4 +#define CNL_PCMSCAP_OSS_MASK 0xF +#define CNL_PCMSCAP_ISS_SHIFT 0 +#define CNL_PCMSCAP_ISS_MASK 0xF + +#define SDW_CNL_PCMSCHM 0x22 +#define CNL_PCMSYCM_DIR_SHIFT 15 +#define CNL_PCMSYCM_DIR_MASK 0x1 +#define CNL_PCMSYCM_STREAM_SHIFT 8 +#define CNL_PCMSYCM_STREAM_MASK 0x3F +#define CNL_PCMSYCM_HCHAN_SHIFT 4 +#define CNL_PCMSYCM_HCHAN_MASK 0xF +#define CNL_PCMSYCM_LCHAN_SHIFT 0 +#define CNL_PCMSYCM_LCHAN_MASK 0xF + +#define SDW_CNL_PCMSCHC 0x42 + +#define SDW_CNL_PDMSCAP 0x62 +#define CNL_PDMSCAP_BSS_SHIFT 8 +#define CNL_PDMSCAP_BSS_MASK 0x1F +#define CNL_PDMSCAP_OSS_SHIFT 4 +#define CNL_PDMSCAP_OSS_MASK 0xF +#define CNL_PDMSCAP_ISS_SHIFT 0 +#define CNL_PDMSCAP_ISS_MASK 0xF +#define CNL_PDMSCAP_CPSS_SHIFT 13 +#define CNL_PDMSCAP_CPSS_MASK 0x7 +#define SDW_CNL_PDMSCM + +#define SDW_CNL_IOCTL 0x6C +#define CNL_IOCTL_MIF_SHIFT 0x0 +#define CNL_IOCTL_MIF_MASK 0x1 +#define CNL_IOCTL_CO_SHIFT 0x1 +#define CNL_IOCTL_CO_MASK 0x1 +#define CNL_IOCTL_COE_SHIFT 0x2 +#define CNL_IOCTL_COE_MASK 0x1 +#define CNL_IOCTL_DO_SHIFT 0x3 +#define CNL_IOCTL_DO_MASK 0x1 +#define CNL_IOCTL_DOE_SHIFT 0x4 +#define CNL_IOCTL_DOE_MASK 0x1 +#define CNL_IOCTL_BKE_SHIFT 0x5 +#define CNL_IOCTL_BKE_MASK 0x1 +#define CNL_IOCTL_WPDD_SHIFT 0x6 +#define CNL_IOCTL_WPDD_MASK 0x1 +#define CNL_IOCTL_CIBD_SHIFT 0x8 +#define CNL_IOCTL_CIBD_MASK 0x1 +#define CNL_IOCTL_DIBD_SHIFT 0x9 +#define CNL_IOCTL_DIBD_MASK 0x1 + +#define SDW_CNL_CTMCTL_OFFSET 
0x60 +#define SDW_CNL_CTMCTL 0x6E +#define CNL_CTMCTL_DACTQE_SHIFT 0x0 +#define CNL_CTMCTL_DACTQE_MASK 0x1 +#define CNL_CTMCTL_DODS_SHIFT 0x1 +#define CNL_CTMCTL_DODS_MASK 0x1 +#define CNL_CTMCTL_DOAIS_SHIFT 0x3 +#define CNL_CTMCTL_DOAIS_MASK 0x3 + +#define ALH_CNL_STRMZCFG_BASE 0x4 +#define ALH_CNL_STRMZCFG_OFFSET 0x4 +#define CNL_STRMZCFG_DMAT_SHIFT 0x0 +#define CNL_STRMZCFG_DMAT_MASK 0xFF +#define CNL_STRMZCFG_DMAT_VAL 0x3 +#define CNL_STRMZCFG_CHAN_SHIFT 16 +#define CNL_STRMZCFG_CHAN_MASK 0xF + +#define SDW_BRA_HEADER_SIZE_PDI 12 /* In bytes */ +#define SDW_BRA_HEADER_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_DATA_CRC_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_FOOTER_RESP_SIZE_PDI 4 /* In bytes */ +#define SDW_BRA_PADDING_SZ_PDI 4 /* In bytes */ +#define SDW_BRA_HEADER_TOTAL_SZ_PDI 16 /* In bytes */ + +#define SDW_BRA_SOP_EOP_PDI_STRT_VALUE 0x4 +#define SDW_BRA_SOP_EOP_PDI_END_VALUE 0x2 +#define SDW_BRA_SOP_EOP_PDI_MASK 0x1F +#define SDW_BRA_SOP_EOP_PDI_SHIFT 5 + +#define SDW_BRA_STRM_ID_BLK_OUT 3 +#define SDW_BRA_STRM_ID_BLK_IN 4 + +#define SDW_BRA_PDI_TX_ID 0 +#define SDW_BRA_PDI_RX_ID 1 + +#define SDW_BRA_SOFT_RESET 0x1 +#define SDW_BRA_BULK_ENABLE 1 +#define SDW_BRA_BLK_EN_MASK 0xFFFEFFFF +#define SDW_BRA_BLK_EN_SHIFT 16 + +#define SDW_BRA_ROLLINGID_PDI_INDX 3 +#define SDW_BRA_ROLLINGID_PDI_MASK 0xF +#define SDW_BRA_ROLLINGID_PDI_SHIFT 0 + +#define SDW_PCM_STRM_START_INDEX 0x2 + +#endif /* _LINUX_SDW_CNL_H */ diff --git a/drivers/sdw/sdw_maxim.c b/drivers/sdw/sdw_maxim.c new file mode 100644 index 000000000000..0081c5c00497 --- /dev/null +++ b/drivers/sdw/sdw_maxim.c @@ -0,0 +1,146 @@ +/* + * sdw_maxim.c -- Maxim SoundWire slave device driver. Dummy driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#include +#include +#include + + +static int maxim_register_sdw_capabilties(struct sdw_slv *sdw, + const struct sdw_slv_id *sdw_id) +{ + struct sdw_slv_capabilities cap; + struct sdw_slv_dpn_capabilities *dpn_cap = NULL; + struct port_audio_mode_properties *prop = NULL; + int i, j; + + cap.wake_up_unavailable = true; + cap.test_mode_supported = false; + cap.clock_stop1_mode_supported = false; + cap.simplified_clock_stop_prepare = false; + cap.highphy_capable = true; + cap.paging_supported = false; + cap.bank_delay_support = false; + cap.port_15_read_behavior = 0; + cap.sdw_dp0_supported = false; + cap.num_of_sdw_ports = 3; + cap.sdw_dpn_cap = devm_kzalloc(&sdw->dev, + ((sizeof(struct sdw_slv_dpn_capabilities)) * + cap.num_of_sdw_ports), GFP_KERNEL); + for (i = 0; i < cap.num_of_sdw_ports; i++) { + dpn_cap = &cap.sdw_dpn_cap[i]; + if (i == 0 || i == 2) + dpn_cap->port_direction = SDW_PORT_SOURCE; + else + dpn_cap->port_direction = SDW_PORT_SINK; + + dpn_cap->port_number = i+1; + dpn_cap->max_word_length = 24; + dpn_cap->min_word_length = 16; + dpn_cap->num_word_length = 0; + dpn_cap->word_length_buffer = NULL; + dpn_cap->dpn_type = SDW_FULL_DP; + dpn_cap->dpn_grouping = SDW_BLOCKGROUPCOUNT_1; + dpn_cap->prepare_ch = SDW_CP_SM; + dpn_cap->imp_def_intr_mask = 0x0; + dpn_cap->min_ch_num = 1; + dpn_cap->max_ch_num = 2; + dpn_cap->num_ch_supported = 0; + dpn_cap->ch_supported = NULL; + dpn_cap->port_flow_mode_mask = SDW_PORT_FLOW_MODE_ISOCHRONOUS; + dpn_cap->block_packing_mode_mask = + SDW_PORT_BLK_PKG_MODE_BLK_PER_PORT_MASK | + SDW_PORT_BLK_PKG_MODE_BLK_PER_CH_MASK; + dpn_cap->port_encoding_type_mask = + SDW_PORT_ENCODING_TYPE_TWOS_CMPLMNT | + SDW_PORT_ENCODING_TYPE_SIGN_MAGNITUDE | + SDW_PORT_ENCODING_TYPE_IEEE_32_FLOAT; + dpn_cap->num_audio_modes = 1; + + dpn_cap->mode_properties = devm_kzalloc(&sdw->dev, + ((sizeof(struct port_audio_mode_properties)) * + dpn_cap->num_audio_modes), GFP_KERNEL); + for (j = 0; j < dpn_cap->num_audio_modes; j++) { + prop = &dpn_cap->mode_properties[j]; + prop->max_frequency = 16000000; + prop->min_frequency = 1000000; + prop->num_freq_configs = 0; + prop->freq_supported = NULL; + prop->glitchless_transitions_mask = 0x1; + prop->max_sampling_frequency = 192000; + prop->min_sampling_frequency = 8000; + prop->num_sampling_freq_configs = 0; + prop->sampling_freq_config = NULL; + prop->ch_prepare_behavior = SDW_CH_PREP_ANY_TIME; + } + } + return sdw_register_slave_capabilities(sdw, &cap); + +} +static int maxim_sdw_probe(struct sdw_slv *sdw, + const struct sdw_slv_id *sdw_id) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave Registered %lx\n", sdw_id->driver_data); + return maxim_register_sdw_capabilties(sdw, sdw_id); +} + +static int maxim_sdw_remove(struct sdw_slv *sdw) +{ + dev_info(&sdw->dev, "Maxim SoundWire Slave un-Registered\n"); + return 0; +} + +static const struct sdw_slv_id maxim_id[] = { + {"03:01:9f:79:00:00", 0}, + {"09:01:9f:79:00:00", 1}, + {"04:01:9f:79:00:00", 2}, + {"0a:01:9f:79:00:00", 3}, + {"04:01:9f:79:00:00", 4}, + {"0a:01:9f:79:00:00", 5}, + {"05:01:9f:79:00:00", 6}, + {"06:01:9f:79:00:00", 7}, + {"05:01:9f:79:00:00", 8}, + {"00:01:9f:79:00:00", 9}, + {"06:01:9f:79:00:00", 10}, + {"07:01:9f:79:00:00", 11}, + {"00:01:9f:79:00:00", 12}, + {"06:01:9f:79:00:00", 13}, + {"01:01:9f:79:00:00", 14}, + {"07:01:9f:79:00:00", 15}, + {"08:01:9f:79:00:00", 16}, + {"01:01:9f:79:00:00", 17}, + {"07:01:9f:79:00:00", 18}, + {"02:01:9f:79:00:00", 19}, + {"08:01:9f:79:00:00", 20}, + {"09:01:9f:79:00:00", 21}, + {"02:01:9f:79:00:00", 22}, + 
{"08:01:9f:79:00:00", 23}, + {"03:01:9f:79:00:00", 24}, + {"09:01:9f:79:00:00", 25}, + {"0a:01:9f:79:00:00", 26}, + {}, +}; + +MODULE_DEVICE_TABLE(sdwint, maxim_id); + +static struct sdw_slave_driver maxim_sdw_driver = { + .driver_type = SDW_DRIVER_TYPE_SLAVE, + .driver = { + .name = "maxim", + }, + .probe = maxim_sdw_probe, + .remove = maxim_sdw_remove, + .id_table = maxim_id, +}; + +module_sdw_slave_driver(maxim_sdw_driver); + +MODULE_DESCRIPTION("SoundWire Maxim Slave Driver"); +MODULE_AUTHOR("Hardik Shah, "); +MODULE_LICENSE("GPL"); diff --git a/drivers/sdw/sdw_priv.h b/drivers/sdw/sdw_priv.h new file mode 100644 index 000000000000..fd060bfa74c4 --- /dev/null +++ b/drivers/sdw/sdw_priv.h @@ -0,0 +1,280 @@ +/* + * sdw_priv.h - Private definition for sdw bus interface. + * + * Copyright (C) 2014-2015 Intel Corp + * Author: Hardik Shah + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + */ + +#ifndef _LINUX_SDW_PRIV_H +#define _LINUX_SDW_PRIV_H + +#include /* For kthread */ +#include + +#define SDW_MAX_STREAM_TAG_KEY_SIZE 80 +#define SDW_NUM_STREAM_TAGS 100 +#define MAX_NUM_ROWS 23 /* As per SDW Spec */ +#define MAX_NUM_COLS 8/* As per SDW Spec */ +#define MAX_NUM_ROW_COLS (MAX_NUM_ROWS * MAX_NUM_COLS) + +#define SDW_STATE_INIT_STREAM_TAG 0x1 +#define SDW_STATE_ALLOC_STREAM 0x2 +#define SDW_STATE_CONFIG_STREAM 0x3 +#define SDW_STATE_PREPARE_STREAM 0x4 +#define SDW_STATE_ENABLE_STREAM 0x5 +#define SDW_STATE_DISABLE_STREAM 0x6 +#define SDW_STATE_UNPREPARE_STREAM 0x7 +#define SDW_STATE_RELEASE_STREAM 0x8 +#define SDW_STATE_FREE_STREAM 0x9 +#define SDW_STATE_FREE_STREAM_TAG 0xA +#define SDW_STATE_ONLY_XPORT_STREAM 0xB + +#define SDW_STATE_INIT_RT 0x1 +#define SDW_STATE_CONFIG_RT 0x2 +#define SDW_STATE_PREPARE_RT 0x3 +#define SDW_STATE_ENABLE_RT 0x4 +#define SDW_STATE_DISABLE_RT 0x5 +#define SDW_STATE_UNPREPARE_RT 0x6 +#define SDW_STATE_RELEASE_RT 0x7 + +#define SDW_SLAVE_BDCAST_ADDR 15 + +struct sdw_runtime; +/* Defined in sdw.c, used by multiple files of module */ +extern struct sdw_core sdw_core; + +enum sdw_port_state { + SDW_PORT_STATE_CH_READY, + SDW_PORT_STATE_CH_STOPPED, + SDW_PORT_STATE_CH_PREPARING, + SDW_PORT_STATE_CH_DEPREPARING, +}; + +enum sdw_stream_state { + SDW_STREAM_ALLOCATED, + SDW_STREAM_FREE, + SDW_STREAM_ACTIVE, + SDW_STREAM_INACTIVE, +}; + +enum sdw_clk_state { + SDW_CLK_STATE_OFF = 0, + SDW_CLK_STATE_ON = 1, +}; + +enum sdw_update_bs_state { + SDW_UPDATE_BS_PRE, + SDW_UPDATE_BS_BNKSWTCH, + SDW_UPDATE_BS_POST, + SDW_UPDATE_BS_BNKSWTCH_WAIT, + SDW_UPDATE_BS_DIS_CHN, +}; + +enum sdw_port_en_state { + SDW_PORT_STATE_PREPARE, + SDW_PORT_STATE_ENABLE, + SDW_PORT_STATE_DISABLE, + SDW_PORT_STATE_UNPREPARE, +}; + +struct port_chn_en_state { + bool is_activate; + bool is_bank_sw; +}; + +struct temp_elements { + int rate; + int full_bw; + int payload_bw; + int hwidth; +}; + +struct sdw_stream_tag { + int stream_tag; + struct mutex stream_lock; + int ref_count; + enum sdw_stream_state stream_state; + char 
key[SDW_MAX_STREAM_TAG_KEY_SIZE]; + struct sdw_runtime *sdw_rt; +}; + +struct sdw_stream_params { + unsigned int rate; + unsigned int channel_count; + unsigned int bps; +}; + +struct sdw_port_runtime { + int port_num; + enum sdw_port_state port_state; + int channel_mask; + /* Frame params and stream params are per port based + * Single stream of audio may be split + * into mutliple port each handling + * subset of channels, channels should + * be contiguous in subset + */ + struct sdw_transport_params transport_params; + struct sdw_port_params port_params; + struct list_head port_node; +}; + +struct sdw_slave_runtime { + /* Simplified port or full port, there cannot be both types of + * data port for single stream, so data structure is kept per + * slave runtime, not per port + */ + enum sdw_dpn_type type; + struct sdw_slv *slave; + int direction; + /* Stream may be split into multiple slaves, so this is for + * this particular slave + */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + struct list_head slave_sdw_node; + struct list_head slave_node; + int rt_state; /* State of runtime structure */ + +}; + + +struct sdw_mstr_runtime { + struct sdw_master *mstr; + int direction; + /* Stream may be split between multiple masters so this + * is for invidual master, if stream is split into multiple + * streams. For calculating the bandwidth on the particular bus + * stream params of master is taken into account. + */ + struct sdw_stream_params stream_params; + struct list_head port_rt_list; + /* Two nodes are required because BW calculation is based on master + * while stream enabling is based on stream_tag, where multiple + * masters may be involved + */ + struct list_head mstr_sdw_node; /* This is to add mstr_rt in sdw_rt */ + struct list_head mstr_node; /* This is to add mstr_rt in mstr */ + + struct list_head slv_rt_list; + /* Individual stream bandwidth on given master */ + unsigned int stream_bw; + /* State of runtime structure */ + int rt_state; + int hstart; + int hstop; + int block_offset; + int sub_block_offset; +}; + +struct sdw_runtime { + int tx_ref_count; + int rx_ref_count; + /* This is stream params for whole stream + * but stream may be split between two + * masters, or two slaves. + */ + struct sdw_stream_params stream_params; + struct list_head slv_rt_list; + struct list_head mstr_rt_list; + enum sdw_stream_type type; + int stream_state; + int xport_state; + +}; + +struct sdw_slv_status { + struct list_head node; + enum sdw_slave_status status[SOUNDWIRE_MAX_DEVICES+1]; +}; + +/** Bus structure which handles bus related information */ +struct sdw_bus { + struct list_head bus_node; + struct sdw_master *mstr; + unsigned int port_grp_mask[2]; + unsigned int slave_grp_mask[2]; + unsigned int clk_state; + unsigned int active_bank; + unsigned int clk_freq; + unsigned int clk_div; + /* Bus total Bandwidth. Initialize and reset to zero */ + unsigned int bandwidth; + unsigned int stream_interval; /* Stream Interval */ + unsigned int system_interval; /* Bus System Interval */ + unsigned int frame_freq; + unsigned int col; + unsigned int row; + struct task_struct *status_thread; + struct kthread_worker kworker; + struct kthread_work kwork; + struct list_head status_list; + spinlock_t spinlock; + struct sdw_async_xfer_data async_data; +}; + +/** Holds supported Row-Column combination related information */ +struct sdw_rowcol { + int row; + int col; + int control_bits; + int data_bits; +}; + +/** + * Global soundwire structure. 
It handles all the streams spawned + * across masters and has list of bus structure per every master + * registered + */ +struct sdw_core { + struct sdw_stream_tag stream_tags[SDW_NUM_STREAM_TAGS]; + struct sdw_rowcol rowcolcomb[MAX_NUM_ROW_COLS]; + struct list_head bus_list; + struct mutex core_lock; + struct idr idr; + int first_dynamic_bus_num; +}; + +/* Structure holding mapping of numbers to cols */ +struct sdw_num_to_col { + int num; + int col; +}; + +/* Structure holding mapping of numbers to rows */ +struct sdw_num_to_row { + int num; + int row; +}; + +int sdw_slave_port_config_port_params(struct sdw_slave_runtime *slv_rt); +int sdw_slave_port_prepare(struct sdw_slave_runtime, bool prepare); +int sdw_bus_bw_init(void); +int sdw_mstr_bw_init(struct sdw_bus *sdw_bs); +int sdw_bus_calc_bw(struct sdw_stream_tag *stream_tag, bool enable); +int sdw_bus_calc_bw_dis(struct sdw_stream_tag *stream_tag, bool unprepare); +int sdw_bus_bra_xport_config(struct sdw_bus *sdw_mstr_bs, + struct sdw_bra_block *block, bool enable); +int sdw_chn_enable(void); +void sdw_unlock_mstr(struct sdw_master *mstr); +int sdw_trylock_mstr(struct sdw_master *mstr); +void sdw_lock_mstr(struct sdw_master *mstr); +int sdw_slave_transfer_async(struct sdw_master *mstr, struct sdw_msg *msg, + int num, + struct sdw_async_xfer_data *async_data); + +#endif /* _LINUX_SDW_PRIV_H */ diff --git a/drivers/sdw/sdw_utils.c b/drivers/sdw/sdw_utils.c new file mode 100644 index 000000000000..724323d01993 --- /dev/null +++ b/drivers/sdw/sdw_utils.c @@ -0,0 +1,49 @@ +/* + * sdw_bwcalc.c - SoundWire Bus BW calculation & CHN Enabling implementation + * + * Copyright (C) 2015-2016 Intel Corp + * Author: Sanyog Kale + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include + + + +/** + * sdw_bus_compute_crc8: SoundWire bus helper function to compute crc8. + * This API uses crc8 helper functions internally. + * + * @values: Data buffer. + * @num_bytes: Number of bytes. + */ +u8 sdw_bus_compute_crc8(u8 *values, u8 num_bytes) +{ + u8 table[256]; + u8 poly = 0x4D; /* polynomial = x^8 + x^6 + x^3 + x^2 + 1 */ + u8 crc = CRC8_INIT_VALUE; /* Initialize 8 bit to 11111111 */ + + /* Populate MSB */ + crc8_populate_msb(table, poly); + + /* CRC computation */ + crc = crc8(table, values, num_bytes, crc); + + return crc; +} +EXPORT_SYMBOL(sdw_bus_compute_crc8); diff --git a/drivers/soundwire/Kconfig b/drivers/soundwire/Kconfig index 19c8efb9a5ee..a4b03e8cd694 100644 --- a/drivers/soundwire/Kconfig +++ b/drivers/soundwire/Kconfig @@ -4,6 +4,7 @@ menuconfig SOUNDWIRE bool "SoundWire support" + depends on !SDW ---help--- SoundWire is a 2-Pin interface with data and clock line ratified by the MIPI Alliance. 
SoundWire is used for transporting data diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 1abf76be2aa8..89735a5fd9e1 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -126,4 +126,6 @@ source "drivers/staging/axis-fifo/Kconfig" source "drivers/staging/erofs/Kconfig" +source "drivers/staging/igb_avb/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index ab0cbe8815b1..f7d9b0acf361 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -53,3 +53,4 @@ obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ obj-$(CONFIG_EROFS_FS) += erofs/ +obj-$(CONFIG_IGB_AVB) += igb_avb/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 17c5587805f5..7419cfa83713 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -23,7 +23,33 @@ config ANDROID_VSOC a 'cuttlefish' Android image inside QEmu. The driver interacts with a QEmu ivshmem device. If built as a module, it will be called vsoc. +config SYNC + bool "Synchronization framework" + default n + ---help--- + This option enables the framework for synchronization between multiple + drivers. Sync implementations can take advantage of hardware + synchronization built into devices like GPUs. + +config ANDROID_FWDATA + tristate "Parser for Android-specific firmware data" + depends on ACPI + default n + ---help--- + This driver parses Android-specific data (e.g. fstab configuration) + stored in firmware (e.g. ACPI tables), and present it to user space + via sysfs. Android Oreo (8.0) and later requires some essential boot- + time configuration to be available in a directory structure organized + in Device Tree style, e.g. /proc/device-tree/firmware/android/ on + platforms that enable DT. Platforms that use ACPI instead of DT, such + as Goldfish (Ranchu) x86/x86_64, should enable this driver to ensure + the required information can be found in sysfs with the expected + layout. + source "drivers/staging/android/ion/Kconfig" +source "drivers/staging/android/abl/Kconfig" +source "drivers/staging/android/sbl/Kconfig" +source "drivers/staging/android/vsbl/Kconfig" endif # if ANDROID diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index 90e6154f11a4..26ce3aad604d 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -1,6 +1,10 @@ ccflags-y += -I$(src) # needed for trace events obj-y += ion/ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += abl/ +obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sbl/ +obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsbl/ obj-$(CONFIG_ASHMEM) += ashmem.o obj-$(CONFIG_ANDROID_VSOC) += vsoc.o +obj-$(CONFIG_ANDROID_FWDATA) += fwdata.o diff --git a/drivers/staging/android/abl/Kconfig b/drivers/staging/android/abl/Kconfig new file mode 100644 index 000000000000..3c0a2c566ba0 --- /dev/null +++ b/drivers/staging/android/abl/Kconfig @@ -0,0 +1,19 @@ +config ABL_BOOTLOADER_CONTROL + tristate "ABL Bootloader Control module" + depends on X86 + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. 
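
For context, the "string argument" mentioned in the help text above is the optional argument of the reboot(2) system call: when the command is LINUX_REBOOT_CMD_RESTART2, the kernel passes that string to reboot notifiers (including this driver's) as the notifier data. A minimal userspace sketch, assuming the "bootloader" target from the driver's NAME2ID table; reboot_to() is an illustrative helper, not part of this patch:

    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/reboot.h>

    /* Restart and hand the target string to reboot notifiers such as
     * ablbc, which converts it to an ABL action written into CMOS.
     */
    static int reboot_to(const char *target)
    {
            return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                           LINUX_REBOOT_MAGIC2,
                           LINUX_REBOOT_CMD_RESTART2, target);
    }

    /* e.g. reboot_to("bootloader"); requires CAP_SYS_BOOT */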
+ +config SEND_SLCAN_ENABLE + bool "control slcan protocol" + depends on X86 + default n + help + This option control slcan protocol enable/disable in ablbc driver + The IOC compononent on broxton IVI platform use slcan protocol to + communicate befor calling powerctl program. + If no use IOC, this option can be disabed. diff --git a/drivers/staging/android/abl/Makefile b/drivers/staging/android/abl/Makefile new file mode 100644 index 000000000000..b70a05a2af6d --- /dev/null +++ b/drivers/staging/android/abl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ABL_BOOTLOADER_CONTROL) += ablbc.o diff --git a/drivers/staging/android/abl/ablbc.c b/drivers/staging/android/abl/ablbc.c new file mode 100644 index 000000000000..59154f5e10ef --- /dev/null +++ b/drivers/staging/android/abl/ablbc.c @@ -0,0 +1,415 @@ +/* + * ablbc: control ABL bootloaders + * Copyright (c) 2013-2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MODULE_NAME "ablbc" + +/* RTC read and write */ +static inline unsigned char cmos_read_ext_bank(u8 addr) +{ + outb(addr, RTC_PORT(4)); + return inb(RTC_PORT(5)); +} +#define CMOS_READ_EXT(a) cmos_read_ext_bank(a) + +static inline void cmos_write_ext_bank(u8 val, u8 addr) +{ + outb(addr, RTC_PORT(4)); + outb(val, RTC_PORT(5)); +} +#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a) + +/* ABL Conventions */ +#define NVRAM_START_ADDRESS 0x10 + +#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f)) +#define USERCMD_END _USERCMD_(0, 0) +#define USERCMD_ACTION _USERCMD_(7, 1) +#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len) + +#define CDATA_TAG_USER_CMD 0x4d +#define NVRAM_VALID_FLAG 0x12 + +#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */ + +static bool capsule_request; + +union _cdata_header { + uint32_t data; + struct { + unsigned ncond : 2; + unsigned length : 10; + unsigned flags : 4; + unsigned version: 4; + unsigned tag : 12; + }; +}; + +struct nvram_capsule_cmd { + char action; + char device; + char partition; + char file_name[1]; +} __packed; + +struct nvram_reboot_cmd { + char action; + char target; + char end; + char padding; +} __packed; + +struct name2id { + const char *name; + int id; +}; + +struct nvram_msg { + char magic; + char size; + union _cdata_header cdata_header; + char *cdata_payload; + size_t cdata_payload_size; + uint32_t crc; +} __packed; + +static const struct name2id NAME2ID[] = { + { "main", 0x00 }, + { "android", 0x00 }, + { "bootloader", 0x01 }, + { "fastboot", 0x01 }, + { "elk", 0x02 }, + { "recovery", 0x03 }, + { "crashmode", 0x04 }, + { "dnx", 0x05 }, + { "cli", 0x10 }, +}; + +static size_t offset; /* memorize offset between each call */ + +static size_t write_data_to_nvram(char *data, size_t size) +{ + int i; + unsigned long flags; + + spin_lock_irqsave(&rtc_lock, flags); + for (i = 0; i < size; i++) + CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i); + + offset += size; + spin_unlock_irqrestore(&rtc_lock, flags); + + return i; +} + +static void write_msg_to_nvram(struct nvram_msg *nvram_msg) +{ + /* Ensure to start 
from top : only one command expected */ + offset = 0; + write_data_to_nvram((void*)nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + write_data_to_nvram((void*)(nvram_msg->cdata_payload), + nvram_msg->cdata_payload_size); + write_data_to_nvram((void*)&(nvram_msg->crc), sizeof(nvram_msg->crc)); +} + +/* Compute CRC for one byte (shift register-based: one bit at a time). */ +static uint32_t crc32c_byte(uint32_t crc, unsigned byte) +{ + int i; + uint32_t c; + + for (i = 0 ; i < 8 ; i += 1) { + c = (crc ^ byte) & 1; + if (c) + crc = (crc >> 1) ^ CRC32C_POLYNOMIAL; + else + crc = (crc >> 1); + byte >>= 1; + } + + return crc; +} + +/* Compute CRC for a given buffer. */ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static struct kobject *capsule_kobject; + +static ssize_t is_capsule_requested(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", capsule_request); +} + +enum capsule_device_type { + EMMC = 2, + SDCARD = 4 +}; + +static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nvram_msg msg; + struct nvram_capsule_cmd *capsule_cmd; + char name[32], partition; + enum capsule_device_type device; + int ret, padding; + unsigned char size; + union _cdata_header cdh; + + device = (buf[0] == 'm' ? EMMC : SDCARD); + partition = buf[1] - '0'; + ret = sscanf(buf+3, "%s", name); + pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n", + ret, device, partition, name); + + cdh.data = 0; + cdh.tag = CDATA_TAG_USER_CMD; + + /* padding of filename on next dword */ + padding = (4 - (3 + strlen(name))%4)%4; + size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4; + cdh.length = 1 + (3 + strlen(name) + padding) / 4; + + msg.magic = NVRAM_VALID_FLAG; + msg.size = size; + msg.cdata_header.data = cdh.data; + + capsule_cmd = kmalloc(size, GFP_KERNEL); + if (!capsule_cmd) + return -ENOMEM; + + capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2); + capsule_cmd->device = device; + capsule_cmd->partition = partition; + strncpy(capsule_cmd->file_name, name, strlen(name)); + msg.cdata_payload = (char *)capsule_cmd; + msg.cdata_payload_size = 3 + strlen(name) + padding; + msg.crc = crc32c_msg(&msg); + write_msg_to_nvram(&msg); + capsule_request = true; + + kfree(capsule_cmd); + + return count; +} + +static struct kobj_attribute capsule_name_attribute = + __ATTR(capsule_name, 0600, NULL, capsule_store); + +static struct kobj_attribute capsule_requested_attribute = + __ATTR(capsule_requested, 0400, is_capsule_requested, NULL); + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid 
target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static const unsigned int DEFAULT_TARGET_INDEX; + +static const char * const cold_reset[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#05015555555555", + NULL}; +static const char * const suppress_heartbeat[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#01035555555555", + NULL}; +static const char * const reboot_request[] = { + "/vendor/bin/cansend", + "slcan0", + "0000FFFF#03015555555555", + NULL}; + +static int execute_slcan_command(const char *cmd[]) +{ +#ifdef CONFIG_SEND_SLCAN_ENABLE + struct subprocess_info *sub_info; + int ret = -1; + + sub_info = call_usermodehelper_setup((char *)cmd[0], + (char **)cmd,(char **) NULL, GFP_KERNEL, + (void *)NULL, (void*)NULL, (void*)NULL); + + if (sub_info) { + ret = call_usermodehelper_exec(sub_info, + UMH_WAIT_PROC); + pr_info("Exec cmd=%s ret=%d\n", cmd[0], ret); + } + + if (ret) + pr_err("Failure on cmd=%s ret=%d\n", cmd[0], ret); + + return ret; +#else + return 0; +#endif +} + +static int ablbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + ret = execute_slcan_command((const char **)suppress_heartbeat); + if (ret) + goto done; + + ret = execute_slcan_command((const char **)reboot_request); + if (ret) + goto done; + + ret = execute_slcan_command((const char **)cold_reset); + +done: + return NOTIFY_DONE; +} + +static struct notifier_block ablbc_reboot_notifier = { + .notifier_call = ablbc_reboot_notifier_call, +}; + +static int __init ablbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&ablbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_name\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_requested\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(ablbc_init); + +static void __exit ablbc_exit(void) +{ + unregister_reboot_notifier(&ablbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(ablbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Automotive Bootloader boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/fwdata.c b/drivers/staging/android/fwdata.c new file mode 100644 index 
000000000000..525f7e92ec84
--- /dev/null
+++ b/drivers/staging/android/fwdata.c
@@ -0,0 +1,402 @@
+/*
+ * Copyright (C) 2017 Intel, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/kobject.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+struct android_fwdata_state {
+	struct device *dev;
+	struct kobject *properties_kobj;
+	struct kobject *android_kobj;
+	struct kobject *vbmeta_kobj;
+	struct kobject *fstab_kobj;
+	struct kobject *system_kobj;
+	struct kobject *vendor_kobj;
+	struct kobject *product_kobj;
+	struct kobject *odm_kobj;
+};
+
+static struct android_fwdata_state state;
+
+/* Called when a file under /properties/ is read. */
+static ssize_t property_show(struct kobject *kobj, struct kobj_attribute *attr,
+			     char *buf)
+{
+	const char *prefix = NULL;
+	char key[128];
+	const char *value = NULL;
+	int ret;
+
+	/* It would be much more convenient if show() gave us the relative path
+	 * to the file being read, e.g. properties/android/fstab/system/dev,
+	 * which could be easily converted to a property key.
+	 * TODO: Infer the relative path from kobj and remove all hard-coded
+	 * property keys.
+	 */
+	if (kobj == state.android_kobj) {
+		prefix = "android";
+	} else if (kobj == state.vbmeta_kobj) {
+		prefix = "android.vbmeta";
+	} else if (kobj == state.fstab_kobj) {
+		prefix = "android.fstab";
+	} else if (kobj == state.system_kobj) {
+		prefix = "android.fstab.system";
+	} else if (kobj == state.vendor_kobj) {
+		prefix = "android.fstab.vendor";
+	} else if (kobj == state.product_kobj) {
+		prefix = "android.fstab.product";
+	} else if (kobj == state.odm_kobj) {
+		prefix = "android.fstab.odm";
+	} else {
+		pr_err("%s: Unexpected folder\n", __func__);
+		return -EINVAL;
+	}
+	/* We don't put any file in properties/ directly, so prefix can't be
+	 * empty.
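+	 * The attribute name is appended below, so reading, for example,
+	 * properties/android/fstab/system/dev looks up the firmware
+	 * property "android.fstab.system.dev".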
+ */ + snprintf(key, sizeof(key), "%s.%s", prefix, attr->attr.name); + + ret = device_property_read_string(state.dev, key, &value); + if (ret) { + pr_err("%s: Failed to read property '%s', ret=%d\n", __func__, + key, ret); + return ret; + } + return scnprintf(buf, PAGE_SIZE, "%s\n", value); +} + +#define DT_SIMPLE_ATTR(_prefix, _name) \ + struct kobj_attribute _prefix##_##_name##_attr = { \ + .attr = { \ + .name = __stringify(_name), \ + .mode = 0444, \ + }, \ + .show = property_show, \ + } + +static DT_SIMPLE_ATTR(android, compatible); +static DT_SIMPLE_ATTR(vbmeta, compatible); +static DT_SIMPLE_ATTR(fstab, compatible); +static DT_SIMPLE_ATTR(system, compatible); +static DT_SIMPLE_ATTR(vendor, compatible); +static DT_SIMPLE_ATTR(product, compatible); +static DT_SIMPLE_ATTR(odm, compatible); + +static DT_SIMPLE_ATTR(vbmeta, parts); + +static struct attribute *vbmeta_attrs[] = { + &vbmeta_compatible_attr.attr, + &vbmeta_parts_attr.attr, + NULL, +}; + +static struct attribute_group vbmeta_group = { + .attrs = vbmeta_attrs, +}; + +static DT_SIMPLE_ATTR(system, dev); +static DT_SIMPLE_ATTR(system, type); +static DT_SIMPLE_ATTR(system, mnt_flags); +static DT_SIMPLE_ATTR(system, fsmgr_flags); + +static DT_SIMPLE_ATTR(vendor, dev); +static DT_SIMPLE_ATTR(vendor, type); +static DT_SIMPLE_ATTR(vendor, mnt_flags); +static DT_SIMPLE_ATTR(vendor, fsmgr_flags); + +static DT_SIMPLE_ATTR(product, dev); +static DT_SIMPLE_ATTR(product, type); +static DT_SIMPLE_ATTR(product, mnt_flags); +static DT_SIMPLE_ATTR(product, fsmgr_flags); + +static DT_SIMPLE_ATTR(odm, dev); +static DT_SIMPLE_ATTR(odm, type); +static DT_SIMPLE_ATTR(odm, mnt_flags); +static DT_SIMPLE_ATTR(odm, fsmgr_flags); + +static struct attribute *system_attrs[] = { + &system_compatible_attr.attr, + &system_dev_attr.attr, + &system_type_attr.attr, + &system_mnt_flags_attr.attr, + &system_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group system_group = { + .attrs = system_attrs, +}; + +static struct attribute *vendor_attrs[] = { + &vendor_compatible_attr.attr, + &vendor_dev_attr.attr, + &vendor_type_attr.attr, + &vendor_mnt_flags_attr.attr, + &vendor_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group vendor_group = { + .attrs = vendor_attrs, +}; + +static struct attribute *product_attrs[] = { + &product_compatible_attr.attr, + &product_dev_attr.attr, + &product_type_attr.attr, + &product_mnt_flags_attr.attr, + &product_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group product_group = { + .attrs = product_attrs, +}; + +static struct attribute *odm_attrs[] = { + &odm_compatible_attr.attr, + &odm_dev_attr.attr, + &odm_type_attr.attr, + &odm_mnt_flags_attr.attr, + &odm_fsmgr_flags_attr.attr, + NULL, +}; + +static struct attribute_group odm_group = { + .attrs = odm_attrs, +}; + +static struct kobject *create_folder(struct kobject *parent, const char *name) +{ + struct kobject *kobj; + + kobj = kobject_create_and_add(name, parent); + if (!kobj) { + pr_err("%s: Failed to create %s/\n", __func__, name); + return NULL; + } + return kobj; +} + +static struct kobject *create_folder_with_file(struct kobject *parent, + const char *name, + struct kobj_attribute *attr) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_create_file() rather + * than sysfs_create_file(), but the former does not support + * creating the file in a subfolder. 
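+	 * The caller owns the returned kobject and must release it with
+	 * remove_folder_with_file(), as clean_up() below does.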
+ */ + int ret; + + ret = sysfs_create_file(kobj, &attr->attr); + if (ret) { + pr_err("%s: Failed to create %s/%s: ret=%d\n", __func__, + name, attr->attr.name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_file(struct kobject *kobj, + struct kobj_attribute *attr) +{ + sysfs_remove_file(kobj, &attr->attr); + kobject_put(kobj); +} + +static struct kobject *create_folder_with_files(struct kobject *parent, + const char *name, + struct attribute_group *group) +{ + struct kobject *kobj; + + kobj = create_folder(parent, name); + if (kobj) { + /* Note: Usually drivers should use device_add_groups() rather + * than sysfs_create_group(), but the former does not support + * creating the folder in a subfolder. + */ + int ret; + + ret = sysfs_create_group(kobj, group); + if (ret) { + pr_err("%s: Failed to create %s/*: ret=%d\n", __func__, + name, ret); + kobject_put(kobj); + return NULL; + } + } + return kobj; +} + +static void remove_folder_with_files(struct kobject *kobj, + struct attribute_group *group) +{ + sysfs_remove_group(kobj, group); + kobject_put(kobj); +} + +static void clean_up(void) +{ + if (state.vendor_kobj) { + /* Delete /properties/android/fstab/vendor/ */ + remove_folder_with_files(state.vendor_kobj, &vendor_group); + state.vendor_kobj = NULL; + } + if (state.product_kobj) { + /* Delete /properties/android/fstab/product/ */ + remove_folder_with_files(state.product_kobj, &product_group); + state.product_kobj = NULL; + } + if (state.odm_kobj) { + /* Delete /properties/android/fstab/odm/ */ + remove_folder_with_files(state.odm_kobj, &odm_group); + state.odm_kobj = NULL; + } + if (state.system_kobj) { + /* Delete /properties/android/fstab/system/ */ + remove_folder_with_files(state.system_kobj, &system_group); + state.system_kobj = NULL; + } + if (state.fstab_kobj) { + /* Delete /properties/android/fstab/ */ + remove_folder_with_file(state.fstab_kobj, + &fstab_compatible_attr); + state.fstab_kobj = NULL; + } + if (state.vbmeta_kobj) { + /* Delete /properties/android/vbmeta/ */ + remove_folder_with_files(state.vbmeta_kobj, &vbmeta_group); + state.vbmeta_kobj = NULL; + } + if (state.android_kobj) { + /* Delete /properties/android/ */ + remove_folder_with_file(state.android_kobj, + &android_compatible_attr); + state.android_kobj = NULL; + } + if (state.properties_kobj) { + /* Delete /properties/ */ + kobject_put(state.properties_kobj); + state.properties_kobj = NULL; + } +} + +static int android_fwdata_probe(struct platform_device *pdev) +{ + int ret = -EIO; + + state.dev = &pdev->dev; + /* Create /properties/ */ + state.properties_kobj = create_folder(&state.dev->kobj, "properties"); + if (!state.properties_kobj) + goto out; + + /* TODO: Iterate over all device properties in firmware, and dynamically + * create sysfs nodes under /properties/ + */ + + /* Create /properties/android/compatible */ + state.android_kobj = create_folder_with_file(state.properties_kobj, + "android", + &android_compatible_attr); + if (!state.android_kobj) + goto out; + + if (device_property_present(state.dev, "android.vbmeta.compatible")) { + /* Firmware contains vbmeta config for AVB 2.0 */ + state.vbmeta_kobj = create_folder_with_files(state.android_kobj, + "vbmeta", + &vbmeta_group); + if (!state.vbmeta_kobj) + goto out; + } + + /* Create /properties/android/fstab/compatible */ + state.fstab_kobj = create_folder_with_file(state.android_kobj, "fstab", + &fstab_compatible_attr); + if (!state.fstab_kobj) + goto out; + + if (device_property_present(state.dev, 
"android.fstab.system.dev")) { + /* Firmware contains fstab config for early mount of /system */ + state.system_kobj = create_folder_with_files(state.fstab_kobj, + "system", + &system_group); + if (!state.system_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.vendor.dev")) { + /* Firmware contains fstab config for early mount of /vendor */ + state.vendor_kobj = create_folder_with_files(state.fstab_kobj, + "vendor", + &vendor_group); + if (!state.vendor_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.product.dev")) { + /* Firmware contains fstab config for early mount of /product */ + state.product_kobj = create_folder_with_files(state.fstab_kobj, + "product", + &product_group); + if (!state.product_kobj) + goto out; + } + if (device_property_present(state.dev, "android.fstab.odm.dev")) { + /* Firmware contains fstab config for early mount of /odm */ + state.odm_kobj = create_folder_with_files(state.fstab_kobj, + "odm", + &odm_group); + if (!state.odm_kobj) + goto out; + } + return 0; + +out: + clean_up(); + return ret; +} + +static int android_fwdata_remove(struct platform_device *pdev) +{ + clean_up(); + return 0; +} + +static const struct acpi_device_id android_fwdata_acpi_match[] = { + { "ANDR0001", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, android_fwdata_acpi_match); + +static struct platform_driver android_fwdata_driver = { + .probe = android_fwdata_probe, + .remove = android_fwdata_remove, + .driver = { + .name = "android_fwdata", + .owner = THIS_MODULE, + .acpi_match_table = ACPI_PTR(android_fwdata_acpi_match), + } +}; + +module_platform_driver(android_fwdata_driver); diff --git a/drivers/staging/android/sbl/Kconfig b/drivers/staging/android/sbl/Kconfig new file mode 100644 index 000000000000..4b550cadcb40 --- /dev/null +++ b/drivers/staging/android/sbl/Kconfig @@ -0,0 +1,9 @@ +config SBL_BOOTLOADER_CONTROL + tristate "SBL Bootloader Control module" + depends on X86 + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. diff --git a/drivers/staging/android/sbl/Makefile b/drivers/staging/android/sbl/Makefile new file mode 100644 index 000000000000..6d1258d7bfc1 --- /dev/null +++ b/drivers/staging/android/sbl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_SBL_BOOTLOADER_CONTROL) += sblbc.o diff --git a/drivers/staging/android/sbl/sblbc.c b/drivers/staging/android/sbl/sblbc.c new file mode 100644 index 000000000000..3d354fd31e23 --- /dev/null +++ b/drivers/staging/android/sbl/sblbc.c @@ -0,0 +1,372 @@ +/* + * sblbc: control SBL bootloaders + * Copyright (c) 2013-2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/reboot.h>
+#include <linux/mc146818rtc.h>
+
+#define MODULE_NAME "sblbc"
+
+/* RTC read and write */
+static inline unsigned char cmos_read_ext_bank(u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	return inb(RTC_PORT(5));
+}
+#define CMOS_READ_EXT(a) cmos_read_ext_bank(a)
+
+static inline void cmos_write_ext_bank(u8 val, u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	outb(val, RTC_PORT(5));
+}
+#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a)
+
+/* ABL Conventions */
+#define NVRAM_START_ADDRESS 0x10
+
+#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f))
+#define USERCMD_END _USERCMD_(0, 0)
+#define USERCMD_ACTION _USERCMD_(7, 1)
+#define USERCMD_UPDATE_IFWI(len) _USERCMD_(2, len)
+
+#define CDATA_TAG_USER_CMD 0x4d
+#define NVRAM_VALID_FLAG 0x12
+
+#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */
+
+static bool capsule_request;
+
+union _cdata_header {
+	uint32_t data;
+	struct {
+		unsigned ncond  : 2;
+		unsigned length : 10;
+		unsigned flags  : 4;
+		unsigned version: 4;
+		unsigned tag    : 12;
+	};
+};
+
+struct nvram_capsule_cmd {
+	char action;
+	char device;
+	char partition;
+	char file_name[1];
+} __packed;
+
+struct nvram_reboot_cmd {
+	char action;
+	char target;
+	char end;
+	char padding;
+} __packed;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+struct nvram_msg {
+	char magic;
+	char size;
+	union _cdata_header cdata_header;
+	char *cdata_payload;
+	size_t cdata_payload_size;
+	uint32_t crc;
+} __packed;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",       0x00 },
+	{ "android",    0x00 },
+	{ "bootloader", 0x01 },
+	{ "fastboot",   0x01 },
+	{ "elk",        0x02 },
+	{ "recovery",   0x03 },
+	{ "crashmode",  0x04 },
+	{ "dnx",        0x05 },
+	{ "cli",        0x10 },
+};
+
+static size_t offset; /* remember the NVRAM offset across calls */
+
+static size_t write_data_to_nvram(char *data, size_t size)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	for (i = 0; i < size; i++)
+		CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i);
+
+	for (i = 0; i < size; i++) {
+		pr_debug("Kernel Addr=0x%X, data=0x%X\n",
+			 (unsigned int)(NVRAM_START_ADDRESS + offset + i),
+			 (unsigned int)(*(unsigned char *)(data + i)));
+	}
+
+	offset += size;
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return i;
+}
+
+static void write_msg_to_nvram(struct nvram_msg *nvram_msg)
+{
+	/* Ensure we start from the top: only one command is expected. */
+	offset = 0;
+	write_data_to_nvram((void *)nvram_msg,
+			    offsetof(struct nvram_msg, cdata_payload));
+	write_data_to_nvram((void *)(nvram_msg->cdata_payload),
+			    nvram_msg->cdata_payload_size);
+	write_data_to_nvram((void *)&(nvram_msg->crc), sizeof(nvram_msg->crc));
+}
+
+/* Compute CRC for one byte (shift register-based: one bit at a time). */
+static uint32_t crc32c_byte(uint32_t crc, unsigned byte)
+{
+	int i;
+	uint32_t c;
+
+	for (i = 0; i < 8; i += 1) {
+		c = (crc ^ byte) & 1;
+		if (c)
+			crc = (crc >> 1) ^ CRC32C_POLYNOMIAL;
+		else
+			crc = (crc >> 1);
+		byte >>= 1;
+	}
+
+	return crc;
+}
+
+/* Compute CRC for a given buffer.
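+   Callers seed crc with ~0 and chain calls across the message header and
+   payload, as crc32c_msg() below does.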
*/ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static struct kobject *capsule_kobject; + +static ssize_t is_capsule_requested(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", capsule_request); +} + +enum capsule_device_type { + EMMC = 2, + SDCARD = 4 +}; + +static ssize_t capsule_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + struct nvram_msg msg; + struct nvram_capsule_cmd *capsule_cmd; + char name[32], partition; + enum capsule_device_type device; + int ret, padding; + unsigned char size; + union _cdata_header cdh; + + device = (buf[0] == 'm' ? EMMC : SDCARD); + partition = buf[1] - '0'; + if (strlen(buf+3) >= sizeof(name)) { + pr_err(MODULE_NAME " buf+3: %ld is too long\n", strlen(buf+3)); + return -ENOMEM; + } + + ret = sscanf(buf+3, "%s", name); + pr_info(MODULE_NAME " capsule parameters (%d): DEVICE=%d PARTITION=%d NAME=%s\n", + ret, device, partition, name); + + cdh.data = 0; + cdh.tag = CDATA_TAG_USER_CMD; + + /* padding of filename on next dword */ + padding = (4 - (3 + strlen(name))%4)%4; + size = 2 + sizeof(cdh) + 3 + strlen(name) + padding + 4; + cdh.length = 1 + (3 + strlen(name) + padding) / 4; + + msg.magic = NVRAM_VALID_FLAG; + msg.size = size; + msg.cdata_header.data = cdh.data; + + capsule_cmd = kmalloc(size, GFP_KERNEL); + if (!capsule_cmd) + return -ENOMEM; + + capsule_cmd->action = USERCMD_UPDATE_IFWI(strlen(name) + 2); + capsule_cmd->device = device; + capsule_cmd->partition = partition; + strncpy(capsule_cmd->file_name, name, strlen(name)); + msg.cdata_payload = (char *)capsule_cmd; + msg.cdata_payload_size = 3 + strlen(name) + padding; + msg.crc = crc32c_msg(&msg); + write_msg_to_nvram(&msg); + capsule_request = true; + + kfree(capsule_cmd); + + return count; +} + +static struct kobj_attribute capsule_name_attribute = + __ATTR(capsule_name, 0600, NULL, capsule_store); + +static struct kobj_attribute capsule_requested_attribute = + __ATTR(capsule_requested, 0400, is_capsule_requested, NULL); + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + 
sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static int sblbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + return NOTIFY_DONE; +} + +static struct notifier_block sblbc_reboot_notifier = { + .notifier_call = sblbc_reboot_notifier_call, +}; + +static int __init sblbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&sblbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + capsule_kobject = kobject_create_and_add("capsule", kernel_kobj); + if (!capsule_kobject) + return -ENOMEM; + + ret = sysfs_create_file(capsule_kobject, + &capsule_name_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_name\n"); + goto err; + } + + ret = sysfs_create_file(capsule_kobject, + &capsule_requested_attribute.attr); + if (ret) { + pr_err("failed to create the foo file in /sys/kernel/capsule/capsule_requested\n"); + goto err; + } + + return 0; + +err: + kobject_put(capsule_kobject); + return ret; +} + +module_init(sblbc_init); + +static void __exit sblbc_exit(void) +{ + unregister_reboot_notifier(&sblbc_reboot_notifier); + kobject_put(capsule_kobject); +} +module_exit(sblbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Slimboot boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/vsbl/Kconfig b/drivers/staging/android/vsbl/Kconfig new file mode 100644 index 000000000000..465fed22ca6b --- /dev/null +++ b/drivers/staging/android/vsbl/Kconfig @@ -0,0 +1,9 @@ +config VSBL_BOOTLOADER_CONTROL + tristate "vSBL Bootloader Control module" + depends on X86 + default n + help + This driver installs a reboot hook, such that if reboot() is + invoked with a string argument, the corresponding ABL Action + is written in CMOS data, in order to be processed by ABL on + reboot. diff --git a/drivers/staging/android/vsbl/Makefile b/drivers/staging/android/vsbl/Makefile new file mode 100644 index 000000000000..8ce038941fc6 --- /dev/null +++ b/drivers/staging/android/vsbl/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_VSBL_BOOTLOADER_CONTROL) += vsblbc.o diff --git a/drivers/staging/android/vsbl/vsblbc.c b/drivers/staging/android/vsbl/vsblbc.c new file mode 100644 index 000000000000..527a174f0130 --- /dev/null +++ b/drivers/staging/android/vsbl/vsblbc.c @@ -0,0 +1,262 @@ +/* + * vsblbc: control vSBL bootloaders + * Copyright (c) 2013-2017, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/mc146818rtc.h>
+
+#define MODULE_NAME "vsblbc"
+
+/* RTC read and write */
+static inline unsigned char cmos_read_ext_bank(u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	return inb(RTC_PORT(5));
+}
+#define CMOS_READ_EXT(a) cmos_read_ext_bank(a)
+
+static inline void cmos_write_ext_bank(u8 val, u8 addr)
+{
+	outb(addr, RTC_PORT(4));
+	outb(val, RTC_PORT(5));
+}
+#define CMOS_WRITE_EXT(v, a) cmos_write_ext_bank(v, a)
+
+/* vSBL Conventions */
+#define NVRAM_START_ADDRESS 0x10
+
+#define _USERCMD_(cmd, len) (((cmd) << 5) | ((len) & 0x1f))
+#define USERCMD_END _USERCMD_(0, 0)
+#define USERCMD_ACTION _USERCMD_(7, 1)
+
+#define CDATA_TAG_USER_CMD 0x4d
+#define NVRAM_VALID_FLAG 0x12
+
+#define CRC32C_POLYNOMIAL 0x82F63B78 /* CRC32C Castagnoli */
+
+union _cdata_header {
+	uint32_t data;
+	struct {
+		unsigned ncond  : 2;
+		unsigned length : 10;
+		unsigned flags  : 4;
+		unsigned version: 4;
+		unsigned tag    : 12;
+	};
+};
+
+struct nvram_reboot_cmd {
+	char action;
+	char target;
+	char end;
+	char padding;
+} __packed;
+
+struct name2id {
+	const char *name;
+	int id;
+};
+
+struct nvram_msg {
+	char magic;
+	char size;
+	union _cdata_header cdata_header;
+	char *cdata_payload;
+	size_t cdata_payload_size;
+	uint32_t crc;
+} __packed;
+
+static const struct name2id NAME2ID[] = {
+	{ "main",       0x00 },
+	{ "android",    0x00 },
+	{ "bootloader", 0x01 },
+	{ "fastboot",   0x01 },
+	{ "elk",        0x02 },
+	{ "recovery",   0x03 },
+	{ "crashmode",  0x04 },
+	{ "dnx",        0x05 },
+	{ "cli",        0x10 },
+};
+
+static size_t offset; /* remember the NVRAM offset across calls */
+
+static size_t write_data_to_nvram(char *data, size_t size)
+{
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	for (i = 0; i < size; i++)
+		CMOS_WRITE_EXT(*(data + i), NVRAM_START_ADDRESS + offset + i);
+
+	offset += size;
+	spin_unlock_irqrestore(&rtc_lock, flags);
+
+	return i;
+}
+
+static void write_msg_to_nvram(struct nvram_msg *nvram_msg)
+{
+	/* Ensure we start from the top: only one command is expected. */
+	offset = 0;
+	write_data_to_nvram((void *)nvram_msg,
+			    offsetof(struct nvram_msg, cdata_payload));
+	write_data_to_nvram((void *)(nvram_msg->cdata_payload),
+			    nvram_msg->cdata_payload_size);
+	write_data_to_nvram((void *)&(nvram_msg->crc), sizeof(nvram_msg->crc));
+}
+
+/* Compute CRC for one byte (shift register-based: one bit at a time). */
+static uint32_t crc32c_byte(uint32_t crc, unsigned byte)
+{
+	int i;
+	uint32_t c;
+
+	for (i = 0; i < 8; i += 1) {
+		c = (crc ^ byte) & 1;
+		if (c)
+			crc = (crc >> 1) ^ CRC32C_POLYNOMIAL;
+		else
+			crc = (crc >> 1);
+		byte >>= 1;
+	}
+
+	return crc;
+}
+
+/* Compute CRC for a given buffer.
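+   The kernel's generic crc32c() helper would also work here; this
+   open-coded version simply avoids the libcrc32c dependency for a few
+   dozen bytes of NVRAM data.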
*/ +static uint32_t crc32c_buf(uint32_t crc, const void *addr, unsigned len) +{ + unsigned i; + + for (i = 0 ; i < len ; i += 1) + crc = crc32c_byte(crc, *(uint8_t *)(addr + i)); + + return crc; +} + +static uint32_t crc32c_msg(struct nvram_msg *nvram_msg) +{ + uint32_t crc; + + crc = crc32c_buf(~0, nvram_msg, + offsetof(struct nvram_msg, cdata_payload)); + crc = crc32c_buf(crc, nvram_msg->cdata_payload, + nvram_msg->cdata_payload_size); + return crc; +} + +static int reboot_target_name2id(const char *name) +{ + size_t i; + + for (i = 0; i < ARRAY_SIZE(NAME2ID); i++) + if (!strcmp(NAME2ID[i].name, name)) + return NAME2ID[i].id; + + return -EINVAL; +} + +static int set_reboot_target(const char *name) +{ + int id; + struct nvram_msg msg; + struct nvram_reboot_cmd reboot_cmd; + union _cdata_header cdh; + + if (name == NULL) { + pr_err("Error in %s: NULL target\n", __func__); + return -EINVAL; + } + + id = reboot_target_name2id(name); + if (id < 0) { + pr_err("Error in %s: '%s' is not a valid target\n", + __func__, name); + return -EINVAL; + } + + cdh.data = 0; + cdh.length = 2; /* 2*32 bits, from header to padding */ + cdh.tag = CDATA_TAG_USER_CMD; + + memset(&reboot_cmd, 0, sizeof(reboot_cmd)); + memset(&msg, 0, sizeof(msg)); + msg.magic = NVRAM_VALID_FLAG; + msg.cdata_header.data = cdh.data; + reboot_cmd.action = USERCMD_ACTION; + + reboot_cmd.target = id; + msg.cdata_payload = (void*)&reboot_cmd; + msg.cdata_payload_size = sizeof(reboot_cmd); + msg.size = offsetof(struct nvram_msg, cdata_payload) + + sizeof(reboot_cmd) + sizeof(msg.crc); + msg.crc = crc32c_msg(&msg); + + write_msg_to_nvram(&msg); + + return 0; +} + +static int vsblbc_reboot_notifier_call(struct notifier_block *notifier, + unsigned long what, void *data) +{ + const char *target = (const char *)data; + int ret; + + if (what != SYS_RESTART) + return NOTIFY_DONE; + + if (target[0] != '\0') { + ret = set_reboot_target(target); + if (ret) + pr_err("%s: Failed to set reboot target, ret=%d\n", + __func__, ret); + } + + return NOTIFY_DONE; +} + +static struct notifier_block vsblbc_reboot_notifier = { + .notifier_call = vsblbc_reboot_notifier_call, +}; + +static int __init vsblbc_init(void) +{ + int ret; + + ret = register_reboot_notifier(&vsblbc_reboot_notifier); + if (ret) { + pr_err(MODULE_NAME ": unable to register reboot notifier\n"); + return ret; + } + + return 0; +} + +module_init(vsblbc_init); + +static void __exit vsblbc_exit(void) +{ + unregister_reboot_notifier(&vsblbc_reboot_notifier); +} +module_exit(vsblbc_exit); + +MODULE_AUTHOR("Guillaume Betous "); +MODULE_DESCRIPTION("Virtual Slimboot boot control driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk deleted file mode 100644 index fdadbf611757..000000000000 --- a/drivers/staging/greybus/tools/Android.mk +++ /dev/null @@ -1,10 +0,0 @@ -LOCAL_PATH:= $(call my-dir) - -include $(CLEAR_VARS) - -LOCAL_SRC_FILES:= loopback_test.c -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE := gb_loopback_test - -include $(BUILD_EXECUTABLE) - diff --git a/drivers/staging/igb_avb/COPYING b/drivers/staging/igb_avb/COPYING new file mode 100644 index 000000000000..d159169d1050 --- /dev/null +++ b/drivers/staging/igb_avb/COPYING @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but 
changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/drivers/staging/igb_avb/Kconfig b/drivers/staging/igb_avb/Kconfig
new file mode 100644
index 000000000000..5f4b325140de
--- /dev/null
+++ b/drivers/staging/igb_avb/Kconfig
@@ -0,0 +1,17 @@
+config IGB_AVB
+	tristate "Avnu IGB AVB driver"
+	depends on IGB=n && E1000=n && E1000E=n && PCI
+	select DCA
+	default n
+	---help---
+	  This is the Intel I210 Ethernet driver that lives
+	  at https://github.com/AVnu/OpenAvnu/tree/master/
+	  kmod/igb. Note that this is different from drivers/
+	  net/ethernet/intel/igb. It can be used for developing
+	  Audio/Video Bridging applications, Industrial Ethernet
+	  applications which require precise timing control over
+	  frame transmission, or test harnesses for measuring system
+	  latencies and sampling events. It is exclusive with the
+	  in-tree IGB driver, so only one of them can be enabled
+	  at any point in time. There are also coexistence issues
+	  with the e1000 and e1000e drivers.
diff --git a/drivers/staging/igb_avb/LICENSE b/drivers/staging/igb_avb/LICENSE
new file mode 100644
index 000000000000..b84d7002e5c4
--- /dev/null
+++ b/drivers/staging/igb_avb/LICENSE
@@ -0,0 +1,24 @@
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2012 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
diff --git a/drivers/staging/igb_avb/Makefile b/drivers/staging/igb_avb/Makefile
new file mode 100644
index 000000000000..eaae47e157f3
--- /dev/null
+++ b/drivers/staging/igb_avb/Makefile
@@ -0,0 +1,18 @@
+obj-$(CONFIG_IGB_AVB) += igb_avb.o
+
+igb_avb-y := igb_main.o \
+	e1000_82575.o \
+	e1000_i210.o \
+	e1000_mac.o \
+	e1000_nvm.o e1000_phy.o \
+	e1000_manage.o \
+	igb_param.o \
+	igb_ethtool.o \
+	kcompat.o \
+	e1000_api.o \
+	e1000_mbx.o \
+	igb_vmdq.o \
+	igb_procfs.o \
+	igb_hwmon.o \
+	igb_debugfs.o \
+	igb_ptp.o
diff --git a/drivers/staging/igb_avb/README b/drivers/staging/igb_avb/README
new file mode 100644
index 000000000000..c08ff5d4d18d
--- /dev/null
+++ b/drivers/staging/igb_avb/README
@@ -0,0 +1,65 @@
+INTRODUCTION
+
+This component demonstrates various features of the Intel I210 Ethernet
+controller.
These features can be used for developing Audio/Video Bridging
+applications, Industrial Ethernet applications which require precise timing
+control over frame transmission, or test harnesses for measuring system
+latencies and sampling events.
+
+This component - igb_avb - is limited to the Intel I210 Ethernet controller.
+The kernel module can be loaded in parallel to existing in-kernel igb modules
+which may be used on other supported Intel LAN controllers. Modifications to
+the in-kernel igb driver are required if it also claims support for the
+Intel I210.
+
+BUILDING
+
+The igb_avb kernel module should be built against a kernel that provides the
+Linux 3.x PTP clock support. Unlike the standard igb driver, this version
+enables PTP by default (and will fail to build without kernel PTP support
+enabled).
+
+RUNNING
+
+To install the kernel mode driver, you must have root permissions. Typically,
+the driver is loaded by removing the currently running igb and running igb_avb:
+	sudo rmmod igb
+	sudo modprobe i2c_algo_bit
+	sudo modprobe dca
+	sudo modprobe ptp
+	sudo insmod ./igb_avb.ko
+
+Another option is to install the igb_avb driver in the "updates" directory,
+which will override igb for the devices claiming the same device ID. This
+will allow the coexistence of igb and igb_avb. Copy igb_avb.ko to:
+	sudo cp igb_avb.ko /lib/modules/`uname -r`/updates/
+	sudo depmod -a
+	modprobe igb_avb
+
+As the AVB Transmit queues (0,1) are mapped to a user-space application,
+typical LAN traffic must be steered away from these queues. The driver
+implements one method, registering an ndo_select_queue handler to map traffic
+to queue[3]. Another, possibly faster, method uses the transmit packet
+steering (XPS) functionality available since 2.6.35. An example script is
+below:
+
+#!/bin/bash
+
+INTERFACE=p2p1
+export INTERFACE
+
+rmmod igb
+rmmod igb_avb
+insmod ./igb_avb.ko
+sleep 1
+ifconfig $INTERFACE down
+echo 0 > /sys/class/net/$INTERFACE/queues/tx-0/xps_cpus
+echo 0 > /sys/class/net/$INTERFACE/queues/tx-1/xps_cpus
+echo f > /sys/class/net/$INTERFACE/queues/tx-2/xps_cpus
+echo f > /sys/class/net/$INTERFACE/queues/tx-3/xps_cpus
+ifconfig $INTERFACE up
+
+You may also want to disable the network manager from 'managing' your
+interface. The easiest way is to find the interface configuration scripts on
+your distribution. On Fedora 18, these are located at
+/etc/sysconfig/network-scripts/ifcfg-<interface>. Edit the file to set
+'BOOTPROTO=none'. This eliminates DHCP trying to configure the interface while
+you may be doing user-space application configuration.
diff --git a/drivers/staging/igb_avb/e1000_82575.c b/drivers/staging/igb_avb/e1000_82575.c
new file mode 100644
index 000000000000..2fcc3bf3af39
--- /dev/null
+++ b/drivers/staging/igb_avb/e1000_82575.c
@@ -0,0 +1,3809 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2015 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+ + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* + * 82575EB Gigabit Network Connection + * 82575EB Gigabit Backplane Connection + * 82575GB Gigabit Network Connection + * 82576 Gigabit Network Connection + * 82576 Quad Port Gigabit Mezzanine Adapter + * 82580 Gigabit Network Connection + * I350 Gigabit Network Connection + */ + +#include "e1000_api.h" +#include "e1000_i210.h" + +static s32 e1000_init_phy_params_82575(struct e1000_hw *hw); +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw); +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw); +static void e1000_release_phy_82575(struct e1000_hw *hw); +static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw); +static void e1000_release_nvm_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_82575(struct e1000_hw *hw); +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw); +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw); +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_reset_hw_82575(struct e1000_hw *hw); +static s32 e1000_reset_hw_82580(struct e1000_hw *hw); +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 *data); +static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, + u32 offset, u16 data); +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, + bool active); +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, + bool active); +static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw); +static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_get_media_type_82575(struct e1000_hw *hw); +#ifdef I2C_ENABLED +static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw); +#endif +static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data); +static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, + u32 offset, u16 data); +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +static s32 e1000_get_phy_id_82575(struct e1000_hw *hw); +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask); +static bool e1000_sgmii_active_82575(struct e1000_hw *hw); +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw); +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw); +static void e1000_config_collision_dist_82575(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw); +static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw); +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw); +static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw); +static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw); +static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw 
*hw, + u16 offset); +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw); +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw); +static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value); +static void e1000_clear_vfta_i350(struct e1000_hw *hw); + +static void e1000_i2c_start(struct e1000_hw *hw); +static void e1000_i2c_stop(struct e1000_hw *hw); +static void e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data); +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data); +static s32 e1000_get_i2c_ack(struct e1000_hw *hw); +static void e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data); +static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data); +static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl); +static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data); +static bool e1000_get_i2c_data(u32 *i2cctl); + +static const u16 e1000_82580_rxpbs_table[] = { + 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +#define E1000_82580_RXPBS_TABLE_SIZE \ + (sizeof(e1000_82580_rxpbs_table) / \ + sizeof(e1000_82580_rxpbs_table[0])) + +/** + * e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO + * @hw: pointer to the HW structure + * + * Called to determine if the I2C pins are being used for I2C or as an + * external MDIO interface since the two options are mutually exclusive. + **/ +static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw) +{ + u32 reg = 0; + bool ext_mdio = false; + + DEBUGFUNC("e1000_sgmii_uses_mdio_82575"); + + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + reg = E1000_READ_REG(hw, E1000_MDIC); + ext_mdio = !!(reg & E1000_MDIC_DEST); + break; + case e1000_82580: + case e1000_i350: + case e1000_i354: + case e1000_i210: + case e1000_i211: + reg = E1000_READ_REG(hw, E1000_MDICNFG); + ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO); + break; + default: + break; + } + return ext_mdio; +} + +/** + * e1000_init_phy_params_82575 - Init PHY func ptrs. 
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext;
+
+ DEBUGFUNC("e1000_init_phy_params_82575");
+
+ phy->ops.read_i2c_byte = e1000_read_i2c_byte_generic;
+ phy->ops.write_i2c_byte = e1000_write_i2c_byte_generic;
+
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ phy->type = e1000_phy_none;
+ goto out;
+ }
+
+ phy->ops.power_up = e1000_power_up_phy_copper;
+ phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+ phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+ phy->reset_delay_us = 100;
+
+ phy->ops.acquire = e1000_acquire_phy_82575;
+ phy->ops.check_reset_block = e1000_check_reset_block_generic;
+ phy->ops.commit = e1000_phy_sw_reset_generic;
+ phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
+ phy->ops.release = e1000_release_phy_82575;
+
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ if (e1000_sgmii_active_82575(hw)) {
+ phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ } else {
+ phy->ops.reset = e1000_phy_hw_reset_generic;
+ ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+ }
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ e1000_reset_mdicnfg_82580(hw);
+
+ if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+ phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
+ phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
+ } else {
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ phy->ops.read_reg = e1000_read_phy_reg_82580;
+ phy->ops.write_reg = e1000_write_phy_reg_82580;
+ break;
+ case e1000_i210:
+ case e1000_i211:
+ phy->ops.read_reg = e1000_read_phy_reg_gs40g;
+ phy->ops.write_reg = e1000_write_phy_reg_gs40g;
+ break;
+ default:
+ phy->ops.read_reg = e1000_read_phy_reg_igp;
+ phy->ops.write_reg = e1000_write_phy_reg_igp;
+ }
+ }
+
+ /* Set phy->phy_addr and phy->id. */
+ ret_val = e1000_get_phy_id_82575(hw);
+
+ /* Verify phy id and set remaining function pointers */
+ switch (phy->id) {
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1111_I_PHY_ID:
+ phy->type = e1000_phy_m88;
+ phy->ops.check_polarity = e1000_check_polarity_m88;
+ phy->ops.get_info = e1000_get_phy_info_m88;
+ if (phy->id == I347AT4_E_PHY_ID ||
+ phy->id == M88E1112_E_PHY_ID ||
+ phy->id == M88E1340M_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else if (phy->id == M88E1543_E_PHY_ID ||
+ phy->id == M88E1512_E_PHY_ID)
+ phy->ops.get_cable_length =
+ e1000_get_cable_length_m88_gen2;
+ else
+ phy->ops.get_cable_length = e1000_get_cable_length_m88;
+ phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+ /* Check if this PHY is configured for media swap.
*/ + if (phy->id == M88E1112_E_PHY_ID) { + u16 data; + + ret_val = phy->ops.write_reg(hw, + E1000_M88E1112_PAGE_ADDR, + 2); + if (ret_val) + goto out; + + ret_val = phy->ops.read_reg(hw, + E1000_M88E1112_MAC_CTRL_1, + &data); + if (ret_val) + goto out; + + data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >> + E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT; + if (data == E1000_M88E1112_AUTO_COPPER_SGMII || + data == E1000_M88E1112_AUTO_COPPER_BASEX) + hw->mac.ops.check_for_link = + e1000_check_for_link_media_swap; + } + if (phy->id == M88E1512_E_PHY_ID) { + ret_val = e1000_initialize_M88E1512_phy(hw); + if (ret_val) + goto out; + } + break; + case IGP03E1000_E_PHY_ID: + case IGP04E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.get_info = e1000_get_phy_info_igp; + phy->ops.get_cable_length = e1000_get_cable_length_igp_2; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic; + break; + case I82580_I_PHY_ID: + case I350_I_PHY_ID: + phy->type = e1000_phy_82580; + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + break; + case I210_I_PHY_ID: + phy->type = e1000_phy_i210; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.get_info = e1000_get_phy_info_m88; + phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82580; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82580; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params_82575 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u16 size; + + DEBUGFUNC("e1000_init_nvm_params_82575"); + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* + * Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* Just in case size is out of range, cap it to the largest + * EEPROM size supported + */ + if (size > 15) + size = 15; + + nvm->word_size = 1 << size; + if (hw->mac.type < e1000_i210) { + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 
+ 16 : 8; + break; + } + if (nvm->word_size == (1 << 15)) + nvm->page_size = 128; + + nvm->type = e1000_nvm_eeprom_spi; + } else { + nvm->type = e1000_nvm_flash_hw; + } + + /* Function Pointers */ + nvm->ops.acquire = e1000_acquire_nvm_82575; + nvm->ops.release = e1000_release_nvm_82575; + if (nvm->word_size < (1 << 15)) + nvm->ops.read = e1000_read_nvm_eerd; + else + nvm->ops.read = e1000_read_nvm_spi; + + nvm->ops.write = e1000_write_nvm_spi; + nvm->ops.validate = e1000_validate_nvm_checksum_generic; + nvm->ops.update = e1000_update_nvm_checksum_generic; + nvm->ops.valid_led_default = e1000_valid_led_default_82575; + + /* override generic family function pointers for specific descendants */ + switch (hw->mac.type) { + case e1000_82580: + nvm->ops.validate = e1000_validate_nvm_checksum_82580; + nvm->ops.update = e1000_update_nvm_checksum_82580; + break; + case e1000_i350: + case e1000_i354: + nvm->ops.validate = e1000_validate_nvm_checksum_i350; + nvm->ops.update = e1000_update_nvm_checksum_i350; + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_mac_params_82575 - Init MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575; + + DEBUGFUNC("e1000_init_mac_params_82575"); + + /* Derives media type */ + e1000_get_media_type_82575(hw); + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES_82575; + if (mac->type == e1000_82576) + mac->rar_entry_count = E1000_RAR_ENTRIES_82576; + if (mac->type == e1000_82580) + mac->rar_entry_count = E1000_RAR_ENTRIES_82580; + if (mac->type == e1000_i350 || mac->type == e1000_i354) + mac->rar_entry_count = E1000_RAR_ENTRIES_I350; + + /* Enable EEE default settings for EEE supported devices */ + if (mac->type >= e1000_i350) + dev_spec->eee_disable = false; + + /* Allow a single clear of the SW semaphore on I210 and newer */ + if (mac->type >= e1000_i210) + dev_spec->clear_semaphore_once = true; + + /* Set if part includes ASF firmware */ + mac->asf_firmware_present = true; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = + !!(E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK); + + /* Function pointers */ + + /* bus type/speed/width */ + mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic; + /* reset */ + if (mac->type >= e1000_82580) + mac->ops.reset_hw = e1000_reset_hw_82580; + else + mac->ops.reset_hw = e1000_reset_hw_82575; + /* hw initialization */ + if ((mac->type == e1000_i210) || (mac->type == e1000_i211)) + mac->ops.init_hw = e1000_init_hw_i210; + else + mac->ops.init_hw = e1000_init_hw_82575; + /* link setup */ + mac->ops.setup_link = e1000_setup_link_generic; + /* physical interface link setup */ + mac->ops.setup_physical_interface = + (hw->phy.media_type == e1000_media_type_copper) + ? 
e1000_setup_copper_link_82575 : e1000_setup_serdes_link_82575; + /* physical interface shutdown */ + mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575; + /* physical interface power up */ + mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575; + /* check for link */ + mac->ops.check_for_link = e1000_check_for_link_82575; + /* read mac address */ + mac->ops.read_mac_addr = e1000_read_mac_addr_82575; + /* configure collision distance */ + mac->ops.config_collision_dist = e1000_config_collision_dist_82575; + /* multicast address update */ + mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic; + if (hw->mac.type == e1000_i350 || mac->type == e1000_i354) { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_i350; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_i350; + } else { + /* writing VFTA */ + mac->ops.write_vfta = e1000_write_vfta_generic; + /* clearing VFTA */ + mac->ops.clear_vfta = e1000_clear_vfta_generic; + } + if (hw->mac.type >= e1000_82580) + mac->ops.validate_mdi_setting = + e1000_validate_mdi_setting_crossover_generic; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_generic; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_generic; + mac->ops.led_off = e1000_led_off_generic; + /* clear hardware counters */ + mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575; + /* link info */ + mac->ops.get_link_up_info = e1000_get_link_up_info_82575; + /* get thermal sensor data */ + mac->ops.get_thermal_sensor_data = + e1000_get_thermal_sensor_data_generic; + mac->ops.init_thermal_sensor_thresh = + e1000_init_thermal_sensor_thresh_generic; + /* acquire SW_FW sync */ + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_82575; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_82575; + if (mac->type >= e1000_i210) { + mac->ops.acquire_swfw_sync = e1000_acquire_swfw_sync_i210; + mac->ops.release_swfw_sync = e1000_release_swfw_sync_i210; + } + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_function_pointers_82575 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_init_function_pointers_82575"); + + hw->mac.ops.init_params = e1000_init_mac_params_82575; + hw->nvm.ops.init_params = e1000_init_nvm_params_82575; + hw->phy.ops.init_params = e1000_init_phy_params_82575; + hw->mbx.ops.init_params = e1000_init_mbx_params_pf; +} + +/** + * e1000_acquire_phy_82575 - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * Acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_82575(struct e1000_hw *hw) +{ + u16 mask = E1000_SWFW_PHY0_SM; + + DEBUGFUNC("e1000_acquire_phy_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_SWFW_PHY1_SM; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_SWFW_PHY2_SM; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_SWFW_PHY3_SM; + + return hw->mac.ops.acquire_swfw_sync(hw, mask); +} + +/** + * e1000_release_phy_82575 - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. 
+ **/
+static void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+ u16 mask = E1000_SWFW_PHY0_SM;
+
+ DEBUGFUNC("e1000_release_phy_82575");
+
+ if (hw->bus.func == E1000_FUNC_1)
+ mask = E1000_SWFW_PHY1_SM;
+ else if (hw->bus.func == E1000_FUNC_2)
+ mask = E1000_SWFW_PHY2_SM;
+ else if (hw->bus.func == E1000_FUNC_3)
+ mask = E1000_SWFW_PHY3_SM;
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset using the serial gigabit media independent
+ * interface and stores the retrieved information in data.
+ **/
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Writes the data to PHY register at the offset using the serial gigabit
+ * media independent interface.
+ **/
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+ u16 data)
+{
+ s32 ret_val = -E1000_ERR_PARAM;
+
+ DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+ if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+ DEBUGOUT1("PHY Address %u is out of range\n", offset);
+ goto out;
+ }
+
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+ hw->phy.ops.release(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ * @hw: pointer to the HW structure
+ *
+ * Retrieves the PHY address and ID for both PHYs which do and do not use
+ * the sgmii interface.
+ **/
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val = E1000_SUCCESS;
+ u16 phy_id;
+ u32 ctrl_ext;
+ u32 mdic;
+
+ DEBUGFUNC("e1000_get_phy_id_82575");
+
+ /* some i354 devices need an extra read for phy id */
+ if (hw->mac.type == e1000_i354)
+ e1000_get_phy_id(hw);
+
+ /*
+ * For SGMII PHYs, we try the list of possible addresses until
+ * we find one that works. For non-SGMII PHYs
+ * (e.g. integrated copper PHYs), an address of 1 should
+ * work. The result of this function should mean phy->phy_addr
+ * and phy->id are set correctly.
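[Reviewer note] For readers porting this probe logic elsewhere: the scan in e1000_get_phy_id_82575() works because the I2CCMD address field is 3 bits wide and 0 is invalid, so only addresses 1-7 are candidates, stopping at the first address whose PHY_ID1 read returns a supported vendor word. A minimal standalone sketch of that idea follows; read_id1() and the vendor constant are hypothetical stand-ins, not the driver's real helpers.

```c
/* Standalone sketch of the 1..7 SGMII PHY address scan.
 * read_id1() is a hypothetical stand-in for the driver's
 * e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, ...) call.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_M88_VENDOR 0x0141	/* placeholder vendor ID word */

/* pretend the PHY answers only at address 5 */
static bool read_id1(uint8_t addr, uint16_t *id1)
{
	if (addr != 5)
		return false;	/* models an unreadable address */
	*id1 = FAKE_M88_VENDOR;
	return true;
}

static int scan_sgmii_phy_addr(uint8_t *found)
{
	uint16_t id1;
	uint8_t addr;

	/* address field is 3 bits and 0 is invalid: test 1-7 */
	for (addr = 1; addr < 8; addr++) {
		if (read_id1(addr, &id1) && id1 == FAKE_M88_VENDOR) {
			*found = addr;
			return 0;
		}
	}
	return -1;	/* no supported PHY found */
}

int main(void)
{
	uint8_t addr;

	if (!scan_sgmii_phy_addr(&addr))
		printf("PHY found at address %u\n", addr);
	return 0;
}
```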
+ */
+ if (!e1000_sgmii_active_82575(hw)) {
+ phy->addr = 1;
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ switch (hw->mac.type) {
+ case e1000_82575:
+ case e1000_82576:
+ mdic = E1000_READ_REG(hw, E1000_MDIC);
+ mdic &= E1000_MDIC_PHY_MASK;
+ phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+ break;
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i354:
+ case e1000_i210:
+ case e1000_i211:
+ mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+ mdic &= E1000_MDICNFG_PHY_MASK;
+ phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ goto out;
+ break;
+ }
+ ret_val = e1000_get_phy_id(hw);
+ goto out;
+ }
+
+ /* Power on sgmii phy if it is disabled */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+ ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(300);
+
+ /*
+ * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+ * Therefore, we need to test 1-7
+ */
+ for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+ ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+ if (ret_val == E1000_SUCCESS) {
+ DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+ phy_id, phy->addr);
+ /*
+ * At the time of this writing, the M88 part is
+ * the only supported SGMII PHY product.
+ */
+ if (phy_id == M88_VENDOR)
+ break;
+ } else {
+ DEBUGOUT1("PHY address %u was unreadable\n",
+ phy->addr);
+ }
+ }
+
+ /* A valid PHY type couldn't be found. */
+ if (phy->addr == 8) {
+ phy->addr = 0;
+ ret_val = -E1000_ERR_PHY;
+ } else {
+ ret_val = e1000_get_phy_id(hw);
+ }
+
+ /* restore previous sfp cage power state */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+ struct e1000_phy_info *phy = &hw->phy;
+
+ DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+ /*
+ * This isn't a true "hard" reset, but is the only reset
+ * available to us at this time.
+ */
+
+ DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+ if (!(hw->phy.ops.write_reg))
+ goto out;
+
+ /*
+ * SFP documentation requires the following to configure the SFP module
+ * to work on SGMII. No further documentation is given.
+ */
+ ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+ if (ret_val)
+ goto out;
+
+ ret_val = hw->phy.ops.commit(hw);
+ if (ret_val)
+ goto out;
+
+ if (phy->id == M88E1512_E_PHY_ID)
+ ret_val = e1000_initialize_M88E1512_phy(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU D0 state according to the active flag. When
+ * activating LPLU this function also disables smart speed
+ * and vice versa. LPLU will not be activated unless the
+ * device autonegotiation advertisement meets standards of
+ * either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * This is a function pointer entry point only called by
+ * PHY setup routines.
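[Reviewer note] The LPLU handlers that follow all implement the same read-modify-write rule: enabling LPLU clears SmartSpeed, and disabling LPLU re-applies the configured SmartSpeed preference. A minimal sketch of that rule, with placeholder bit masks rather than the real IGP register layout, is:

```c
/* Sketch of the LPLU/SmartSpeed mutual exclusion. Bit values are
 * placeholders, not the actual IGP02E1000/IGP01E1000 definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PM_D0_LPLU  (1u << 2)	/* placeholder for IGP02E1000_PM_D0_LPLU */
#define SMART_SPEED (1u << 7)	/* placeholder for ..._PSCFR_SMART_SPEED */

static uint16_t apply_lplu(uint16_t pm, uint16_t *cfg, bool active,
			   bool smart_speed_on)
{
	if (active) {
		pm |= PM_D0_LPLU;
		*cfg &= ~SMART_SPEED;	/* the two modes are exclusive */
	} else {
		pm &= ~PM_D0_LPLU;
		if (smart_speed_on)
			*cfg |= SMART_SPEED;
		else
			*cfg &= ~SMART_SPEED;
	}
	return pm;
}

int main(void)
{
	uint16_t pm = 0, cfg = SMART_SPEED;

	pm = apply_lplu(pm, &cfg, true, true);
	printf("pm=0x%04x cfg=0x%04x\n", pm, cfg); /* LPLU on, SmartSpeed off */
	return 0;
}
```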
+ **/ +static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82575"); + + if (!(hw->phy.ops.read_reg)) + goto out; + + ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + goto out; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + if (ret_val) + goto out; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT, + data); + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + goto out; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + goto out; + } + } + +out: + return ret_val; +} + +/** + * e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 data; + + DEBUGFUNC("e1000_set_d0_lplu_state_82580"); + + data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT); + + if (active) { + data |= E1000_82580_PM_D0_LPLU; + + /* When LPLU is enabled, we should disable SmartSpeed */ + data &= ~E1000_82580_PM_SPD; + } else { + data &= ~E1000_82580_PM_D0_LPLU; + + /* + * LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Always returns E1000_SUCCESS.
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
+ * is used during Dx states where the power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ u32 data;
+
+ DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+ data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+ if (!active) {
+ data &= ~E1000_82580_PM_D3_LPLU;
+ /*
+ * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ * during Dx states where the power conservation is most
+ * important. During driver activity we should enable
+ * SmartSpeed, so performance is maintained.
+ */
+ if (phy->smart_speed == e1000_smart_speed_on)
+ data |= E1000_82580_PM_SPD;
+ else if (phy->smart_speed == e1000_smart_speed_off)
+ data &= ~E1000_82580_PM_SPD;
+ } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+ (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+ (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+ data |= E1000_82580_PM_D3_LPLU;
+ /* When LPLU is enabled, we should disable SmartSpeed */
+ data &= ~E1000_82580_PM_SPD;
+ }
+
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_acquire_nvm_82575 - Request for access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the necessary semaphores for exclusive access to the EEPROM.
+ * Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ * Return successful if access grant bit set, else clear the request for
+ * EEPROM access and return -E1000_ERR_NVM (-1).
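[Reviewer note] The NVM acquire path below is a classic two-stage lock with unwind: the SW/FW EEPROM semaphore is taken first, then the EEPROM access grant; if the grant fails, the semaphore must be released on the way out. A toy model of that ordering, with hypothetical take_/release_ helpers standing in for the driver's generic routines:

```c
/* Sketch of the acquire/unwind ordering in e1000_acquire_nvm_82575().
 * All three helpers are invented stand-ins for the real generics.
 */
#include <stdbool.h>
#include <stdio.h>

static bool take_swfw_eep_sem(void)    { return true; }
static void release_swfw_eep_sem(void) { puts("sem released"); }
static bool take_eeprom_grant(void)    { return false; /* simulate failure */ }

static int acquire_nvm(void)
{
	if (!take_swfw_eep_sem())
		return -1;

	/* sticky EECD error flags would be cleared here on i350/82580 */

	if (!take_eeprom_grant()) {
		release_swfw_eep_sem();	/* unwind the semaphore */
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("acquire_nvm -> %d\n", acquire_nvm());
	return 0;
}
```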
+ **/
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_SUCCESS;
+
+ DEBUGFUNC("e1000_acquire_nvm_82575");
+
+ ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+ if (ret_val)
+ goto out;
+
+ /*
+ * Check for any access errors that this
+ * access may hook on
+ */
+ if (hw->mac.type == e1000_i350) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+ E1000_EECD_TIMEOUT)) {
+ /* Clear all access error flags */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_ERROR_CLR);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+ if (hw->mac.type == e1000_82580) {
+ u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+ if (eecd & E1000_EECD_BLOCKED) {
+ /* Clear access error flag */
+ E1000_WRITE_REG(hw, E1000_EECD, eecd |
+ E1000_EECD_BLOCKED);
+ DEBUGOUT("Nvm bit banging access error detected and cleared.\n");
+ }
+ }
+
+ ret_val = e1000_acquire_nvm_generic(hw);
+ if (ret_val)
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ * @hw: pointer to the HW structure
+ *
+ * Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ * then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+ DEBUGFUNC("e1000_release_nvm_82575");
+
+ e1000_release_nvm_generic(hw);
+
+ e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to acquire
+ *
+ * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
+ * will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+ u32 swfw_sync;
+ u32 swmask = mask;
+ u32 fwmask = mask << 16;
+ s32 ret_val = E1000_SUCCESS;
+ s32 i = 0, timeout = 200;
+
+ DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+ while (i < timeout) {
+ if (e1000_get_hw_semaphore_generic(hw)) {
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+ if (!(swfw_sync & (fwmask | swmask)))
+ break;
+
+ /*
+ * Firmware currently using resource (fwmask)
+ * or other software thread using resource (swmask)
+ */
+ e1000_put_hw_semaphore_generic(hw);
+ msec_delay_irq(5);
+ i++;
+ }
+
+ if (i == timeout) {
+ DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+ ret_val = -E1000_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ swfw_sync |= swmask;
+ E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+ e1000_put_hw_semaphore_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ * @hw: pointer to the HW structure
+ * @mask: specifies which semaphore to release
+ *
+ * Release the SW/FW semaphore used to access the PHY or NVM. The mask
+ * will also specify which port we're releasing the lock for.
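[Reviewer note] The SW_FW_SYNC handshake above is worth internalizing: every read-modify-write of SW_FW_SYNC happens while holding the hardware semaphore, software claim bits occupy the low 16 bits, and the firmware mirror sits in the high 16 bits of the same mask. A minimal single-threaded model of that protocol, with a plain variable standing in for the register:

```c
/* Minimal model of the SW_FW_SYNC claim/release protocol. The hw
 * semaphore steps are shown only as comments; the "register" is a
 * plain variable here.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sw_fw_sync;	/* stands in for E1000_SW_FW_SYNC */

static int acquire(uint16_t mask, int timeout)
{
	uint32_t swmask = mask;
	uint32_t fwmask = (uint32_t)mask << 16;
	int i;

	for (i = 0; i < timeout; i++) {
		/* hw semaphore would be taken here */
		if (!(sw_fw_sync & (swmask | fwmask))) {
			sw_fw_sync |= swmask;	/* claim the resource */
			/* hw semaphore dropped here */
			return 0;
		}
		/* hw semaphore dropped here; real code sleeps 5 ms */
	}
	return -1;	/* SW_FW_SYNC timeout */
}

static void release(uint16_t mask)
{
	/* also done under the hw semaphore in the real driver */
	sw_fw_sync &= ~(uint32_t)mask;
}

int main(void)
{
	printf("first acquire:  %d\n", acquire(0x0001, 200));
	printf("second acquire: %d\n", acquire(0x0001, 3)); /* times out */
	release(0x0001);
	printf("after release:  %d\n", acquire(0x0001, 200));
	return 0;
}
```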
+ **/ +static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_82575"); + + while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_cfg_done_82575 - Read config done bit + * @hw: pointer to the HW structure + * + * Read the management control register for the config done bit for + * completion status. NOTE: silicon which is EEPROM-less will fail trying + * to read the config done bit, so an error is *ONLY* logged and returns + * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon + * would not be able to be reset or change link. + **/ +static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + DEBUGFUNC("e1000_get_cfg_done_82575"); + + if (hw->bus.func == E1000_FUNC_1) + mask = E1000_NVM_CFG_DONE_PORT_1; + else if (hw->bus.func == E1000_FUNC_2) + mask = E1000_NVM_CFG_DONE_PORT_2; + else if (hw->bus.func == E1000_FUNC_3) + mask = E1000_NVM_CFG_DONE_PORT_3; + while (timeout) { + if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask) + break; + msec_delay(1); + timeout--; + } + if (!timeout) + DEBUGOUT("MNG configuration cycle has not completed.\n"); + + /* If EEPROM is not marked present, init the PHY manually */ + if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) + e1000_phy_init_script_igp3(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_link_up_info_82575 - Get link speed/duplex info + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * This is a wrapper function, if using the serial gigabit media independent + * interface, use PCS to retrieve the link speed and duplex information. + * Otherwise, use the generic function to get the link speed and duplex info. + **/ +static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + DEBUGFUNC("e1000_get_link_up_info_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed, + duplex); + else + ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, + duplex); + + return ret_val; +} + +/** + * e1000_check_for_link_82575 - Check for link + * @hw: pointer to the HW structure + * + * If sgmii is enabled, then use the pcs register to determine link, otherwise + * use the generic interface for determining link. + **/ +static s32 e1000_check_for_link_82575(struct e1000_hw *hw) +{ + s32 ret_val; + u16 speed, duplex; + + DEBUGFUNC("e1000_check_for_link_82575"); + + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed, + &duplex); + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + + /* + * Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
+ */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + } else { + ret_val = e1000_check_for_copper_link_generic(hw); + } + + return ret_val; +} + +/** + * e1000_check_for_link_media_swap - Check which M88E1112 interface linked + * @hw: pointer to the HW structure + * + * Poll the M88E1112 interfaces to see which interface achieved link. + */ +static s32 e1000_check_for_link_media_swap(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + u8 port = 0; + + DEBUGFUNC("e1000_check_for_link_media_swap"); + + /* Check for copper. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_COPPER; + + /* Check for other. */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data); + if (ret_val) + return ret_val; + + if (data & E1000_M88E1112_STATUS_LINK) + port = E1000_MEDIA_PORT_OTHER; + + /* Determine if a swap needs to happen. */ + if (port && (hw->dev_spec._82575.media_port != port)) { + hw->dev_spec._82575.media_port = port; + hw->dev_spec._82575.media_changed = true; + } + + if (port == E1000_MEDIA_PORT_COPPER) { + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + e1000_check_for_link_82575(hw); + } else { + e1000_check_for_link_82575(hw); + /* reset page to 0 */ + ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown + * @hw: pointer to the HW structure + **/ +static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw) +{ + u32 reg; + + DEBUGFUNC("e1000_power_up_serdes_link_82575"); + + if ((hw->phy.media_type != e1000_media_type_internal_serdes) && + !e1000_sgmii_active_82575(hw)) + return; + + /* Enable PCS to turn on link */ + reg = E1000_READ_REG(hw, E1000_PCS_CFG0); + reg |= E1000_PCS_CFG_PCS_EN; + E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg); + + /* Power up the laser */ + reg = E1000_READ_REG(hw, E1000_CTRL_EXT); + reg &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg); + + /* flush the write to verify completion */ + E1000_WRITE_FLUSH(hw); + msec_delay(1); +} + +/** + * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Using the physical coding sub-layer (PCS), retrieve the current speed and + * duplex, then store the values in the pointers provided. + **/ +static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, + u16 *speed, u16 *duplex) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 pcs; + u32 status; + + DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575"); + + /* + * Read the PCS Status register for link state. For non-copper mode, + * the status register is not accurate. The PCS status register is + * used instead. + */ + pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + /* + * The link up bit determines when link is up on autoneg. 
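[Reviewer note] The speed/duplex decode that follows is a pure function of the PCS link-status word: no link means zeroed outputs, otherwise one speed bit and one duplex bit are inspected. A toy decoder capturing that shape, with invented bit positions rather than the real E1000_PCS_LSTS_* values:

```c
/* Toy decoder for a PCS link-status word. Bit positions are
 * placeholders, not the actual E1000_PCS_LSTS_* layout.
 */
#include <stdint.h>
#include <stdio.h>

#define LSTS_LINK_OK     (1u << 0)	/* placeholder bits */
#define LSTS_SPEED_100   (1u << 1)
#define LSTS_SPEED_1000  (1u << 2)
#define LSTS_DUPLEX_FULL (1u << 3)

static void decode_pcs(uint32_t pcs, uint16_t *speed, uint16_t *duplex)
{
	if (!(pcs & LSTS_LINK_OK)) {
		*speed = 0;
		*duplex = 0;
		return;
	}
	if (pcs & LSTS_SPEED_1000)
		*speed = 1000;
	else if (pcs & LSTS_SPEED_100)
		*speed = 100;
	else
		*speed = 10;
	*duplex = (pcs & LSTS_DUPLEX_FULL) ? 2 : 1;	/* 2=full, 1=half */
}

int main(void)
{
	uint16_t speed, duplex;

	decode_pcs(LSTS_LINK_OK | LSTS_SPEED_1000 | LSTS_DUPLEX_FULL,
		   &speed, &duplex);
	printf("%u Mb/s, %s duplex\n", speed, duplex == 2 ? "full" : "half");
	return 0;
}
```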
+ */
+ if (pcs & E1000_PCS_LSTS_LINK_OK) {
+ mac->serdes_has_link = true;
+
+ /* Detect and store PCS speed */
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
+ *speed = SPEED_1000;
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
+ *speed = SPEED_100;
+ else
+ *speed = SPEED_10;
+
+ /* Detect and store PCS duplex */
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = E1000_READ_REG(hw, E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ DEBUGOUT("2500 Mbps, ");
+ DEBUGOUT("Full Duplex\n");
+ }
+ }
+
+ } else {
+ mac->serdes_has_link = false;
+ *speed = 0;
+ *duplex = 0;
+ }
+
+ return E1000_SUCCESS;
+}
+
+/**
+ * e1000_shutdown_serdes_link_82575 - Remove link during power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of serdes, shut down the SFP and PCS on driver unload
+ * when management pass-through is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+ DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return;
+
+ if (!e1000_enable_mng_pass_thru(hw)) {
+ /* Disable PCS to turn off link */
+ reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+ reg &= ~E1000_PCS_CFG_PCS_EN;
+ E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+ /* shutdown the laser */
+ reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ reg |= E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ E1000_WRITE_FLUSH(hw);
+ msec_delay(1);
+ }
+
+ return;
+}
+
+/**
+ * e1000_reset_hw_82575 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_reset_hw_82575");
+
+ /*
+ * Prevent the PCI-E bus from sticking if there is no TLP connection
+ * on the last TLP read/write transaction when MAC is reset.
+ */
+ ret_val = e1000_disable_pcie_master_generic(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+ /* set the completion timeout for interface */
+ ret_val = e1000_set_pcie_completion_timeout(hw);
+ if (ret_val)
+ DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+
+ DEBUGOUT("Masking off all interrupts\n");
+ E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+ E1000_WRITE_REG(hw, E1000_RCTL, 0);
+ E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+ E1000_WRITE_FLUSH(hw);
+
+ msec_delay(10);
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+ DEBUGOUT("Issuing a global reset to MAC\n");
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+ ret_val = e1000_get_auto_rd_done_generic(hw);
+ if (ret_val) {
+ /*
+ * When auto config read does not complete, do not
+ * return with an error. This can happen in situations
+ * where there is no eeprom and prevents getting link.
+ */
+ DEBUGOUT("Auto Read Done did not complete\n");
+ }
+
+ /* If EEPROM is not present, run manual init scripts */
+ if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES))
+ e1000_reset_init_script_82575(hw);
+
+ /* Clear any pending interrupt events.
*/ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + return ret_val; +} + +/** + * e1000_init_hw_82575 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +s32 e1000_init_hw_82575(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + DEBUGFUNC("e1000_init_hw_82575"); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + if (ret_val) { + DEBUGOUT("Error initializing identification LED\n"); + /* This is not fatal and we should not stop init due to this */ + } + + /* Disabling VLAN filtering */ + DEBUGOUT("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address */ + e1000_init_rx_addrs_generic(hw, rar_count); + + /* Zero out the Multicast HASH table */ + DEBUGOUT("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Zero out the Unicast HASH table */ + DEBUGOUT("Zeroing the UTA\n"); + for (i = 0; i < mac->uta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the default MTU size */ + hw->dev_spec._82575.mtu = 1500; + + /* + * Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82575(hw); + + return ret_val; +} + +/** + * e1000_setup_copper_link_82575 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex. Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. 
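[Reviewer note] The first thing the copper setup below does is prime the device control register: set the Set-Link-Up bit and clear the force-speed/force-duplex bits so that autonegotiation results actually take effect. As a one-function illustration, with placeholder values for the E1000_CTRL_SLU/FRCSPD/FRCDPX bits:

```c
/* Sketch of the CTRL priming step at the top of the copper link
 * setup path. Bit positions are placeholders, not the real CTRL map.
 */
#include <stdint.h>
#include <stdio.h>

#define CTRL_SLU    (1u << 6)	/* placeholder bit positions */
#define CTRL_FRCSPD (1u << 11)
#define CTRL_FRCDPX (1u << 12)

static uint32_t prime_ctrl_for_autoneg(uint32_t ctrl)
{
	ctrl |= CTRL_SLU;			/* set link up */
	ctrl &= ~(CTRL_FRCSPD | CTRL_FRCDPX);	/* let autoneg decide */
	return ctrl;
}

int main(void)
{
	printf("CTRL=0x%08x\n", prime_ctrl_for_autoneg(CTRL_FRCSPD));
	return 0;
}
```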
+ **/
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u32 phpm_reg;
+
+ DEBUGFUNC("e1000_setup_copper_link_82575");
+
+ ctrl = E1000_READ_REG(hw, E1000_CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
+ phpm_reg = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
+ }
+
+ ret_val = e1000_setup_serdes_link_82575(hw);
+ if (ret_val)
+ goto out;
+
+ if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+ /* allow time for the SFP cage to power up the phy */
+ msec_delay(300);
+
+ ret_val = hw->phy.ops.reset(hw);
+ if (ret_val) {
+ DEBUGOUT("Error resetting the PHY.\n");
+ goto out;
+ }
+ }
+ switch (hw->phy.type) {
+ case e1000_phy_i210:
+ case e1000_phy_m88:
+ switch (hw->phy.id) {
+ case I347AT4_E_PHY_ID:
+ case M88E1112_E_PHY_ID:
+ case M88E1340M_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
+ case M88E1512_E_PHY_ID:
+ case I210_I_PHY_ID:
+ ret_val = e1000_copper_link_setup_m88_gen2(hw);
+ break;
+ default:
+ ret_val = e1000_copper_link_setup_m88(hw);
+ break;
+ }
+ break;
+ case e1000_phy_igp_3:
+ ret_val = e1000_copper_link_setup_igp(hw);
+ break;
+ case e1000_phy_82580:
+ ret_val = e1000_copper_link_setup_82577(hw);
+ break;
+ default:
+ ret_val = -E1000_ERR_PHY;
+ break;
+ }
+
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_setup_copper_link_generic(hw);
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_setup_serdes_link_82575 - Setup link for serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configure the physical coding sub-layer (PCS) link. The PCS link is
+ * used on copper connections where the serialized gigabit media independent
+ * interface (sgmii), or serdes fiber is being used. Configures the link
+ * for auto-negotiation or forces speed/duplex.
+ **/
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+ bool pcs_autoneg;
+ s32 ret_val = E1000_SUCCESS;
+ u16 data;
+
+ DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !e1000_sgmii_active_82575(hw))
+ return ret_val;
+
+ /*
+ * On the 82575, SerDes loopback mode persists until it is
+ * explicitly turned off or a power cycle is performed. A read to
+ * the register does not indicate its status. Therefore, we ensure
+ * loopback mode is disabled during initialization.
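[Reviewer note] The serdes path below ultimately programs PCS_LCTL in one of two shapes: autonegotiated (AN enable + AN restart, forced flow control cleared) or forced (force speed + force flow control). A side-by-side sketch of the two shapes, with invented bit values standing in for the driver's E1000_PCS_LCTL_* constants:

```c
/* Side-by-side sketch of the autoneg vs forced PCS_LCTL shapes.
 * Bit values are invented; only the structure mirrors the driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LCTL_AN_ENABLE   (1u << 16)	/* placeholder values */
#define LCTL_AN_RESTART  (1u << 17)
#define LCTL_FORCE_FCTRL (1u << 7)
#define LCTL_FSD         (1u << 4)	/* force speed */

static uint32_t program_pcs_lctl(uint32_t reg, bool pcs_autoneg)
{
	/* both paths start from a cleaned slate */
	reg &= ~(LCTL_AN_ENABLE | LCTL_FSD | LCTL_FORCE_FCTRL);

	if (pcs_autoneg)
		reg |= LCTL_AN_ENABLE | LCTL_AN_RESTART;
	else
		reg |= LCTL_FSD | LCTL_FORCE_FCTRL;
	return reg;
}

int main(void)
{
	printf("autoneg: 0x%08x\n", program_pcs_lctl(0, true));
	printf("forced:  0x%08x\n", program_pcs_lctl(0, false));
	return 0;
}
```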
+ */ + E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + + /* power on the sfp cage if present */ + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + ctrl_reg = E1000_READ_REG(hw, E1000_CTRL); + ctrl_reg |= E1000_CTRL_SLU; + + /* set both sw defined pins on 82575/82576*/ + if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) + ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1; + + reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + + /* default pcs_autoneg to the same setting as mac autoneg */ + pcs_autoneg = hw->mac.autoneg; + + switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) { + case E1000_CTRL_EXT_LINK_MODE_SGMII: + /* sgmii mode lets the phy handle forcing speed/duplex */ + pcs_autoneg = true; + /* autoneg time out should be disabled for SGMII mode */ + reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT); + break; + case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX: + /* disable PCS autoneg and support parallel detect only */ + pcs_autoneg = false; + /* fall through to default case */ + default: + if (hw->mac.type == e1000_82575 || + hw->mac.type == e1000_82576) { + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT) + pcs_autoneg = false; + } + + /* + * non-SGMII modes only supports a speed of 1000/Full for the + * link so it is best to just force the MAC and let the pcs + * link either autoneg or be forced to 1000/Full + */ + ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD | + E1000_CTRL_FD | E1000_CTRL_FRCDPX; + + /* set speed of 1000/Full if speed/duplex is forced */ + reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL; + break; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg); + + /* + * New SerDes mode allows for forcing speed or autonegotiating speed + * at 1gb. Autoneg should be default set by most drivers. This is the + * mode that will be compatible with older link partners and switches. + * However, both are supported by the hardware and some drivers/tools. + */ + reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP | + E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK); + + if (pcs_autoneg) { + /* Set PCS register for autoneg */ + reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ + E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ + + /* Disable force flow control for autoneg */ + reg &= ~E1000_PCS_LCTL_FORCE_FCTRL; + + /* Configure flow control advertisement for autoneg */ + anadv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE); + + switch (hw->fc.requested_mode) { + case e1000_fc_full: + case e1000_fc_rx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + anadv_reg |= E1000_TXCW_PAUSE; + break; + case e1000_fc_tx_pause: + anadv_reg |= E1000_TXCW_ASM_DIR; + break; + default: + break; + } + + E1000_WRITE_REG(hw, E1000_PCS_ANADV, anadv_reg); + + DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); + } else { + /* Set PCS register for forced link */ + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ + + /* Force flow control for forced link */ + reg |= E1000_PCS_LCTL_FORCE_FCTRL; + + DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); + } + + E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg); + + if (!pcs_autoneg && !e1000_sgmii_active_82575(hw)) + e1000_force_mac_fc_generic(hw); + + return ret_val; +} + +/** + * e1000_get_media_type_82575 - derives current media type. 
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen reflecting a few settings.
+ * The following are taken into account:
+ * - link mode set in the current port Init Control Word #3
+ * - current link mode settings in CSR register
+ * - MDIO vs. I2C PHY control interface chosen
+ * - SFP module media type
+ **/
+static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ s32 ret_val = E1000_SUCCESS;
+ u32 ctrl_ext = 0;
+ u32 link_mode = 0;
+
+ /* Set internal phy as default */
+ dev_spec->sgmii_active = false;
+ dev_spec->module_plugged = false;
+
+ /* Get CSR setting */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+ /* extract link mode setting */
+ link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ switch (link_mode) {
+ case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_GMII:
+ hw->phy.media_type = e1000_media_type_copper;
+ break;
+ case E1000_CTRL_EXT_LINK_MODE_SGMII:
+ /* Get phy control interface type set (MDIO vs. I2C)*/
+ if (e1000_sgmii_uses_mdio_82575(hw)) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ break;
+ }
+ /* fall through for I2C based SGMII */
+ case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+ /* read media type from SFP EEPROM */
+#ifdef I2C_ENABLED
+ printk(KERN_INFO "igb_avb I2C enabled - set_sfp_media_type_82575() called\n");
+ ret_val = e1000_set_sfp_media_type_82575(hw);
+#else
+ printk(KERN_INFO "igb_avb I2C disabled - set_sfp_media_type_82575() not necessary\n");
+ hw->phy.media_type = e1000_media_type_unknown;
+#endif
+ if ((ret_val != E1000_SUCCESS) ||
+ (hw->phy.media_type == e1000_media_type_unknown)) {
+ /*
+ * If media type was not identified then return media
+ * type defined by the CTRL_EXT settings.
+ */
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+
+ if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+ hw->phy.media_type = e1000_media_type_copper;
+ dev_spec->sgmii_active = true;
+ }
+
+ break;
+ }
+
+ /* do not change link mode for 100BaseFX */
+ if (dev_spec->eth_flags.e100_base_fx)
+ break;
+
+ /* change current link mode setting */
+ ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+ if (hw->phy.media_type == e1000_media_type_copper)
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+ else
+ ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+ break;
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_set_sfp_media_type_82575 - derives SFP module media type.
+ * @hw: pointer to the HW structure
+ *
+ * The media type is chosen based on the SFP module compatibility
+ * flags retrieved from the SFP ID EEPROM.
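[Reviewer note] The classification order in the SFP routine below is: check the identifier byte first (SFP or SFF), then map the Ethernet compliance flags to a media type. A compact sketch of that decision tree; the identifier codes and flag bits here are illustrative stand-ins, not the SFF-8472 layout:

```c
/* Sketch of the SFP module classification order: identifier byte
 * first, then Ethernet compliance flags. All constants are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define ID_SFP 0x03	/* placeholder identifier codes */
#define ID_SFF 0x02

#define FLAG_1000BASE_SX (1u << 0)	/* placeholder compliance bits */
#define FLAG_1000BASE_LX (1u << 1)
#define FLAG_1000BASE_T  (1u << 2)
#define FLAG_100BASE_FX  (1u << 3)

enum media { MEDIA_UNKNOWN, MEDIA_SERDES, MEDIA_COPPER };

static enum media classify_sfp(uint8_t id, uint8_t flags)
{
	if (id != ID_SFP && id != ID_SFF)
		return MEDIA_UNKNOWN;	/* nothing plugged or unsupported */
	if (flags & (FLAG_1000BASE_SX | FLAG_1000BASE_LX | FLAG_100BASE_FX))
		return MEDIA_SERDES;
	if (flags & FLAG_1000BASE_T)
		return MEDIA_COPPER;	/* SGMII copper module */
	return MEDIA_UNKNOWN;
}

int main(void)
{
	/* a 1000BASE-T module classifies as copper (prints 2) */
	printf("%d\n", classify_sfp(ID_SFP, FLAG_1000BASE_T));
	return 0;
}
```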
+ **/
+#ifdef I2C_ENABLED
+static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+ s32 ret_val = E1000_ERR_CONFIG;
+ u32 ctrl_ext = 0;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ struct sfp_e1000_flags *eth_flags = &dev_spec->eth_flags;
+ u8 transceiver_type = 0;
+ s32 timeout = 3;
+
+ /* Turn I2C interface ON and power on sfp cage */
+ ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+ ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+ E1000_WRITE_FLUSH(hw);
+
+ /* Read SFP module data */
+ while (timeout) {
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+ &transceiver_type);
+ if (ret_val == E1000_SUCCESS)
+ break;
+ msec_delay(100);
+ timeout--;
+ }
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ ret_val = e1000_read_sfp_data_byte(hw,
+ E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+ (u8 *)eth_flags);
+ if (ret_val != E1000_SUCCESS)
+ goto out;
+
+ /* Check if an SFP module is plugged in and powered */
+ if ((transceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+ (transceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ dev_spec->module_plugged = true;
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e100_base_fx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
+ goto out;
+ }
+ } else {
+ hw->phy.media_type = e1000_media_type_unknown;
+ }
+ ret_val = E1000_SUCCESS;
+out:
+ /* Restore I2C interface setting */
+ E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ return ret_val;
+}
+#endif
+/**
+ * e1000_valid_led_default_82575 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+ s32 ret_val;
+
+ DEBUGFUNC("e1000_valid_led_default_82575");
+
+ ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+ if (ret_val) {
+ DEBUGOUT("NVM Read Error\n");
+ goto out;
+ }
+
+ if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+ switch (hw->phy.media_type) {
+ case e1000_media_type_internal_serdes:
+ *data = ID_LED_DEFAULT_82575_SERDES;
+ break;
+ case e1000_media_type_copper:
+ default:
+ *data = ID_LED_DEFAULT;
+ break;
+ }
+ }
+out:
+ return ret_val;
+}
+
+/**
+ * e1000_sgmii_active_82575 - Return sgmii state
+ * @hw: pointer to the HW structure
+ *
+ * 82575 silicon has a serialized gigabit media independent interface (sgmii)
+ * which can be enabled for use in the embedded applications. Simply
+ * return the current state of the sgmii interface.
+ **/
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+ return dev_spec->sgmii_active;
+}
+
+/**
+ * e1000_reset_init_script_82575 - Inits HW defaults after reset
+ * @hw: pointer to the HW structure
+ *
+ * Inits recommended HW defaults after a reset when there is no EEPROM
+ * detected. This is only for the 82575.
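[Reviewer note] The reset init script that follows is just a fixed sequence of (register, offset, value) writes through e1000_write_8bit_ctrl_reg_generic(); a table-driven form is behaviorally equivalent and easier to audit. A sketch of that shape, where write8() is a hypothetical stand-in and the offsets/values are illustrative, not the 82575 script:

```c
/* Table-driven form of a fixed 8-bit control register script.
 * write8() and the table contents are illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct init_step {
	uint32_t reg;	/* control register, e.g. SCTL/CCMCTL/GIOCTL */
	uint8_t off;
	uint8_t val;
};

static void write8(uint32_t reg, uint8_t off, uint8_t val)
{
	printf("reg 0x%x: [0x%02x] = 0x%02x\n",
	       (unsigned)reg, (unsigned)off, (unsigned)val);
}

int main(void)
{
	static const struct init_step script[] = {
		{ 0x24, 0x00, 0x0C },	/* invented example entries */
		{ 0x24, 0x01, 0x78 },
		{ 0xE8, 0x14, 0x00 },
	};
	size_t i;

	for (i = 0; i < sizeof(script) / sizeof(script[0]); i++)
		write8(script[i].reg, script[i].off, script[i].val);
	return 0;
}
```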
+ **/ +static s32 e1000_reset_init_script_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_reset_init_script_82575"); + + if (hw->mac.type == e1000_82575) { + DEBUGOUT("Running reset init script for 82575\n"); + /* SerDes configuration via SERDESCTRL */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15); + + /* CCM configuration via CCMCTL register */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00); + + /* PCIe lanes configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81); + + /* PCIe PLL Configuration */ + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00); + e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00); + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_82575 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_mac_addr_82575"); + + /* + * If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_mac_addr_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_config_collision_dist_82575 - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +static void e1000_config_collision_dist_82575(struct e1000_hw *hw) +{ + u32 tctl_ext; + + DEBUGFUNC("e1000_config_collision_dist_82575"); + + tctl_ext = E1000_READ_REG(hw, E1000_TCTL_EXT); + + tctl_ext &= ~E1000_TCTL_EXT_COLD; + tctl_ext |= E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL_EXT, tctl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_power_down_phy_copper_82575 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + + if (!(phy->ops.check_reset_block)) + return; + + /* If the management interface is not enabled, then power down */ + if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); + + return; +} + +/** + * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. 
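[Reviewer note] The long run of discarded E1000_READ_REG() calls below relies on the statistics registers being clear-on-read: reading a counter returns its value and zeroes it, so a "clear" is simply a read whose result is thrown away. A tiny model of that behavior, simulating the register with a variable:

```c
/* Model of a clear-on-read statistics counter: the read itself
 * is the reset. The "register" is simulated with a variable.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t prc64 = 1234;	/* stands in for E1000_PRC64 */

static uint32_t read_clear(uint32_t *reg)
{
	uint32_t v = *reg;

	*reg = 0;	/* hardware clears the counter on read */
	return v;
}

int main(void)
{
	(void)read_clear(&prc64);	/* discard: this is the "clear" */
	printf("after clear: %u\n", prc64);
	return 0;
}
```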
+ **/ +static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_82575"); + + e1000_clear_hw_cntrs_base_generic(hw); + + E1000_READ_REG(hw, E1000_PRC64); + E1000_READ_REG(hw, E1000_PRC127); + E1000_READ_REG(hw, E1000_PRC255); + E1000_READ_REG(hw, E1000_PRC511); + E1000_READ_REG(hw, E1000_PRC1023); + E1000_READ_REG(hw, E1000_PRC1522); + E1000_READ_REG(hw, E1000_PTC64); + E1000_READ_REG(hw, E1000_PTC127); + E1000_READ_REG(hw, E1000_PTC255); + E1000_READ_REG(hw, E1000_PTC511); + E1000_READ_REG(hw, E1000_PTC1023); + E1000_READ_REG(hw, E1000_PTC1522); + + E1000_READ_REG(hw, E1000_ALGNERRC); + E1000_READ_REG(hw, E1000_RXERRC); + E1000_READ_REG(hw, E1000_TNCRS); + E1000_READ_REG(hw, E1000_CEXTERR); + E1000_READ_REG(hw, E1000_TSCTC); + E1000_READ_REG(hw, E1000_TSCTFC); + + E1000_READ_REG(hw, E1000_MGTPRC); + E1000_READ_REG(hw, E1000_MGTPDC); + E1000_READ_REG(hw, E1000_MGTPTC); + + E1000_READ_REG(hw, E1000_IAC); + E1000_READ_REG(hw, E1000_ICRXOC); + + E1000_READ_REG(hw, E1000_ICRXPTC); + E1000_READ_REG(hw, E1000_ICRXATC); + E1000_READ_REG(hw, E1000_ICTXPTC); + E1000_READ_REG(hw, E1000_ICTXATC); + E1000_READ_REG(hw, E1000_ICTXQEC); + E1000_READ_REG(hw, E1000_ICTXQMTC); + E1000_READ_REG(hw, E1000_ICRXDMTC); + + E1000_READ_REG(hw, E1000_CBTMPC); + E1000_READ_REG(hw, E1000_HTDPMC); + E1000_READ_REG(hw, E1000_CBRMPC); + E1000_READ_REG(hw, E1000_RPTHC); + E1000_READ_REG(hw, E1000_HGPTC); + E1000_READ_REG(hw, E1000_HTCBDPC); + E1000_READ_REG(hw, E1000_HGORCL); + E1000_READ_REG(hw, E1000_HGORCH); + E1000_READ_REG(hw, E1000_HGOTCL); + E1000_READ_REG(hw, E1000_HGOTCH); + E1000_READ_REG(hw, E1000_LENERRS); + + /* This register should not be read in copper configurations */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) || + e1000_sgmii_active_82575(hw)) + E1000_READ_REG(hw, E1000_SCVPC); +} + +/** + * e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable + * @hw: pointer to the HW structure + * + * After Rx enable, if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This + * function clears the fifos and flushes any packets that came in as rx was + * being enabled. + **/ +void e1000_rx_fifo_flush_82575(struct e1000_hw *hw) +{ + u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; + int i, ms_wait; + + DEBUGFUNC("e1000_rx_fifo_flush_82575"); + + /* disable IPv6 options as per hardware errata */ + rfctl = E1000_READ_REG(hw, E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + E1000_WRITE_REG(hw, E1000_RFCTL, rfctl); + + if (hw->mac.type != e1000_82575 || + !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN)) + return; + + /* Disable all Rx queues */ + for (i = 0; i < 4; i++) { + rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i)); + E1000_WRITE_REG(hw, E1000_RXDCTL(i), + rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE); + } + /* Poll all queues to verify they have shut down */ + for (ms_wait = 0; ms_wait < 10; ms_wait++) { + msec_delay(1); + rx_enabled = 0; + for (i = 0; i < 4; i++) + rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i)); + if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE)) + break; + } + + if (ms_wait == 10) + DEBUGOUT("Queue disable timed out after 10ms\n"); + + /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all + * incoming packets are rejected. 
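+	 * (With RCTL.LPE set and RLPML cleared to zero, every frame exceeds
+	 * the long-packet limit, so nothing is accepted into the FIFO.)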
+	 * Then set RCTL.EN and wait 2 ms so that any packet that was arriving
+	 * while RCTL.EN was being set is flushed
+	 */
+	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+	rlpml = E1000_READ_REG(hw, E1000_RLPML);
+	E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+	temp_rctl |= E1000_RCTL_LPE;
+
+	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+	E1000_WRITE_FLUSH(hw);
+	msec_delay(2);
+
+	/* Enable Rx queues that were previously enabled and restore our
+	 * previous state
+	 */
+	for (i = 0; i < 4; i++)
+		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(hw);
+
+	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+	/* Flush receive errors generated by workaround */
+	E1000_READ_REG(hw, E1000_ROC);
+	E1000_READ_REG(hw, E1000_RNBC);
+	E1000_READ_REG(hw, E1000_MPC);
+}
+
+/**
+ * e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ * @hw: pointer to the HW structure
+ *
+ * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ * however the hardware default for these parts is 500us to 1ms which is less
+ * than the 10ms recommended by the pci-e spec. To address this we need to
+ * increase the value to either 10ms to 200ms for capability version 1 config,
+ * or 16ms to 55ms for version 2.
+ **/
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+	u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+	s32 ret_val = E1000_SUCCESS;
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/*
+	 * if the capabilities version is type 1 we can write the
+	 * timeout of 10ms to 200ms through the GCR register
+	 */
+	if (!(gcr & E1000_GCR_CAP_VER2)) {
+		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/*
+	 * for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					  &pcie_devctl2);
+	if (ret_val)
+		goto out;
+
+	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+	ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					   &pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+	E1000_WRITE_REG(hw, E1000_GCR, gcr);
+	return ret_val;
+}
+
+/**
+ * e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ * @hw: pointer to the hardware struct
+ * @enable: state to enter, either enabled or disabled
+ * @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ * enables/disables L2 switch anti-spoofing functionality.
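+ *
+ * Worked example (illustrative): after the masks enable spoof checking for
+ * every pool, the XOR in the body flips the PF's own bits back off; with
+ * pf == 0 it toggles bit 0 (the PF's MAC-spoof check) and bit MAX_NUM_VFS
+ * (its VLAN-spoof check), leaving checking active for the VF pools only.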
+ **/ +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf) +{ + u32 reg_val, reg_offset; + + switch (hw->mac.type) { + case e1000_82576: + reg_offset = E1000_DTXSWC; + break; + case e1000_i350: + case e1000_i354: + reg_offset = E1000_TXSWC; + break; + default: + return; + } + + reg_val = E1000_READ_REG(hw, reg_offset); + if (enable) { + reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + /* The PF can spoof - it has to in order to + * support emulation mode NICs + */ + reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS)); + } else { + reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK | + E1000_DTXSWC_VLAN_SPOOF_MASK); + } + E1000_WRITE_REG(hw, reg_offset, reg_val); +} + +/** + * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables L2 switch loopback functionality. + **/ +void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) +{ + u32 dtxswc; + + switch (hw->mac.type) { + case e1000_82576: + dtxswc = E1000_READ_REG(hw, E1000_DTXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc); + break; + case e1000_i350: + case e1000_i354: + dtxswc = E1000_READ_REG(hw, E1000_TXSWC); + if (enable) + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; + else + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; + E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc); + break; + default: + /* Currently no other hardware supports loopback */ + break; + } + + +} + +/** + * e1000_vmdq_set_replication_pf - enable or disable vmdq replication + * @hw: pointer to the hardware struct + * @enable: state to enter, either enabled or disabled + * + * enables/disables replication of packets across multiple pools. + **/ +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) +{ + u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL); + + if (enable) + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; + else + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; + + E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl); +} + +/** + * e1000_read_phy_reg_82580 - Read 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_82580"); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + ret_val = e1000_read_phy_reg_mdic(hw, offset, data); + + hw->phy.ops.release(hw); + +out: + return ret_val; +} + +/** + * e1000_write_phy_reg_82580 - Write 82580 MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
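+ *
+ * Usage sketch (illustrative, not taken from this patch): resetting the
+ * PHY through this hook could look like
+ *
+ *	ret_val = e1000_write_phy_reg_82580(hw, PHY_CONTROL, MII_CR_RESET);
+ *
+ * PHY_CONTROL and MII_CR_RESET are the generic e1000 MII definitions;
+ * callers normally reach this through hw->phy.ops.write_reg.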
+ **/
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_phy_reg_82580");
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ * @hw: pointer to the HW structure
+ *
+ * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ * the values found in the EEPROM. This addresses an issue in which these
+ * bits are not restored from EEPROM after reset.
+ **/
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 mdicnfg;
+	u16 nvm_data = 0;
+
+	DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+	if (hw->mac.type != e1000_82580)
+		goto out;
+	if (!e1000_sgmii_active_82575(hw))
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				   &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+	if (nvm_data & NVM_WORD24_EXT_MDIO)
+		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+	if (nvm_data & NVM_WORD24_COM_MDIO)
+		mdicnfg |= E1000_MDICNFG_COM_MDIO;
+	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_reset_hw_82580 - Reset hardware
+ * @hw: pointer to the HW structure
+ *
+ * This resets the function or entire device (all ports, etc.)
+ * to a known state.
+ **/
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	/* BH SW mailbox bit in SW_FW_SYNC */
+	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+	u32 ctrl;
+	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+	DEBUGFUNC("e1000_reset_hw_82580");
+
+	hw->dev_spec._82575.global_device_reset = false;
+
+	/* 82580 does not reliably do global_device_reset due to hw errata */
+	if (hw->mac.type == e1000_82580)
+		global_device_reset = false;
+
+	/* Get current control state. */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val)
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* Determine whether or not a global dev reset is requested */
+	if (global_device_reset && hw->mac.ops.acquire_swfw_sync(hw,
+	    swmbsw_mask))
+		global_device_reset = false;
+
+	if (global_device_reset && !(E1000_READ_REG(hw, E1000_STATUS) &
+	    E1000_STAT_DEV_RST_SET))
+		ctrl |= E1000_CTRL_DEV_RST;
+	else
+		ctrl |= E1000_CTRL_RST;
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+		break;
+	default:
+		E1000_WRITE_FLUSH(hw);
+		break;
+	}
+
+	/* Add delay to ensure DEV_RST or RST has time to complete */
+	msec_delay(5);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val) {
+		/*
+		 * When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+ */ + DEBUGOUT("Auto Read Done did not complete\n"); + } + + /* clear global device reset status bit */ + E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET); + + /* Clear any pending interrupt events. */ + E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff); + E1000_READ_REG(hw, E1000_ICR); + + ret_val = e1000_reset_mdicnfg_82580(hw); + if (ret_val) + DEBUGOUT("Could not reset MDICNFG based on EEPROM\n"); + + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + + /* Release semaphore */ + if (global_device_reset) + hw->mac.ops.release_swfw_sync(hw, swmbsw_mask); + + return ret_val; +} + +/** + * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size + * @data: data received by reading RXPBS register + * + * The 82580 uses a table based approach for packet buffer allocation sizes. + * This function converts the retrieved value into the correct table value + * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7 + * 0x0 36 72 144 1 2 4 8 16 + * 0x8 35 70 140 rsv rsv rsv rsv rsv + */ +u16 e1000_rxpbs_adjust_82580(u32 data) +{ + u16 ret_val = 0; + + if (data < E1000_82580_RXPBS_TABLE_SIZE) + ret_val = e1000_82580_rxpbs_table[data]; + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val = E1000_SUCCESS; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset"); + + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_update_nvm_checksum_with_offset - Update EEPROM + * checksum + * @hw: pointer to the HW structure + * @offset: offset in words of the checksum protected region + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_with_offset"); + + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1, + &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + +out: + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM section checksum by reading/adding each word of + * the EEPROM and then verifies that the sum of the EEPROM is + * equal to 0xBABA. 
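+ *
+ * Worked example (illustrative): if the words of the protected region sum
+ * to 0xAAAA, the stored checksum word must be 0xBABA - 0xAAAA = 0x1010 so
+ * that the 16-bit sum of the region plus checksum comes out to NVM_SUM.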
+ **/
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 eeprom_regions_count = 1;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+		/* if the checksum compatibility bit is set, validate
+		 * checksums for all 4 ports. */
+		eeprom_regions_count = 4;
+	}
+
+	for (j = 0; j < eeprom_regions_count; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+								  nvm_offset);
+		if (ret_val != E1000_SUCCESS)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error while updating checksum compatibility bit.\n");
+		goto out;
+	}
+
+	if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
+		/* set compatibility bit to validate checksums appropriately */
+		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+					    &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Write Error while updating checksum compatibility bit.\n");
+			goto out;
+		}
+	}
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Calculates the EEPROM section checksum by reading/adding each word of
+ * the EEPROM and then verifies that the sum of the EEPROM is
+ * equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 j;
+	u16 nvm_offset;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+								  nvm_offset);
+		if (ret_val != E1000_SUCCESS)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Updates the EEPROM section checksums for all 4 ports by reading/adding
+ * each word of the EEPROM up to the checksum. Then calculates the EEPROM
+ * checksum and writes the value to the EEPROM.
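+ *
+ * Illustrative: NVM_82580_LAN_FUNC_OFFSET(j) yields each port's section
+ * base word, so every port's checksum word ends up at its own
+ * base + NVM_CHECKSUM_REG rather than at a single shared location.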
+ **/
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 j;
+	u16 nvm_offset;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val != E1000_SUCCESS)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * __e1000_access_emi_reg - Read/write EMI register
+ * @hw: pointer to the HW structure
+ * @address: EMI address to program
+ * @data: pointer to value to read/write from/to the EMI address
+ * @read: boolean flag to indicate read or write
+ **/
+static s32 __e1000_access_emi_reg(struct e1000_hw *hw, u16 address,
+				  u16 *data, bool read)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("__e1000_access_emi_reg");
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+	if (ret_val)
+		return ret_val;
+
+	if (read)
+		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+	else
+		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+	return ret_val;
+}
+
+/**
+ * e1000_read_emi_reg - Read Extended Management Interface register
+ * @hw: pointer to the HW structure
+ * @addr: EMI address to program
+ * @data: value to be read from the EMI address
+ **/
+s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+	DEBUGFUNC("e1000_read_emi_reg");
+
+	return __e1000_access_emi_reg(hw, addr, data, true);
+}
+
+/**
+ * e1000_initialize_M88E1512_phy - Initialize M88E1512 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initialize Marvell 1512 to work correctly with Avoton.
+ **/
+s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_initialize_M88E1512_phy");
+
+	/* Check if this is the correct PHY. */
+	if (phy->id != M88E1512_E_PHY_ID)
+		goto out;
+
+	/* Switch to PHY page 0xFF. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FF);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x214B);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2144);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0x0C28);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2146);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xB233);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x214D);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_2, 0xCC0C);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_1, 0x2159);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0xFB. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x00FB);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_CFG_REG_3, 0x000D);
+	if (ret_val)
+		goto out;
+
+	/* Switch to PHY page 0x12. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0x12);
+	if (ret_val)
+		goto out;
+
+	/* Change mode to SGMII-to-Copper */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1512_MODE, 0x8001);
+	if (ret_val)
+		goto out;
+
+	/* Return the PHY to page 0. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.commit(hw);
+	if (ret_val) {
+		DEBUGOUT("Error committing the PHY changes\n");
+		return ret_val;
+	}
+
+	msec_delay(1000);
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_set_eee_i350 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	u32 ipcnfg, eeer;
+
+	DEBUGFUNC("e1000_set_eee_i350");
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		goto out;
+	ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+	eeer = E1000_READ_REG(hw, E1000_EEER);
+
+	/* enable or disable per user setting */
+	if (!(hw->dev_spec._82575.eee_disable)) {
+		u32 eee_su = E1000_READ_REG(hw, E1000_EEE_SU);
+
+		if (adv100M)
+			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+		if (adv1G)
+			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
+		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+			 E1000_EEER_LPI_FC);
+
+		/* This bit should not be set in normal operation. */
+		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+			DEBUGOUT("LPI Clock Stop Bit should not be set!\n");
+	} else {
+		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+		eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+			  E1000_EEER_LPI_FC);
+	}
+	E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+	E1000_WRITE_REG(hw, E1000_EEER, eeer);
+	E1000_READ_REG(hw, E1000_IPCNFG);
+	E1000_READ_REG(hw, E1000_EEER);
+out:
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_set_eee_i354 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @adv1G: boolean flag enabling 1G EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ * Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_set_eee_i354");
+
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
+		goto out;
+
+	if (!hw->dev_spec._82575.eee_disable) {
+		/* Switch to PHY page 18. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					     phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Return the PHY to page 0. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+		if (ret_val)
+			goto out;
+
+		/* Turn on EEE advertisement. */
+		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					       E1000_EEE_ADV_DEV_I354,
+					       &phy_data);
+		if (ret_val)
+			goto out;
+
+		if (adv100M)
+			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+		if (adv1G)
+			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
+		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+						E1000_EEE_ADV_DEV_I354,
+						phy_data);
+	} else {
+		/* Turn off EEE advertisement. */
+		ret_val = e1000_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					       E1000_EEE_ADV_DEV_I354,
+					       &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+			      E1000_EEE_ADV_1000_SUPPORTED);
+		ret_val = e1000_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+						E1000_EEE_ADV_DEV_I354,
+						phy_data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_get_eee_status_i354 - Get EEE status
+ * @hw: pointer to the HW structure
+ * @status: EEE status
+ *
+ * Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ * been received.
+ **/
+s32 e1000_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_get_eee_status_i354");
+
+	/* Check if EEE is supported on this device. */
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    ((phy->id != M88E1543_E_PHY_ID) &&
+	     (phy->id != M88E1512_E_PHY_ID)))
+		goto out;
+
+	ret_val = e1000_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+				       E1000_PCS_STATUS_DEV_I354,
+				       &phy_data);
+	if (ret_val)
+		goto out;
+
+	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+	return ret_val;
+}
+
+/* Due to a hw errata, if the host tries to configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ * e1000_clear_vfta_i350 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+void e1000_clear_vfta_i350(struct e1000_hw *hw)
+{
+	u32 offset;
+	int i;
+
+	DEBUGFUNC("e1000_clear_vfta_i350");
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		for (i = 0; i < 10; i++)
+			E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ * e1000_write_vfta_i350 - Write value to VLAN filter table
+ * @hw: pointer to the HW structure
+ * @offset: register offset in VLAN filter table
+ * @value: register value written to VLAN filter table
+ *
+ * Writes value at the given offset in the register array which stores
+ * the VLAN filter table.
+ **/
+void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	int i;
+
+	DEBUGFUNC("e1000_write_vfta_i350");
+
+	for (i = 0; i < 10; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_set_i2c_bb - Enable I2C bit-bang
+ * @hw: pointer to the HW structure
+ *
+ * Enable I2C bit-bang interface
+ *
+ **/
+s32 e1000_set_i2c_bb(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 ctrl_ext, i2cparams;
+
+	DEBUGFUNC("e1000_set_i2c_bb");
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_I2C_ENA;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	i2cparams = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	i2cparams |= E1000_I2CBB_EN;
+	i2cparams |= E1000_I2C_DATA_OE_N;
+	i2cparams |= E1000_I2C_CLK_OE_N;
+	E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cparams);
+	E1000_WRITE_FLUSH(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_read_i2c_byte_generic - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: device address
+ * @data: value read
+ *
+ * Performs byte read operation over I2C interface at
+ * a specified device address.
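+ *
+ * Illustrative transaction (a sketch; the 0xA0/offset-0 pair is just the
+ * conventional SFP identifier location, not taken from this patch):
+ *
+ *	u8 id;
+ *	ret_val = e1000_read_i2c_byte_generic(hw, 0x00, 0xA0, &id);
+ *
+ * This clocks out START, 0xA0 (write), the offset, a repeated START and
+ * 0xA1 (read), then clocks the byte in and NACKs it before STOP.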
+ **/ +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 10; + u32 retry = 1; + u16 swfw_mask = 0; + + bool nack = true; + + DEBUGFUNC("e1000_read_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + do { + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) + != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto read_byte_out; + } + + e1000_i2c_start(hw); + + /* Device Address and write indication */ + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_start(hw); + + /* Device Address and read indication */ + status = e1000_clock_out_i2c_byte(hw, (dev_addr | 0x1)); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_clock_in_i2c_byte(hw, data); + + status = e1000_clock_out_i2c_bit(hw, nack); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + msec_delay(100); + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte read error - Retrying.\n"); + else + DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +read_byte_out: + + return status; +} + +/** + * e1000_write_i2c_byte_generic - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: value to write + * + * Performs byte write operation over I2C interface at + * a specified device address. 
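+ *
+ * Illustrative: the thermal-sensor code later in this file reaches this
+ * through the ops table as
+ *
+ *	hw->phy.ops.write_i2c_byte(hw, e1000_emc_therm_limit[idx],
+ *				   E1000_I2C_THERMAL_SENSOR_ADDR, limit);
+ *
+ * i.e. byte_offset selects the EMC register and dev_addr the sensor.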
+ **/ +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data) +{ + s32 status = E1000_SUCCESS; + u32 max_retry = 1; + u32 retry = 0; + u16 swfw_mask = 0; + + DEBUGFUNC("e1000_write_i2c_byte_generic"); + + swfw_mask = E1000_SWFW_PHY0_SM; + + if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS) { + status = E1000_ERR_SWFW_SYNC; + goto write_byte_out; + } + + do { + e1000_i2c_start(hw); + + status = e1000_clock_out_i2c_byte(hw, dev_addr); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, byte_offset); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_clock_out_i2c_byte(hw, data); + if (status != E1000_SUCCESS) + goto fail; + + status = e1000_get_i2c_ack(hw); + if (status != E1000_SUCCESS) + goto fail; + + e1000_i2c_stop(hw); + break; + +fail: + e1000_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) + DEBUGOUT("I2C byte write error - Retrying.\n"); + else + DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +write_byte_out: + + return status; +} + +/** + * e1000_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) + **/ +static void e1000_i2c_start(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_start"); + + /* Start condition must begin with data and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 1); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ + usec_delay(E1000_I2C_T_SU_STA); + + e1000_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ + usec_delay(E1000_I2C_T_HD_STA); + + e1000_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ + usec_delay(E1000_I2C_T_LOW); + +} + +/** + * e1000_i2c_stop - Sets I2C stop condition + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) + **/ +static void e1000_i2c_stop(struct e1000_hw *hw) +{ + u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS); + + DEBUGFUNC("e1000_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + e1000_set_i2c_data(hw, &i2cctl, 0); + e1000_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ + usec_delay(E1000_I2C_T_SU_STO); + + e1000_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ + usec_delay(E1000_I2C_T_BUF); +} + +/** + * e1000_clock_in_i2c_byte - Clocks in one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte to clock in + * + * Clocks in one byte data via I2C data/clock + **/ +static void e1000_clock_in_i2c_byte(struct e1000_hw *hw, u8 *data) +{ + s32 i; + bool bit = 0; + + DEBUGFUNC("e1000_clock_in_i2c_byte"); + + *data = 0; + for (i = 7; i >= 0; i--) { + e1000_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + +} + +/** + * e1000_clock_out_i2c_byte - Clocks out one byte via I2C + * @hw: pointer to hardware structure + * @data: data byte clocked out + * + * Clocks out one byte data via I2C data/clock + **/ +static s32 e1000_clock_out_i2c_byte(struct e1000_hw *hw, u8 data) +{ + s32 status = E1000_SUCCESS; + s32 i; + u32 i2cctl; + bool bit = 0; + + DEBUGFUNC("e1000_clock_out_i2c_byte"); + + for (i = 7; i 
>= 0; i--) {
+		bit = (data >> i) & 0x1;
+		status = e1000_clock_out_i2c_bit(hw, bit);
+
+		if (status != E1000_SUCCESS)
+			break;
+	}
+
+	/* Release SDA line (set high) */
+	i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+	i2cctl |= E1000_I2C_DATA_OE_N;
+	E1000_WRITE_REG(hw, E1000_I2CPARAMS, i2cctl);
+	E1000_WRITE_FLUSH(hw);
+
+	return status;
+}
+
+/**
+ * e1000_get_i2c_ack - Polls for I2C ACK
+ * @hw: pointer to hardware structure
+ *
+ * Clocks the ACK slot and checks that the slave pulled SDA low
+ **/
+static s32 e1000_get_i2c_ack(struct e1000_hw *hw)
+{
+	s32 status = E1000_SUCCESS;
+	u32 i = 0;
+	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	u32 timeout = 10;
+	bool ack = true;
+
+	DEBUGFUNC("e1000_get_i2c_ack");
+
+	e1000_raise_i2c_clk(hw, &i2cctl);
+
+	/* Minimum high period of clock is 4us */
+	usec_delay(E1000_I2C_T_HIGH);
+
+	/* Wait until SCL returns high */
+	for (i = 0; i < timeout; i++) {
+		usec_delay(1);
+		i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+		if (i2cctl & E1000_I2C_CLK_IN)
+			break;
+	}
+	if (!(i2cctl & E1000_I2C_CLK_IN))
+		return E1000_ERR_I2C;
+
+	ack = e1000_get_i2c_data(&i2cctl);
+	if (ack) {
+		DEBUGOUT("I2C ack was not received.\n");
+		status = E1000_ERR_I2C;
+	}
+
+	e1000_lower_i2c_clk(hw, &i2cctl);
+
+	/* Minimum low period of clock is 4.7 us */
+	usec_delay(E1000_I2C_T_LOW);
+
+	return status;
+}
+
+/**
+ * e1000_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: read data value
+ *
+ * Clocks in one bit via I2C data/clock
+ **/
+static void e1000_clock_in_i2c_bit(struct e1000_hw *hw, bool *data)
+{
+	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+	DEBUGFUNC("e1000_clock_in_i2c_bit");
+
+	e1000_raise_i2c_clk(hw, &i2cctl);
+
+	/* Minimum high period of clock is 4us */
+	usec_delay(E1000_I2C_T_HIGH);
+
+	i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	*data = e1000_get_i2c_data(&i2cctl);
+
+	e1000_lower_i2c_clk(hw, &i2cctl);
+
+	/* Minimum low period of clock is 4.7 us */
+	usec_delay(E1000_I2C_T_LOW);
+
+}
+
+/**
+ * e1000_clock_out_i2c_bit - Clocks out one bit via I2C data/clock
+ * @hw: pointer to hardware structure
+ * @data: data value to write
+ *
+ * Clocks out one bit via I2C data/clock
+ **/
+static s32 e1000_clock_out_i2c_bit(struct e1000_hw *hw, bool data)
+{
+	s32 status;
+	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+
+	DEBUGFUNC("e1000_clock_out_i2c_bit");
+
+	status = e1000_set_i2c_data(hw, &i2cctl, data);
+	if (status == E1000_SUCCESS) {
+		e1000_raise_i2c_clk(hw, &i2cctl);
+
+		/* Minimum high period of clock is 4us */
+		usec_delay(E1000_I2C_T_HIGH);
+
+		e1000_lower_i2c_clk(hw, &i2cctl);
+
+		/* Minimum low period of clock is 4.7 us.
+		 * This also takes care of the data hold time.
+		 */
+		usec_delay(E1000_I2C_T_LOW);
+	} else {
+		status = E1000_ERR_I2C;
+		DEBUGOUT1("I2C data was not set to %X\n", data);
+	}
+
+	return status;
+}
+/**
+ * e1000_raise_i2c_clk - Raises the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Raises the I2C clock line '0'->'1'
+ **/
+static void e1000_raise_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+	DEBUGFUNC("e1000_raise_i2c_clk");
+
+	*i2cctl |= E1000_I2C_CLK_OUT;
+	*i2cctl &= ~E1000_I2C_CLK_OE_N;
+	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+	E1000_WRITE_FLUSH(hw);
+
+	/* SCL rise time (1000ns) */
+	usec_delay(E1000_I2C_T_RISE);
+}
+
+/**
+ * e1000_lower_i2c_clk - Lowers the I2C SCL clock
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Lowers the I2C clock line '1'->'0'
+ **/
+static void e1000_lower_i2c_clk(struct e1000_hw *hw, u32 *i2cctl)
+{
+	DEBUGFUNC("e1000_lower_i2c_clk");
+
+	*i2cctl &= ~E1000_I2C_CLK_OUT;
+	*i2cctl &= ~E1000_I2C_CLK_OE_N;
+	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+	E1000_WRITE_FLUSH(hw);
+
+	/* SCL fall time (300ns) */
+	usec_delay(E1000_I2C_T_FALL);
+}
+
+/**
+ * e1000_set_i2c_data - Sets the I2C data bit
+ * @hw: pointer to hardware structure
+ * @i2cctl: Current value of I2CCTL register
+ * @data: I2C data value (0 or 1) to set
+ *
+ * Sets the I2C data bit
+ **/
+static s32 e1000_set_i2c_data(struct e1000_hw *hw, u32 *i2cctl, bool data)
+{
+	s32 status = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_set_i2c_data");
+
+	if (data)
+		*i2cctl |= E1000_I2C_DATA_OUT;
+	else
+		*i2cctl &= ~E1000_I2C_DATA_OUT;
+
+	*i2cctl &= ~E1000_I2C_DATA_OE_N;
+	*i2cctl |= E1000_I2C_CLK_OE_N;
+	E1000_WRITE_REG(hw, E1000_I2CPARAMS, *i2cctl);
+	E1000_WRITE_FLUSH(hw);
+
+	/* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+	usec_delay(E1000_I2C_T_RISE + E1000_I2C_T_FALL + E1000_I2C_T_SU_DATA);
+
+	*i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	if (data != e1000_get_i2c_data(i2cctl)) {
+		status = E1000_ERR_I2C;
+		DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+	}
+
+	return status;
+}
+
+/**
+ * e1000_get_i2c_data - Reads the I2C SDA data bit
+ * @i2cctl: Current value of I2CCTL register
+ *
+ * Returns the I2C data bit value
+ **/
+static bool e1000_get_i2c_data(u32 *i2cctl)
+{
+	bool data;
+
+	DEBUGFUNC("e1000_get_i2c_data");
+
+	if (*i2cctl & E1000_I2C_DATA_IN)
+		data = 1;
+	else
+		data = 0;
+
+	return data;
+}
+
+/**
+ * e1000_i2c_bus_clear - Clears the I2C bus
+ * @hw: pointer to hardware structure
+ *
+ * Clears the I2C bus by sending nine clock pulses.
+ * Used when data line is stuck low.
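+ *
+ * Nine pulses is the worst case: a slave stuck mid-byte needs at most
+ * eight clocks to finish shifting out its current byte plus one for the
+ * ACK slot, after which it releases SDA and a clean STOP can follow.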
+ **/
+void e1000_i2c_bus_clear(struct e1000_hw *hw)
+{
+	u32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
+	u32 i;
+
+	DEBUGFUNC("e1000_i2c_bus_clear");
+
+	e1000_i2c_start(hw);
+
+	e1000_set_i2c_data(hw, &i2cctl, 1);
+
+	for (i = 0; i < 9; i++) {
+		e1000_raise_i2c_clk(hw, &i2cctl);
+
+		/* Min high period of clock is 4us */
+		usec_delay(E1000_I2C_T_HIGH);
+
+		e1000_lower_i2c_clk(hw, &i2cctl);
+
+		/* Min low period of clock is 4.7us */
+		usec_delay(E1000_I2C_T_LOW);
+	}
+
+	e1000_i2c_start(hw);
+
+	/* Put the i2c bus back to default state */
+	e1000_i2c_stop(hw);
+}
+
+static const u8 e1000_emc_temp_data[4] = {
+	E1000_EMC_INTERNAL_DATA,
+	E1000_EMC_DIODE1_DATA,
+	E1000_EMC_DIODE2_DATA,
+	E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+	E1000_EMC_INTERNAL_THERM_LIMIT,
+	E1000_EMC_DIODE1_THERM_LIMIT,
+	E1000_EMC_DIODE2_THERM_LIMIT,
+	E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+/**
+ * e1000_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ * @hw: pointer to hardware structure
+ *
+ * Updates the temperatures in mac.thermal_sensor_data
+ **/
+s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8 num_sensors;
+	u8 sensor_index;
+	u8 sensor_location;
+	u8 i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("e1000_get_thermal_sensor_data_generic");
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	data->sensor[0].temp = (E1000_READ_REG(hw, E1000_THMJT) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return E1000_SUCCESS;
+
+	e1000_read_nvm(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > E1000_MAX_SENSORS)
+		num_sensors = E1000_MAX_SENSORS;
+
+	for (i = 1; i < num_sensors; i++) {
+		e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0)
+			hw->phy.ops.read_i2c_byte(hw,
+					e1000_emc_temp_data[sensor_index],
+					E1000_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+	}
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ * @hw: pointer to hardware structure
+ *
+ * Sets the thermal sensor thresholds according to the NVM map and saves
+ * off the threshold and location values into mac.thermal_sensor_data
+ **/
+s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8 low_thresh_delta;
+	u8 num_sensors;
+	u8 sensor_index;
+	u8 sensor_location;
+	u8 therm_limit;
+	u8 i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	DEBUGFUNC("e1000_init_thermal_sensor_thresh_generic");
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+	data->sensor[0].location = 0x1;
+	data->sensor[0].caution_thresh =
+		(E1000_READ_REG(hw, E1000_THHIGHTC) & 0xFF);
+	data->sensor[0].max_op_thresh =
+		(E1000_READ_REG(hw, E1000_THLOWTC) & 0xFF);
+
+	/* Return the internal sensor only if ETS
is unsupported */ + e1000_read_nvm(hw, NVM_ETS_CFG, 1, &ets_offset); + if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) + return E1000_SUCCESS; + + e1000_read_nvm(hw, ets_offset, 1, &ets_cfg); + if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT) + != NVM_ETS_TYPE_EMC) + return E1000_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >> + NVM_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK); + + for (i = 1; i <= num_sensors; i++) { + e1000_read_nvm(hw, (ets_offset + i), 1, &ets_sensor); + sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >> + NVM_ETS_DATA_INDEX_SHIFT); + sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >> + NVM_ETS_DATA_LOC_SHIFT); + therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK; + + hw->phy.ops.write_i2c_byte(hw, + e1000_emc_therm_limit[sensor_index], + E1000_I2C_THERMAL_SENSOR_ADDR, + therm_limit); + + if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) { + data->sensor[i].location = sensor_location; + data->sensor[i].caution_thresh = therm_limit; + data->sensor[i].max_op_thresh = therm_limit - + low_thresh_delta; + } + } + return E1000_SUCCESS; +} diff --git a/drivers/staging/igb_avb/e1000_82575.h b/drivers/staging/igb_avb/e1000_82575.h new file mode 100644 index 000000000000..c6b61f71e353 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_82575.h @@ -0,0 +1,510 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_82575_H_ +#define _E1000_82575_H_ + +#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) +/* + * Receive Address Register Count + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * These entries are also used for MAC-based filtering. + */ +/* + * For 82576, there are an additional set of RARs that begin at an offset + * separate from the first set of RARs. 
+ */ +#define E1000_RAR_ENTRIES_82575 16 +#define E1000_RAR_ENTRIES_82576 24 +#define E1000_RAR_ENTRIES_82580 24 +#define E1000_RAR_ENTRIES_I350 32 +#define E1000_SW_SYNCH_MB 0x00000100 +#define E1000_STAT_DEV_RST_SET 0x00100000 +#define E1000_CTRL_DEV_RST 0x20000000 + +struct e1000_adv_data_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + u32 data; + struct { + u32 datalen:16; /* Data buffer length */ + u32 rsvd:4; + u32 dtyp:4; /* Descriptor type */ + u32 dcmd:8; /* Descriptor command */ + } config; + } lower; + union { + u32 data; + struct { + u32 status:4; /* Descriptor status */ + u32 idx:4; + u32 popts:6; /* Packet Options */ + u32 paylen:18; /* Payload length */ + } options; + } upper; +}; + +#define E1000_TXD_DTYP_ADV_C 0x2 /* Advanced Context Descriptor */ +#define E1000_TXD_DTYP_ADV_D 0x3 /* Advanced Data Descriptor */ +#define E1000_ADV_TXD_CMD_DEXT 0x20 /* Descriptor extension (0 = legacy) */ +#define E1000_ADV_TUCMD_IPV4 0x2 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADV_TUCMD_IPV6 0x0 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADV_TUCMD_L4T_UDP 0x0 /* L4 Packet TYPE of UDP */ +#define E1000_ADV_TUCMD_L4T_TCP 0x4 /* L4 Packet TYPE of TCP */ +#define E1000_ADV_TUCMD_MKRREQ 0x10 /* Indicates markers are required */ +#define E1000_ADV_DCMD_EOP 0x1 /* End of Packet */ +#define E1000_ADV_DCMD_IFCS 0x2 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADV_DCMD_RS 0x8 /* Report Status */ +#define E1000_ADV_DCMD_VLE 0x40 /* Add VLAN tag */ +#define E1000_ADV_DCMD_TSE 0x80 /* TCP Seg enable */ +/* Extended Device Control */ +#define E1000_CTRL_EXT_NSICR 0x00000001 /* Disable Intr Clear all on read */ + +struct e1000_adv_context_desc { + union { + u32 ip_config; + struct { + u32 iplen:9; + u32 maclen:7; + u32 vlan_tag:16; + } fields; + } ip_setup; + u32 seq_num; + union { + u64 l4_config; + struct { + u32 mkrloc:9; + u32 tucmd:11; + u32 dtyp:4; + u32 adv:8; + u32 rsvd:4; + u32 idx:4; + u32 l4len:8; + u32 mss:16; + } fields; + } l4_setup; +}; + +/* SRRCTL bit definitions */ +#define E1000_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ +#define E1000_SRRCTL_BSIZEHDRSIZE_MASK 0x00000F00 +#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ +#define E1000_SRRCTL_DESCTYPE_LEGACY 0x00000000 +#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 +#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION 0x06000000 +#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 +#define E1000_SRRCTL_DESCTYPE_MASK 0x0E000000 +#define E1000_SRRCTL_TIMESTAMP 0x40000000 +#define E1000_SRRCTL_DROP_EN 0x80000000 + +#define E1000_SRRCTL_BSIZEPKT_MASK 0x0000007F +#define E1000_SRRCTL_BSIZEHDR_MASK 0x00003F00 + +#define E1000_TX_HEAD_WB_ENABLE 0x1 +#define E1000_TX_SEQNUM_WB_ENABLE 0x2 + +#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_VMDQ 0x00000003 +#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 +#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 +#define E1000_MRQC_ENABLE_RSS_8Q 0x00000002 + +#define E1000_VMRCTL_MIRROR_PORT_SHIFT 8 +#define E1000_VMRCTL_MIRROR_DSTPORT_MASK (7 << \ + E1000_VMRCTL_MIRROR_PORT_SHIFT) +#define E1000_VMRCTL_POOL_MIRROR_ENABLE (1 << 0) +#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE (1 << 1) +#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2) + +#define E1000_EICR_TX_QUEUE ( \ + E1000_EICR_TX_QUEUE0 | \ + 
E1000_EICR_TX_QUEUE1 | \ + E1000_EICR_TX_QUEUE2 | \ + E1000_EICR_TX_QUEUE3) + +#define E1000_EICR_RX_QUEUE ( \ + E1000_EICR_RX_QUEUE0 | \ + E1000_EICR_RX_QUEUE1 | \ + E1000_EICR_RX_QUEUE2 | \ + E1000_EICR_RX_QUEUE3) + +#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE +#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE + +#define EIMS_ENABLE_MASK ( \ + E1000_EIMS_RX_QUEUE | \ + E1000_EIMS_TX_QUEUE | \ + E1000_EIMS_TCP_TIMER | \ + E1000_EIMS_OTHER) + +/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ +#define E1000_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +#define E1000_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +#define E1000_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +#define E1000_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +#define E1000_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +#define E1000_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +#define E1000_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +#define E1000_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */ + +/* Receive Descriptor - Advanced */ +union e1000_adv_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + union { + __le32 data; + struct { + __le16 pkt_info; /*RSS type, Pkt type*/ + /* Split Header, header buffer len */ + __le16 hdr_info; + } hs_rss; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; /* Packet length */ + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define E1000_RXDADV_RSSTYPE_MASK 0x0000000F +#define E1000_RXDADV_RSSTYPE_SHIFT 12 +#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 +#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 +#define E1000_RXDADV_SPLITHEADER_EN 0x00001000 +#define E1000_RXDADV_SPH 0x8000 +#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ +#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */ +#define E1000_RXDADV_ERR_HBO 0x00800000 + +/* RSS Hash results */ +#define E1000_RXDADV_RSSTYPE_NONE 0x00000000 +#define E1000_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +#define E1000_RXDADV_RSSTYPE_IPV4 0x00000002 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +#define E1000_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +#define E1000_RXDADV_RSSTYPE_IPV6 0x00000005 +#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +#define E1000_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 +#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + +/* RSS Packet Types as indicated in the receive descriptor */ +#define E1000_RXDADV_PKTTYPE_ILMASK 0x000000F0 +#define E1000_RXDADV_PKTTYPE_TLMASK 0x00000F00 +#define E1000_RXDADV_PKTTYPE_NONE 0x00000000 +#define E1000_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPV4 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPV4 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPV6 hdr present */ +#define E1000_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPV6 hdr + extensions */ +#define E1000_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +#define E1000_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +#define E1000_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 
+#define E1000_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ + +#define E1000_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +#define E1000_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +#define E1000_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +#define E1000_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +#define E1000_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + +/* LinkSec results */ +/* Security Processing bit Indication */ +#define E1000_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + +#define E1000_RXDADV_IPSEC_STATUS_SECP 0x00020000 +#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED 0x18000000 + +/* Transmit Descriptor - Advanced */ +union e1000_adv_tx_desc { + struct { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { + __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; +}; + +/* Adv Transmit Descriptor Config Masks */ +#define E1000_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Descriptor */ +#define E1000_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +#define E1000_ADVTXD_DCMD_EOP 0x01000000 /* End of Packet */ +#define E1000_ADVTXD_DCMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_ADVTXD_DCMD_RS 0x08000000 /* Report Status */ +#define E1000_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +#define E1000_ADVTXD_DCMD_DEXT 0x20000000 /* Descriptor extension (1=Adv) */ +#define E1000_ADVTXD_DCMD_VLE 0x40000000 /* VLAN pkt enable */ +#define E1000_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +#define E1000_ADVTXD_MAC_LINKSEC 0x00040000 /* Apply LinkSec on pkt */ +#define E1000_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 Timestamp pkt */ +#define E1000_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED prsnt in WB */ +#define E1000_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +#define E1000_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +/* 1st & Last TSO-full iSCSI PDU*/ +#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 +#define E1000_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ +#define E1000_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ + +/* Context descriptors */ +struct e1000_adv_tx_context_desc { + __le32 vlan_macip_lens; + __le32 seqnum_seed; + __le32 type_tucmd_mlhl; + __le32 mss_l4len_idx; +}; + +#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +#define E1000_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +#define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +#define E1000_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP 
0x00002000 /* IPSec Type ESP */ +/* IPSec Encrypt Enable for ESP */ +#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000 +/* Req requires Markers and CRC */ +#define E1000_ADVTXD_TUCMD_MKRREQ 0x00002000 +#define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +#define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +/* Adv ctxt IPSec SA IDX mask */ +#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK 0x000000FF +/* Adv ctxt IPSec ESP len mask */ +#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK 0x000000FF + +/* Additional Transmit Descriptor Control definitions */ +#define E1000_TXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Tx Queue */ +#define E1000_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wbk flushing */ +/* Tx Queue Arbitration Priority 0=low, 1=high */ +#define E1000_TXDCTL_PRIORITY 0x08000000 + +/* Additional Receive Descriptor Control definitions */ +#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Ena specific Rx Queue */ +#define E1000_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. wbk flushing */ + +/* Direct Cache Access (DCA) definitions */ +#define E1000_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ + +#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ + +#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header ena */ +#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload ena */ +#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx Desc Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ +#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ + +#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */ +#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */ +#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */ +#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */ + +/* Additional interrupt register bit definitions */ +#define E1000_ICR_LSECPNS 0x00000020 /* PN threshold - server */ +#define E1000_IMS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ +#define E1000_ICS_LSECPNS E1000_ICR_LSECPNS /* PN threshold - server */ + +/* ETQF register bit definitions */ +#define E1000_ETQF_FILTER_ENABLE (1 << 26) +#define E1000_ETQF_IMM_INT (1 << 29) +#define E1000_ETQF_1588 (1 << 30) +#define E1000_ETQF_QUEUE_ENABLE (1 << 31) +/* + * ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters + * here!! 
+ * + * Current filters: + * EAPOL 802.1x (0x888e): Filter 0 + */ +#define E1000_ETQF_FILTER_EAPOL 0 + +#define E1000_FTQF_VF_BP 0x00008000 +#define E1000_FTQF_1588_TIME_STAMP 0x08000000 +#define E1000_FTQF_MASK 0xF0000000 +#define E1000_FTQF_MASK_PROTO_BP 0x10000000 +#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000 +#define E1000_FTQF_MASK_DEST_ADDR_BP 0x40000000 +#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000 + +#define E1000_NVM_APME_82575 0x0400 +#define MAX_NUM_VFS 7 + +#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof cntrl */ +#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof cntrl */ +#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */ +#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8 +#define E1000_DTXSWC_LLE_SHIFT 16 +#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ + +/* Easy defines for setting default pool, would normally be left a zero */ +#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 +#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) + +/* Other useful VMD_CTL register defines */ +#define E1000_VT_CTL_IGNORE_MAC (1 << 28) +#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29) +#define E1000_VT_CTL_VM_REPL_EN (1 << 30) + +/* Per VM Offload register setup */ +#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */ +#define E1000_VMOLR_LPE 0x00010000 /* Accept Long packet */ +#define E1000_VMOLR_RSSE 0x00020000 /* Enable RSS */ +#define E1000_VMOLR_AUPE 0x01000000 /* Accept untagged packets */ +#define E1000_VMOLR_ROMPE 0x02000000 /* Accept overflow multicast */ +#define E1000_VMOLR_ROPE 0x04000000 /* Accept overflow unicast */ +#define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ +#define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ +#define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_VMOLR_VPE 0x00800000 /* VLAN promiscuous enable */ +#define E1000_VMOLR_UPE 0x20000000 /* Unicast promiscuous enable */ +#define E1000_DVMOLR_HIDVLAN 0x20000000 /* Vlan hiding enable */ +#define E1000_DVMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ +#define E1000_DVMOLR_STRCRC 0x80000000 /* CRC stripping enable */ + +#define E1000_PBRWAC_WALPB 0x00000007 /* Wrap around event on LAN Rx PB */ +#define E1000_PBRWAC_PBE 0x00000008 /* Rx packet buffer empty */ + +#define E1000_VLVF_ARRAY_SIZE 32 +#define E1000_VLVF_VLANID_MASK 0x00000FFF +#define E1000_VLVF_POOLSEL_SHIFT 12 +#define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) +#define E1000_VLVF_LVLAN 0x00100000 +#define E1000_VLVF_VLANID_ENABLE 0x80000000 + +#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + +#define E1000_IOVCTL 0x05BBC +#define E1000_IOVCTL_REUSE_VFQ 0x00000001 + +#define E1000_RPLOLR_STRVLAN 0x40000000 +#define E1000_RPLOLR_STRCRC 0x80000000 + +#define E1000_TCTL_EXT_COLD 0x000FFC00 +#define E1000_TCTL_EXT_COLD_SHIFT 10 + +#define E1000_DTXCTL_8023LL 0x0004 +#define E1000_DTXCTL_VLAN_ADDED 0x0008 +#define E1000_DTXCTL_OOS_ENABLE 0x0010 +#define E1000_DTXCTL_MDP_EN 0x0020 +#define E1000_DTXCTL_SPOOF_INT 0x0040 + +#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14) + +#define ALL_QUEUES 0xFFFF + +/* Rx packet buffer size defines */ +#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F +void
e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable); +void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf); +void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable); +s32 e1000_init_nvm_params_82575(struct e1000_hw *hw); +s32 e1000_init_hw_82575(struct e1000_hw *hw); + +u16 e1000_rxpbs_adjust_82580(u32 data); +s32 e1000_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M); +s32 e1000_get_eee_status_i354(struct e1000_hw *, bool *); +s32 e1000_initialize_M88E1512_phy(struct e1000_hw *hw); +#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8 +#define E1000_EMC_INTERNAL_DATA 0x00 +#define E1000_EMC_INTERNAL_THERM_LIMIT 0x20 +#define E1000_EMC_DIODE1_DATA 0x01 +#define E1000_EMC_DIODE1_THERM_LIMIT 0x19 +#define E1000_EMC_DIODE2_DATA 0x23 +#define E1000_EMC_DIODE2_THERM_LIMIT 0x1A +#define E1000_EMC_DIODE3_DATA 0x2A +#define E1000_EMC_DIODE3_THERM_LIMIT 0x30 + +s32 e1000_get_thermal_sensor_data_generic(struct e1000_hw *hw); +s32 e1000_init_thermal_sensor_thresh_generic(struct e1000_hw *hw); + +/* I2C SDA and SCL timing parameters for standard mode */ +#define E1000_I2C_T_HD_STA 4 +#define E1000_I2C_T_LOW 5 +#define E1000_I2C_T_HIGH 4 +#define E1000_I2C_T_SU_STA 5 +#define E1000_I2C_T_HD_DATA 5 +#define E1000_I2C_T_SU_DATA 1 +#define E1000_I2C_T_RISE 1 +#define E1000_I2C_T_FALL 1 +#define E1000_I2C_T_SU_STO 4 +#define E1000_I2C_T_BUF 5 + +s32 e1000_set_i2c_bb(struct e1000_hw *hw); +s32 e1000_read_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_generic(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +void e1000_i2c_bus_clear(struct e1000_hw *hw); +#endif /* _E1000_82575_H_ */ diff --git a/drivers/staging/igb_avb/e1000_api.c b/drivers/staging/igb_avb/e1000_api.c new file mode 100644 index 000000000000..87bccbd19fc6 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_api.c @@ -0,0 +1,1160 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_init_mac_params - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the MAC + * set of functions. Called by drivers or by e1000_setup_init_funcs. 
+ **/ +s32 e1000_init_mac_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mac.ops.init_params) { + ret_val = hw->mac.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("MAC Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mac.init_mac_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_nvm_params - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the NVM + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_nvm_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->nvm.ops.init_params) { + ret_val = hw->nvm.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("NVM Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("nvm.init_nvm_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_phy_params - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the PHY + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_phy_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->phy.ops.init_params) { + ret_val = hw->phy.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("PHY Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("phy.init_phy_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_init_mbx_params - Initialize mailbox function pointers + * @hw: pointer to the HW structure + * + * This function initializes the function pointers for the mailbox + * set of functions. Called by drivers or by e1000_setup_init_funcs. + **/ +s32 e1000_init_mbx_params(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + if (hw->mbx.ops.init_params) { + ret_val = hw->mbx.ops.init_params(hw); + if (ret_val) { + DEBUGOUT("Mailbox Initialization Error\n"); + goto out; + } + } else { + DEBUGOUT("mbx.init_mbx_params was NULL\n"); + ret_val = -E1000_ERR_CONFIG; + } + +out: + return ret_val; +} + +/** + * e1000_set_mac_type - Sets MAC type + * @hw: pointer to the HW structure + * + * This function sets the mac type of the adapter based on the + * device ID stored in the hw structure. + * MUST BE FIRST FUNCTION CALLED (explicitly or through + * e1000_setup_init_funcs()).
+ **/ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82575EB_COPPER: + case E1000_DEV_ID_82575EB_FIBER_SERDES: + case E1000_DEV_ID_82575GB_QUAD_COPPER: + mac->type = e1000_82575; + break; + case E1000_DEV_ID_82576: + case E1000_DEV_ID_82576_FIBER: + case E1000_DEV_ID_82576_SERDES: + case E1000_DEV_ID_82576_QUAD_COPPER: + case E1000_DEV_ID_82576_QUAD_COPPER_ET2: + case E1000_DEV_ID_82576_NS: + case E1000_DEV_ID_82576_NS_SERDES: + case E1000_DEV_ID_82576_SERDES_QUAD: + mac->type = e1000_82576; + break; + case E1000_DEV_ID_82580_COPPER: + case E1000_DEV_ID_82580_FIBER: + case E1000_DEV_ID_82580_SERDES: + case E1000_DEV_ID_82580_SGMII: + case E1000_DEV_ID_82580_COPPER_DUAL: + case E1000_DEV_ID_82580_QUAD_FIBER: + case E1000_DEV_ID_DH89XXCC_SGMII: + case E1000_DEV_ID_DH89XXCC_SERDES: + case E1000_DEV_ID_DH89XXCC_BACKPLANE: + case E1000_DEV_ID_DH89XXCC_SFP: + mac->type = e1000_82580; + break; + case E1000_DEV_ID_I350_COPPER: + case E1000_DEV_ID_I350_FIBER: + case E1000_DEV_ID_I350_SERDES: + case E1000_DEV_ID_I350_SGMII: + case E1000_DEV_ID_I350_DA4: + mac->type = e1000_i350; + break; + case E1000_DEV_ID_I210_COPPER_FLASHLESS: + case E1000_DEV_ID_I210_SERDES_FLASHLESS: + case E1000_DEV_ID_I210_COPPER: + case E1000_DEV_ID_I210_COPPER_OEM1: + case E1000_DEV_ID_I210_COPPER_IT: + case E1000_DEV_ID_I210_FIBER: + case E1000_DEV_ID_I210_SERDES: + case E1000_DEV_ID_I210_SGMII: + case E1000_DEV_ID_I210_AUTOMOTIVE: + mac->type = e1000_i210; + break; + case E1000_DEV_ID_I211_COPPER: + mac->type = e1000_i211; + break; + + case E1000_DEV_ID_I354_BACKPLANE_1GBPS: + case E1000_DEV_ID_I354_SGMII: + case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS: + mac->type = e1000_i354; + break; + default: + /* Should never have loaded on this device */ + ret_val = -E1000_ERR_MAC_INIT; + break; + } + + return ret_val; +} + +/** + * e1000_setup_init_funcs - Initializes function pointers + * @hw: pointer to the HW structure + * @init_device: true will initialize the rest of the function pointers + * getting the device ready for use. false will only set + * MAC type and the function pointers for the other init + * functions. Passing false will not generate any hardware + * reads or writes. + * + * This function must be called by a driver in order to use the rest + * of the 'shared' code files. Called by drivers only. + **/ +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device) +{ + s32 ret_val; + + /* Can't do much good without knowing the MAC type. */ + ret_val = e1000_set_mac_type(hw); + if (ret_val) { + DEBUGOUT("ERROR: MAC type could not be set properly.\n"); + goto out; + } + + if (!hw->hw_addr) { + DEBUGOUT("ERROR: Registers not mapped\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + /* + * Init function pointers to generic implementations. We do this first + * allowing a driver module to override it afterward. + */ + e1000_init_mac_ops_generic(hw); + e1000_init_phy_ops_generic(hw); + e1000_init_nvm_ops_generic(hw); + e1000_init_mbx_ops_generic(hw); + + /* + * Set up the init function pointers. These are functions within the + * adapter family file that sets up function pointers for the rest of + * the functions in that family. 
+ */ + switch (hw->mac.type) { + case e1000_82575: + case e1000_82576: + case e1000_82580: + case e1000_i350: + case e1000_i354: + e1000_init_function_pointers_82575(hw); + break; + case e1000_i210: + case e1000_i211: + e1000_init_function_pointers_i210(hw); + break; + default: + DEBUGOUT("Hardware not supported\n"); + ret_val = -E1000_ERR_CONFIG; + break; + } + + /* + * Initialize the rest of the function pointers. These require some + * register reads/writes in some cases. + */ + if (!(ret_val) && init_device) { + ret_val = e1000_init_mac_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_nvm_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_phy_params(hw); + if (ret_val) + goto out; + + ret_val = e1000_init_mbx_params(hw); + if (ret_val) + goto out; + } + +out: + return ret_val; +} + +/** + * e1000_get_bus_info - Obtain bus information for adapter + * @hw: pointer to the HW structure + * + * This will obtain information about the HW bus for which the + * adapter is attached and stores it in the hw structure. This is a + * function pointer entry point called by drivers. + **/ +s32 e1000_get_bus_info(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_bus_info) + return hw->mac.ops.get_bus_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_vfta - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * This clears the VLAN filter table on the adapter. This is a function + * pointer entry point called by drivers. + **/ +void e1000_clear_vfta(struct e1000_hw *hw) +{ + if (hw->mac.ops.clear_vfta) + hw->mac.ops.clear_vfta(hw); +} + +/** + * e1000_write_vfta - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: the 32-bit offset in which to write the value to. + * @value: the 32-bit value to write at location offset. + * + * This writes a 32-bit value to a 32-bit offset in the VLAN filter + * table. This is a function pointer entry point called by drivers. + **/ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + if (hw->mac.ops.write_vfta) + hw->mac.ops.write_vfta(hw, offset, value); +} + +/** + * e1000_update_mc_addr_list - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count) +{ + if (hw->mac.ops.update_mc_addr_list) + hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, + mc_addr_count); +} + +/** + * e1000_force_mac_fc - Force MAC flow control + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Currently no func pointer exists + * and all implementations are handled in the generic version of this + * function. + **/ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + return e1000_force_mac_fc_generic(hw); +} + +/** + * e1000_check_for_link - Check/Store link connection + * @hw: pointer to the HW structure + * + * This checks the link condition of the adapter and stores the + * results in the hw->mac structure. This is a function pointer entry + * point called by drivers. 
+ **/ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_for_link) + return hw->mac.ops.check_for_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_mng_mode - Check management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has manageability enabled. + * This is a function pointer entry point called by drivers. + **/ +bool e1000_check_mng_mode(struct e1000_hw *hw) +{ + if (hw->mac.ops.check_mng_mode) + return hw->mac.ops.check_mng_mode(hw); + + return false; +} + +/** + * e1000_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + return e1000_mng_write_dhcp_info_generic(hw, buffer, length); +} + +/** + * e1000_reset_hw - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.reset_hw) + return hw->mac.ops.reset_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_init_hw - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_hw) + return hw->mac.ops.init_hw(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_link - Configures link and flow control + * @hw: pointer to the HW structure + * + * This configures link and flow control settings for the adapter. This + * is a function pointer entry point called by drivers. While modules can + * also call this, they probably call their own version of this function. + **/ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_link) + return hw->mac.ops.setup_link(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_get_speed_and_duplex - Returns current speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to a 16-bit value to store the speed + * @duplex: pointer to a 16-bit value to store the duplex. + * + * This returns the speed and duplex of the adapter in the two 'out' + * variables passed in. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + if (hw->mac.ops.get_link_up_info) + return hw->mac.ops.get_link_up_info(hw, speed, duplex); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_setup_led - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.setup_led) + return hw->mac.ops.setup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores SW controllable LED + * @hw: pointer to the HW structure + * + * This restores the SW controllable LED to the value saved off by + * e1000_setup_led. This is a function pointer entry point called by drivers. 
+ **/ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.cleanup_led) + return hw->mac.ops.cleanup_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_blink_led - Blink SW controllable LED + * @hw: pointer to the HW structure + * + * This starts the adapter LED blinking. Request the LED to be setup first + * and cleaned up after. This is a function pointer entry point called by + * drivers. + **/ +s32 e1000_blink_led(struct e1000_hw *hw) +{ + if (hw->mac.ops.blink_led) + return hw->mac.ops.blink_led(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init - store LED configurations in SW + * @hw: pointer to the HW structure + * + * Initializes the LED config in SW. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_id_led_init(struct e1000_hw *hw) +{ + if (hw->mac.ops.id_led_init) + return hw->mac.ops.id_led_init(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turn on SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED on. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_on(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_on) + return hw->mac.ops.led_on(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turn off SW controllable LED + * @hw: pointer to the HW structure + * + * Turns the SW defined LED off. This is a function pointer entry point + * called by drivers. + **/ +s32 e1000_led_off(struct e1000_hw *hw) +{ + if (hw->mac.ops.led_off) + return hw->mac.ops.led_off(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive - Reset adaptive IFS + * @hw: pointer to the HW structure + * + * Resets the adaptive IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e1000_reset_adaptive_generic(hw); +} + +/** + * e1000_update_adaptive - Update adaptive IFS + * @hw: pointer to the HW structure + * + * Updates adapter IFS. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e1000_update_adaptive_generic(hw); +} + +/** + * e1000_disable_pcie_master - Disable PCI-Express master access + * @hw: pointer to the HW structure + * + * Disables PCI-Express master access and verifies there are no pending + * requests. Currently no func pointer exists and all implementations are + * handled in the generic version of this function. + **/ +s32 e1000_disable_pcie_master(struct e1000_hw *hw) +{ + return e1000_disable_pcie_master_generic(hw); +} + +/** + * e1000_config_collision_dist - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + if (hw->mac.ops.config_collision_dist) + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_rar_set - Sets a receive address register + * @hw: pointer to the HW structure + * @addr: address to set the RAR to + * @index: the RAR to set + * + * Sets a Receive Address Register (RAR) to the specified address. 
+ **/ +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + if (hw->mac.ops.rar_set) + return hw->mac.ops.rar_set(hw, addr, index); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state + * @hw: pointer to the HW structure + * + * Ensures that the MDI/MDIX SW state is valid. + **/ +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (hw->mac.ops.validate_mdi_setting) + return hw->mac.ops.validate_mdi_setting(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr - Determines address location in multicast table + * @hw: pointer to the HW structure + * @mc_addr: Multicast address to hash. + * + * This hashes an address to determine its location in the multicast + * table. Currently no func pointer exists and all implementations + * are handled in the generic version of this function. + **/ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + return e1000_hash_mc_addr_generic(hw, mc_addr); +} + +/** + * e1000_enable_tx_pkt_filtering - Enable packet filtering on TX + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + return e1000_enable_tx_pkt_filtering_generic(hw); +} + +/** + * e1000_mng_host_if_write - Writes to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer contents at the given offset in the host + * interface. It handles alignment so the writes are done in the most + * efficient way, and it accumulates the sum of the written data in the + * *sum parameter. + **/ +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum) +{ + return e1000_mng_host_if_write_generic(hw, buffer, length, offset, sum); +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after performing the checksum calculation. + **/ +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + return e1000_mng_write_cmd_header_generic(hw, hdr); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command has completed. It busy-waits + * if the previous command has not completed. + **/ +s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + return e1000_mng_enable_host_if_generic(hw); +} + +/** + * e1000_check_reset_block - Verifies PHY can be reset + * @hw: pointer to the HW structure + * + * Checks if the PHY is in a state that can be reset or if manageability + * has it tied up. This is a function pointer entry point called by drivers.
+ **/ +s32 e1000_check_reset_block(struct e1000_hw *hw) +{ + if (hw->phy.ops.check_reset_block) + return hw->phy.ops.check_reset_block(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg - Reads PHY register + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the buffer to store the 16-bit read. + * + * Reads the PHY register and returns the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + if (hw->phy.ops.read_reg) + return hw->phy.ops.read_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - Writes PHY register + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers. + **/ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + if (hw->phy.ops.write_reg) + return hw->phy.ops.write_reg(hw, offset, data); + + return E1000_SUCCESS; +} + +/** + * e1000_release_phy - Generic release PHY + * @hw: pointer to the HW structure + * + * Return if silicon family does not require a semaphore when accessing the + * PHY. + **/ +void e1000_release_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.release) + hw->phy.ops.release(hw); +} + +/** + * e1000_acquire_phy - Generic acquire PHY + * @hw: pointer to the HW structure + * + * Return success if silicon family does not require a semaphore when + * accessing the PHY. + **/ +s32 e1000_acquire_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.acquire) + return hw->phy.ops.acquire(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg - Reads register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to read + * @data: the location to store the 16-bit value read. + * + * Reads a register out of the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return e1000_read_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_write_kmrn_reg - Writes register using Kumeran interface + * @hw: pointer to the HW structure + * @offset: the register to write + * @data: the value to write. + * + * Writes a register to the Kumeran interface. Currently no func pointer + * exists and all implementations are handled in the generic version of + * this function. + **/ +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return e1000_write_kmrn_reg_generic(hw, offset, data); +} + +/** + * e1000_get_cable_length - Retrieves cable length estimation + * @hw: pointer to the HW structure + * + * This function estimates the cable length and stores them in + * hw->phy.min_length and hw->phy.max_length. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_get_cable_length(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_cable_length) + return hw->phy.ops.get_cable_length(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_info - Retrieves PHY information from registers + * @hw: pointer to the HW structure + * + * This function gets some information from various PHY registers and + * populates hw->phy values with it. This is a function pointer entry + * point called by drivers. 
+ **/ +s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + if (hw->phy.ops.get_info) + return hw->phy.ops.get_info(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - Hard PHY reset + * @hw: pointer to the HW structure + * + * Performs a hard PHY reset. This is a function pointer entry point called + * by drivers. + **/ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + if (hw->phy.ops.reset) + return hw->phy.ops.reset(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_commit - Soft PHY reset + * @hw: pointer to the HW structure + * + * Performs a soft PHY reset on those that apply. This is a function pointer + * entry point called by drivers. + **/ +s32 e1000_phy_commit(struct e1000_hw *hw) +{ + if (hw->phy.ops.commit) + return hw->phy.ops.commit(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d0_lplu_state - Sets low power link up state for D0 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D0 + * and SmartSpeed is disabled when active is true, else clear lplu for D0 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d0_lplu_state) + return hw->phy.ops.set_d0_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. This is a function pointer entry point called by drivers. + **/ +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + if (hw->phy.ops.set_d3_lplu_state) + return hw->phy.ops.set_d3_lplu_state(hw, active); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - Reads MAC address + * @hw: pointer to the HW structure + * + * Reads the MAC address out of the adapter and stores it in the HW structure. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_read_pba_string - Read device part number string + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. 
+ **/ +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size) +{ + return e1000_read_pba_string_generic(hw, pba_num, pba_num_size); +} + +/** + * e1000_read_pba_length - Read device part number string length + * @hw: pointer to the HW structure + * @pba_num_size: pointer in which to return the part number length + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size. + * Currently no func pointer exists and all implementations are handled in the + * generic version of this function. + **/ +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size) +{ + return e1000_read_pba_length_generic(hw, pba_num_size); +} + +/** + * e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Validates the NVM checksum is correct. This is a function pointer entry + * point called by drivers. + **/ +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.validate) + return hw->nvm.ops.validate(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum + * @hw: pointer to the HW structure + * + * Updates the NVM checksum. Currently no func pointer exists and all + * implementations are handled in the generic version of this function. + **/ +s32 e1000_update_nvm_checksum(struct e1000_hw *hw) +{ + if (hw->nvm.ops.update) + return hw->nvm.ops.update(hw); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_reload_nvm - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +void e1000_reload_nvm(struct e1000_hw *hw) +{ + if (hw->nvm.ops.reload) + hw->nvm.ops.reload(hw); +} + +/** + * e1000_read_nvm - Reads NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to read + * @words: number of 16-bit words to read + * @data: pointer to the properly sized buffer for the data. + * + * Reads 16-bit chunks of data from the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.read) + return hw->nvm.ops.read(hw, offset, words, data); + + return -E1000_ERR_CONFIG; +} + +/** + * e1000_write_nvm - Writes to NVM (EEPROM) + * @hw: pointer to the HW structure + * @offset: the word offset to write + * @words: number of 16-bit words to write + * @data: pointer to the properly sized buffer for the data. + * + * Writes 16-bit chunks of data to the NVM (EEPROM). This is a function + * pointer entry point called by drivers. + **/ +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + if (hw->nvm.ops.write) + return hw->nvm.ops.write(hw, offset, words, data); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg - Writes 8bit Control register + * @hw: pointer to the HW structure + * @reg: 32bit register offset + * @offset: the register to write + * @data: the value to write. + * + * Writes the PHY register at offset with the value in data. + * This is a function pointer entry point called by drivers.
+ **/ +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data) +{ + return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); +} + +/** + * e1000_power_up_phy - Restores link in case of PHY power down + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_up_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_up) + hw->phy.ops.power_up(hw); + + e1000_setup_link(hw); +} + +/** + * e1000_power_down_phy - Power down PHY + * @hw: pointer to the HW structure + * + * The phy may be powered down to save power, to turn off link when the + * driver is unloaded, or wake on lan is not enabled (among others). + **/ +void e1000_power_down_phy(struct e1000_hw *hw) +{ + if (hw->phy.ops.power_down) + hw->phy.ops.power_down(hw); +} + +/** + * e1000_power_up_fiber_serdes_link - Power up serdes link + * @hw: pointer to the HW structure + * + * Power on the optics and PCS. + **/ +void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.power_up_serdes) + hw->mac.ops.power_up_serdes(hw); +} + +/** + * e1000_shutdown_fiber_serdes_link - Remove link during power down + * @hw: pointer to the HW structure + * + * Shutdown the optics and PCS on driver unload. + **/ +void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw) +{ + if (hw->mac.ops.shutdown_serdes) + hw->mac.ops.shutdown_serdes(hw); +} + +/** + * e1000_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure + * + * Updates the temperatures in mac.thermal_sensor_data + **/ +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw) +{ + if (hw->mac.ops.get_thermal_sensor_data) + return hw->mac.ops.get_thermal_sensor_data(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_init_thermal_sensor_thresh - Sets thermal sensor thresholds + * @hw: pointer to hardware structure + * + * Sets the thermal sensor thresholds according to the NVM map + **/ +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw) +{ + if (hw->mac.ops.init_thermal_sensor_thresh) + return hw->mac.ops.init_thermal_sensor_thresh(hw); + + return E1000_SUCCESS; +} + diff --git a/drivers/staging/igb_avb/e1000_api.h b/drivers/staging/igb_avb/e1000_api.h new file mode 100644 index 000000000000..32fce254685a --- /dev/null +++ b/drivers/staging/igb_avb/e1000_api.h @@ -0,0 +1,152 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_API_H_ +#define _E1000_API_H_ + +#include "e1000_hw.h" + +extern void e1000_init_function_pointers_82575(struct e1000_hw *hw); +extern void e1000_rx_fifo_flush_82575(struct e1000_hw *hw); +extern void e1000_init_function_pointers_vf(struct e1000_hw *hw); +extern void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw); +extern void e1000_init_function_pointers_i210(struct e1000_hw *hw); + +s32 e1000_set_obff_timer(struct e1000_hw *hw, u32 itr); +s32 e1000_set_mac_type(struct e1000_hw *hw); +s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device); +s32 e1000_init_mac_params(struct e1000_hw *hw); +s32 e1000_init_nvm_params(struct e1000_hw *hw); +s32 e1000_init_phy_params(struct e1000_hw *hw); +s32 e1000_init_mbx_params(struct e1000_hw *hw); +s32 e1000_get_bus_info(struct e1000_hw *hw); +void e1000_clear_vfta(struct e1000_hw *hw); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); +s32 e1000_force_mac_fc(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex); +s32 e1000_disable_pcie_master(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +int e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr); +void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list, + u32 mc_addr_count); +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_check_reset_block(struct e1000_hw *hw); +s32 e1000_blink_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_id_led_init(struct e1000_hw *hw); +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +s32 e1000_get_cable_length(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset, + u8 data); +s32 e1000_get_phy_info(struct e1000_hw *hw); +void e1000_release_phy(struct e1000_hw *hw); +s32 e1000_acquire_phy(struct e1000_hw *hw); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_commit(struct e1000_hw *hw); +void e1000_power_up_phy(struct e1000_hw *hw); +void e1000_power_down_phy(struct e1000_hw *hw); +s32 e1000_read_mac_addr(struct e1000_hw *hw); +s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size); +s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size); +void e1000_reload_nvm(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum(struct e1000_hw *hw); +s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 
e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active); +bool e1000_check_mng_mode(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if(struct e1000_hw *hw); +s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, + u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +s32 e1000_get_thermal_sensor_data(struct e1000_hw *hw); +s32 e1000_init_thermal_sensor_thresh(struct e1000_hw *hw); + +/* + * TBI_ACCEPT macro definition: + * + * This macro requires: + * a = a pointer to struct e1000_hw + * status = the 8 bit status field of the Rx descriptor with EOP set + * errors = the 8 bit error field of the Rx descriptor with EOP set + * length = the sum of all the length fields of the Rx descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * min_frame_size = the minimum frame length we want to accept. + * max_frame_size = the maximum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +#define TBI_ACCEPT(a, status, errors, length, last_byte, \ + min_frame_size, max_frame_size) \ + (e1000_tbi_sbp_enabled_82543(a) && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((min_frame_size) - VLAN_TAG_SIZE)) && \ + ((length) <= ((max_frame_size) + 1))) : \ + (((length) > (min_frame_size)) && \ + ((length) <= ((max_frame_size) + VLAN_TAG_SIZE + 1))))) + +#ifndef E1000_MAX +#define E1000_MAX(a, b) ((a) > (b) ? (a) : (b)) +#endif +#ifndef E1000_DIVIDE_ROUND_UP +#define E1000_DIVIDE_ROUND_UP(a, b) (((a) + (b) - 1) / (b)) /* ceil(a/b) */ +#endif +#endif /* _E1000_API_H_ */ diff --git a/drivers/staging/igb_avb/e1000_defines.h b/drivers/staging/igb_avb/e1000_defines.h new file mode 100644 index 000000000000..4022e22be775 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_defines.h @@ -0,0 +1,1486 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* SW Definable Pin 4 data */ +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* SW Definable Pin 6 data */ +#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* SW Definable Pin 2 data */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* SW Definable Pin 3 data */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP2_DIR 0x00000400 /* Direction of SDP2 0=in 1=out */ +#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* Direction of SDP3 0=in 1=out */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +/* Physical Func Reset Done Indication */ +#define E1000_CTRL_EXT_PFRSTD 0x00004000 +#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clk Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +/* Offset of the link mode field in Ctrl Ext register */ +#define E1000_CTRL_EXT_LINK_MODE_OFFSET 22 +#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_IRCA 0x00000001 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Drv loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 +#define E1000_I2CCMD_REG_ADDR_SHIFT 16 +#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 +#define E1000_I2CCMD_OPCODE_READ 0x08000000 +#define E1000_I2CCMD_OPCODE_WRITE 0x00000000 
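[Editor's aside, not part of this patch: the I2CCMD shifts and opcodes above are packed into a single 32-bit command word, which is written to the I2CCMD register and then polled for completion via the READY/ERROR bits defined just below. A minimal sketch follows; the helper name is invented, and offset and phy_addr are caller-supplied values.]

static inline u32 example_i2ccmd_read(u32 offset, u32 phy_addr)
{
	/* Pack register offset, PHY/SFP address and the read opcode */
	return (offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
	       (phy_addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
	       E1000_I2CCMD_OPCODE_READ;
}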
+#define E1000_I2CCMD_READY 0x20000000 +#define E1000_I2CCMD_ERROR 0x80000000 +#define E1000_I2CCMD_SFP_DATA_ADDR(a) (0x0000 + (a)) +#define E1000_I2CCMD_SFP_DIAG_ADDR(a) (0x0100 + (a)) +#define E1000_MAX_SGMII_PHY_REG_ADDR 255 +#define E1000_I2CCMD_PHY_TIMEOUT 200 +#define E1000_IVAR_VALID 0x80 +#define E1000_GPIE_NSICR 0x00000001 +#define E1000_GPIE_MSIX_MODE 0x00000010 +#define E1000_GPIE_EIAME 0x40000000 +#define E1000_GPIE_PBA 0x80000000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_LB 0x00040000 +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define 
E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promisc enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promisc enable */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min thresh size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x01 +#define E1000_SWFW_PHY0_SM 0x02 +#define E1000_SWFW_PHY1_SM 0x04 +#define E1000_SWFW_CSR_SM 0x08 +#define E1000_SWFW_PHY2_SM 0x20 +#define E1000_SWFW_PHY3_SM 0x40 +#define E1000_SWFW_SW_MNG_SM 0x400 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI.
0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ +#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_I2C_ENA 0x02000000 /* I2C enable */ + +#define E1000_CONNSW_ENRGSRC 0x4 +#define E1000_CONNSW_PHYSD 0x400 +#define E1000_CONNSW_PHY_PDN 0x800 +#define E1000_CONNSW_SERDESD 0x200 +#define E1000_CONNSW_AUTOSENSE_CONF 0x2 +#define E1000_CONNSW_AUTOSENSE_EN 0x1 +#define E1000_PCS_CFG_PCS_EN 8 +#define E1000_PCS_LCTL_FLV_LINK_UP 1 +#define E1000_PCS_LCTL_FSV_10 0 +#define E1000_PCS_LCTL_FSV_100 2 +#define E1000_PCS_LCTL_FSV_1000 4 +#define E1000_PCS_LCTL_FDV_FULL 8 +#define E1000_PCS_LCTL_FSD 0x10 +#define E1000_PCS_LCTL_FORCE_LINK 0x20 +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 +#define E1000_PCS_LCTL_AN_ENABLE 0x10000 +#define E1000_PCS_LCTL_AN_RESTART 0x20000 +#define E1000_PCS_LCTL_AN_TIMEOUT 0x40000 +#define E1000_ENABLE_SERDES_LOOPBACK 0x0410 + +#define E1000_PCS_LSTS_LINK_OK 1 +#define E1000_PCS_LSTS_SPEED_100 2 +#define E1000_PCS_LSTS_SPEED_1000 4 +#define E1000_PCS_LSTS_DUPLEX_FULL 8 +#define E1000_PCS_LSTS_SYNK_OK 0x10 +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Duplex 0=half 1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Compltn by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */ +#define E1000_STATUS_2P5_SKU 0x00001000 /* Val of 2.5GBE SKU strap */ +#define E1000_STATUS_2P5_SKU_OVER 0x00002000 /* Val of 2.5GBE SKU Over */ + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 
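
The Device Control bits above are what a driver touches when it bypasses autonegotiation: FRCSPD/FRCDPX make the MAC ignore what the PHY resolved, while SPD_SEL and FD supply the forced values. A minimal sketch of forcing 100 Mb/s full duplex under stated assumptions — the rd32()/wr32() MMIO accessors and the E1000_CTRL register offset are assumed to come from the companion e1000_regs.h/e1000_osdep.h headers, not from this file:

    /* Sketch: force 100 Mb/s full duplex via the Device Control register.
     * Assumes rd32()/wr32() MMIO accessors (hw handle in scope) and the
     * E1000_CTRL offset from the companion headers.
     */
    static void force_100_full(struct e1000_hw *hw)
    {
    	u32 ctrl = rd32(E1000_CTRL);

    	ctrl &= ~E1000_CTRL_ASDE;                      /* stop auto-speed detection */
    	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_FD); /* clear old speed/duplex */
    	ctrl |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX; /* MAC overrides the PHY */
    	ctrl |= E1000_CTRL_SPD_100 | E1000_CTRL_FD;    /* 100 Mb/s, full duplex */
    	wr32(E1000_CTRL, ctrl);
    }
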
+#define SPEED_2500 2500 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* Transmit Arbitration Count */ +#define E1000_TARC0_ENABLE 0x00000400 /* Enable Tx Queue 0 */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define 
E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 +#define E1000_RFCTL_LEF 0x00040000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ + +#define ETHERNET_FCS_SIZE 4 +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_10K 0x000A /* 10KB */ +#define E1000_PBA_12K 0x000C /* 12KB */ +#define E1000_PBA_14K 0x000E /* 14KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ +#define E1000_PBA_18K 0x0012 +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_26K 0x001A +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_35K 0x0023 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB */ +#define E1000_PBA_64K 0x0040 /* 64KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* Rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_VMMB 0x00000100 /* VM MB event */ +#define E1000_ICR_RXCFG 0x00000400 /* Rx /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +#define E1000_ICR_TS 0x00080000 /* Time Sync Interrupt */ +#define E1000_ICR_DRSTA 0x40000000 /* Device Reset Asserted */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_DOUTSYNC 0x10000000 /* NIC DMA out of sync */ +#define E1000_ICR_FER 0x00400000 /* Fatal Error */ + +#define E1000_ICR_THS 0x00800000 /* ICR.THS: Thermal Sensor Event*/ +#define E1000_ICR_MDDET 0x10000000 /* Malicious Driver Detect */ + +/* Extended Interrupt Cause Read */ +#define E1000_EICR_RX_QUEUE0 0x00000001 /* Rx Queue 0 Interrupt */ +#define E1000_EICR_RX_QUEUE1 0x00000002 /* Rx Queue 1 Interrupt */ +#define E1000_EICR_RX_QUEUE2 0x00000004 /* Rx Queue 2 Interrupt */ +#define E1000_EICR_RX_QUEUE3 0x00000008 /* Rx Queue 3 Interrupt */ +#define E1000_EICR_TX_QUEUE0 0x00000100 /* Tx Queue 0 Interrupt */ +#define E1000_EICR_TX_QUEUE1 0x00000200 /* Tx Queue 1 Interrupt */ +#define E1000_EICR_TX_QUEUE2 0x00000400 /* Tx Queue 2 Interrupt */ +#define E1000_EICR_TX_QUEUE3 0x00000800 /* Tx Queue 3 Interrupt */ +#define E1000_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +#define E1000_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ +/* TCP Timer */ +#define E1000_TCPTIMER_KS 0x00000100 /* KickStart */ +#define E1000_TCPTIMER_COUNT_ENABLE 0x00000200 /* Count Enable */ +#define E1000_TCPTIMER_COUNT_FINISH 0x00000400 /* Count finish */ +#define E1000_TCPTIMER_LOOP 0x00000800 /* Loop */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Tx desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* Rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_TS E1000_ICR_TS /* Time Sync Interrupt */ +#define E1000_IMS_DRSTA E1000_ICR_DRSTA /* Device Reset Asserted */ +#define E1000_IMS_DOUTSYNC E1000_ICR_DOUTSYNC /* NIC DMA out of sync */ +#define E1000_IMS_FER E1000_ICR_FER /* Fatal Error */ + +#define E1000_IMS_THS E1000_ICR_THS /* ICR.TS: Thermal Sensor Event*/ +#define E1000_IMS_MDDET E1000_ICR_MDDET /* Malicious Driver Detect */ +/* Extended Interrupt Mask Set */ +#define E1000_EIMS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EIMS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EIMS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EIMS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EIMS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EIMS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EIMS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EIMS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EIMS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EIMS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Extended Interrupt Cause Set */ +#define E1000_EICS_RX_QUEUE0 E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */ +#define E1000_EICS_RX_QUEUE1 E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */ +#define E1000_EICS_RX_QUEUE2 E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */ +#define E1000_EICS_RX_QUEUE3 E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */ +#define E1000_EICS_TX_QUEUE0 E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */ +#define E1000_EICS_TX_QUEUE1 E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */ +#define E1000_EICS_TX_QUEUE2 E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */ +#define E1000_EICS_TX_QUEUE3 E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */ +#define E1000_EICS_TCP_TIMER E1000_EICR_TCP_TIMER /* TCP Timer */ +#define E1000_EICS_OTHER E1000_EICR_OTHER /* Interrupt Cause Active */ + +#define E1000_EITR_ITR_INT_MASK 0x0000FFFF +/* E1000_EITR_CNT_IGNR is only for 82576 and newer */ +#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */ +#define E1000_EITR_INTERVAL 0x00007FFC + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of descriptors still to be processed. 
*/ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 +#define E1000_RAH_QUEUE_MASK_82575 0x000C0000 +#define E1000_RAH_POOL_1 0x00040000 + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_MBX 15 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 +#define E1000_ERR_I2C 19 +#define E1000_ERR_INVM_VALUE_NOT_FOUND 20 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. 
*/ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF +#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01 +#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02 +#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03 +#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04 + +#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00 +#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300 +#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800 +#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900 +#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00 +#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00 +#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00 +#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00 + +#define E1000_TIMINCA_16NS_SHIFT 24 +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* Time Sync Interrupt Cause/Mask Register Bits */ + +#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */ +#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */ +#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */ +#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */ +#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */ +#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */ +#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */ +#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */ + +#define TSYNC_INTERRUPTS TSINTR_TXTS +#define E1000_TSICR_TXTS TSINTR_TXTS + +/* TSAUXC Configuration Bits */ +#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */ +#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */ +#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. 
*/ +#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */ +#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */ +#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */ +#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */ +#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */ +#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */ +#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp 0 Taken. */ +#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 1. */ +#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp 1 Taken. */ +#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */ +#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */ + +/* SDP Configuration Bits */ +#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */ +#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */ +#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */ +#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */ +#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */ +#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */ +#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */ +#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */ +#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */ +#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */ +#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */ +#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */ +#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */ +#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */ +#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */ +#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */ +#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */ +#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */ +#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */ +#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */ +#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */ +#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */ +#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */ +#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */ +#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */ +#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
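
The TSAUXC and SDP bits above work in pairs: a 2-bit SEL field picks what a software-definable pin carries, an EN bit hands the pin to the time-sync block, and TSAUXC arms the corresponding target time. A minimal sketch of routing target time 0 out on the SDP0 pin under stated assumptions — the E1000_TSSDP/E1000_TSAUXC register offsets and the rd32()/wr32() accessors are assumed from the companion headers:

    /* Sketch: drive target time 0 on the SDP0 pin.
     * Assumes E1000_TSSDP/E1000_TSAUXC offsets and rd32()/wr32() accessors
     * from the companion e1000_regs.h/e1000_osdep.h headers.
     */
    static void route_tt0_to_sdp0(struct e1000_hw *hw)
    {
    	u32 tssdp = rd32(E1000_TSSDP);
    	u32 tsauxc = rd32(E1000_TSAUXC);

    	tssdp &= ~TS_SDP0_SEL_FC1;             /* FC1 = (3 << 6): clears the whole select field */
    	tssdp |= TS_SDP0_SEL_TT0 | TS_SDP0_EN; /* select TT0, assign SDP0 to Tsync */
    	wr32(E1000_TSSDP, tssdp);

    	tsauxc |= TSAUXC_EN_TT0;               /* arm target time 0 */
    	wr32(E1000_TSAUXC, tsauxc);
    }

OR-ing in TS_SDP0_SEL_TT0 is numerically a no-op (the encoding is 0), but spelling it out documents which of the four sources the cleared field now selects.
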
+/* TUPLE Filtering Configuration */ +#define E1000_TTQF_DISABLE_MASK 0xF0008000 /* TTQF Disable Mask */ +#define E1000_TTQF_QUEUE_ENABLE 0x100 /* TTQF Queue Enable Bit */ +#define E1000_TTQF_PROTOCOL_MASK 0xFF /* TTQF Protocol Mask */ +/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_TCP 0x0 +/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_UDP 0x1 +/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */ +#define E1000_TTQF_PROTOCOL_SCTP 0x2 +#define E1000_TTQF_PROTOCOL_SHIFT 5 /* TTQF Protocol Shift */ +#define E1000_TTQF_QUEUE_SHIFT 16 /* TTQF Queue Shift */ +#define E1000_TTQF_RX_QUEUE_MASK 0x70000 /* TTQF Queue Mask */ +#define E1000_TTQF_MASK_ENABLE 0x10000000 /* TTQF Mask Enable Bit */ +#define E1000_IMIR_CLEAR_MASK 0xF001FFFF /* IMIR Reg Clear Mask */ +#define E1000_IMIR_PORT_BYPASS 0x20000 /* IMIR Port Bypass Bit */ +#define E1000_IMIR_PRIORITY_SHIFT 29 /* IMIR Priority Shift */ +#define E1000_IMIREXT_CLEAR_MASK 0x7FFFF /* IMIREXT Reg Clear Mask */ + +#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */ +#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */ +#define E1000_MDICNFG_PHY_MASK 0x03E00000 +#define E1000_MDICNFG_PHY_SHIFT 21 + +#define E1000_MEDIA_PORT_COPPER 1 +#define E1000_MEDIA_PORT_OTHER 2 +#define E1000_M88E1112_AUTO_COPPER_SGMII 0x2 +#define E1000_M88E1112_AUTO_COPPER_BASEX 0x3 +#define E1000_M88E1112_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define E1000_M88E1112_MAC_CTRL_1 0x10 +#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT 7 +#define E1000_M88E1112_PAGE_ADDR 0x16 +#define E1000_M88E1112_STATUS 0x01 + +#define E1000_THSTAT_LOW_EVENT 0x20000000 /* Low thermal threshold */ +#define E1000_THSTAT_MID_EVENT 0x00200000 /* Mid thermal threshold */ +#define E1000_THSTAT_HIGH_EVENT 0x00002000 /* High thermal threshold */ +#define E1000_THSTAT_PWR_DOWN 0x00000001 /* Power Down Event */ +#define E1000_THSTAT_LINK_THROTTLE 0x00000002 /* Link Spd Throttle Event */ + +/* I350 EEE defines */ +#define E1000_IPCNFG_EEE_1G_AN 0x00000008 /* IPCNFG EEE Ena 1G AN */ +#define E1000_IPCNFG_EEE_100M_AN 0x00000004 /* IPCNFG EEE Ena 100M AN */ +#define E1000_EEER_TX_LPI_EN 0x00010000 /* EEER Tx LPI Enable */ +#define E1000_EEER_RX_LPI_EN 0x00020000 /* EEER Rx LPI Enable */ +#define E1000_EEER_LPI_FC 0x00040000 /* EEER Ena on Flow Cntrl */ +/* EEE status */ +#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */ +#define E1000_EEER_RX_LPI_STATUS 0x40000000 /* Rx in LPI state */ +#define E1000_EEER_TX_LPI_STATUS 0x80000000 /* Tx in LPI state */ +#define E1000_EEE_LP_ADV_ADDR_I350 0x040F /* EEE LP Advertisement */ +#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */ +#define E1000_M88E1543_EEE_CTRL_1 0x0 +#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */ +#define E1000_EEE_ADV_DEV_I354 7 +#define E1000_EEE_ADV_ADDR_I354 60 +#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */ +#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */ +#define E1000_PCS_STATUS_DEV_I354 3 +#define E1000_PCS_STATUS_ADDR_I354 1 +#define E1000_PCS_STATUS_RX_LPI_RCVD 0x0400 +#define E1000_PCS_STATUS_TX_LPI_RCVD 0x0800 +#define E1000_M88E1512_CFG_REG_1 0x0010 +#define E1000_M88E1512_CFG_REG_2 0x0011 +#define E1000_M88E1512_CFG_REG_3 0x0007 +#define E1000_M88E1512_MODE 0x0014 +#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */ +#define
E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */ +#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */ +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 +#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 +#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 +#define E1000_GCR_CMPL_TMOUT_RESEND 0x00010000 +#define E1000_GCR_CAP_VER2 0x00040000 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */ + +/* mPHY address control and data registers */ +#define E1000_MPHY_ADDR_CTL 0x0024 /* Address Control Reg */ +#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000 +#define E1000_MPHY_DATA 0x0E10 /* Data Register */ + +/* AFE CSR Offset for PCS CLK */ +#define E1000_MPHY_PCS_CLK_REG_OFFSET 0x0004 +/* Override for near end digital loopback. */ +#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP 10T Half Dplx Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP 10T Full Dplx Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP 100TX Half Dplx Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asym Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* New page received */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device Next Page able */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP Next Page able */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel detection fault */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +/* 1=Repeater/switch device port 0=DTE device */ +#define CR_1000T_REPEATER_DTE 0x0400 +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define CR_1000T_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define CR_1000T_MS_ENABLE 0x1000 +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle err since last rd */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asym pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /*
LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local Tx Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ + +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CONTROL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define PHY_CONTROL_LB 0x4000 /* PHY Loopback bit */ + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +#define E1000_EECD_BLOCKED 0x00008000 /* Bit banging access blocked flag */ +#define E1000_EECD_ABORT 0x00010000 /* NVM operation aborted flag */ +#define E1000_EECD_TIMEOUT 0x00020000 /* NVM read operation timeout flag */ +#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */ +/* NVM Addressing bits based on type 0=small, 1=large */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Ena Auto FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) +#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */ +#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done */ +#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */ +#define E1000_EECD_SEC1VAL_I210 0x02000000 /* Sector One Valid */ +#define E1000_FLUDONE_ATTEMPTS 20000 +#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */ +#define E1000_I210_FIFO_SEL_RX 0x00 +#define E1000_I210_FIFO_SEL_TX_QAV(_i) (0x02 + (_i)) +#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0) +#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06 +#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01 + +#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */ +/* Secure FLASH mode requires removing MSb */ +#define E1000_I210_FW_PTR_MASK 0x7FFF +/* Firmware code revision field word offset*/ +#define E1000_I210_FW_VER_OFFSET 328 + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 
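
The RW_REG fields above encode the handshake with the EERD/EEWR registers: software writes the word address plus a start bit, polls for the done bit, then pulls the 16-bit word out of the data field. A minimal sketch of a buffered EERD read under stated assumptions — the E1000_EERD offset, the rd32()/wr32() accessors, and udelay() come from the companion headers, and E1000_NVM_RW_ADDR_SHIFT / E1000_NVM_RW_REG_START are defined immediately below:

    /* Sketch: read one 16-bit NVM word through the EERD register.
     * Assumes the E1000_EERD offset plus rd32()/wr32() accessors from the
     * companion headers; the poll budget is an arbitrary choice here.
     */
    static s32 read_nvm_word(struct e1000_hw *hw, u16 offset, u16 *data)
    {
    	int attempts = E1000_NVM_GRANT_ATTEMPTS; /* reused as a poll budget */
    	u32 reg;

    	wr32(E1000_EERD, ((u32)offset << E1000_NVM_RW_ADDR_SHIFT) |
    			 E1000_NVM_RW_REG_START);
    	while (attempts--) {
    		reg = rd32(E1000_EERD);
    		if (reg & E1000_NVM_RW_REG_DONE) {
    			*data = (u16)(reg >> E1000_NVM_RW_REG_DATA);
    			return E1000_SUCCESS;
    		}
    		udelay(5);
    	}
    	return -E1000_ERR_NVM;
    }
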
+#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_VERSION 0x0005 +#define E1000_I210_NVM_FW_MODULE_PTR 0x0010 +#define E1000_I350_NVM_FW_MODULE_PTR 0x0051 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_ETRACK_WORD 0x0042 +#define NVM_ETRACK_HIWORD 0x0043 +#define NVM_COMB_VER_OFF 0x0083 +#define NVM_COMB_VER_PTR 0x003d + +/* NVM version defines */ +#define NVM_MAJOR_MASK 0xF000 +#define NVM_MINOR_MASK 0x0FF0 +#define NVM_IMAGE_ID_MASK 0x000F +#define NVM_COMB_VER_MASK 0x00FF +#define NVM_MAJOR_SHIFT 12 +#define NVM_MINOR_SHIFT 4 +#define NVM_COMB_VER_SHFT 8 +#define NVM_VER_INVALID 0xFFFF +#define NVM_ETRACK_SHIFT 16 +#define NVM_ETRACK_VALID 0x8000 +#define NVM_NEW_DEC_MASK 0x0F00 +#define NVM_HEX_CONV 16 +#define NVM_HEX_TENS 10 + +/* FW version defines */ +/* Offset of "Loader patch ptr" in Firmware Header */ +#define E1000_I350_NVM_FW_LOADER_PATCH_PTR_OFFSET 0x01 +/* Patch generation hour & minutes */ +#define E1000_I350_NVM_FW_VER_WORD1_OFFSET 0x04 +/* Patch generation month & day */ +#define E1000_I350_NVM_FW_VER_WORD2_OFFSET 0x05 +/* Patch generation year */ +#define E1000_I350_NVM_FW_VER_WORD3_OFFSET 0x06 +/* Patch major & minor numbers */ +#define E1000_I350_NVM_FW_VER_WORD4_OFFSET 0x07 + +#define NVM_MAC_ADDR 0x0000 +#define NVM_SUB_DEV_ID 0x000B +#define NVM_SUB_VEN_ID 0x000C +#define NVM_DEV_ID 0x000D +#define NVM_VEN_ID 0x000E +#define NVM_INIT_CTRL_2 0x000F +#define NVM_INIT_CTRL_4 0x0013 +#define NVM_LED_1_CFG 0x001C +#define NVM_LED_0_2_CFG 0x001F + +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_ETS_CFG 0x003E +#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0 +#define NVM_ETS_LTHRES_DELTA_SHIFT 6 +#define NVM_ETS_TYPE_MASK 0x0038 +#define NVM_ETS_TYPE_SHIFT 3 +#define NVM_ETS_TYPE_EMC 0x000 +#define NVM_ETS_NUM_SENSORS_MASK 0x0007 +#define NVM_ETS_DATA_LOC_MASK 0x3C00 +#define NVM_ETS_DATA_LOC_SHIFT 10 +#define NVM_ETS_DATA_INDEX_MASK 0x0300 +#define NVM_ETS_DATA_INDEX_SHIFT 8 +#define NVM_ETS_DATA_HTHRESH_MASK 0x00FF +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F +#define NVM_COMPATIBILITY_REG_3 0x0003 +#define NVM_COMPATIBILITY_BIT_MASK 0x8000 + +#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */ +#define E1000_NVM_CFG_DONE_PORT_2 0x100000 /* ...for third port */ +#define E1000_NVM_CFG_DONE_PORT_3 0x200000 /* ...for fourth port */ + +#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? 
(0x40 + (0x40 * (a))) : 0) + +/* Mask bits for fields in Word 0x24 of the NVM */ +#define NVM_WORD24_COM_MDIO 0x0008 /* MDIO interface shared */ +#define NVM_WORD24_EXT_MDIO 0x0004 /* MDIO accesses routed extrnl */ +/* Offset of Link Mode bits for 82575/82576 */ +#define NVM_WORD24_LNK_MODE_OFFSET 8 +/* Offset of Link Mode bits for 82580 up */ +#define NVM_WORD24_82580_LNK_MODE_OFFSET 4 + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_RESERVED_WORD 0xFFFF +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 +#define PCIE_DEVICE_CONTROL2 0x28 + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 +#define PCIX_STATUS_LO_FUNC_MASK 0x7 +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 +#define PCIE_LINK_SPEED_MASK 0x0F +#define PCIE_LINK_SPEED_2500 0x01 +#define PCIE_LINK_SPEED_5000 0x02 +#define PCIE_DEVICE_CONTROL2_16ms 0x0005 + +#ifndef ETH_ADDR_LEN +#define ETH_ADDR_LEN 6 +#endif + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. 
+ * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1543_E_PHY_ID 0x01410EA0 +#define M88E1512_E_PHY_ID 0x01410DD0 +#define M88E1112_E_PHY_ID 0x01410C90 +#define I347AT4_E_PHY_ID 0x01410DC0 +#define M88E1340M_E_PHY_ID 0x01410DF0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define I82580_I_PHY_ID 0x015403A0 +#define I350_I_PHY_ID 0x015403B0 +#define I210_I_PHY_ID 0x01410C00 +#define IGP04E1000_E_PHY_ID 0x02A80391 +#define M88_VENDOR 0x0141 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Reg */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Reg */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Cntrl */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for pg number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */ +/* MDI Crossover Mode bits 6:5 Manual MDI configuration */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Tx */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0 = <50M + * 1 = 50-80M + * 2 = 80-110M + * 3 = 110-140M + * 4 = >140M + */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* Intel I347AT4 Registers */ +#define I347AT4_PCDL 0x10 /* PHY Cable Diagnostics Length */ +#define I347AT4_PCDC 0x15 /* PHY Cable Diagnostics Control */ +#define I347AT4_PAGE_SELECT 0x16 + +/* I347AT4 Extended PHY Specific Control Register */ + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define I347AT4_PSCR_DOWNSHIFT_MASK 0x7000 +#define I347AT4_PSCR_DOWNSHIFT_1X 0x0000 +#define I347AT4_PSCR_DOWNSHIFT_2X 0x1000 +#define I347AT4_PSCR_DOWNSHIFT_3X 0x2000 +#define I347AT4_PSCR_DOWNSHIFT_4X 
0x3000 +#define I347AT4_PSCR_DOWNSHIFT_5X 0x4000 +#define I347AT4_PSCR_DOWNSHIFT_6X 0x5000 +#define I347AT4_PSCR_DOWNSHIFT_7X 0x6000 +#define I347AT4_PSCR_DOWNSHIFT_8X 0x7000 + +/* I347AT4 PHY Cable Diagnostics Control */ +#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */ + +/* M88E1112 only registers */ +#define M88E1112_VCT_DSP_DISTANCE 0x001A + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL GG82563_REG(0, 16) /* PHY Spec Cntrl */ +#define GG82563_PHY_PAGE_SELECT GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 GG82563_REG(0, 26) /* PHY Spec Cntrl2 */ +#define GG82563_PHY_PAGE_SELECT_ALT GG82563_REG(0, 29) /* Alt Page Select */ + +/* MAC Specific Control Register */ +#define GG82563_PHY_MAC_SPEC_CTRL GG82563_REG(2, 21) + +#define GG82563_PHY_DSP_DISTANCE GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +/* Kumeran Mode Control */ +#define GG82563_PHY_KMRN_MODE_CTRL GG82563_REG(193, 16) +#define GG82563_PHY_PWR_MGMT_CTRL GG82563_REG(193, 20) /* Pwr Mgt Ctrl */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL GG82563_REG(194, 18) /* Inband Ctrl */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 +#define E1000_MDIC_DEST 0x80000000 + +/* SerDes Control */ +#define E1000_GEN_CTL_READY 0x80000000 +#define E1000_GEN_CTL_ADDRESS_SHIFT 8 +#define E1000_GEN_POLL_TIMEOUT 640 + +/* LinkSec register fields */ +#define E1000_LSECTXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECTXCAP_SUM_SHIFT 16 +#define E1000_LSECRXCAP_SUM_MASK 0x00FF0000 +#define E1000_LSECRXCAP_SUM_SHIFT 16 + +#define E1000_LSECTXCTRL_EN_MASK 0x00000003 +#define E1000_LSECTXCTRL_DISABLE 0x0 +#define E1000_LSECTXCTRL_AUTH 0x1 +#define E1000_LSECTXCTRL_AUTH_ENCRYPT 0x2 +#define E1000_LSECTXCTRL_AISCI 0x00000020 +#define E1000_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +#define E1000_LSECTXCTRL_RSV_MASK 0x000000D8 + +#define E1000_LSECRXCTRL_EN_MASK 0x0000000C +#define E1000_LSECRXCTRL_EN_SHIFT 2 +#define E1000_LSECRXCTRL_DISABLE 0x0 +#define E1000_LSECRXCTRL_CHECK 0x1 +#define E1000_LSECRXCTRL_STRICT 0x2 +#define E1000_LSECRXCTRL_DROP 0x3 +#define E1000_LSECRXCTRL_PLSH 0x00000040 +#define E1000_LSECRXCTRL_RP 0x00000080 +#define E1000_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + +/* Tx Rate-Scheduler Config fields */ +#define E1000_RTTBCNRC_RS_ENA 0x80000000 +#define E1000_RTTBCNRC_RF_DEC_MASK 0x00003FFF +#define E1000_RTTBCNRC_RF_INT_SHIFT 14 +#define E1000_RTTBCNRC_RF_INT_MASK \ + (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) + +/* DMA Coalescing register fields */ +/* DMA Coalescing Watchdog Timer */ +#define E1000_DMACR_DMACWT_MASK 0x00003FFF +/* DMA Coalescing Rx Threshold */ +#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 +#define E1000_DMACR_DMACTHR_SHIFT 16 +/* Lx when no PCIe transactions */ +#define E1000_DMACR_DMAC_LX_MASK 0x30000000 +#define E1000_DMACR_DMAC_LX_SHIFT 28 +#define 
E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */ +/* DMA Coalescing BMC-to-OS Watchdog Enable */ +#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000 + +/* DMA Coalescing Transmit Threshold */ +#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF + +#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */ + +/* Rx Traffic Rate Threshold */ +#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF +/* Rx packet rate in current window */ +#define E1000_DMCRTRH_LRPRCW 0x80000000 + +/* DMA Coal Rx Traffic Current Count */ +#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF + +/* Flow ctrl Rx Threshold High val */ +#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 +#define E1000_FCRTC_RTH_COAL_SHIFT 4 +/* Lx power decision based on DMA coal */ +#define E1000_PCIEMISC_LX_DECISION 0x00000080 + +#define E1000_RXPBS_CFG_TS_EN 0x80000000 /* Timestamp in Rx buffer */ +#define E1000_RXPBS_SIZE_I210_MASK 0x0000003F /* Rx packet buffer size */ +#define E1000_TXPB0S_SIZE_I210_MASK 0x0000003F /* Tx packet buffer 0 size */ +#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ +#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ + +/* Proxy Filter Control */ +#define E1000_PROXYFC_D0 0x00000001 /* Enable offload in D0 */ +#define E1000_PROXYFC_EX 0x00000004 /* Directed exact proxy */ +#define E1000_PROXYFC_MC 0x00000008 /* Directed MC Proxy */ +#define E1000_PROXYFC_BC 0x00000010 /* Broadcast Proxy Enable */ +#define E1000_PROXYFC_ARP_DIRECTED 0x00000020 /* Directed ARP Proxy Ena */ +#define E1000_PROXYFC_IPV4 0x00000040 /* Directed IPv4 Enable */ +#define E1000_PROXYFC_IPV6 0x00000080 /* Directed IPv6 Enable */ +#define E1000_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ +#define E1000_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Ena */ +/* Proxy Status */ +#define E1000_PROXYS_CLEAR 0xFFFFFFFF /* Clear */ + +/* Firmware Status */ +#define E1000_FWSTS_FWRI 0x80000000 /* FW Reset Indication */ +/* VF Control */ +#define E1000_VTCTRL_RST 0x04000000 /* Reset VF */ + +#define E1000_STATUS_LAN_ID_MASK 0x0000000C /* Mask for Lan ID field */ +/* Lan ID bit field offset in status register */ +#define E1000_STATUS_LAN_ID_OFFSET 2 +#define E1000_VFTA_ENTRIES 128 +#define E1000_TQAVCC_QUEUEMODE 0x80000000 /* queue mode, 0=strict, 1=SR mode */ +#define E1000_TQAVCTRL_TXMODE 0x00000001 /* Transmit mode, 0=legacy, 1=QAV */ +#define E1000_TQAVCTRL_1588_STAT_EN 0x00000004 /* report DMA time of tx packets */ +#define E1000_TQAVCTRL_DATA_FETCH_ARB 0x00000010 /* data fetch arbitration */ +#define E1000_TQAVCTRL_DATA_TRAN_ARB 0x00000100 /* data tx arbitration */ +#define E1000_TQAVCTRL_DATA_TRAN_TIM 0x00000200 /* data launch time valid */ +#define E1000_TQAVCTRL_SP_WAIT_SR 0x00000400 /* stall SP to guarantee SR */ +#define E1000_TQAVCTRL_FETCH_TM_SHIFT (16) /* ...
and associated shift value */ + +/* Tx packet buffer fields */ +#define E1000_TXPBSIZE_PBSZ_MASK 0x3F +#define E1000_TXPBSIZE_TX0PB_SHIFT 0 +#define E1000_TXPBSIZE_TX1PB_SHIFT 6 +#define E1000_TXPBSIZE_TX2PB_SHIFT 12 +#define E1000_TXPBSIZE_TX3PB_SHIFT 18 +#ifndef E1000_UNUSEDARG +#define E1000_UNUSEDARG +#endif /* E1000_UNUSEDARG */ +#ifndef ERROR_REPORT +#define ERROR_REPORT(fmt) do { } while (0) +#endif /* ERROR_REPORT */ +#define E1000_TSAUXC_SAMP_AUTO 0x00000008 /* sample current ts */ +#endif /* _E1000_DEFINES_H_ */ diff --git a/drivers/staging/igb_avb/e1000_hw.h b/drivers/staging/igb_avb/e1000_hw.h new file mode 100644 index 000000000000..74cb22ee8ead --- /dev/null +++ b/drivers/staging/igb_avb/e1000_hw.h @@ -0,0 +1,792 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" +#include "e1000_regs.h" +#include "e1000_defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82576 0x10C9 +#define E1000_DEV_ID_82576_FIBER 0x10E6 +#define E1000_DEV_ID_82576_SERDES 0x10E7 +#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 +#define E1000_DEV_ID_82576_QUAD_COPPER_ET2 0x1526 +#define E1000_DEV_ID_82576_NS 0x150A +#define E1000_DEV_ID_82576_NS_SERDES 0x1518 +#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D +#define E1000_DEV_ID_82575EB_COPPER 0x10A7 +#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 +#define E1000_DEV_ID_82575GB_QUAD_COPPER 0x10D6 +#define E1000_DEV_ID_82580_COPPER 0x150E +#define E1000_DEV_ID_82580_FIBER 0x150F +#define E1000_DEV_ID_82580_SERDES 0x1510 +#define E1000_DEV_ID_82580_SGMII 0x1511 +#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 +#define E1000_DEV_ID_82580_QUAD_FIBER 0x1527 +#define E1000_DEV_ID_I350_COPPER 0x1521 +#define E1000_DEV_ID_I350_FIBER 0x1522 +#define E1000_DEV_ID_I350_SERDES 0x1523 +#define E1000_DEV_ID_I350_SGMII 0x1524 +#define E1000_DEV_ID_I350_DA4 0x1546 +#define E1000_DEV_ID_I210_COPPER 0x1533 +#define E1000_DEV_ID_I210_COPPER_OEM1 0x1534 +#define E1000_DEV_ID_I210_COPPER_IT 0x1535 +#define E1000_DEV_ID_I210_FIBER 0x1536 +#define E1000_DEV_ID_I210_SERDES 0x1537 +#define E1000_DEV_ID_I210_SGMII 0x1538 +#define E1000_DEV_ID_I210_AUTOMOTIVE 0x15F6 +#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B +#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C +#define E1000_DEV_ID_I211_COPPER 0x1539 +#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40 +#define E1000_DEV_ID_I354_SGMII 0x1F41 +#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS 0x1F45 +#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438 +#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A +#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C +#define E1000_DEV_ID_DH89XXCC_SFP 0x0440 + +#define 
E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 +#define E1000_REVISION_4 4 + +#define E1000_FUNC_0 0 +#define E1000_FUNC_1 1 +#define E1000_FUNC_2 2 +#define E1000_FUNC_3 3 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2 6 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3 9 + +enum e1000_mac_type { + e1000_undefined = 0, + e1000_82575, + e1000_82576, + e1000_82580, + e1000_i350, + e1000_i354, + e1000_i210, + e1000_i211, + e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_invm, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large, +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_82580, + e1000_phy_vf, + e1000_phy_i210, +}; + +enum e1000_bus_type { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_pci_express, + e1000_bus_type_reserved +}; + +enum e1000_bus_speed { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_2500, + e1000_bus_speed_5000, + e1000_bus_speed_reserved +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +#ifndef __le16 +#define __le16 u16 +#endif +#ifndef __le32 +#define __le32 u32 +#endif +#ifndef __le64 +#define __le64 u64 +#endif +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* 
Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; + u64 cbtmpc; + u64 htdpmc; + u64 cbrdpc; + u64 cbrmpc; + u64 rpthc; + u64 hgptc; + u64 
htcbdpc; + u64 hgorc; + u64 hgotc; + u64 lenerrs; + u64 scvpc; + u64 hrmpc; + u64 doosync; + u64 o2bgptc; + u64 o2bspc; + u64 b2ospc; + u64 b2ogprc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "e1000_mac.h" +#include "e1000_phy.h" +#include "e1000_nvm.h" +#include "e1000_manage.h" +#include "e1000_mbx.h" + +/* Function pointers for the MAC. */ +struct e1000_mac_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + void (*shutdown_serdes)(struct e1000_hw *); + void (*power_up_serdes)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8*, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + s32 (*validate_mdi_setting)(struct e1000_hw *); + s32 (*get_thermal_sensor_data)(struct e1000_hw *); + s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); + s32 (*acquire_swfw_sync)(struct e1000_hw *, u16); + void (*release_swfw_sync)(struct e1000_hw *, u16); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct 
e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); + s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *); + s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8); +}; + +/* Function pointers for the NVM. */ +struct e1000_nvm_operations { + s32 (*init_params)(struct e1000_hw *); + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +#define E1000_MAX_SENSORS 3 + +struct e1000_thermal_diode_data { + u8 location; + u8 temp; + u8 caution_thresh; + u8 max_op_thresh; +}; + +struct e1000_thermal_sensor_data { + struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS]; +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ADDR_LEN]; + u8 perm_addr[ETH_ADDR_LEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + u16 uta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool asf_firmware_present; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + enum e1000_serdes_link_state serdes_link_state; + bool serdes_has_link; + bool tx_pkt_filtering; + struct e1000_thermal_sensor_data thermal_sensor_data; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool reset_disable; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_type type; + enum e1000_bus_speed speed; + enum e1000_bus_width width; + + u16 func; + u16 pci_cmd_word; +}; + +struct e1000_fc_info { + 
u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_mbx_operations { + s32 (*init_params)(struct e1000_hw *hw); + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); + s32 (*check_for_msg)(struct e1000_hw *, u16); + s32 (*check_for_ack)(struct e1000_hw *, u16); + s32 (*check_for_rst)(struct e1000_hw *, u16); +}; + +struct e1000_mbx_stats { + u32 msgs_tx; + u32 msgs_rx; + + u32 acks; + u32 reqs; + u32 rsts; +}; + +struct e1000_mbx_info { + struct e1000_mbx_operations ops; + struct e1000_mbx_stats stats; + u32 timeout; + u32 usec_delay; + u16 size; +}; + +struct e1000_dev_spec_82575 { + bool sgmii_active; + bool global_device_reset; + bool eee_disable; + bool module_plugged; + bool clear_semaphore_once; + u32 mtu; + struct sfp_e1000_flags eth_flags; + u8 media_port; + bool media_changed; +}; + +struct e1000_dev_spec_vf { + u32 vf_number; + u32 v2p_mailbox; +}; + +struct e1000_hw { + void *back; + + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + unsigned long io_base; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_mbx_info mbx; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82575 _82575; + struct e1000_dev_spec_vf vf; + } dev_spec; + + u16 device_id; + u16 subsystem_vendor_id; + u16 subsystem_device_id; + u16 vendor_id; + + u8 revision_id; +}; + +#include "e1000_82575.h" +#include "e1000_i210.h" + +/* These functions must be implemented by drivers */ +s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +s32 e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); +void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value); + +#endif diff --git a/drivers/staging/igb_avb/e1000_i210.c b/drivers/staging/igb_avb/e1000_i210.c new file mode 100644 index 000000000000..7e32fd112f33 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_i210.c @@ -0,0 +1,993 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + + +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw); +static void e1000_release_nvm_i210(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw); +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw); +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data); + +/** + * e1000_acquire_nvm_i210 - Request for access to EEPROM + * @hw: pointer to the HW structure + * + * Acquire the necessary semaphores for exclusive access to the EEPROM. + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +static s32 e1000_acquire_nvm_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_acquire_nvm_i210"); + + ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_i210 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit, + * then release the semaphores acquired. + **/ +static void e1000_release_nvm_i210(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_release_nvm_i210"); + + e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 ret_val = E1000_SUCCESS; + s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + + DEBUGFUNC("e1000_acquire_swfw_sync_i210"); + + while (i < timeout) { + if (e1000_get_hw_semaphore_i210(hw)) { + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* + * Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000_put_hw_semaphore_generic(hw); + msec_delay_irq(5); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n"); + ret_val = -E1000_ERR_SWFW_SYNC; + goto out; + } + + swfw_sync |= swmask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); + +out: + return ret_val; +} + +/** + * e1000_release_swfw_sync_i210 - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to release + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for.
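+ * + * As an illustrative sketch, a caller that needs exclusive EEPROM access + * might bracket it with this acquire/release pair: + * + * if (e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM)) + * return -E1000_ERR_SWFW_SYNC; + * ... access the NVM here ... + * e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);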
+ **/ +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + DEBUGFUNC("e1000_release_swfw_sync_i210"); + + while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS) + ; /* Empty */ + + swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC); + swfw_sync &= ~mask; + E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync); + + e1000_put_hw_semaphore_generic(hw); +} + +/** + * e1000_get_hw_semaphore_i210 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_i210"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + /* In rare circumstances, the SW semaphore may already be held + * unintentionally. Clear the semaphore once before giving up. + */ + if (hw->dev_spec._82575.clear_semaphore_once) { + hw->dev_spec._82575.clear_semaphore_once = false; + e1000_put_hw_semaphore_generic(hw); + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + } + } + + /* If we do not have the semaphore here, we have to give up. */ + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the Shadow Ram to read + * @words: number of words to read + * @data: word read from the Shadow Ram + * + * Reads a 16 bit word from the Shadow Ram using the EERD register. + * Uses necessary synchronization semaphores. + **/ +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_read_nvm_srrd_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to read in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_read_nvm_eerd(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow RAM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow RAM + * + * Writes data to Shadow RAM at offset using EEWR register. 
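+ * + * Illustrative usage sketch (offset 0x20 is an arbitrary example): + * + * u16 val = 0x1234; + * if (e1000_write_nvm_srwr_i210(hw, 0x20, 1, &val) == E1000_SUCCESS) + * e1000_update_nvm_checksum_i210(hw);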
+ * + * If e1000_update_nvm_checksum is not called after this function, the + * data will not be committed to FLASH and also Shadow RAM will most likely + * contain an invalid checksum. + * + * If error code is returned, data and Shadow RAM may be inconsistent - buffer + * partially written. + **/ +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 status = E1000_SUCCESS; + u16 i, count; + + DEBUGFUNC("e1000_write_nvm_srwr_i210"); + + /* We cannot hold synchronization semaphores for too long, + * because of forceful takeover procedure. However it is more efficient + * to write in bursts than synchronizing access for each word. */ + for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) { + count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ? + E1000_EERD_EEWR_MAX_COUNT : (words - i); + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + status = e1000_write_nvm_srwr(hw, offset, count, + data + i); + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + if (status != E1000_SUCCESS) + break; + } + + return status; +} + +/** + * e1000_write_nvm_srwr - Write to Shadow Ram using EEWR + * @hw: pointer to the HW structure + * @offset: offset within the Shadow Ram to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the Shadow Ram + * + * Writes data to Shadow Ram at offset using EEWR register. + * + * If e1000_update_nvm_checksum is not called after this function, the + * Shadow Ram will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, k, eewr = 0; + u32 attempts = 100000; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_nvm_srwr"); + + /* + * A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | + (data[i] << E1000_NVM_RW_REG_DATA) | + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_SRWR, eewr); + + for (k = 0; k < attempts; k++) { + if (E1000_NVM_RW_REG_DONE & + E1000_READ_REG(hw, E1000_SRWR)) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("Shadow RAM write EEWR timed out\n"); + break; + } + } + +out: + return ret_val; +} + +/** e1000_read_invm_word_i210 - Reads OTP + * @hw: pointer to the HW structure + * @address: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Reads 16-bit words from the OTP. Return error when the word is not + * stored in OTP.
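+ * + * For reference, the INVM_DWORD_TO_* macros in e1000_i210.h decode each + * word-autoload dword as: record type in bits 2:0, word address in bits + * 15:9, 16-bit payload in bits 31:16. A worked example: + * + * invm_dword = 0x12340A01 + * -> record type 0x1 (word autoload), word address 0x05, data 0x1234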
+ **/ +static s32 e1000_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data) +{ + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u32 invm_dword; + u16 i; + u8 record_type, word_address; + + DEBUGFUNC("e1000_read_invm_word_i210"); + + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + /* Get record type */ + record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword); + if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE) + break; + if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE) + i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE) + i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS; + if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) { + word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword); + if (word_address == address) { + *data = INVM_DWORD_TO_WORD_DATA(invm_dword); + DEBUGOUT2("Read INVM Word 0x%02x = %x", + address, *data); + status = E1000_SUCCESS; + break; + } + } + } + if (status != E1000_SUCCESS) + DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address); + return status; +} + +/** e1000_read_invm_i210 - Read invm wrapper function for I210/I211 + * @hw: pointer to the HW structure + * @offset: the word address (aka eeprom offset) to read + * @data: pointer to the data read + * + * Wrapper function to return data formerly found in the NVM. + **/ +static s32 e1000_read_invm_i210(struct e1000_hw *hw, u16 offset, + u16 E1000_UNUSEDARG words, u16 *data) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_invm_i210"); + + /* Only the MAC addr is required to be present in the iNVM */ + switch (offset) { + case NVM_MAC_ADDR: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, &data[0]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+1, + &data[1]); + ret_val |= e1000_read_invm_word_i210(hw, (u8)offset+2, + &data[2]); + if (ret_val != E1000_SUCCESS) + DEBUGOUT("MAC Addr not found in iNVM\n"); + break; + case NVM_INIT_CTRL_2: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_2_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_INIT_CTRL_4: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_INIT_CTRL_4_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_1_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_1_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_LED_0_2_CFG: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = NVM_LED_0_2_CFG_DEFAULT_I211; + ret_val = E1000_SUCCESS; + } + break; + case NVM_ID_LED_SETTINGS: + ret_val = e1000_read_invm_word_i210(hw, (u8)offset, data); + if (ret_val != E1000_SUCCESS) { + *data = ID_LED_RESERVED_FFFF; + ret_val = E1000_SUCCESS; + } + break; + case NVM_SUB_DEV_ID: + *data = hw->subsystem_device_id; + break; + case NVM_SUB_VEN_ID: + *data = hw->subsystem_vendor_id; + break; + case NVM_DEV_ID: + *data = hw->device_id; + break; + case NVM_VEN_ID: + *data = hw->vendor_id; + break; + default: + DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset); + *data = NVM_RESERVED_WORD; + break; + } + return ret_val; +} + +/** + * e1000_read_invm_version - Reads iNVM version and image type + * @hw: pointer to the HW structure + * @invm_ver: version structure for the version read + * + * Reads iNVM version and image type.
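+ * + * As a worked example of the decode below, a version word of 0x0153 + * splits via E1000_INVM_MAJOR_MASK/E1000_INVM_MAJOR_SHIFT and + * E1000_INVM_MINOR_MASK into invm_major = 0x15 and invm_minor = 0x3.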
+ **/ +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver) +{ + u32 *record = NULL; + u32 *next_record = NULL; + u32 i = 0; + u32 invm_dword = 0; + u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE / + E1000_INVM_RECORD_SIZE_IN_BYTES); + u32 buffer[E1000_INVM_SIZE]; + s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND; + u16 version = 0; + + DEBUGFUNC("e1000_read_invm_version"); + + /* Read iNVM memory */ + for (i = 0; i < E1000_INVM_SIZE; i++) { + invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i)); + buffer[i] = invm_dword; + } + + /* Read version number */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have first version location used */ + if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) { + version = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have second version location used */ + else if ((i == 1) && + ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have odd version location + * used and it is the last one used + */ + else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) && + ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) && + (i != 1))) { + version = (*next_record & E1000_INVM_VER_FIELD_TWO) + >> 13; + status = E1000_SUCCESS; + break; + } + /* + * Check if we have even version location + * used and it is the last one used + */ + else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) && + ((*record & 0x3) == 0)) { + version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3; + status = E1000_SUCCESS; + break; + } + } + + if (status == E1000_SUCCESS) { + invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK) + >> E1000_INVM_MAJOR_SHIFT; + invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK; + } + /* Read Image Type */ + for (i = 1; i < invm_blocks; i++) { + record = &buffer[invm_blocks - i]; + next_record = &buffer[invm_blocks - i + 1]; + + /* Check if we have image type in first location used */ + if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) { + invm_ver->invm_img_type = 0; + status = E1000_SUCCESS; + break; + } + /* Check if we have image type in the last location used */ + else if ((((*record & 0x3) == 0) && + ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) || + ((((*record & 0x3) != 0) && (i != 1)))) { + invm_ver->invm_img_type = + (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23; + status = E1000_SUCCESS; + break; + } + } + return status; +} + +/** + * e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 status = E1000_SUCCESS; + s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *); + + DEBUGFUNC("e1000_validate_nvm_checksum_i210"); + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + + /* + * Replace the read function that grabs the semaphore with + * one that skips it, since the semaphore has already + * been taken here. + */ + read_op_ptr = hw->nvm.ops.read; + hw->nvm.ops.read = e1000_read_nvm_eerd; + + status = e1000_validate_nvm_checksum_generic(hw); + + /* Revert original read operation.
*/ + hw->nvm.ops.read = read_op_ptr; + + hw->nvm.ops.release(hw); + } else { + status = E1000_ERR_SWFW_SYNC; + } + + return status; +} + + +/** + * e1000_update_nvm_checksum_i210 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. Next, commit EEPROM data onto the Flash. + **/ +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum_i210"); + + /* + * Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data); + if (ret_val != E1000_SUCCESS) { + DEBUGOUT("EEPROM read failed\n"); + goto out; + } + + if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) { + /* + * Do not use hw->nvm.ops.write, hw->nvm.ops.read + * because we do not want to take the synchronization + * semaphores twice here. + */ + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data); + if (ret_val) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Read Error while updating checksum.\n"); + goto out; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1, + &checksum); + if (ret_val != E1000_SUCCESS) { + hw->nvm.ops.release(hw); + DEBUGOUT("NVM Write Error while updating checksum.\n"); + goto out; + } + + hw->nvm.ops.release(hw); + + ret_val = e1000_update_flash_i210(hw); + } else { + ret_val = E1000_ERR_SWFW_SYNC; + } +out: + return ret_val; +} + +/** + * e1000_get_flash_presence_i210 - Check if flash device is detected. + * @hw: pointer to the HW structure + * + **/ +bool e1000_get_flash_presence_i210(struct e1000_hw *hw) +{ + u32 eec = 0; + bool ret_val = false; + + DEBUGFUNC("e1000_get_flash_presence_i210"); + + eec = E1000_READ_REG(hw, E1000_EECD); + + if (eec & E1000_EECD_FLASH_DETECTED_I210) + ret_val = true; + + return ret_val; +} + +/** + * e1000_update_flash_i210 - Commit EEPROM to the flash + * @hw: pointer to the HW structure + * + **/ +s32 e1000_update_flash_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 flup; + + DEBUGFUNC("e1000_update_flash_i210"); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == -E1000_ERR_NVM) { + DEBUGOUT("Flash update time out\n"); + goto out; + } + + flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210; + E1000_WRITE_REG(hw, E1000_EECD, flup); + + ret_val = e1000_pool_flash_update_done_i210(hw); + if (ret_val == E1000_SUCCESS) + DEBUGOUT("Flash update complete\n"); + else + DEBUGOUT("Flash update time out\n"); + +out: + return ret_val; +} + +/** + * e1000_pool_flash_update_done_i210 - Poll FLUDONE status. + * @hw: pointer to the HW structure + * + **/ +s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw) +{ + s32 ret_val = -E1000_ERR_NVM; + u32 i, reg; + + DEBUGFUNC("e1000_pool_flash_update_done_i210"); + + for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) { + reg = E1000_READ_REG(hw, E1000_EECD); + if (reg & E1000_EECD_FLUDONE_I210) { + ret_val = E1000_SUCCESS; + break; + } + usec_delay(5); + } + + return ret_val; +} + +/** + * e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize the i210/i211 NVM parameters and function pointers.
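+ * + * Illustrative call order: a probe path would normally reach this + * through the function-pointer wiring done below, e.g.: + * + * e1000_init_function_pointers_i210(hw); + * hw->nvm.ops.init_params(hw); (selects flash vs. iNVM ops)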
+ **/ +static s32 e1000_init_nvm_params_i210(struct e1000_hw *hw) +{ + s32 ret_val; + struct e1000_nvm_info *nvm = &hw->nvm; + + DEBUGFUNC("e1000_init_nvm_params_i210"); + + ret_val = e1000_init_nvm_params_82575(hw); + nvm->ops.acquire = e1000_acquire_nvm_i210; + nvm->ops.release = e1000_release_nvm_i210; + nvm->ops.valid_led_default = e1000_valid_led_default_i210; + if (e1000_get_flash_presence_i210(hw)) { + hw->nvm.type = e1000_nvm_flash_hw; + nvm->ops.read = e1000_read_nvm_srrd_i210; + nvm->ops.write = e1000_write_nvm_srwr_i210; + nvm->ops.validate = e1000_validate_nvm_checksum_i210; + nvm->ops.update = e1000_update_nvm_checksum_i210; + } else { + hw->nvm.type = e1000_nvm_invm; + nvm->ops.read = e1000_read_invm_i210; + nvm->ops.write = e1000_null_write_nvm; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.update = e1000_null_ops_generic; + } + return ret_val; +} + +/** + * e1000_init_function_pointers_i210 - Init func ptrs. + * @hw: pointer to the HW structure + * + * Called to initialize all function pointers and parameters. + **/ +void e1000_init_function_pointers_i210(struct e1000_hw *hw) +{ + e1000_init_function_pointers_82575(hw); + hw->nvm.ops.init_params = e1000_init_nvm_params_i210; + + return; +} + +/** + * e1000_valid_led_default_i210 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +static s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_i210"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + goto out; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) { + switch (hw->phy.media_type) { + case e1000_media_type_internal_serdes: + *data = ID_LED_DEFAULT_I210_SERDES; + break; + case e1000_media_type_copper: + default: + *data = ID_LED_DEFAULT_I210; + break; + } + } +out: + return ret_val; +} + +/** + * __e1000_access_xmdio_reg - Read/write XMDIO register + * @hw: pointer to the HW structure + * @address: XMDIO address to program + * @dev_addr: device address to program + * @data: pointer to value to read/write from/to the XMDIO address + * @read: boolean flag to indicate read or write + **/ +static s32 __e1000_access_xmdio_reg(struct e1000_hw *hw, u16 address, + u8 dev_addr, u16 *data, bool read) +{ + s32 ret_val; + + DEBUGFUNC("__e1000_access_xmdio_reg"); + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA | + dev_addr); + if (ret_val) + return ret_val; + + if (read) + ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data); + else + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data); + if (ret_val) + return ret_val; + + /* Recalibrate the device back to 0 */ + ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0); + if (ret_val) + return ret_val; + + return ret_val; +} + +/** + * e1000_read_xmdio_reg - Read XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be read from the EMI address + **/ +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data) +{ + 
DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, data, true); +} + +/** + * e1000_write_xmdio_reg - Write XMDIO register + * @hw: pointer to the HW structure + * @addr: XMDIO address to program + * @dev_addr: device address to program + * @data: value to be written to the XMDIO address + **/ +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data) +{ + DEBUGFUNC("e1000_read_xmdio_reg"); + + return __e1000_access_xmdio_reg(hw, addr, dev_addr, &data, false); +} + +/** + * e1000_pll_workaround_i210 + * @hw: pointer to the HW structure + * + * Works around an errata in the PLL circuit where it occasionally + * provides the wrong clock frequency after power up. + **/ +static s32 e1000_pll_workaround_i210(struct e1000_hw *hw) +{ + s32 ret_val; + u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val; + u16 nvm_word, phy_word, pci_word, tmp_nvm; + int i; + + /* Get and set needed register values */ + wuc = E1000_READ_REG(hw, E1000_WUC); + mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG); + reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO; + E1000_WRITE_REG(hw, E1000_MDICNFG, reg_val); + + /* Get data from NVM, or set default */ + ret_val = e1000_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD, + &nvm_word); + if (ret_val != E1000_SUCCESS) + nvm_word = E1000_INVM_DEFAULT_AL; + tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL; + for (i = 0; i < E1000_MAX_PLL_TRIES; i++) { + /* check current state directly from internal PHY */ + e1000_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE | + E1000_PHY_PLL_FREQ_REG), &phy_word); + if ((phy_word & E1000_PHY_PLL_UNCONF) + != E1000_PHY_PLL_UNCONF) { + ret_val = E1000_SUCCESS; + break; + } else { + ret_val = -E1000_ERR_PHY; + } + /* directly reset the internal PHY */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl|E1000_CTRL_PHY_RST); + + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE); + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + + E1000_WRITE_REG(hw, E1000_WUC, 0); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + e1000_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + pci_word |= E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + msec_delay(1); + pci_word &= ~E1000_PCI_PMCSR_D3; + e1000_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word); + reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16); + E1000_WRITE_REG(hw, E1000_EEARBC_I210, reg_val); + + /* restore WUC register */ + E1000_WRITE_REG(hw, E1000_WUC, wuc); + } + /* restore MDICNFG setting */ + E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg); + return ret_val; +} + +/** + * e1000_init_hw_i210 - Init hw for I210/I211 + * @hw: pointer to the HW structure + * + * Called to initialize hw for i210 hw family. 
+ **/ +s32 e1000_init_hw_i210(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_init_hw_i210"); + if ((hw->mac.type >= e1000_i210) && + !(e1000_get_flash_presence_i210(hw))) { + ret_val = e1000_pll_workaround_i210(hw); + if (ret_val != E1000_SUCCESS) + return ret_val; + } + ret_val = e1000_init_hw_82575(hw); + return ret_val; +} diff --git a/drivers/staging/igb_avb/e1000_i210.h b/drivers/staging/igb_avb/e1000_i210.h new file mode 100644 index 000000000000..a14e897d26a0 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_i210.h @@ -0,0 +1,101 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_I210_H_ +#define _E1000_I210_H_ + +bool e1000_get_flash_presence_i210(struct e1000_hw *hw); +s32 e1000_update_flash_i210(struct e1000_hw *hw); +s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw); +s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +s32 e1000_read_invm_version(struct e1000_hw *hw, + struct e1000_fw_version *invm_ver); +s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask); +s32 e1000_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 *data); +s32 e1000_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, + u16 data); +s32 e1000_init_hw_i210(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 +#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 + +#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \ + (u8)((invm_dword) & 0x7) +#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \ + (u8)(((invm_dword) & 0x0000FE00) >> 9) +#define INVM_DWORD_TO_WORD_DATA(invm_dword) \ + (u16)(((invm_dword) & 0xFFFF0000) >> 16) + +enum E1000_INVM_STRUCTURE_TYPE { + E1000_INVM_UNINITIALIZED_STRUCTURE = 0x00, + E1000_INVM_WORD_AUTOLOAD_STRUCTURE = 0x01, + E1000_INVM_CSR_AUTOLOAD_STRUCTURE = 0x02, + E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE = 0x03, + E1000_INVM_RSA_KEY_SHA256_STRUCTURE = 0x04, + E1000_INVM_INVALIDATED_STRUCTURE = 0x0F, +}; + +#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8 +#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1 +#define E1000_INVM_ULT_BYTES_SIZE 8 +#define E1000_INVM_RECORD_SIZE_IN_BYTES 4 +#define E1000_INVM_VER_FIELD_ONE 0x1FF8 +#define E1000_INVM_VER_FIELD_TWO 0x7FE000 +#define E1000_INVM_IMGTYPE_FIELD 0x1F800000 + +#define E1000_INVM_MAJOR_MASK 0x3F0 +#define E1000_INVM_MINOR_MASK 0xF +#define E1000_INVM_MAJOR_SHIFT 4 + +#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 
<< 4) | \ + (ID_LED_OFF1_OFF2)) +#define ID_LED_DEFAULT_I210_SERDES ((ID_LED_DEF1_DEF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_OFF1_ON2)) + +/* NVM offset defaults for I211 devices */ +#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243 +#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1 +#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 +#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C + +/* PLL Defines */ +#define E1000_PCI_PMCSR 0x44 +#define E1000_PCI_PMCSR_D3 0x03 +#define E1000_MAX_PLL_TRIES 5 +#define E1000_PHY_PLL_UNCONF 0xFF +#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000 +#define E1000_PHY_PLL_FREQ_REG 0x000E +#define E1000_INVM_DEFAULT_AL 0x202F +#define E1000_INVM_AUTOLOAD 0x0A +#define E1000_INVM_PLL_WO_VAL 0x0010 + +#endif diff --git a/drivers/staging/igb_avb/e1000_mac.c b/drivers/staging/igb_avb/e1000_mac.c new file mode 100644 index 000000000000..f848b995c932 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mac.c @@ -0,0 +1,2149 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw); +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +static void e1000_config_collision_dist_generic(struct e1000_hw *hw); +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); + +/** + * e1000_init_mac_ops_generic - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_mac_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + DEBUGFUNC("e1000_init_mac_ops_generic"); + + /* General Setup */ + mac->ops.init_params = e1000_null_ops_generic; + mac->ops.init_hw = e1000_null_ops_generic; + mac->ops.reset_hw = e1000_null_ops_generic; + mac->ops.setup_physical_interface = e1000_null_ops_generic; + mac->ops.get_bus_info = e1000_null_ops_generic; + mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie; + mac->ops.read_mac_addr = e1000_read_mac_addr_generic; + mac->ops.config_collision_dist = e1000_config_collision_dist_generic; + mac->ops.clear_hw_cntrs = e1000_null_mac_generic; + /* LED */ + mac->ops.cleanup_led = e1000_null_ops_generic; + mac->ops.setup_led = e1000_null_ops_generic; + mac->ops.blink_led = e1000_null_ops_generic; + mac->ops.led_on = e1000_null_ops_generic; + mac->ops.led_off = e1000_null_ops_generic; + /* LINK */ + mac->ops.setup_link = e1000_null_ops_generic; + mac->ops.get_link_up_info = e1000_null_link_info; + mac->ops.check_for_link = e1000_null_ops_generic; + /* Management */ + mac->ops.check_mng_mode = e1000_null_mng_mode; + /* VLAN, MC, etc.
*/ + mac->ops.update_mc_addr_list = e1000_null_update_mc; + mac->ops.clear_vfta = e1000_null_mac_generic; + mac->ops.write_vfta = e1000_null_write_vfta; + mac->ops.rar_set = e1000_rar_set_generic; + mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic; +} + +/** + * e1000_null_ops_generic - No-op function, returns 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_ops_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_ops_generic"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mac_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_mac_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mac_generic"); + return; +} + +/** + * e1000_null_link_info - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_link_info(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *s, u16 E1000_UNUSEDARG *d) +{ + DEBUGFUNC("e1000_null_link_info"); + return E1000_SUCCESS; +} + +/** + * e1000_null_mng_mode - No-op function, return false + * @hw: pointer to the HW structure + **/ +bool e1000_null_mng_mode(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_mng_mode"); + return false; +} + +/** + * e1000_null_update_mc - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_update_mc(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_update_mc"); + return; +} + +/** + * e1000_null_write_vfta - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_write_vfta(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG a, u32 E1000_UNUSEDARG b) +{ + DEBUGFUNC("e1000_null_write_vfta"); + return; +} + +/** + * e1000_null_rar_set - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +int e1000_null_rar_set(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG *h, u32 E1000_UNUSEDARG a) +{ + DEBUGFUNC("e1000_null_rar_set"); + return E1000_SUCCESS; +} + +/** + * e1000_get_bus_info_pcie_generic - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + u16 pcie_link_status; + + DEBUGFUNC("e1000_get_bus_info_pcie_generic"); + + bus->type = e1000_bus_type_pci_express; + + ret_val = e1000_read_pcie_cap_reg(hw, PCIE_LINK_STATUS, + &pcie_link_status); + if (ret_val) { + bus->width = e1000_bus_width_unknown; + bus->speed = e1000_bus_speed_unknown; + } else { + switch (pcie_link_status & PCIE_LINK_SPEED_MASK) { + case PCIE_LINK_SPEED_2500: + bus->speed = e1000_bus_speed_2500; + break; + case PCIE_LINK_SPEED_5000: + bus->speed = e1000_bus_speed_5000; + break; + default: + bus->speed = e1000_bus_speed_unknown; + break; + } + + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. 
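+ * + * Worked example, assuming the usual E1000_STATUS_FUNC_MASK (0x0000000C) + * and E1000_STATUS_FUNC_SHIFT (2) definitions: + * + * E1000_STATUS = 0x00000004 -> bus->func = (0x4 & 0xC) >> 2 = 1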
+ **/ +static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = E1000_READ_REG(hw, E1000_STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + DEBUGFUNC("e1000_clear_vfta_generic"); + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + E1000_WRITE_FLUSH(hw); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + DEBUGFUNC("e1000_write_vfta_generic"); + + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_init_rx_addrs_generic - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: number of receive address registers + * + * Set up the receive address registers by setting the base receive address + * register to the device's MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ADDR_LEN] = {0}; + + DEBUGFUNC("e1000_init_rx_addrs_generic"); + + /* Set up the receive address */ + DEBUGOUT("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the NVM for an alternate MAC address. An alternate MAC address + * can be set up by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ADDR_LEN]; + + DEBUGFUNC("e1000_check_alt_mac_addr_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* Alternate MAC address is handled by the option ROM for 82580 + * and newer. SW support not required.
+ */ + if (hw->mac.type >= e1000_82580) + return E1000_SUCCESS; + + ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return E1000_SUCCESS; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + if (hw->bus.func == E1000_FUNC_2) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2; + if (hw->bus.func == E1000_FUNC_3) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3; + + for (i = 0; i < ETH_ADDR_LEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (alt_mac_addr[0] & 0x01) { + DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n"); + return E1000_SUCCESS; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0. + */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return E1000_SUCCESS; +} + +/** + * e1000_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +static int e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + DEBUGFUNC("e1000_rar_set_generic"); + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + E1000_WRITE_REG(hw, E1000_RAL(index), rar_low); + E1000_WRITE_FLUSH(hw); + E1000_WRITE_REG(hw, E1000_RAH(index), rar_high); + E1000_WRITE_FLUSH(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_hash_mc_addr_generic - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + DEBUGFUNC("e1000_hash_mc_addr_generic"); + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting.
+ * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x58D + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16) mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates the entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + DEBUGFUNC("e1000_update_mc_addr_list_generic"); + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32) i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ADDR_LEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value + * @hw: pointer to the HW structure + * + * In certain situations, a system BIOS may report that the PCIx maximum + * memory read byte count (MMRBC) value is higher than the actual + * value. We check the PCIx command register against the current PCIx + * status register and lower the command value if it exceeds the + * reported capability.
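+ * + * A hedged worked example (editor-added; PCI-X encodes MMRBC as + * 0=512, 1=1024, 2=2048 and 3=4096 bytes): if the command register + * reports cmd_mmrbc = 3 (4096B) while the status register reports + * stat_mmrbc = 2 (2048B), the command field is rewritten to 2 before + * write-back; a 4K status value is itself clamped to 2K below.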
+ **/ +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw) +{ + u16 cmd_mmrbc; + u16 pcix_cmd; + u16 pcix_stat_hi_word; + u16 stat_mmrbc; + + DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic"); + + /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */ + if (hw->bus.type != e1000_bus_type_pcix) + return; + + e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word); + cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >> + PCIX_COMMAND_MMRBC_SHIFT; + stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >> + PCIX_STATUS_HI_MMRBC_SHIFT; + if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K) + stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K; + if (cmd_mmrbc > stat_mmrbc) { + pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK; + pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT; + e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd); + } +} + +/** + * e1000_clear_hw_cntrs_base_generic - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_clear_hw_cntrs_base_generic"); + + E1000_READ_REG(hw, E1000_CRCERRS); + E1000_READ_REG(hw, E1000_SYMERRS); + E1000_READ_REG(hw, E1000_MPC); + E1000_READ_REG(hw, E1000_SCC); + E1000_READ_REG(hw, E1000_ECOL); + E1000_READ_REG(hw, E1000_MCC); + E1000_READ_REG(hw, E1000_LATECOL); + E1000_READ_REG(hw, E1000_COLC); + E1000_READ_REG(hw, E1000_DC); + E1000_READ_REG(hw, E1000_SEC); + E1000_READ_REG(hw, E1000_RLEC); + E1000_READ_REG(hw, E1000_XONRXC); + E1000_READ_REG(hw, E1000_XONTXC); + E1000_READ_REG(hw, E1000_XOFFRXC); + E1000_READ_REG(hw, E1000_XOFFTXC); + E1000_READ_REG(hw, E1000_FCRUC); + E1000_READ_REG(hw, E1000_GPRC); + E1000_READ_REG(hw, E1000_BPRC); + E1000_READ_REG(hw, E1000_MPRC); + E1000_READ_REG(hw, E1000_GPTC); + E1000_READ_REG(hw, E1000_GORCL); + E1000_READ_REG(hw, E1000_GORCH); + E1000_READ_REG(hw, E1000_GOTCL); + E1000_READ_REG(hw, E1000_GOTCH); + E1000_READ_REG(hw, E1000_RNBC); + E1000_READ_REG(hw, E1000_RUC); + E1000_READ_REG(hw, E1000_RFC); + E1000_READ_REG(hw, E1000_ROC); + E1000_READ_REG(hw, E1000_RJC); + E1000_READ_REG(hw, E1000_TORL); + E1000_READ_REG(hw, E1000_TORH); + E1000_READ_REG(hw, E1000_TOTL); + E1000_READ_REG(hw, E1000_TOTH); + E1000_READ_REG(hw, E1000_TPR); + E1000_READ_REG(hw, E1000_TPT); + E1000_READ_REG(hw, E1000_MPTC); + E1000_READ_REG(hw, E1000_BPTC); +} + +/** + * e1000_check_for_copper_link_generic - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_check_for_copper_link_generic"); + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return E1000_SUCCESS; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY.
+ */ + ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return E1000_SUCCESS; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift_generic(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) + DEBUGOUT("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000_check_for_fiber_link_generic - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_fiber_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. 
If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + DEBUGFUNC("e1000_check_for_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + status = E1000_READ_REG(hw, E1000_STATUS); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return E1000_SUCCESS; + } + DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up_generic(hw); + if (ret_val) { + DEBUGOUT("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw); + E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) { + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usec_delay(10); + rxcw = E1000_READ_REG(hw, E1000_RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + DEBUGOUT("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + DEBUGOUT("SERDES: Link down - autoneg failed\n"); + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. 
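+ * + * Decoding sketch (editor-added, mirroring the checks below) for the + * NVM_WORD0F_PAUSE_MASK bits of the word that is read: + * + *	neither bit set		-> e1000_fc_none + *	ASM_DIR bit only	-> e1000_fc_tx_pause + *	any other combination	-> e1000_fc_full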
+ **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + u16 nvm_offset = 0; + + DEBUGFUNC("e1000_set_default_fc_generic"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->mac.type == e1000_i350) { + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func); + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG + + nvm_offset, + 1, &nvm_data); + } else { + ret_val = hw->nvm.ops.read(hw, + NVM_INIT_CONTROL2_REG, + 1, &nvm_data); + } + + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == + NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_setup_link_generic"); + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return E1000_SUCCESS; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + DEBUGOUT1("After fix-ups FlowControl is now = %x\n", + hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + DEBUGOUT("Initializing the Flow Control address, type and timer regs\n"); + E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE); + E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH); + E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW); + + E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time); + + return e1000_set_fc_watermarks_generic(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * based on the flow control settings in e1000_mac_info.
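+ * + * Summary of the TXCW encodings used below (editor-added): + * + *	e1000_fc_none		ANE | FD + *	e1000_fc_rx_pause	ANE | FD | PAUSE_MASK + *	e1000_fc_tx_pause	ANE | FD | ASM_DIR + *	e1000_fc_full		ANE | FD | PAUSE_MASK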
+ **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + DEBUGFUNC("e1000_commit_fc_settings_generic"); + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + E1000_WRITE_REG(hw, E1000_TXCW, txcw); + mac->txcw = txcw; + + return E1000_SUCCESS; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register; if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + DEBUGFUNC("e1000_poll_fiber_serdes_link_generic"); + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + msec_delay(10); + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + DEBUGOUT("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners.
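+ * (Editor-added cross-reference: for fiber and internal serdes media + * this ops pointer typically resolves to + * e1000_check_for_fiber_link_generic() or + * e1000_check_for_serdes_link_generic() above, which force the link + * when no /C/ ordered sets are being received.)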
+ */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + DEBUGOUT("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + DEBUGOUT("Valid Link Found\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + DEBUGFUNC("e1000_setup_fiber_serdes_link_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + DEBUGOUT("Auto-negotiation enabled\n"); + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + E1000_WRITE_FLUSH(hw); + msec_delay(1); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + DEBUGOUT("No signal detected\n"); + } + + return ret_val; +} + +/** + * e1000_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +static void e1000_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + DEBUGFUNC("e1000_config_collision_dist_generic"); + + tctl = E1000_READ_REG(hw, E1000_TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + E1000_WRITE_REG(hw, E1000_TCTL, tctl); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_set_fc_watermarks_generic - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + DEBUGFUNC("e1000_set_fc_watermarks_generic"); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
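+ * For example (editor-added, illustrative only): with send_xon set, + * FCRTL is written as low_water | E1000_FCRTL_XONE and FCRTH as + * high_water, so the hardware sends XOFF once the Rx FIFO fills past + * high_water and XON again when it drains below low_water.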
+ */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl); + E1000_WRITE_REG(hw, E1000_FCRTH, fcrth); + + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc_generic - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_force_mac_fc_generic"); + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up_generic - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = E1000_SUCCESS; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + DEBUGFUNC("e1000_config_fc_after_link_up_generic"); + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter.
+ */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000_force_mac_fc_generic(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000_force_mac_fc_generic(hw); + } + + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner have + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) { + DEBUGOUT("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto-Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames.
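+ * (Editor-added example: if the local side requested + * e1000_fc_rx_pause it still advertised full symmetric PAUSE, so + * when the partner also advertises PAUSE the check below downgrades + * the resolved mode to Rx PAUSE only instead of full flow control.)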
+ */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + DEBUGOUT("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner have + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = E1000_READ_REG(hw, E1000_PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + DEBUGOUT("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto-Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = E1000_READ_REG(hw, E1000_PCS_ANADV); + pcs_lp_ability_reg = E1000_READ_REG(hw, E1000_PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings.
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + DEBUGOUT("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + DEBUGOUT("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + DEBUGOUT("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = E1000_READ_REG(hw, E1000_PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + E1000_WRITE_REG(hw, E1000_PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000_force_mac_fc_generic(hw); + if (ret_val) { + DEBUGOUT("Error forcing flow control settings\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections.
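+ * + * Usage sketch (editor-added, assumes an initialized hw): + * + *	u16 speed, duplex; + *	s32 ret = e1000_get_speed_and_duplex_copper_generic(hw, &speed, + *							    &duplex); + * + * On E1000_SUCCESS, speed holds SPEED_10/SPEED_100/SPEED_1000 and + * duplex holds FULL_DUPLEX or HALF_DUPLEX.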
+ **/ +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic"); + + status = E1000_READ_REG(hw, E1000_STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + DEBUGOUT("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + DEBUGOUT("100 Mbs, "); + } else { + *speed = SPEED_10; + DEBUGOUT("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + DEBUGOUT("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + DEBUGOUT("Half Duplex\n"); + } + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw E1000_UNUSEDARG *hw, + u16 *speed, u16 *duplex) +{ + DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic"); + + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return E1000_SUCCESS; +} + +/** + * e1000_get_hw_semaphore_generic - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + DEBUGFUNC("e1000_get_hw_semaphore_generic"); + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usec_delay(50); + i++; + } + + if (i == timeout) { + DEBUGOUT("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = E1000_READ_REG(hw, E1000_SWSM); + E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI) + break; + + usec_delay(50); + } + + if (i == timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_generic(hw); + DEBUGOUT("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_put_hw_semaphore_generic - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw) +{ + u32 swsm; + + DEBUGFUNC("e1000_put_hw_semaphore_generic"); + + swsm = E1000_READ_REG(hw, E1000_SWSM); + + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + + E1000_WRITE_REG(hw, E1000_SWSM, swsm); +} + +/** + * e1000_get_auto_rd_done_generic - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit.
+ **/ +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw) +{ + s32 i = 0; + + DEBUGFUNC("e1000_get_auto_rd_done_generic"); + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD) + break; + msec_delay(1); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + DEBUGOUT("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return E1000_SUCCESS; +} + +/** + * e1000_valid_led_default_generic - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to storage for the LED default word read from the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_valid_led_default_generic"); + + ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return E1000_SUCCESS; +} + +/** + * e1000_id_led_init_generic - Initialize ID LED settings + * @hw: pointer to the HW structure + * + * Reads the ID LED default from the NVM and derives the LEDCTL register + * values used for the LED-on and LED-off identify modes. + **/ +s32 e1000_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + DEBUGFUNC("e1000_id_led_init_generic"); + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can later be restored.
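+ * + * A hedged sketch (editor-added) of an LED identify sequence built + * from the helpers in this file: + * + *	e1000_setup_led_generic(hw);	save state, take SW control + *	e1000_blink_led_generic(hw);	blink the LEDs that are on + *	e1000_cleanup_led_generic(hw);	restore the saved default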
+ **/ +s32 e1000_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + DEBUGFUNC("e1000_setup_led_generic"); + + if (hw->mac.ops.setup_led != e1000_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = E1000_READ_REG(hw, E1000_LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000_cleanup_led_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_cleanup_led_generic"); + + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default); + return E1000_SUCCESS; +} + +/** + * e1000_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + DEBUGFUNC("e1000_blink_led_generic"); + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink); + + return E1000_SUCCESS; +} + +/** + * e1000_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_on_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. 
+ **/ +s32 e1000_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + DEBUGFUNC("e1000_led_off_generic"); + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + break; + case e1000_media_type_copper: + E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_pcie_no_snoop_generic - Set PCI-express no-snoop capability + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Sets the no-snoop bits in the PCI-express GCR register for the events + * enabled in 'no_snoop'. + **/ +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + DEBUGFUNC("e1000_set_pcie_no_snoop_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return; + + if (no_snoop) { + gcr = E1000_READ_REG(hw, E1000_GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + E1000_WRITE_REG(hw, E1000_GCR, gcr); + } +} + +/** + * e1000_disable_pcie_master_generic - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not + * caused the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + DEBUGFUNC("e1000_disable_pcie_master_generic"); + + if (hw->bus.type != e1000_bus_type_pci_express) + return E1000_SUCCESS; + + ctrl = E1000_READ_REG(hw, E1000_CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + E1000_WRITE_REG(hw, E1000_CTRL, ctrl); + + while (timeout) { + if (!(E1000_READ_REG(hw, E1000_STATUS) & + E1000_STATUS_GIO_MASTER_ENABLE) || + E1000_REMOVED(hw->hw_addr)) + break; + usec_delay(100); + timeout--; + } + + if (!timeout) { + DEBUGOUT("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return E1000_SUCCESS; +} + +/** + * e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000_reset_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_reset_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); +} + +/** + * e1000_update_adaptive_generic - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions.
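+ * + * Call-pattern sketch (editor-added assumption): a driver watchdog + * would refresh mac->collision_delta and mac->tx_packet_delta from + * the COLC and TPT statistics each interval and then call + * e1000_update_adaptive_generic(hw), after having armed the mechanism + * once with e1000_reset_adaptive_generic(hw).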
+ **/ +void e1000_update_adaptive_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + DEBUGFUNC("e1000_update_adaptive_generic"); + + if (!mac->adaptive_ifs) { + DEBUGOUT("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + E1000_WRITE_REG(hw, E1000_AIT, + mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + E1000_WRITE_REG(hw, E1000_AIT, 0); + } + } +} + +/** + * e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Verify that when not using auto-negotiation, MDI/MDIx is correctly set. + * In forced operation only MDI mode is supported, so an invalid setting + * is forced back to MDI. + **/ +static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_generic"); + + if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) { + DEBUGOUT("Invalid MDI setting detected\n"); + hw->phy.mdix = 1; + return -E1000_ERR_CONFIG; + } + + return E1000_SUCCESS; +} + +/** + * e1000_validate_mdi_setting_crossover_generic - Verify MDI/MDIx settings + * @hw: pointer to the HW structure + * + * Validate the MDI/MDIx setting, allowing for auto-crossover during forced + * operation. + **/ +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_validate_mdi_setting_crossover_generic"); + + return E1000_SUCCESS; +} + +/** + * e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register + * @hw: pointer to the HW structure + * @reg: 32bit register offset such as E1000_SCTL + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes an address/data control type register. There are several of these + * and they all have the format address << 8 | data and bit 31 is polled for + * completion. + **/ +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data) +{ + u32 i, regvalue = 0; + + DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic"); + + /* Set up the address and data */ + regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT); + E1000_WRITE_REG(hw, reg, regvalue); + + /* Poll the ready bit to see if the write completed */ + for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) { + usec_delay(5); + regvalue = E1000_READ_REG(hw, reg); + if (regvalue & E1000_GEN_CTL_READY) + break; + } + if (!(regvalue & E1000_GEN_CTL_READY)) { + DEBUGOUT1("Reg %08x did not indicate ready\n", reg); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} diff --git a/drivers/staging/igb_avb/e1000_mac.h b/drivers/staging/igb_avb/e1000_mac.h new file mode 100644 index 000000000000..a3878361095e --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mac.h @@ -0,0 +1,81 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation.
+ + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MAC_H_ +#define _E1000_MAC_H_ + +void e1000_init_mac_ops_generic(struct e1000_hw *hw); +#ifndef E1000_REMOVED +#define E1000_REMOVED(a) (0) +#endif /* E1000_REMOVED */ +void e1000_null_mac_generic(struct e1000_hw *hw); +s32 e1000_null_ops_generic(struct e1000_hw *hw); +s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d); +bool e1000_null_mng_mode(struct e1000_hw *hw); +void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a); +void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b); +int e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a); +s32 e1000_blink_led_generic(struct e1000_hw *hw); +s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw); +s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw); +s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw); +s32 e1000_force_mac_fc_generic(struct e1000_hw *hw); +s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw); +s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000_id_led_init_generic(struct e1000_hw *hw); +s32 e1000_led_on_generic(struct e1000_hw *hw); +s32 e1000_led_off_generic(struct e1000_hw *hw); +void e1000_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw); +s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw); +s32 e1000_setup_led_generic(struct e1000_hw *hw); +s32 e1000_setup_link_generic(struct e1000_hw *hw); +s32 e1000_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); +s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg, + u32 offset, u8 data); + +u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr); + +void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count); +void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw); +void e1000_put_hw_semaphore_generic(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000_reset_adaptive_generic(struct e1000_hw *hw); +void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop); +void e1000_update_adaptive_generic(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +#endif diff --git a/drivers/staging/igb_avb/e1000_manage.c b/drivers/staging/igb_avb/e1000_manage.c new file mode 100644 index 
000000000000..36671fbdab13 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_manage.c @@ -0,0 +1,552 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum of the given buffer over the specified length + * and returns the calculated checksum. + **/ +u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + DEBUGFUNC("e1000_calculate_checksum"); + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8) (0 - sum); +} + +/** + * e1000_mng_enable_host_if_generic - Checks that host interface is enabled + * @hw: pointer to the HW structure + * + * Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the host interface is enabled for command + * operation and whether the previous command has completed. It busy-waits + * while the previous command is still in progress. + **/ +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + DEBUGFUNC("e1000_mng_enable_host_if_generic"); + + if (!hw->mac.arc_subsystem_valid) { + DEBUGOUT("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* Check that the previous command has completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay_irq(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + DEBUGOUT("Previous command timed out.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0).
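+ * + * The FWSM mode field (bits 3:1) is compared against E1000_MNG_IAMT_MODE + * (0x3); only IAMT management mode counts as enabled here.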
+ **/ +bool e1000_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = E1000_READ_REG(hw, E1000_FWSM); + + DEBUGFUNC("e1000_check_mng_mode_generic"); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic"); + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val != E1000_SUCCESS) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header_generic - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after computing its checksum. + **/ +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + DEBUGFUNC("e1000_mng_write_cmd_header_generic"); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *) hdr + i)); + E1000_WRITE_FLUSH(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_host_if_write_generic - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the given offset on the host + * interface. It handles dword alignment so the writes are done in the most + * efficient way, and it accumulates the sum of the written data in the + * *sum parameter.
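+ * + * All host interface accesses are 32-bit: a leading partial dword is + * read-modified-written, whole dwords are copied directly, and a trailing + * partial dword is zero-padded before being written.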
+ **/ +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + DEBUGFUNC("e1000_mng_host_if_write_generic"); + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, + data); + } + + return E1000_SUCCESS; +} + +/** + * e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer, + u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + DEBUGFUNC("e1000_mng_write_dhcp_info_generic"); + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if_generic(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write_generic(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header_generic(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + return E1000_SUCCESS; +} + +/** + * e1000_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. 
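+ * + * Pass-through is needed only when TCO receive is enabled and either the + * firmware (on parts with FWSM) reports pass-through mode with + * manageability clock gating off, or a part without FWSM has SMBus + * manageability enabled and ASF disabled.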
+ **/ +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + DEBUGFUNC("e1000_enable_mng_pass_thru"); + + if (!hw->mac.asf_firmware_present) + return false; + + manc = E1000_READ_REG(hw, E1000_MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + factps = E1000_READ_REG(hw, E1000_FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} + +/** + * e1000_host_interface_command - Writes buffer to host interface + * @hw: pointer to the HW structure + * @buffer: contains a command to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Writes a buffer to the Host Interface. Upon success, returns E1000_SUCCESS + * else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, i; + + DEBUGFUNC("e1000_host_interface_command"); + + if (!(hw->mac.arc_subsystem_valid)) { + DEBUGOUT("Hardware doesn't support host interface command.\n"); + return E1000_SUCCESS; + } + + if (!hw->mac.asf_firmware_present) { + DEBUGOUT("Firmware is not present.\n"); + return E1000_SUCCESS; + } + + if (length == 0 || length & 0x3 || + length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block + * into the ram area. + */ + for (i = 0; i < length; i++) + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i, + *((u32 *)buffer + i)); + + /* Setting this bit tells the ARC that a new command is pending. */ + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check command successful completion. */ + if (i == E1000_HI_COMMAND_TIMEOUT || + (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) { + DEBUGOUT("Command has failed with no status valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + for (i = 0; i < length; i++) + *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, + E1000_HOST_IF, + i); + + return E1000_SUCCESS; +} +/** + * e1000_load_firmware - Writes proxy FW code buffer to host interface + * and execute. + * @hw: pointer to the HW structure + * @buffer: contains a firmware to write + * @length: the byte length of the buffer, must be multiple of 4 bytes + * + * Upon success returns E1000_SUCCESS, returns E1000_ERR_CONFIG if not enabled + * in HW else returns E1000_ERR_HOST_INTERFACE_COMMAND. + **/ +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length) +{ + u32 hicr, hibba, fwsm, icr, i; + + DEBUGFUNC("e1000_load_firmware"); + + if (hw->mac.type < e1000_i210) { + DEBUGOUT("Hardware doesn't support loading FW by the driver\n"); + return -E1000_ERR_CONFIG; + } + + /* Check that the host interface is enabled. 
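Firmware load also + * requires the memory-base window (E1000_HICR_MEMORY_BASE_EN), since the + * code is streamed through a 1 kB RAM window selected via E1000_HIBBA.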
*/ + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_EN)) { + DEBUGOUT("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + if (!(hicr & E1000_HICR_MEMORY_BASE_EN)) { + DEBUGOUT("E1000_HICR_MEMORY_BASE_EN bit disabled.\n"); + return -E1000_ERR_CONFIG; + } + + if (length == 0 || length & 0x3 || length > E1000_HI_FW_MAX_LENGTH) { + DEBUGOUT("Buffer length failure.\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + /* Clear notification from ROM-FW by reading ICR register */ + icr = E1000_READ_REG(hw, E1000_ICR_V2); + + /* Reset ROM-FW */ + hicr = E1000_READ_REG(hw, E1000_HICR); + hicr |= E1000_HICR_FW_RESET_ENABLE; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + hicr |= E1000_HICR_FW_RESET; + E1000_WRITE_REG(hw, E1000_HICR, hicr); + E1000_WRITE_FLUSH(hw); + + /* Wait until the MAC signals readiness after the ROM-FW reset */ + for (i = 0; i < (E1000_HI_COMMAND_TIMEOUT * 2); i++) { + icr = E1000_READ_REG(hw, E1000_ICR_V2); + if (icr & E1000_ICR_MNG) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT * 2) { + DEBUGOUT("FW reset failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Wait until the MAC is ready to accept new FW code */ + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + fwsm = E1000_READ_REG(hw, E1000_FWSM); + if ((fwsm & E1000_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT == + E1000_FWSM_HI_EN_ONLY_MODE)) + break; + msec_delay(1); + } + + /* Check for timeout */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("FW not ready to accept new code.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant FW code block + * into the ram area in DWORDs via 1kB ram addressing window. + */ + for (i = 0; i < length; i++) { + if (!(i % E1000_HI_FW_BLOCK_DWORD_LENGTH)) { + /* Point to correct 1kB ram window */ + hibba = E1000_HI_FW_BASE_ADDRESS + + ((E1000_HI_FW_BLOCK_DWORD_LENGTH << 2) * + (i / E1000_HI_FW_BLOCK_DWORD_LENGTH)); + + E1000_WRITE_REG(hw, E1000_HIBBA, hibba); + } + + E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, + i % E1000_HI_FW_BLOCK_DWORD_LENGTH, + *((u32 *)buffer + i)); + } + + /* Setting this bit tells the ARC that a new FW is ready to execute. */ + hicr = E1000_READ_REG(hw, E1000_HICR); + E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C); + + for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) { + hicr = E1000_READ_REG(hw, E1000_HICR); + if (!(hicr & E1000_HICR_C)) + break; + msec_delay(1); + } + + /* Check for successful FW start. */ + if (i == E1000_HI_COMMAND_TIMEOUT) { + DEBUGOUT("New FW did not start within timeout period.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return E1000_SUCCESS; +} + diff --git a/drivers/staging/igb_avb/e1000_manage.h b/drivers/staging/igb_avb/e1000_manage.h new file mode 100644 index 000000000000..09afc1aed497 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_manage.h @@ -0,0 +1,86 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MANAGE_H_ +#define _E1000_MANAGE_H_ + +bool e1000_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw); +s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw); +s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum); +s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr); +s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, + u8 *buffer, u16 length); +bool e1000_enable_mng_pass_thru(struct e1000_hw *hw); +u8 e1000_calculate_checksum(u8 *buffer, u32 length); +s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length); +s32 e1000_load_firmware(struct e1000_hw *hw, u8 *buffer, u32 length); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 +#define E1000_FWSM_HI_EN_ONLY_MODE 0x4 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Process HI cmd limit */ +#define E1000_HI_FW_BASE_ADDRESS 0x10000 +#define E1000_HI_FW_MAX_LENGTH (64 * 1024) /* Num of bytes */ +#define E1000_HI_FW_BLOCK_DWORD_LENGTH 256 /* Num of DWORDs per page */ +#define E1000_HICR_MEMORY_BASE_EN 0x200 /* MB Enable bit - RO */ +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/drivers/staging/igb_avb/e1000_mbx.c b/drivers/staging/igb_avb/e1000_mbx.c new file mode 100644 index 000000000000..f2998f470ce5 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mbx.c @@ -0,0 +1,523 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_mbx.h" + +/** + * e1000_null_mbx_check_for_flag - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_check_for_flag(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_check_flag"); + + return E1000_SUCCESS; +} + +/** + * e1000_null_mbx_transact - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +static s32 e1000_null_mbx_transact(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG *msg, + u16 E1000_UNUSEDARG size, + u16 E1000_UNUSEDARG mbx_id) +{ + DEBUGFUNC("e1000_null_mbx_rw_msg"); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mbx - Reads a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * + * returns SUCCESS if it successfully read the message from the buffer + **/ +s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_mbx"); + + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; + + if (mbx->ops.read) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_write_mbx - Write a message to the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied the message into the buffer + **/ +s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_mbx"); + + if (size > mbx->size) + ret_val = -E1000_ERR_MBX; + + else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_msg - checks to see if someone sent us mail + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg"); + + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_ack - checks to see if someone sent us ACK + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack"); + + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_check_for_rst - checks to see if other side has reset + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to check + * + * returns SUCCESS if the Status bit was found or else ERR_MBX + **/ +s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id) +{ + struct
e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst"); + + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + + return ret_val; +} + +/** + * e1000_poll_for_msg - Wait for message notification + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification + **/ +static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_msg"); + + if (!countdown || !mbx->ops.check_for_msg) + goto out; + + while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_poll_for_ack - Wait for message acknowledgement + * @hw: pointer to the HW structure + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + + DEBUGFUNC("e1000_poll_for_ack"); + + if (!countdown || !mbx->ops.check_for_ack) + goto out; + + while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) { + countdown--; + if (!countdown) + break; + usec_delay(mbx->usec_delay); + } + + /* if we failed, all future posted messages fail until reset */ + if (!countdown) + mbx->timeout = 0; +out: + return countdown ? E1000_SUCCESS : -E1000_ERR_MBX; +} + +/** + * e1000_read_posted_mbx - Wait for message notification and receive message + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. 
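+ * + * The flow is: e1000_poll_for_msg() busy-waits in usec_delay(mbx->usec_delay) + * steps for up to mbx->timeout iterations, and ops.read() is invoked only + * if the notification arrived in time.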
+ **/ +s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_read_posted_mbx"); + + if (!mbx->ops.read) + goto out; + + ret_val = e1000_poll_for_msg(hw, mbx_id); + + /* if ack received read message, otherwise we timed out */ + if (!ret_val) + ret_val = mbx->ops.read(hw, msg, size, mbx_id); +out: + return ret_val; +} + +/** + * e1000_write_posted_mbx - Write a message to the mailbox, wait for ack + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @mbx_id: id of mailbox to write + * + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_write_posted_mbx"); + + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; + + /* send msg */ + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + /* if msg sent wait until we receive an ack */ + if (!ret_val) + ret_val = e1000_poll_for_ack(hw, mbx_id); +out: + return ret_val; +} + +/** + * e1000_init_mbx_ops_generic - Initialize mbx function pointers + * @hw: pointer to the HW structure + * + * Sets the function pointers to no-op functions + **/ +void e1000_init_mbx_ops_generic(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + mbx->ops.init_params = e1000_null_ops_generic; + mbx->ops.read = e1000_null_mbx_transact; + mbx->ops.write = e1000_null_mbx_transact; + mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag; + mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; +} + +static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask) +{ + u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR); + s32 ret_val = -E1000_ERR_MBX; + + if (mbvficr & mask) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_MBVFICR, mask); + } + + return ret_val; +} + +/** + * e1000_check_for_msg_pf - checks to see if the VF has sent mail + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_msg_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.reqs++; + } + + return ret_val; +} + +/** + * e1000_check_for_ack_pf - checks to see if the VF has ACKed + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_ack_pf"); + + if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { + ret_val = E1000_SUCCESS; + hw->mbx.stats.acks++; + } + + return ret_val; +} + +/** + * e1000_check_for_rst_pf - checks to see if the VF has reset + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +static s32 
e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) +{ + u32 vflre = E1000_READ_REG(hw, E1000_VFLRE); + s32 ret_val = -E1000_ERR_MBX; + + DEBUGFUNC("e1000_check_for_rst_pf"); + + if (vflre & (1 << vf_number)) { + ret_val = E1000_SUCCESS; + E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number)); + hw->mbx.stats.rsts++; + } + + return ret_val; +} + +/** + * e1000_obtain_mbx_lock_pf - obtain mailbox lock + * @hw: pointer to the HW structure + * @vf_number: the VF index + * + * return SUCCESS if we obtained the mailbox lock + **/ +static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) +{ + s32 ret_val = -E1000_ERR_MBX; + u32 p2v_mailbox; + + DEBUGFUNC("e1000_obtain_mbx_lock_pf"); + + /* Take ownership of the buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) + ret_val = E1000_SUCCESS; + + return ret_val; +} + +/** + * e1000_write_mbx_pf - Places a message in the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_write_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_write; + + /* flush msg and acks as we are overwriting the message buffer */ + e1000_check_for_msg_pf(hw, vf_number); + e1000_check_for_ack_pf(hw, vf_number); + + /* copy the caller specified message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]); + + /* Interrupt VF to tell it a message has been sent and release buffer*/ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); + + /* update stats */ + hw->mbx.stats.msgs_tx++; + +out_no_write: + return ret_val; + +} + +/** + * e1000_read_mbx_pf - Read a message from the mailbox + * @hw: pointer to the HW structure + * @msg: The message buffer + * @size: Length of buffer + * @vf_number: the VF index + * + * This function copies a message from the mailbox buffer to the caller's + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. 
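+ * + * The PF/VF lock is still taken via e1000_obtain_mbx_lock_pf() to avoid + * racing a concurrent writer, and the buffer is released by writing + * E1000_P2VMAILBOX_ACK once the copy completes.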
+ **/ +static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, + u16 vf_number) +{ + s32 ret_val; + u16 i; + + DEBUGFUNC("e1000_read_mbx_pf"); + + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) + goto out_no_read; + + /* copy the message to the mailbox memory buffer */ + for (i = 0; i < size; i++) + msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i); + + /* Acknowledge the message and release buffer */ + E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); + + /* update stats */ + hw->mbx.stats.msgs_rx++; + +out_no_read: + return ret_val; +} + +/** + * e1000_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure + * + * Initializes the hw->mbx struct to correct values for pf mailbox + */ +s32 e1000_init_mbx_params_pf(struct e1000_hw *hw) +{ + struct e1000_mbx_info *mbx = &hw->mbx; + + switch (hw->mac.type) { + case e1000_82576: + case e1000_i350: + case e1000_i354: + mbx->timeout = 0; + mbx->usec_delay = 0; + + mbx->size = E1000_VFMAILBOX_SIZE; + + mbx->ops.read = e1000_read_mbx_pf; + mbx->ops.write = e1000_write_mbx_pf; + mbx->ops.read_posted = e1000_read_posted_mbx; + mbx->ops.write_posted = e1000_write_posted_mbx; + mbx->ops.check_for_msg = e1000_check_for_msg_pf; + mbx->ops.check_for_ack = e1000_check_for_ack_pf; + mbx->ops.check_for_rst = e1000_check_for_rst_pf; + + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; + default: + return E1000_SUCCESS; + } +} + diff --git a/drivers/staging/igb_avb/e1000_mbx.h b/drivers/staging/igb_avb/e1000_mbx.h new file mode 100644 index 000000000000..28900216ac25 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_mbx.h @@ -0,0 +1,84 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_MBX_H_ +#define _E1000_MBX_H_ + +#include "e1000_api.h" + +#define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +#define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +#define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +#define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ + +#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ +#define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ +#define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + +#define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ + +/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is E1000_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +/* Msgs below or'd with this are the ACK */ +#define E1000_VT_MSGTYPE_ACK 0x80000000 +/* Msgs below or'd with this are the NACK */ +#define E1000_VT_MSGTYPE_NACK 0x40000000 +/* Indicates that VF is still clear to send requests */ +#define E1000_VT_MSGTYPE_CTS 0x20000000 +#define E1000_VT_MSGINFO_SHIFT 16 +/* bits 23:16 are used for extra info for certain messages */ +#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) + +#define E1000_VF_RESET 0x01 /* VF requests reset */ +#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */ +#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */ +#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_MULTICAST_OVERFLOW (0x80 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */ +#define E1000_VF_SET_VLAN_ADD (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_LPE 0x05 /* reqs to set VMOLR.LPE */ +#define E1000_VF_SET_PROMISC 0x06 /* reqs to clear VMOLR.ROPE/MPME*/ +#define E1000_VF_SET_PROMISC_UNICAST (0x01 << E1000_VT_MSGINFO_SHIFT) +#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT) + +#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ + +#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +#define E1000_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + +s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); +s32 e1000_check_for_msg(struct e1000_hw *, u16); +s32 e1000_check_for_ack(struct e1000_hw *, u16); +s32 e1000_check_for_rst(struct e1000_hw *, u16); +void e1000_init_mbx_ops_generic(struct e1000_hw *hw); +s32 e1000_init_mbx_params_pf(struct e1000_hw *); + +#endif /* _E1000_MBX_H_ */ diff --git a/drivers/staging/igb_avb/e1000_nvm.c b/drivers/staging/igb_avb/e1000_nvm.c new file mode 100644 index 000000000000..c328f40dfa20 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_nvm.c @@ -0,0 +1,973 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static void e1000_reload_nvm_generic(struct e1000_hw *hw); + +/** + * e1000_init_nvm_ops_generic - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Sets up the function pointers to no-op functions + **/ +void e1000_init_nvm_ops_generic(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + DEBUGFUNC("e1000_init_nvm_ops_generic"); + + /* Initialize function pointers */ + nvm->ops.init_params = e1000_null_ops_generic; + nvm->ops.acquire = e1000_null_ops_generic; + nvm->ops.read = e1000_null_read_nvm; + nvm->ops.release = e1000_null_nvm_generic; + nvm->ops.reload = e1000_reload_nvm_generic; + nvm->ops.update = e1000_null_ops_generic; + nvm->ops.valid_led_default = e1000_null_led_default; + nvm->ops.validate = e1000_null_ops_generic; + nvm->ops.write = e1000_null_write_nvm; +} + +/** + * e1000_null_read_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_read_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_null_nvm_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void e1000_null_nvm_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_nvm_generic"); + return; +} + +/** + * e1000_null_led_default - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_led_default(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_led_default"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_nvm - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_nvm(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG a, u16 E1000_UNUSEDARG b, + u16 E1000_UNUSEDARG *c) +{ + DEBUGFUNC("e1000_null_write_nvm"); + return E1000_SUCCESS; +} + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit.
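+ * Together with e1000_raise_eec_clk() this bit-bangs the EEPROM SK line; + * each clock edge is posted with a write flush and held for + * hw->nvm.delay_usec microseconds.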
+ **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + E1000_WRITE_REG(hw, E1000_EECD, *eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u32 mask; + + DEBUGFUNC("e1000_shift_out_eec_bits"); + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + + usec_delay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + DEBUGFUNC("e1000_shift_in_eec_bits"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = E1000_READ_REG(hw, E1000_EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + DEBUGFUNC("e1000_poll_eerd_eewr_done"); + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = E1000_READ_REG(hw, E1000_EERD); + else + reg = E1000_READ_REG(hw, E1000_EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return E1000_SUCCESS; + + usec_delay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000_acquire_nvm_generic - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return success if the access grant bit is set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1).
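+ * The grant bit is polled in 5 usec steps for up to + * E1000_NVM_GRANT_ATTEMPTS iterations; on timeout the request bit is + * cleared again so the NVM is not left claimed.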
+ **/ +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + DEBUGFUNC("e1000_acquire_nvm_generic"); + + E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ); + eecd = E1000_READ_REG(hw, E1000_EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + usec_delay(5); + eecd = E1000_READ_REG(hw, E1000_EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + DEBUGOUT("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + + DEBUGFUNC("e1000_standby_nvm"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_stop_nvm"); + + eecd = E1000_READ_REG(hw, E1000_EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000_release_nvm_generic - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000_release_nvm_generic(struct e1000_hw *hw) +{ + u32 eecd; + + DEBUGFUNC("e1000_release_nvm_generic"); + + e1000_stop_nvm(hw); + + eecd = E1000_READ_REG(hw, E1000_EECD); + eecd &= ~E1000_EECD_REQ; + E1000_WRITE_REG(hw, E1000_EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = E1000_READ_REG(hw, E1000_EECD); + u8 spi_stat_reg; + + DEBUGFUNC("e1000_ready_nvm_eeprom"); + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + E1000_WRITE_REG(hw, E1000_EECD, eecd); + E1000_WRITE_FLUSH(hw); + usec_delay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. 
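+ * Each iteration shifts out NVM_RDSR_OPCODE_SPI, shifts in the 8-bit + * status and backs off for 5 usec while NVM_STATUS_RDY_SPI (the + * write-in-progress bit) is still set.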
+ */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + usec_delay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + DEBUGOUT("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_nvm_spi - Read EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word(s) read from the EEPROM + * + * Reads 16-bit words from the EEPROM. + **/ +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i = 0; + s32 ret_val; + u16 word_in; + u8 read_opcode = NVM_READ_OPCODE_SPI; + + DEBUGFUNC("e1000_read_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) + goto release; + + e1000_standby_nvm(hw); + + if ((nvm->address_bits == 8) && (offset >= 128)) + read_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits); + + /* Read the data. SPI NVMs increment the address with each byte + * read and will roll over if reading beyond the end. This allows + * us to read the whole NVM from any offset + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_eec_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + +release: + nvm->ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word(s) read from the EEPROM + * + * Reads 16-bit words from the EEPROM using the EERD register. + **/ +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_read_nvm_eerd"); + + /* A check for invalid values: offset too large, too many words + * for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + E1000_WRITE_REG(hw, E1000_EERD, eerd); + ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (E1000_READ_REG(hw, E1000_EERD) >> + E1000_NVM_RW_REG_DATA); + } + + if (ret_val) + DEBUGOUT1("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface.
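+ * Writes are chunked on EEPROM page boundaries: each chunk is preceded + * by its own WREN and WRITE command sequence and followed by a 10 msec + * wait for the part's internal write cycle.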
+ * + * If e1000_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + DEBUGFUNC("e1000_write_nvm_spi"); + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + DEBUGOUT("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI EEPROMs use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to a whole page write of the EEPROM */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + msec_delay(10); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num.
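+ * + * Two layouts exist: when the first word is not NVM_PBA_PTR_GUARD the + * PBA is a legacy two-word hex value decoded into the fixed + * "XXXXXX-0XX" form; otherwise the second word points to a + * length-prefixed string section that is read out word by word.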
+ **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + DEBUGFUNC("e1000_read_pba_string_generic"); + + if ((hw->mac.type >= e1000_i210) && + !e1000_get_flash_presence_i210(hw)) { + DEBUGOUT("Flashless no PBA string\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + if (pba_num == NULL) { + DEBUGOUT("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ASCII string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* make sure the caller's buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + DEBUGOUT("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return E1000_SUCCESS; +} + +/** + * e1000_read_pba_length_generic - Read device part number length + * @hw: pointer to the HW structure + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number length from the EEPROM and + * stores the value in pba_num_size.
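+ * + * For the legacy layout this is the fixed E1000_PBANUM_LENGTH; for the + * string layout it is (length * 2) - 1, since each word holds two + * characters and one byte of the length word is traded for the NUL + * terminator.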
+ **/ +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 length; + + DEBUGFUNC("e1000_read_pba_length_generic"); + + if (pba_num_size == NULL) { + DEBUGOUT("PBA buffer size was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + /* if data is not ptr guard the PBA must be in legacy format */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + *pba_num_size = E1000_PBANUM_LENGTH; + return E1000_SUCCESS; + } + + ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + DEBUGOUT("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + + /* Convert from length in u16 values to u8 chars, add 1 for NULL, + * and subtract 2 because length field is included in length. + */ + *pba_num_size = ((u32)length * 2) - 1; + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = E1000_READ_REG(hw, E1000_RAH(0)); + rar_low = E1000_READ_REG(hw, E1000_RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8)); + + for (i = 0; i < ETH_ADDR_LEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return E1000_SUCCESS; +} + +/** + * e1000_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_validate_nvm_checksum_generic"); + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16) NVM_SUM) { + DEBUGOUT("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. 
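+ * + * The invariant is that words 0 through NVM_CHECKSUM_REG sum to NVM_SUM + * (0xBABA), so the checksum word is written as NVM_SUM minus the sum of + * the preceding words.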
+ **/ +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + DEBUGFUNC("e1000_update_nvm_checksum"); + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data); + if (ret_val) { + DEBUGOUT("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16) NVM_SUM - checksum; + ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + DEBUGOUT("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. + **/ +static void e1000_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + DEBUGFUNC("e1000_reload_nvm_generic"); + + usec_delay(10); + ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(hw); +} + +/** + * e1000_get_fw_version - Get firmware version information + * @hw: pointer to the HW structure + * @fw_vers: pointer to output version structure + * + * unsupported/not present features return 0 in version structure + **/ +void e1000_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers) +{ + u16 eeprom_verh, eeprom_verl, etrack_test, fw_version; + u8 q, hval, rem, result; + u16 comb_verh, comb_verl, comb_offset; + + memset(fw_vers, 0, sizeof(struct e1000_fw_version)); + + /* basic eeprom version numbers, bits used vary by part and by tool + * used to create the nvm images */ + /* Check which data format we have */ + switch (hw->mac.type) { + case e1000_i211: + e1000_read_invm_version(hw, fw_vers); + return; + case e1000_82575: + case e1000_82576: + case e1000_82580: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* Use this format, unless EETRACK ID exists, + * then use alternate format + */ + if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> NVM_MAJOR_SHIFT; + fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK); + goto etrack_id; + } + break; + case e1000_i210: + if (!(e1000_get_flash_presence_i210(hw))) { + e1000_read_invm_version(hw, fw_vers); + return; + } + /* fall through */ + case e1000_i350: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + /* find combo image version */ + hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); + if ((comb_offset != 0x0) && + (comb_offset != NVM_VER_INVALID)) { + + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset + + 1), 1, &comb_verh); + hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), + 1, &comb_verl); + + /* get Option Rom version if it exists and is valid */ + if ((comb_verh && comb_verl) && + ((comb_verh != NVM_VER_INVALID) && + (comb_verl != NVM_VER_INVALID))) { + + fw_vers->or_valid = true; + fw_vers->or_major = + comb_verl >> NVM_COMB_VER_SHFT; + fw_vers->or_build = + (comb_verl << NVM_COMB_VER_SHFT) + | (comb_verh >> NVM_COMB_VER_SHFT); + fw_vers->or_patch = + comb_verh & NVM_COMB_VER_MASK; + } + } + break; + default: + hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test); + return; + } + hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version); + fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) + >> 
NVM_MAJOR_SHIFT; + + /* check for old style version format in newer images*/ + if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) { + eeprom_verl = (fw_version & NVM_COMB_VER_MASK); + } else { + eeprom_verl = (fw_version & NVM_MINOR_MASK) + >> NVM_MINOR_SHIFT; + } + /* Convert minor value to hex before assigning to output struct + * Val to be converted will not be higher than 99, per tool output + */ + q = eeprom_verl / NVM_HEX_CONV; + hval = q * NVM_HEX_TENS; + rem = eeprom_verl % NVM_HEX_CONV; + result = hval + rem; + fw_vers->eep_minor = result; + +etrack_id: + if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) + | eeprom_verl; + } else if ((etrack_test & NVM_ETRACK_VALID) == 0) { + hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); + hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); + fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | + eeprom_verl; + } +} + diff --git a/drivers/staging/igb_avb/e1000_nvm.h b/drivers/staging/igb_avb/e1000_nvm.h new file mode 100644 index 000000000000..a4263113d72d --- /dev/null +++ b/drivers/staging/igb_avb/e1000_nvm.h @@ -0,0 +1,70 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_NVM_H_ +#define _E1000_NVM_H_ + +struct e1000_fw_version { + u32 etrack_id; + u16 eep_major; + u16 eep_minor; + u16 eep_build; + + u8 invm_major; + u8 invm_minor; + u8 invm_img_type; + + bool or_valid; + u16 or_major; + u16 or_build; + u16 or_patch; +}; + +void e1000_init_nvm_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +void e1000_null_nvm_generic(struct e1000_hw *hw); +s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c); +s32 e1000_acquire_nvm_generic(struct e1000_hw *hw); + +s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size); +s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data); +s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000_release_nvm_generic(struct e1000_hw *hw); +void e1000_get_fw_version(struct e1000_hw *hw, + struct e1000_fw_version *fw_vers); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/drivers/staging/igb_avb/e1000_osdep.h b/drivers/staging/igb_avb/e1000_osdep.h new file mode 100644 index 000000000000..3c6b79586cf8 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_osdep.h @@ -0,0 +1,141 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include +#include +#include +#include +#include +#include "kcompat.h" + +#define usec_delay(x) udelay(x) +#define usec_delay_irq(x) udelay(x) +#ifndef msec_delay +#define msec_delay(x) do { \ + /* Don't mdelay in interrupt context! */ \ + if (in_interrupt()) \ + BUG(); \ + else \ + msleep(x); \ +} while (0) + +/* Some workarounds require millisecond delays and are run during interrupt + * context. 
Most notably, when establishing link, the phy may need tweaking + * but cannot process phy register reads/writes faster than millisecond + * intervals...and we establish link due to a "link status change" interrupt. + */ +#define msec_delay_irq(x) mdelay(x) + +#define E1000_READ_REG(x, y) e1000_read_reg(x, y) +#endif + +#define PCI_COMMAND_REGISTER PCI_COMMAND +#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE +#define ETH_ADDR_LEN ETH_ALEN + +#ifdef __BIG_ENDIAN +#define E1000_BIG_ENDIAN __BIG_ENDIAN +#endif + +#ifdef DEBUG +#define DEBUGOUT(S) pr_debug(S) +#define DEBUGOUT1(S, A...) pr_debug(S, ## A) +#else +#define DEBUGOUT(S) +#define DEBUGOUT1(S, A...) +#endif + +#ifdef DEBUG_FUNC +#define DEBUGFUNC(F) DEBUGOUT(F "\n") +#else +#define DEBUGFUNC(F) +#endif +#define DEBUGOUT2 DEBUGOUT1 +#define DEBUGOUT3 DEBUGOUT2 +#define DEBUGOUT7 DEBUGOUT3 + +#define E1000_REGISTER(a, reg) reg + +/* forward declaration */ +struct e1000_hw; + +/* write operations, indexed using DWORDS */ +#define E1000_WRITE_REG(hw, reg, val) \ +do { \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ + if (!E1000_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ +} while (0) + +u32 e1000_read_reg(struct e1000_hw *hw, u32 reg); + +#define E1000_WRITE_REG_ARRAY(hw, reg, idx, val) \ + E1000_WRITE_REG((hw), (reg) + ((idx) << 2), (val)) + +#define E1000_READ_REG_ARRAY(hw, reg, idx) ( \ + e1000_read_reg((hw), (reg) + ((idx) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))) + +#define E1000_WRITE_REG_IO(a, reg, offset) do { \ + outl(reg, ((a)->io_base)); \ + outl(offset, ((a)->io_base + 4)); \ + } while (0) + +#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) + +#define E1000_WRITE_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg)) + +#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg)) + +#define E1000_REMOVED(h) unlikely(!(h)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/drivers/staging/igb_avb/e1000_phy.c b/drivers/staging/igb_avb/e1000_phy.c new file mode 100644 index 000000000000..46ab8d5ae2ab --- /dev/null +++ b/drivers/staging/igb_avb/e1000_phy.c @@ -0,0 +1,3398 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000_api.h" + +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_m88_cable_length_table) / \ + sizeof(e1000_m88_cable_length_table[0])) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124}; +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + (sizeof(e1000_igp_2_cable_length_table) / \ + sizeof(e1000_igp_2_cable_length_table[0])) + +/** + * e1000_init_phy_ops_generic - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Setups up the function pointers to no-op functions + **/ +void e1000_init_phy_ops_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + DEBUGFUNC("e1000_init_phy_ops_generic"); + + /* Initialize function pointers */ + phy->ops.init_params = e1000_null_ops_generic; + phy->ops.acquire = e1000_null_ops_generic; + phy->ops.check_polarity = e1000_null_ops_generic; + phy->ops.check_reset_block = e1000_null_ops_generic; + phy->ops.commit = e1000_null_ops_generic; + phy->ops.force_speed_duplex = e1000_null_ops_generic; + phy->ops.get_cfg_done = e1000_null_ops_generic; + phy->ops.get_cable_length = e1000_null_ops_generic; + phy->ops.get_info = e1000_null_ops_generic; + phy->ops.set_page = e1000_null_set_page; + phy->ops.read_reg = e1000_null_read_reg; + phy->ops.read_reg_locked = e1000_null_read_reg; + phy->ops.read_reg_page = e1000_null_read_reg; + phy->ops.release = e1000_null_phy_generic; + phy->ops.reset = e1000_null_ops_generic; + phy->ops.set_d0_lplu_state = e1000_null_lplu_state; + phy->ops.set_d3_lplu_state = e1000_null_lplu_state; + phy->ops.write_reg = e1000_null_write_reg; + phy->ops.write_reg_locked = e1000_null_write_reg; + phy->ops.write_reg_page = e1000_null_write_reg; + phy->ops.power_up = e1000_null_phy_generic; + phy->ops.power_down = e1000_null_phy_generic; + phy->ops.read_i2c_byte = e1000_read_i2c_byte_null; + phy->ops.write_i2c_byte = e1000_write_i2c_byte_null; +} + +/** + * e1000_null_set_page - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_set_page(struct e1000_hw E1000_UNUSEDARG *hw, + u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_set_page"); + return E1000_SUCCESS; +} + +/** + * e1000_null_read_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_read_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_null_read_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_null_phy_generic - No-op function, return void + * @hw: pointer to the HW structure + **/ +void 
e1000_null_phy_generic(struct e1000_hw E1000_UNUSEDARG *hw) +{ + DEBUGFUNC("e1000_null_phy_generic"); + return; +} + +/** + * e1000_null_lplu_state - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_lplu_state(struct e1000_hw E1000_UNUSEDARG *hw, + bool E1000_UNUSEDARG active) +{ + DEBUGFUNC("e1000_null_lplu_state"); + return E1000_SUCCESS; +} + +/** + * e1000_null_write_reg - No-op function, return 0 + * @hw: pointer to the HW structure + **/ +s32 e1000_null_write_reg(struct e1000_hw E1000_UNUSEDARG *hw, + u32 E1000_UNUSEDARG offset, u16 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_null_write_reg"); + return E1000_SUCCESS; +} + +/** + * e1000_read_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value read + * + **/ +s32 e1000_read_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG *data) +{ + DEBUGFUNC("e1000_read_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_write_i2c_byte_null - No-op function, return 0 + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @dev_addr: device address + * @data: data value to write + * + **/ +s32 e1000_write_i2c_byte_null(struct e1000_hw E1000_UNUSEDARG *hw, + u8 E1000_UNUSEDARG byte_offset, + u8 E1000_UNUSEDARG dev_addr, + u8 E1000_UNUSEDARG data) +{ + DEBUGFUNC("e1000_write_i2c_byte_null"); + return E1000_SUCCESS; +} + +/** + * e1000_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return E1000_SUCCESS, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + DEBUGFUNC("e1000_check_reset_block"); + + manc = E1000_READ_REG(hw, E1000_MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? + E1000_BLK_PHY_RESET : E1000_SUCCESS; +} + +/** + * e1000_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = E1000_SUCCESS; + u16 phy_id; + + DEBUGFUNC("e1000_get_phy_id"); + + if (!phy->ops.read_reg) + return E1000_SUCCESS; + + ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usec_delay(20); + ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + return E1000_SUCCESS; +} + +/** + * e1000_phy_reset_dsp_generic - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. 
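Stepping back to e1000_get_phy_id() above: it splices the two 16-bit identifier registers into a 32-bit ID plus a 4-bit revision. A standalone sketch of that splice, with PHY_REVISION_MASK truncated to 16 bits and example register values that are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define PHY_REVISION_MASK 0xFFF0	/* low 4 bits of PHY_ID2 are the revision */

/* Combine the identifier registers the way e1000_get_phy_id() does:
 * PHY_ID1 supplies the high word, PHY_ID2 the low word minus its
 * 4-bit revision field.
 */
static void phy_id_from_regs(uint16_t id1, uint16_t id2,
			     uint32_t *id, uint32_t *rev)
{
	*id = ((uint32_t)id1 << 16) | (id2 & PHY_REVISION_MASK);
	*rev = id2 & ~(uint32_t)PHY_REVISION_MASK;
}

int main(void)
{
	uint32_t id, rev;

	/* example register values only */
	phy_id_from_regs(0x0141, 0x0CB2, &id, &rev);
	printf("id=0x%08x rev=%u\n", (unsigned)id, (unsigned)rev);
	return 0;
}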
+ **/ +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + DEBUGFUNC("e1000_phy_reset_dsp_generic"); + + if (!hw->phy.ops.write_reg) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_read_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16) mdic; + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. + **/ +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + DEBUGFUNC("e1000_write_phy_reg_mdic"); + + if (offset > MAX_PHY_REG_ADDRESS) { + DEBUGOUT1("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
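The MDIC pattern used by the read path above (and the write path below) is: pack opcode, PHY address, and register offset into a single register write, then spin on READY and check ERROR. A runnable sketch of that sequence; the bit positions mirror the usual e1000 definitions, and the fake register and iteration budget are stand-ins:

#include <stdint.h>
#include <stdio.h>

/* MDIC bit layout as used by e1000_read_phy_reg_mdic(); the numeric
 * values mirror the usual e1000 definitions and are assumptions here.
 */
#define MDIC_REG_SHIFT 16
#define MDIC_PHY_SHIFT 21
#define MDIC_OP_READ   0x08000000u
#define MDIC_READY     0x10000000u
#define MDIC_ERROR     0x40000000u

/* Fake MDIC register: echoes the request back with READY set and a
 * canned 16-bit result, so the polling logic below can actually run.
 */
static uint32_t fake_mdic;

static void reg_write(uint32_t v) { fake_mdic = v | MDIC_READY | 0x1234; }
static uint32_t reg_read(void)    { return fake_mdic; }

static int mdic_read(uint8_t phy_addr, uint8_t reg, uint16_t *data)
{
	uint32_t mdic = ((uint32_t)reg << MDIC_REG_SHIFT) |
			((uint32_t)phy_addr << MDIC_PHY_SHIFT) |
			MDIC_OP_READ;
	int i;

	reg_write(mdic);
	for (i = 0; i < 1000; i++) {	/* iteration budget illustrative */
		mdic = reg_read();
		if (mdic & MDIC_READY)
			break;
	}
	if (!(mdic & MDIC_READY) || (mdic & MDIC_ERROR))
		return -1;	/* timed out or MDI error, as in the driver */

	*data = (uint16_t)mdic;	/* low 16 bits carry the register value */
	return 0;
}

int main(void)
{
	uint16_t v;

	if (mdic_read(1, 0x02, &v) == 0)
		printf("phy reg = 0x%04x\n", v);
	return 0;
}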
+ */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + E1000_WRITE_REG(hw, E1000_MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + usec_delay_irq(50); + mdic = E1000_READ_REG(hw, E1000_MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + DEBUGOUT("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + DEBUGOUT("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + DEBUGOUT2("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_i2c - Read PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the i2c interface and stores the + * retrieved information in data. + **/ +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + + DEBUGFUNC("e1000_read_phy_reg_i2c"); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + (E1000_I2CCMD_OPCODE_READ)); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + /* Need to byte-swap the 16-bit value. */ + *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00); + + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg_i2c - Write PHY register using i2c + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset using the i2c interface. + **/ +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, i2ccmd = 0; + u16 phy_data_swapped; + + DEBUGFUNC("e1000_write_phy_reg_i2c"); + + /* Prevent overwritting SFP I2C EEPROM which is at A0 address.*/ + if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) { + DEBUGOUT1("PHY I2C Address %d is out of range.\n", + hw->phy.addr); + return -E1000_ERR_CONFIG; + } + + /* Swap the data bytes for the I2C interface */ + phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); + + /* Set up Op-code, Phy Address, and register address in the I2CCMD + * register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. 
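The swap both i2c paths perform is worth seeing on its own: the I2CCMD data field comes back byte-swapped relative to host order, so the read above swaps after the transfer and the write below swaps before issuing the command. A trivial sketch:

#include <stdint.h>
#include <stdio.h>

/* The 16-bit byte swap applied to the I2CCMD data field by
 * e1000_read_phy_reg_i2c() and e1000_write_phy_reg_i2c().
 */
static uint16_t i2c_swap16(uint16_t v)
{
	return (uint16_t)(((v >> 8) & 0x00FF) | ((v << 8) & 0xFF00));
}

int main(void)
{
	printf("0x%04x\n", i2c_swap16(0x1234));	/* prints 0x3412 */
	return 0;
}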
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | + phy_data_swapped); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) + break; + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_sfp_data_byte - Reads SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to be read + * @data: read data buffer pointer + * + * Reads one byte from SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_read_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing with the + * EEPROM to retrieve the desired data. + */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + + /* Poll the ready bit to see if the I2C read completed */ + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + data_local = E1000_READ_REG(hw, E1000_I2CCMD); + if (data_local & E1000_I2CCMD_READY) + break; + } + if (!(data_local & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (data_local & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + *data = (u8) data_local & 0xFF; + + return E1000_SUCCESS; +} + +/** + * e1000_write_sfp_data_byte - Writes SFP module data. + * @hw: pointer to the HW structure + * @offset: byte location offset to write to + * @data: data to write + * + * Writes one byte to SFP module data stored + * in SFP resided EEPROM memory or SFP diagnostic area. + * Function should be called with + * E1000_I2CCMD_SFP_DATA_ADDR() for SFP module database access + * E1000_I2CCMD_SFP_DIAG_ADDR() for SFP diagnostics parameters + * access + **/ +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data) +{ + u32 i = 0; + u32 i2ccmd = 0; + u32 data_local = 0; + + DEBUGFUNC("e1000_write_sfp_data_byte"); + + if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) { + DEBUGOUT("I2CCMD command address exceeds upper limit\n"); + return -E1000_ERR_PHY; + } + /* The programming interface is 16 bits wide + * so we need to read the whole word first + * then update appropriate byte lane and write + * the updated word back. + */ + /* Set up Op-code, EEPROM Address,in the I2CCMD + * register. The MAC will take care of interfacing + * with an EEPROM to write the data given. 
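As the comment above notes, the 16-bit programming interface makes the byte write below a read-modify-write: keep the upper byte lane of the word just read, replace the lower lane with the new byte. A sketch of that splice in isolation (helper name invented):

#include <stdint.h>
#include <stdio.h>

/* Byte-lane update performed by e1000_write_sfp_data_byte() between
 * its read phase and its write phase.
 */
static uint16_t splice_low_byte(uint16_t word_read, uint8_t new_byte)
{
	return (uint16_t)((word_read & 0xFF00) | new_byte);
}

int main(void)
{
	printf("0x%04x\n", splice_low_byte(0xABCD, 0x42));	/* prints 0xab42 */
	return 0;
}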
+ */ + i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_READ); + /* Set a command to read single word */ + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) { + usec_delay(50); + /* Poll the ready bit to see if lastly + * launched I2C operation completed + */ + i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD); + if (i2ccmd & E1000_I2CCMD_READY) { + /* Check if this is READ or WRITE phase */ + if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) == + E1000_I2CCMD_OPCODE_READ) { + /* Write the selected byte + * lane and update whole word + */ + data_local = i2ccmd & 0xFF00; + data_local |= data; + i2ccmd = ((offset << + E1000_I2CCMD_REG_ADDR_SHIFT) | + E1000_I2CCMD_OPCODE_WRITE | data_local); + E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd); + } else { + break; + } + } + } + if (!(i2ccmd & E1000_I2CCMD_READY)) { + DEBUGOUT("I2CCMD Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (i2ccmd & E1000_I2CCMD_ERROR) { + DEBUGOUT("I2CCMD Error bit set\n"); + return -E1000_ERR_PHY; + } + return E1000_SUCCESS; +} + +/** + * e1000_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_read_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + DEBUGFUNC("e1000_write_phy_reg_m88"); + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. + **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + DEBUGFUNC("e1000_set_page_igp"); + + DEBUGOUT1("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. 
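The m88 helpers above, like the igp and kmrn helpers that follow, share one acquire/access/release discipline, including the quirk that a missing acquire hook is treated as a silent success. A runnable skeleton of that shape, with illustrative names standing in for the ops function pointers:

#include <stdint.h>
#include <stdio.h>

struct phy_ops {
	int (*acquire)(void);
	void (*release)(void);
};

static int sem_acquire(void) { puts("acquire"); return 0; }
static void sem_release(void) { puts("release"); }

static int raw_read(uint32_t offset, uint16_t *data)
{
	*data = 0xBEEF;		/* stand-in for the MDIC access */
	(void)offset;
	return 0;
}

static int locked_read(const struct phy_ops *ops, uint32_t offset,
		       uint16_t *data)
{
	int ret;

	/* mirror the driver's quirk: no acquire hook means return
	 * success without touching the register at all
	 */
	if (!ops->acquire)
		return 0;

	ret = ops->acquire();
	if (ret)
		return ret;

	ret = raw_read(offset, data);
	ops->release();		/* always released, even on a read error */
	return ret;
}

int main(void)
{
	struct phy_ops ops = { sem_acquire, sem_release };
	uint16_t v = 0;

	locked_read(&ops, 0x11, &v);
	printf("0x%04x\n", v);
	return 0;
}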
Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("__e1000_read_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = E1000_SUCCESS; + + DEBUGFUNC("e1000_write_phy_reg_igp"); + + if (!locked) { + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. 
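The igp access path above is paged: offsets beyond the multi-page limit are first written whole to the page-select register, after which the access itself uses only the low five address bits. A sketch of that address split, with the limits assumed from the usual e1000 definitions:

#include <stdint.h>
#include <stdio.h>

#define MAX_PHY_REG_ADDRESS    0x1F	/* 5-bit MDIO register field */
#define MAX_PHY_MULTI_PAGE_REG 0xF	/* assumed threshold for paging */

/* Plan a paged access the way __e1000_read_phy_reg_igp() does:
 * decide whether a page-select write is needed, and split the
 * offset into page and register parts.
 */
static void igp_plan_access(uint32_t offset, int *need_page_select,
			    uint16_t *page, uint16_t *reg)
{
	*need_page_select = offset > MAX_PHY_MULTI_PAGE_REG;
	*page = (uint16_t)offset;	/* written to the page-select register */
	*reg = (uint16_t)(offset & MAX_PHY_REG_ADDRESS);
}

int main(void)
{
	int sel;
	uint16_t page, reg;

	igp_plan_access(0x1F35, &sel, &page, &reg);	/* example paged offset */
	printf("select=%d page=0x%04x reg=0x%02x\n", sel, page, reg);
	return 0;
}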
+ **/ +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("__e1000_read_kmrn_reg"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_read_kmrn_reg_generic - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
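A Kumeran read, as implemented above, is a single write of the offset plus a read-enable bit, a flush, a 2us delay, and a read-back of the same register with the data in the low 16 bits. A sketch of the request-word composition, with the field layout assumed from the usual e1000 definitions:

#include <stdint.h>
#include <stdio.h>

/* Kumeran control/status layout, assumed: 5-bit offset field at
 * bit 16, read-enable at bit 21.
 */
#define KMRNCTRLSTA_OFFSET_SHIFT 16
#define KMRNCTRLSTA_OFFSET       0x001F0000u
#define KMRNCTRLSTA_REN          0x00200000u

/* Compose the one-shot read request __e1000_read_kmrn_reg() writes
 * before delaying and reading the result back.
 */
static uint32_t kmrn_read_cmd(uint32_t offset)
{
	return ((offset << KMRNCTRLSTA_OFFSET_SHIFT) & KMRNCTRLSTA_OFFSET) |
	       KMRNCTRLSTA_REN;
}

int main(void)
{
	printf("0x%08x\n", kmrn_read_cmd(0x3));	/* prints 0x00230000 */
	return 0;
}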
+ **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + DEBUGFUNC("e1000_write_kmrn_reg_generic"); + + if (!locked) { + s32 ret_val = E1000_SUCCESS; + + if (!hw->phy.ops.acquire) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta); + E1000_WRITE_FLUSH(hw); + + usec_delay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_write_kmrn_reg_generic - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + /* fall-through */ + default: + break; + } + + return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_82577"); + + if (hw->phy.reset_disable) + return E1000_SUCCESS; + + if (hw->phy.type == e1000_phy_82580) { + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + } + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
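Before forcing a mode, e1000_set_master_slave_mode() above snapshots the pre-existing policy from the two 1000T control bits. That decode in isolation (bit values per the MII standard):

#include <stdint.h>
#include <stdio.h>

#define CR_1000T_MS_VALUE  0x0800	/* 1 = force master, 0 = force slave */
#define CR_1000T_MS_ENABLE 0x1000	/* 1 = manual master/slave config */

enum ms_type { MS_AUTO, MS_FORCE_MASTER, MS_FORCE_SLAVE };

/* Decode the current master/slave policy from PHY_1000T_CTRL the way
 * the driver snapshots original_ms_type.
 */
static enum ms_type decode_ms(uint16_t phy_1000t_ctrl)
{
	if (!(phy_1000t_ctrl & CR_1000T_MS_ENABLE))
		return MS_AUTO;
	return (phy_1000t_ctrl & CR_1000T_MS_VALUE) ? MS_FORCE_MASTER
						    : MS_FORCE_SLAVE;
}

int main(void)
{
	printf("%d\n", decode_ms(0x1800));	/* prints 1 (forced master) */
	return 0;
}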
*/ + ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = hw->phy.ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = hw->phy.ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (phy->revision < E1000_REVISION_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == E1000_REVISION_2) && + (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. */ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + + /* Commit the changes. 
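The MDI/MDI-X switch above maps the driver's four-valued mdix policy onto a two-bit register field. The same mapping as a standalone helper, with the M88 field values assumed from the usual definitions:

#include <stdint.h>
#include <stdio.h>

/* M88 PHY-specific control crossover field (bits 5:6); values are
 * assumptions mirroring the usual M88E1000 definitions.
 */
#define M88_PSCR_MDI_MANUAL   0x0000
#define M88_PSCR_MDIX_MANUAL  0x0020
#define M88_PSCR_AUTO_X_1000T 0x0040
#define M88_PSCR_AUTO_X_MODE  0x0060	/* also serves as the field mask */

/* Map the mdix policy (0 auto, 1 MDI, 2 MDI-X, 3 auto for 1000Base-T
 * only) onto the register field, as the switch in
 * e1000_copper_link_setup_m88() does.
 */
static uint16_t m88_apply_mdix(uint16_t pscr, uint8_t mdix)
{
	pscr &= ~M88_PSCR_AUTO_X_MODE;	/* clear the crossover field */
	switch (mdix) {
	case 1: pscr |= M88_PSCR_MDI_MANUAL; break;
	case 2: pscr |= M88_PSCR_MDIX_MANUAL; break;
	case 3: pscr |= M88_PSCR_AUTO_X_1000T; break;
	default: pscr |= M88_PSCR_AUTO_X_MODE; break;
	}
	return pscr;
}

int main(void)
{
	printf("0x%04x\n", m88_apply_mdix(0x3070, 2));	/* prints 0x3030 */
	return 0;
}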
*/ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's. + * Also enables and sets the downshift parameters. + **/ +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + DEBUGFUNC("e1000_copper_link_setup_m88_gen2"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + /* M88E1112 does not support this mode) */ + if (phy->id != M88E1112_E_PHY_ID) { + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + } + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift and setting it to X6 */ + if (phy->id == M88E1543_E_PHY_ID) { + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE; + ret_val = + phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK; + phy_data |= I347AT4_PSCR_DOWNSHIFT_6X; + phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE; + + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + DEBUGOUT("Error committing the PHY changes\n"); + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + DEBUGFUNC("e1000_copper_link_setup_igp"); + + if (phy->reset_disable) + return E1000_SUCCESS; + + ret_val = hw->phy.ops.reset(hw); + if (ret_val) { + DEBUGOUT("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. 
+ */ + msec_delay(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + DEBUGOUT("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = phy->ops.read_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = phy->ops.write_reg(hw, + IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~CR_1000T_MS_ENABLE; + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + DEBUGFUNC("e1000_phy_setup_autoneg"); + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, + &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS | + NWAY_AR_100TX_HD_CAPS | + NWAY_AR_10T_FD_CAPS | + NWAY_AR_10T_HD_CAPS); + mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS); + + DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? 
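The sequence of per-bit checks that follows amounts to a translation table from the software advertisement mask to the MII capability bits, with 1000 half duplex never advertised. A condensed, table-driven sketch of the same translation; the numeric values are assumed from the usual e1000 definitions and the MII standard:

#include <stdint.h>
#include <stdio.h>

#define ADVERTISE_10_HALF   0x0001
#define ADVERTISE_10_FULL   0x0002
#define ADVERTISE_100_HALF  0x0004
#define ADVERTISE_100_FULL  0x0008
#define ADVERTISE_1000_FULL 0x0020

#define NWAY_AR_10T_HD_CAPS   0x0020
#define NWAY_AR_10T_FD_CAPS   0x0040
#define NWAY_AR_100TX_HD_CAPS 0x0080
#define NWAY_AR_100TX_FD_CAPS 0x0100
#define CR_1000T_FD_CAPS      0x0200

/* Collapse the per-bit checks of e1000_phy_setup_autoneg() into a
 * table walk; 1000HD is intentionally absent.
 */
static void build_adv(uint16_t advertised, uint16_t *mii_adv,
		      uint16_t *mii_1000t)
{
	static const struct { uint16_t sw, hw; } map[] = {
		{ ADVERTISE_10_HALF,  NWAY_AR_10T_HD_CAPS },
		{ ADVERTISE_10_FULL,  NWAY_AR_10T_FD_CAPS },
		{ ADVERTISE_100_HALF, NWAY_AR_100TX_HD_CAPS },
		{ ADVERTISE_100_FULL, NWAY_AR_100TX_FD_CAPS },
	};
	unsigned int i;

	*mii_adv = 0;
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (advertised & map[i].sw)
			*mii_adv |= map[i].hw;
	*mii_1000t = (advertised & ADVERTISE_1000_FULL) ? CR_1000T_FD_CAPS : 0;
}

int main(void)
{
	uint16_t adv, ctrl;

	build_adv(ADVERTISE_100_FULL | ADVERTISE_1000_FULL, &adv, &ctrl);
	printf("adv=0x%04x 1000t=0x%04x\n", adv, ctrl);	/* 0x0100 / 0x0200 */
	return 0;
}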
*/ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + DEBUGOUT("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + DEBUGOUT("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + DEBUGOUT("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + DEBUGOUT("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + DEBUGOUT("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + DEBUGOUT("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. 
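The flow control switch above reduces to a small table over the two MII pause bits; note the rx-pause-only case, which has no direct encoding and therefore advertises both bits, with the Tx side disabled later in e1000_config_fc_after_link_up_generic(). A sketch (bit values per the MII standard):

#include <stdint.h>
#include <stdio.h>

#define NWAY_AR_PAUSE   0x0400	/* symmetric pause */
#define NWAY_AR_ASM_DIR 0x0800	/* asymmetric pause direction */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* The pause-bit table implemented by the switch in
 * e1000_phy_setup_autoneg().
 */
static uint16_t fc_pause_bits(enum fc_mode mode)
{
	switch (mode) {
	case FC_NONE:     return 0;
	case FC_RX_PAUSE: return NWAY_AR_PAUSE | NWAY_AR_ASM_DIR;
	case FC_TX_PAUSE: return NWAY_AR_ASM_DIR;
	case FC_FULL:     return NWAY_AR_PAUSE | NWAY_AR_ASM_DIR;
	}
	return 0;
}

int main(void)
{
	printf("0x%04x\n", fc_pause_bits(FC_TX_PAUSE));	/* prints 0x0800 */
	return 0;
}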
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + DEBUGOUT("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + DEBUGFUNC("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + DEBUGOUT("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + DEBUGOUT("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000_setup_copper_link_generic - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + DEBUGFUNC("e1000_setup_copper_link_generic"); + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + DEBUGOUT("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + DEBUGOUT("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. 
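Back in e1000_copper_link_autoneg() above, restarting negotiation is one read-modify-write of the MII control register with the enable and restart bits set together. A sketch (standard BMCR bit values):

#include <stdint.h>
#include <stdio.h>

#define MII_CR_AUTO_NEG_EN      0x1000	/* BMCR auto-negotiation enable */
#define MII_CR_RESTART_AUTO_NEG 0x0200	/* BMCR restart auto-negotiation */

/* The PHY_CONTROL update that kicks off (re)negotiation. */
static uint16_t restart_autoneg(uint16_t phy_ctrl)
{
	return phy_ctrl | MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
}

int main(void)
{
	printf("0x%04x\n", restart_autoneg(0x0100));	/* prints 0x1300 */
	return 0;
}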
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+					     &link);
+	if (ret_val)
+		return ret_val;
+
+	if (link) {
+		DEBUGOUT("Valid link established!!!\n");
+		hw->mac.ops.config_collision_dist(hw);
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
+		DEBUGOUT("Unable to establish link!!!\n");
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Waits for link and returns success
+ * if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link)
+			DEBUGOUT("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex. Clears the
+ * auto-crossover to force MDI manually. Resets the PHY to commit the
+ * changes. If time expires while waiting for link up, we reset the DSP.
+ * After reset, TX_CLK and CRS on Tx must be set. Returns success upon
+ * successful completion, else returns the corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+	/* I210 and I211 devices support Auto-Crossover in forced operation. */
+	if (phy->type != e1000_phy_i210) {
+		/* Clear Auto-Crossover to force MDI manually. M88E1000
+		 * requires MDI forced whenever speed and duplex are forced.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+		ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Reset the phy to commit changes.
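+	 * (For these PHYs ops.commit typically performs a software reset;
+	 * the M88 core only latches forced speed/duplex after one.)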
*/ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + + if (phy->autoneg_wait_to_complete) { + DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + bool reset_dsp = true; + + switch (hw->phy.id) { + case I347AT4_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case M88E1112_E_PHY_ID: + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case I210_I_PHY_ID: + reset_dsp = false; + break; + default: + if (hw->phy.type != e1000_phy_m88) + reset_dsp = false; + break; + } + + if (!reset_dsp) { + DEBUGOUT("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = phy->ops.write_reg(hw, + M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000_phy_reset_dsp_generic(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return E1000_SUCCESS; + + if (hw->phy.id == I347AT4_E_PHY_ID || + hw->phy.id == M88E1340M_E_PHY_ID || + hw->phy.id == M88E1112_E_PHY_ID) + return E1000_SUCCESS; + if (hw->phy.id == I210_I_PHY_ID) + return E1000_SUCCESS; + if ((hw->phy.id == M88E1543_E_PHY_ID) || + (hw->phy.id == M88E1512_E_PHY_ID)) + return E1000_SUCCESS; + ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. 
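+ *
+ * A minimal caller sketch (hypothetical values; real callers reach this
+ * through hw->phy.ops.force_speed_duplex):
+ *
+ *	hw->mac.autoneg = false;
+ *	hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+ *	ret_val = e1000_phy_force_speed_duplex_ife(hw);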
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+
+	e1000_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, data);
+	if (ret_val)
+		return ret_val;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		return ret_val;
+
+	DEBUGOUT1("IFE PMC: %X\n", data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link)
+			DEBUGOUT("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ * @hw: pointer to the HW structure
+ * @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ * Forces speed and duplex on the PHY by doing the following: disable flow
+ * control, force speed/duplex on the MAC, disable auto speed detection,
+ * disable auto-negotiation, configure duplex, configure speed, configure
+ * the collision distance, write configuration to CTRL register. The
+ * caller must write to the PHY_CONTROL register for these settings to
+ * take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~MII_CR_SPEED_1000;
+		DEBUGOUT("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		DEBUGOUT("Forcing 10mb\n");
+	}
+
+	hw->mac.ops.config_collision_dist(hw);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+/**
+ * e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ * @hw: pointer to the HW structure
+ * @active: boolean used to enable/disable lplu
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * The low power link up (lplu) state is set to the power management level D3
+ * and SmartSpeed is disabled when active is true, else clear lplu for D3
+ * and enable SmartSpeed.
LPLU and SmartSpeed are mutually exclusive. LPLU
+ * is used during Dx states where power conservation is most important.
+ * During driver activity, SmartSpeed should be enabled so performance is
+ * maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+	if (!hw->phy.ops.read_reg)
+		return E1000_SUCCESS;
+
+	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					     data);
+		if (ret_val)
+			return ret_val;
+		/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important. During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				return ret_val;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					     data);
+		if (ret_val)
+			return ret_val;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					    &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					     data);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns a negative error code
+ *
+ * A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_downshift_generic");
+
+	switch (phy->type) {
+	case e1000_phy_i210:
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset = M88E1000_PHY_SPEC_STATUS;
+		mask = M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp_3:
+		offset = IGP01E1000_PHY_LINK_HEALTH;
+		mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		return E1000_SUCCESS;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = !!(phy_data & mask);
+
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_m88 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
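+ * (specifically bit M88E1000_PSSR_REV_POLARITY of M88E1000_PHY_SPEC_STATUS).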
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_check_polarity_m88");
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
+				       ? e1000_rev_polarity_reversed
+				       : e1000_rev_polarity_normal);
+
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_igp - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY port status register, and the
+ * current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_igp");
+
+	/* Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset = IGP01E1000_PHY_PCS_INIT_REG;
+		mask = IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/* This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset = IGP01E1000_PHY_PORT_STATUS;
+		mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = ((data & mask)
+				       ? e1000_rev_polarity_reversed
+				       : e1000_rev_polarity_normal);
+
+	return ret_val;
+}
+
+/**
+ * e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ * @hw: pointer to the HW structure
+ *
+ * Polarity is determined based on the polarity reversal feature being
+ * enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_ife");
+
+	/* Polarity is determined based on the reversal feature being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = ((phy_data & mask)
+				       ? e1000_rev_polarity_reversed
+				       : e1000_rev_polarity_normal);
+
+	return ret_val;
+}
+
+/**
+ * e1000_wait_autoneg - Wait for auto-neg completion
+ * @hw: pointer to the HW structure
+ *
+ * Waits for auto-negotiation to complete or for the auto-negotiation time
+ * limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_wait_autoneg");
+
+	if (!hw->phy.ops.read_reg)
+		return E1000_SUCCESS;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msec_delay(100);
+	}
+
+	/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+	 * has completed.
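+	 * (The loop above bounds the wait at PHY_AUTO_NEG_LIMIT polls of
+	 * 100 ms each; callers needing certainty should re-check
+	 * MII_SR_AUTONEG_COMPLETE themselves.)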
+	 */
+	return ret_val;
+}
+
+/**
+ * e1000_phy_has_link_generic - Polls PHY for link
+ * @hw: pointer to the HW structure
+ * @iterations: number of times to poll for link
+ * @usec_interval: delay between polling attempts
+ * @success: pointer to whether polling was successful or not
+ *
+ * Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+			       u32 usec_interval, bool *success)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_phy_has_link_generic");
+
+	if (!hw->phy.ops.read_reg)
+		return E1000_SUCCESS;
+
+	for (i = 0; i < iterations; i++) {
+		/* Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky. No harm doing
+		 * it across the board.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val) {
+			/* If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				msec_delay(usec_interval/1000);
+			else
+				usec_delay(usec_interval);
+		}
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			msec_delay(usec_interval/1000);
+		else
+			usec_delay(usec_interval);
+	}
+
+	*success = (i < iterations);
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the PHY specific status register to retrieve the cable length
+ * information. The cable length is determined by averaging the minimum and
+ * maximum values to get the "average" cable length. The m88 PHY has five
+ * possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_m88");
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+		 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+		return -E1000_ERR_PHY;
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+	return E1000_SUCCESS;
+}
+
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, phy_data2, is_cm;
+	u16 index, default_page;
+
+	DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+	switch (hw->phy.id) {
+	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ?
100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + break; + case M88E1543_E_PHY_ID: + case M88E1512_E_PHY_ID: + case M88E1340M_E_PHY_ID: + case I347AT4_E_PHY_ID: + /* Remember the original page select and set it to 7 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07); + if (ret_val) + return ret_val; + + /* Get cable length from PHY Cable Diagnostics Control Reg */ + ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr), + &phy_data); + if (ret_val) + return ret_val; + + /* Check if the unit of cable length is meters or cm */ + ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2); + if (ret_val) + return ret_val; + + is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT); + + /* Populate the phy structure with cable length in meters */ + phy->min_cable_length = phy_data / (is_cm ? 100 : 1); + phy->max_cable_length = phy_data / (is_cm ? 100 : 1); + phy->cable_length = phy_data / (is_cm ? 100 : 1); + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + break; + + case M88E1112_E_PHY_ID: + /* Remember the original page select and set it to 5 */ + ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT, + &default_page); + if (ret_val) + return ret_val; + + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05); + if (ret_val) + return ret_val; + + ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE, + &phy_data); + if (ret_val) + return ret_val; + + index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + + phy->max_cable_length) / 2; + + /* Reset the page select to its original value */ + ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, + default_page); + if (ret_val) + return ret_val; + + break; + default: + return -E1000_ERR_PHY; + } + + return ret_val; +} + +/** + * e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. + **/ +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + DEBUGFUNC("e1000_get_cable_length_igp_2"); + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. 
The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				 IGP02E1000_AGC_LENGTH_MASK);
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0))
+			return -E1000_ERR_PHY;
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 15 meters
+	 * (IGP02E1000_AGC_RANGE).
+	 */
+	phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				 (agc_value - IGP02E1000_AGC_RANGE) : 0);
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_phy_info_m88 - Retrieve PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Valid only for copper links. Read the PHY status register (sticky read)
+ * to verify that link is up. Read the PHY special control register to
+ * determine the polarity and 10base-T extended distance. Read the PHY
+ * special status register to determine MDI/MDIx and current speed. If
+ * speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_m88");
+
+	if (phy->media_type != e1000_media_type_copper) {
+		DEBUGOUT("Phy info is only valid for copper media\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->polarity_correction = !!(phy_data &
+				      M88E1000_PSCR_POLARITY_REVERSAL);
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_igp - Retrieve igp PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up.
+ * If link is up, then set/determine 10base-T extended distance and polarity
+ * correction. Read PHY port status to determine MDI/MDIx and speed. Based
+ * on the speed, determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_igp");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ * @hw: pointer to the HW structure
+ *
+ * Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_ife");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+	phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
+				       ? e1000_rev_polarity_reversed
+				       : e1000_rev_polarity_normal);
+	}
+
+	ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_sw_reset_generic - PHY software reset
+ * @hw: pointer to the HW structure
+ *
+ * Does a software reset of the PHY by reading the PHY control register,
+ * setting the reset bit, and writing the register back to the PHY.
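+ * The MII_CR_RESET bit is self-clearing once the PHY completes the reset.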
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	if (!hw->phy.ops.read_reg)
+		return E1000_SUCCESS;
+
+	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	usec_delay(1);
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_generic - PHY hardware reset
+ * @hw: pointer to the HW structure
+ *
+ * Verify the reset block is not blocking us from resetting. Acquire
+ * semaphore (if necessary) and read/set/write the device control reset
+ * bit in the PHY. Wait the appropriate delay time for the device to
+ * reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+	if (phy->ops.check_reset_block) {
+		ret_val = phy->ops.check_reset_block(hw);
+		if (ret_val)
+			return E1000_SUCCESS;
+	}
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(phy->reset_delay_us);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	phy->ops.release(hw);
+
+	return phy->ops.get_cfg_done(hw);
+}
+
+/**
+ * e1000_get_cfg_done_generic - Generic configuration done
+ * @hw: pointer to the HW structure
+ *
+ * Generic function to wait 10 milliseconds for configuration to complete
+ * and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw E1000_UNUSEDARG *hw)
+{
+	DEBUGFUNC("e1000_get_cfg_done_generic");
+
+	msec_delay_irq(10);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/ +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw) +{ + DEBUGOUT("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + hw->phy.ops.write_reg(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + hw->phy.ops.write_reg(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + hw->phy.ops.write_reg(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + hw->phy.ops.write_reg(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + hw->phy.ops.write_reg(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + hw->phy.ops.write_reg(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + hw->phy.ops.write_reg(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + hw->phy.ops.write_reg(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + hw->phy.ops.write_reg(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + hw->phy.ops.write_reg(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + hw->phy.ops.write_reg(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + hw->phy.ops.write_reg(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + hw->phy.ops.write_reg(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + hw->phy.ops.write_reg(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + hw->phy.ops.write_reg(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + hw->phy.ops.write_reg(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + hw->phy.ops.write_reg(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + hw->phy.ops.write_reg(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + hw->phy.ops.write_reg(hw, 0x0000, 0x1340); + + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
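+ *
+ * Example (hypothetical check, mirroring e1000_determine_phy_address below):
+ *
+ *	if (e1000_get_phy_type_from_id(hw->phy.id) != e1000_phy_unknown)
+ *		return E1000_SUCCESS;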
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+	case M88E1543_E_PHY_ID:
+	case M88E1512_E_PHY_ID:
+	case I347AT4_E_PHY_ID:
+	case M88E1112_E_PHY_ID:
+	case M88E1340M_E_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	case I82580_I_PHY_ID:
+		phy_type = e1000_phy_82580;
+		break;
+	case I210_I_PHY_ID:
+		phy_type = e1000_phy_i210;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ * e1000_determine_phy_address - Determines PHY address.
+ * @hw: pointer to the HW structure
+ *
+ * This uses a trial and error method to loop through possible PHY
+ * addresses. It tests each by reading the PHY ID registers and
+ * checking for a match.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+	u32 phy_addr = 0;
+	u32 i;
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
+		do {
+			e1000_get_phy_id(hw);
+			phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+			/* If phy_type is valid, break - we found our
+			 * PHY address
+			 */
+			if (phy_type != e1000_phy_unknown)
+				return E1000_SUCCESS;
+
+			msec_delay(1);
+			i++;
+		} while (i < 10);
+	}
+
+	return -E1000_ERR_PHY_TYPE;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or if wake on LAN is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Powers down the PHY to save power or to turn off link during a driver
+ * unload, when wake on LAN is not enabled. The PHY retains its settings
+ * across the power down/up cycle.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+	msec_delay(1);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_check_polarity_82577");
+
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+				       ? e1000_rev_polarity_reversed
+				       : e1000_rev_polarity_normal);
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link)
+			DEBUGOUT("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDIx and speed. Based on the speed,
+ * determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_82577");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_82577(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies the result is valid
+ * before placing it in the phy_cable_length field.
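+ * (The raw length sits in bits 9:2 of I82577_PHY_DIAG_STATUS; a value of
+ * E1000_CABLE_LENGTH_UNDEFINED means the PHY could not estimate it.)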
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	DEBUGFUNC("e1000_get_cable_length_82577");
+
+	ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+		  I82577_DSTATUS_CABLE_LENGTH_SHIFT);
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		return -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_gs40g - Write GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquires semaphore, if necessary, then writes the data to the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	DEBUGFUNC("e1000_write_phy_reg_gs40g");
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_gs40g - Read GS40G PHY register
+ * @hw: pointer to the HW structure
+ * @offset: lower half is the register offset to read; upper half is the
+ *	page to use
+ * @data: pointer to the data read at the register offset
+ *
+ * Acquires semaphore, if necessary, then reads the data in the PHY register
+ * at the offset. Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	DEBUGFUNC("e1000_read_phy_reg_gs40g");
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_mphy - Read mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to be read
+ * @data: pointer to the read data
+ *
+ * Reads the mPHY control register in the PHY at the given address and
+ * stores the information read in data.
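+ *
+ * Access sequence, as implemented below: wait for E1000_MPHY_BUSY to clear,
+ * enable access if E1000_MPHY_DIS_ACCESS was set, program the lane address,
+ * read E1000_MPHY_DATA, then re-disable access if it was disabled on entry.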
+ **/
+s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
+{
+	u32 mphy_ctrl = 0;
+	bool locked = false;
+	bool ready;
+
+	DEBUGFUNC("e1000_read_phy_reg_mphy");
+
+	/* Check if mPHY is ready for read/write operations */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+
+	/* Check if mPHY access is disabled and enable it if so */
+	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+		locked = true;
+		ready = e1000_is_mphy_ready(hw);
+		if (!ready)
+			return -E1000_ERR_PHY;
+		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+	}
+
+	/* Set the address that we want to read */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+
+	/* We mask address, because we want to use only current lane */
+	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK &
+		     ~E1000_MPHY_ADDRESS_FNC_OVERRIDE) |
+		    (address & E1000_MPHY_ADDRESS_MASK);
+	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+	/* Read data from the address */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+	*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
+
+	/* Disable access to mPHY if it was originally disabled */
+	if (locked) {
+		ready = e1000_is_mphy_ready(hw);
+		if (!ready)
+			return -E1000_ERR_PHY;
+		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+				E1000_MPHY_DIS_ACCESS);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_write_phy_reg_mphy - Write mPHY control register
+ * @hw: pointer to the HW structure
+ * @address: address to write to
+ * @data: data to write to register at offset
+ * @line_override: used when we want to use a different line than the
+ *	default one
+ *
+ * Writes data to mPHY control register.
+ **/
+s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
+			     bool line_override)
+{
+	u32 mphy_ctrl = 0;
+	bool locked = false;
+	bool ready;
+
+	DEBUGFUNC("e1000_write_phy_reg_mphy");
+
+	/* Check if mPHY is ready for read/write operations */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+
+	/* Check if mPHY access is disabled and enable it if so */
+	mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL);
+	if (mphy_ctrl & E1000_MPHY_DIS_ACCESS) {
+		locked = true;
+		ready = e1000_is_mphy_ready(hw);
+		if (!ready)
+			return -E1000_ERR_PHY;
+		mphy_ctrl |= E1000_MPHY_ENA_ACCESS;
+		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+	}
+
+	/* Set the address that we want to write to */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+
+	/* We mask address, because we want to use only current lane */
+	if (line_override)
+		mphy_ctrl |= E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+	else
+		mphy_ctrl &= ~E1000_MPHY_ADDRESS_FNC_OVERRIDE;
+	mphy_ctrl = (mphy_ctrl & ~E1000_MPHY_ADDRESS_MASK) |
+		    (address & E1000_MPHY_ADDRESS_MASK);
+	E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, mphy_ctrl);
+
+	/* Write data to the address */
+	ready = e1000_is_mphy_ready(hw);
+	if (!ready)
+		return -E1000_ERR_PHY;
+	E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
+
+	/* Disable access to mPHY if it was originally disabled */
+	if (locked) {
+		ready = e1000_is_mphy_ready(hw);
+		if (!ready)
+			return -E1000_ERR_PHY;
+		E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+				E1000_MPHY_DIS_ACCESS);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_is_mphy_ready - Check if mPHY control register is not busy
+ * @hw: pointer to the HW structure
+ *
+ * Returns mPHY control register status.
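+ * Polls E1000_MPHY_ADDR_CTRL up to two times, 20 microseconds apart, and
+ * returns true once E1000_MPHY_BUSY is clear.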
+ **/ +bool e1000_is_mphy_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u32 mphy_ctrl = 0; + bool ready = false; + + while (retry_count < 2) { + mphy_ctrl = E1000_READ_REG(hw, E1000_MPHY_ADDR_CTRL); + if (mphy_ctrl & E1000_MPHY_BUSY) { + usec_delay(20); + retry_count++; + continue; + } + ready = true; + break; + } + + if (!ready) + DEBUGOUT("ERROR READING mPHY control register, phy is busy.\n"); + + return ready; +} diff --git a/drivers/staging/igb_avb/e1000_phy.h b/drivers/staging/igb_avb/e1000_phy.h new file mode 100644 index 000000000000..a109c914ce8d --- /dev/null +++ b/drivers/staging/igb_avb/e1000_phy.h @@ -0,0 +1,252 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_PHY_H_ +#define _E1000_PHY_H_ + +void e1000_init_phy_ops_generic(struct e1000_hw *hw); +s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data); +void e1000_null_phy_generic(struct e1000_hw *hw); +s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_null_set_page(struct e1000_hw *hw, u16 data); +s32 e1000_read_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); +s32 e1000_write_i2c_byte_null(struct e1000_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); +s32 e1000_check_downshift_generic(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw); +s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000_get_phy_id(struct e1000_hw *hw); +s32 e1000_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw); +void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw); +s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 
e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active); +s32 e1000_setup_copper_link_generic(struct e1000_hw *hw); +s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id); +s32 e1000_determine_phy_address(struct e1000_hw *hw); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data); +s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); +s32 e1000_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data); +s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, + bool line_override); +bool e1000_is_mphy_ready(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* GS40G - I210 PHY defines */ +#define GS40G_PAGE_SELECT 0x16 +#define GS40G_PAGE_SHIFT 16 +#define GS40G_OFFSET_MASK 0xFFFF +#define GS40G_PAGE_2 0x20000 +#define GS40G_MAC_REG2 0x15 +#define GS40G_MAC_LB 0x4140 +#define GS40G_MAC_SPEED_1G 0X0006 +#define GS40G_COPPER_SPEC 0x0010 + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 
*/
+#define I82577_CTRL_REG			23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2		18
+#define I82577_PHY_LBK_CTRL		19
+#define I82577_PHY_STATUS_2		26
+#define I82577_PHY_DIAG_STATUS		31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY		0x0400
+#define I82577_PHY_STATUS2_MDIX			0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK		0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS	0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_MANUAL_MDIX		0x0200
+#define I82577_PHY_CTRL2_AUTO_MDI_MDIX		0x0400
+#define I82577_PHY_CTRL2_MDIX_CFG_MASK		0x0600
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH		0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT	2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT	0xE14
+#define E1000_82580_PM_SPD		0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU		0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU		0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD		0x0020 /* Go Link Disconnect */
+
+#define E1000_MPHY_DIS_ACCESS		0x80000000 /* disable_access bit */
+#define E1000_MPHY_ENA_ACCESS		0x40000000 /* enable_access bit */
+#define E1000_MPHY_BUSY			0x00010000 /* busy bit */
+#define E1000_MPHY_ADDRESS_FNC_OVERRIDE	0x20000000 /* fnc_override bit */
+#define E1000_MPHY_ADDRESS_MASK		0x0000FFFF /* address mask */
+
+#define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK	0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED	0x0080
+
+#define IGP02E1000_PM_SPD		0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU		0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU		0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE	0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED	0x0002
+#define IGP01E1000_PSSR_MDIX		0x0800
+#define IGP01E1000_PSSR_SPEED_MASK	0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS	0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM	4
+#define IGP02E1000_PHY_AGC_A		0x11B1
+#define IGP02E1000_PHY_AGC_B		0x12B1
+#define IGP02E1000_PHY_AGC_C		0x14B1
+#define IGP02E1000_PHY_AGC_D		0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT	9 /* Coarse=15:13, Fine=12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK	0x7F
+#define IGP02E1000_AGC_RANGE		15
+
+#define E1000_CABLE_LENGTH_UNDEFINED	0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
+#define E1000_KMRNCTRLSTA_REN		0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3 /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS	0x4 /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9 /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
+#define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Ctrl */
+#define IFE_PHY_SPECIAL_CONTROL_LED	0x1B /* PHY Special and LED Ctrl */
+#define IFE_PHY_MDIX_CONTROL		0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED	0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE	0x0010
+#define IFE_PSC_FORCE_POLARITY		0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE		0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF		0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON		0x0007 /* Force LEDs 0 and 2 on
*/ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +/* SFP modules ID memory locations */ +#define E1000_SFF_IDENTIFIER_OFFSET 0x00 +#define E1000_SFF_IDENTIFIER_SFF 0x02 +#define E1000_SFF_IDENTIFIER_SFP 0x03 + +#define E1000_SFF_ETH_FLAGS_OFFSET 0x06 +/* Flags for SFP modules compatible with ETH up to 1Gb */ +struct sfp_e1000_flags { + u8 e1000_base_sx:1; + u8 e1000_base_lx:1; + u8 e1000_base_cx:1; + u8 e1000_base_t:1; + u8 e100_base_lx:1; + u8 e100_base_fx:1; + u8 e10_base_bx10:1; + u8 e10_base_px:1; +}; + +/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +#define E1000_SFF_VENDOR_OUI_TYCO 0x00407600 +#define E1000_SFF_VENDOR_OUI_FTL 0x00906500 +#define E1000_SFF_VENDOR_OUI_AVAGO 0x00176A00 +#define E1000_SFF_VENDOR_OUI_INTEL 0x001B2100 + +#endif diff --git a/drivers/staging/igb_avb/e1000_regs.h b/drivers/staging/igb_avb/e1000_regs.h new file mode 100644 index 000000000000..caf1d04dee87 --- /dev/null +++ b/drivers/staging/igb_avb/e1000_regs.h @@ -0,0 +1,633 @@ +/******************************************************************************* + + Intel(R) Gigabit Ethernet Linux driver + Copyright(c) 2007-2015 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_REGS_H_ +#define _E1000_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_MDICNFG 0x00E04 /* MDI Config - RW */ +#define E1000_REGISTER_SET_SIZE 0x20000 /* CSR Size */ +#define E1000_EEPROM_INIT_CTRL_WORD_2 0x0F /* EEPROM Init Ctrl Word 2 */ +#define E1000_EEPROM_PCIE_CTRL_WORD_2 0x28 /* EEPROM PCIe Ctrl Word 2 */ +#define E1000_BARCTRL 0x5BBC /* BAR ctrl reg */ +#define E1000_BARCTRL_FLSIZE 0x0700 /* BAR ctrl Flsize */ +#define E1000_BARCTRL_CSRSIZE 0x2000 /* BAR ctrl CSR size */ +#define E1000_MPHY_ADDR_CTRL 0x0024 /* GbE MPHY Address Control */ +#define E1000_MPHY_DATA 0x0E10 /* GBE MPHY Data */ +#define E1000_MPHY_STAT 0x0E0C /* GBE MPHY Statistics */ +#define E1000_PPHY_CTRL 0x5b48 /* PCIe PHY Control */ +#define E1000_I350_BARCTRL 0x5BFC /* BAR ctrl reg */ +#define E1000_I350_DTXMXPKTSZ 0x355C /* Maximum sent packet size reg*/ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_CONNSW 0x00034 /* Copper/Fiber switch control - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_TSSDP 0x0003C /* Time Sync SDP Configuration Register - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_EICR 0x01580 /* Ext. Interrupt Cause Read - R/clr */ +#define E1000_EITR(_n) (0x01680 + (0x4 * (_n))) +#define E1000_EICS 0x01520 /* Ext. Interrupt Cause Set - W0 */ +#define E1000_EIMS 0x01524 /* Ext. Interrupt Mask Set/Read - RW */ +#define E1000_EIMC 0x01528 /* Ext. Interrupt Mask Clear - WO */ +#define E1000_EIAC 0x0152C /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAM 0x01530 /* Ext. 
Interrupt Ack Auto Clear Mask - RW */ +#define E1000_GPIE 0x01514 /* General Purpose Interrupt Enable - RW */ +#define E1000_IVAR0 0x01700 /* Interrupt Vector Allocation (array) - RW */ +#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_LEDMUX 0x08130 /* LED MUX Control */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ +#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */ +#define E1000_I2CBB_EN 0x00000100 /* I2C - Bit Bang Enable */ +#define E1000_I2C_CLK_OUT 0x00000200 /* I2C- Clock */ +#define E1000_I2C_DATA_OUT 0x00000400 /* I2C- Data Out */ +#define E1000_I2C_DATA_OE_N 0x00000800 /* I2C- Data Output Enable */ +#define E1000_I2C_DATA_IN 0x00001000 /* I2C- Data In */ +#define E1000_I2C_CLK_OE_N 0x00002000 /* I2C- Clock Output Enable */ +#define E1000_I2C_CLK_IN 0x00004000 /* I2C- Clock In */ +#define E1000_I2C_CLK_STRETCH_DIS 0x00008000 /* I2C- Dis Clk Stretching */ +#define E1000_WDSTP 0x01040 /* Watchdog Setup - RW */ +#define E1000_SWDSTS 0x01044 /* SW Device Status - RW */ +#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ +#define E1000_TCPTIMER 0x0104C /* TCP Timer - RW */ +#define E1000_VPDDIAG 0x01060 /* VPD Diagnostic - RO */ +#define E1000_ICR_V2 0x01500 /* Intr Cause - new location - RC */ +#define E1000_ICS_V2 0x01504 /* Intr Cause Set - new location - WO */ +#define E1000_IMS_V2 0x01508 /* Intr Mask Set/Read - new location - RW */ +#define E1000_IMC_V2 0x0150C /* Intr Mask Clear - new location - WO */ +#define E1000_IAM_V2 0x01510 /* Intr Ack Auto Mask - new location - RW */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +#define E1000_PBRTH 0x02458 /* PB Rx Arbitration Threshold - RW */ +#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDPUMB 0x025CC /* DMA Rx Descriptor uC Mailbox - RW */ +#define E1000_RDPUAD 0x025D0 /* DMA Rx Descriptor uC Addr Command - RW */ +#define E1000_RDPUWD 0x025D4 /* DMA Rx Descriptor uC Data Write - RW */ +#define E1000_RDPURD 0x025D8 /* DMA Rx 
Descriptor uC Data Read - RW */ +#define E1000_RDPUCTL 0x025DC /* DMA Rx Descriptor uC Control - RW */ +#define E1000_PBDIAG 0x02458 /* Packet Buffer Diagnostic - RW */ +#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */ +#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer Si - RW */ +#define E1000_PBRWAC 0x024E8 /* Rx packet buffer wrap around counter - RO */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +#define E1000_EMIADD 0x10 /* Extended Memory Indirect Address */ +#define E1000_EMIDATA 0x11 /* Extended Memory Indirect Data */ +#define E1000_SRWR 0x12018 /* Shadow Ram Write Register - RW */ +#define E1000_I210_FLMNGCTL 0x12038 +#define E1000_I210_FLMNGDATA 0x1203C +#define E1000_I210_FLMNGCNT 0x12040 + +#define E1000_I210_FLSWCTL 0x12048 +#define E1000_I210_FLSWDATA 0x1204C +#define E1000_I210_FLSWCNT 0x12050 + +#define E1000_I210_FLA 0x1201C + +#define E1000_INVM_DATA_REG(_n) (0x12120 + 4*(_n)) +#define E1000_INVM_SIZE 64 /* Number of INVM Data Registers */ + +/* QAV Tx mode control register */ +#define E1000_I210_TQAVCTRL 0x3570 +#define E1000_DTXMXPKTSZ 0x0355C + +/* High credit registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVHC(_n) (0x300C + 0x40 * (_n)) + +/* Queues fetch arbitration priority control register */ +#define E1000_I210_TQAVARBCTRL 0x3574 +/* Queues priority masks where _n and _p can be 0-3. */ +#define E1000_TQAVARBCTRL_QUEUE_PRI(_n, _p) ((_p) << (2 * (_n))) +/* QAV Tx mode control registers where _n can be 0 or 1. */ +#define E1000_I210_TQAVCC(_n) (0x3004 + 0x40 * (_n)) + +/* QAV Tx mode control register bitfields masks */ +#define E1000_TQAVCC_IDLE_SLOPE 0xFFFF /* Idle slope */ +#define E1000_TQAVCC_KEEP_CREDITS (1 << 30) /* Keep credits opt enable */ +#define E1000_TQAVCC_QUEUE_MODE (1 << 31) /* SP vs. SR Tx mode */ + +/* Good transmitted packets counter registers */ +#define E1000_PQGPTC(_n) (0x010014 + (0x100 * (_n))) + +/* Queues packet buffer size masks where _n can be 0-3 and _s 0-63 [kB] */ +#define E1000_I210_TXPBS_SIZE(_n, _s) ((_s) << (6 * (_n))) + +#define E1000_MMDAC 13 /* MMD Access Control */ +#define E1000_MMDAAD 14 /* MMD Access Address/Data */ + +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_SRRCTL(_n) ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \ + (0x0C00C + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_RQDPC(_n) ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \ + (0x0C030 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? 
(0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \ + (0x0E038 + ((_n) * 0x40))) +#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \ + (0x0E03C + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_RSRPD 0x02C00 /* Rx Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4)) +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) +#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) +#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) +#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8)) +#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8)) +#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8)) +#define E1000_PBSLAC 0x03100 /* Pkt Buffer Slave Access Control */ +#define E1000_PBSLAD(_n) (0x03110 + (0x4 * (_n))) /* Pkt Buffer DWORD */ +#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */ +/* Same as TXPBS, renamed for newer Si - RW */ +#define E1000_ITPBS 0x03404 +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TDPUMB 0x0357C /* DMA Tx Desc uC Mail Box - RW */ +#define E1000_TDPUAD 0x03580 /* DMA Tx Desc uC Addr Command - RW */ +#define E1000_TDPUWD 0x03584 /* DMA Tx Desc uC Data Write - RW */ +#define E1000_TDPURD 0x03588 /* DMA Tx Desc uC Data Read - RW */ +#define E1000_TDPUCTL 0x0358C /* DMA Tx Desc uC Control - RW */ +#define E1000_DTXCTL 0x03590 /* DMA Tx Control - RW */ +#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */ +#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */ +/* DMA Tx Max Total Allow Size Reqs - RW */ +#define E1000_DTXMXSZRQ 0x03540 +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ 
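A note on the queue-indexed convenience macros defined above (E1000_RDBAL() through E1000_TXDCTL()): they hide the split register layout, in which queues 0-3 live in the legacy CSR block with a 0x100 stride while queues 4 and up live in a second block with a 0x40 stride. The following minimal sketch shows how a caller might program an Rx ring with them; it is illustrative only, not part of this patch, the helper name is hypothetical, and it uses the generic kernel writel()/lower_32_bits() helpers rather than the driver's own register wrappers.

/* Sketch: program the base, length, and head/tail of Rx queue n.
 * The macros resolve to the correct register bank for any n, so the
 * caller never needs to know about the 0-3 vs. 4+ queue split. */
static void example_setup_rx_ring(u8 __iomem *csr, int n,
				  dma_addr_t ring_dma, u32 ring_bytes)
{
	writel(lower_32_bits(ring_dma), csr + E1000_RDBAL(n));
	writel(upper_32_bits(ring_dma), csr + E1000_RDBAH(n));
	writel(ring_bytes, csr + E1000_RDLEN(n));
	writel(0, csr + E1000_RDH(n));	/* head: next descriptor hardware owns */
	writel(0, csr + E1000_RDT(n));	/* tail: advanced as buffers are posted */
}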
+#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ 
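Nearly all of the counters above are read-to-clear (R/clr), and the octet counts are 64-bit quantities split across low/high register pairs such as E1000_GORCL/E1000_GORCH and E1000_TORL/E1000_TORH. A hedged sketch of the accumulation pattern that layout implies; the helper name and csr argument are illustrative rather than driver API, and the low half is read before the high half, the order the datasheets specify for these pairs.

/* Sketch: fold a read-to-clear low/high octet counter pair into a
 * running 64-bit software total. Both halves clear on read, so every
 * value read must be added to the sum or those octets are lost. */
static void example_accumulate_gorc(u8 __iomem *csr, u64 *sw_total)
{
	u64 lo = readl(csr + E1000_GORCL);	/* read (and clear) low half first */
	u64 hi = readl(csr + E1000_GORCH);	/* then the high half */

	*sw_total += (hi << 32) | lo;
}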
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ + +/* Virtualization statistical counters */ +#define E1000_PFVFGPRC(_n) (0x010010 + (0x100 * (_n))) +#define E1000_PFVFGPTC(_n) (0x010014 + (0x100 * (_n))) +#define E1000_PFVFGORC(_n) (0x010018 + (0x100 * (_n))) +#define E1000_PFVFGOTC(_n) (0x010034 + (0x100 * (_n))) +#define E1000_PFVFMPRC(_n) (0x010038 + (0x100 * (_n))) +#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n))) +#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n))) +#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n))) +#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n))) + +/* LinkSec */ +#define E1000_LSECTXUT 0x04300 /* Tx Untagged Pkt Cnt */ +#define E1000_LSECTXPKTE 0x04304 /* Encrypted Tx Pkts Cnt */ +#define E1000_LSECTXPKTP 0x04308 /* Protected Tx Pkt Cnt */ +#define E1000_LSECTXOCTE 0x0430C /* Encrypted Tx Octets Cnt */ +#define E1000_LSECTXOCTP 0x04310 /* Protected Tx Octets Cnt */ +#define E1000_LSECRXUT 0x04314 /* Untagged non-Strict Rx Pkt Cnt */ +#define E1000_LSECRXOCTD 0x0431C /* Rx Octets Decrypted Count */ +#define E1000_LSECRXOCTV 0x04320 /* Rx Octets Validated */ +#define E1000_LSECRXBAD 0x04324 /* Rx Bad Tag */ +#define E1000_LSECRXNOSCI 0x04328 /* Rx Packet No SCI Count */ +#define E1000_LSECRXUNSCI 0x0432C /* Rx Packet Unknown SCI Count */ +#define E1000_LSECRXUNCH 0x04330 /* Rx Unchecked Packets Count */ +#define E1000_LSECRXDELAY 0x04340 /* Rx Delayed Packet Count */ +#define E1000_LSECRXLATE 0x04350 /* Rx Late Packets Count */ +#define E1000_LSECRXOK(_n) (0x04360 + (0x04 * (_n))) /* Rx Pkt OK Cnt */ +#define E1000_LSECRXINV(_n) (0x04380 + (0x04 * (_n))) /* Rx Invalid Cnt */ +#define E1000_LSECRXNV(_n) (0x043A0 + (0x04 * (_n))) /* Rx Not Valid Cnt */ +#define E1000_LSECRXUNSA 0x043C0 /* Rx Unused SA Count */ +#define E1000_LSECRXNUSA 0x043D0 /* Rx Not Using SA Count */ +#define E1000_LSECTXCAP 0x0B000 /* Tx Capabilities Register - RO */ +#define E1000_LSECRXCAP 0x0B300 /* Rx Capabilities Register - RO */ +#define E1000_LSECTXCTRL 0x0B004 /* Tx Control - RW */ +#define E1000_LSECRXCTRL 0x0B304 /* Rx Control - RW */ +#define E1000_LSECTXSCL 0x0B008 /* Tx SCI Low - RW */ +#define E1000_LSECTXSCH 0x0B00C /* Tx SCI High - RW */ +#define E1000_LSECTXSA 0x0B010 /* Tx SA0 - RW */ +#define E1000_LSECTXPN0 0x0B018 /* Tx SA PN 0 - RW */ +#define E1000_LSECTXPN1 0x0B01C /* Tx SA PN 1 - RW */ +#define E1000_LSECRXSCL 0x0B3D0 /* Rx SCI Low - RW */ +#define E1000_LSECRXSCH 0x0B3E0 /* Rx SCI High - RW */ +/* LinkSec Tx 128-bit Key 0 - WO */ +#define E1000_LSECTXKEY0(_n) (0x0B020 + (0x04 * (_n))) +/* LinkSec Tx 128-bit Key 1 - WO */ +#define E1000_LSECTXKEY1(_n) (0x0B030 + (0x04 * (_n))) +#define E1000_LSECRXSA(_n) (0x0B310 + (0x04 * (_n))) /* Rx SAs - RW */ +#define E1000_LSECRXPN(_n) (0x0B330 + (0x04 * (_n))) /* Rx SAs - RW */ +/* LinkSec Rx Keys - where _n is the SA no. and _m the 4 dwords of the 128 bit + * key - RW. 
+ */ +#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m))) + +#define E1000_SSVPC 0x041A0 /* Switch Security Violation Pkt Cnt */ +#define E1000_IPSCTRL 0xB430 /* IpSec Control Register */ +#define E1000_IPSRXCMD 0x0B408 /* IPSec Rx Command Register - RW */ +#define E1000_IPSRXIDX 0x0B400 /* IPSec Rx Index - RW */ +/* IPSec Rx IPv4/v6 Address - RW */ +#define E1000_IPSRXIPADDR(_n) (0x0B420 + (0x04 * (_n))) +/* IPSec Rx 128-bit Key - RW */ +#define E1000_IPSRXKEY(_n) (0x0B410 + (0x04 * (_n))) +#define E1000_IPSRXSALT 0x0B404 /* IPSec Rx Salt - RW */ +#define E1000_IPSRXSPI 0x0B40C /* IPSec Rx SPI - RW */ +/* IPSec Tx 128-bit Key - RW */ +#define E1000_IPSTXKEY(_n) (0x0B460 + (0x04 * (_n))) +#define E1000_IPSTXSALT 0x0B454 /* IPSec Tx Salt - RW */ +#define E1000_IPSTXIDX 0x0B450 /* IPSec Tx SA IDX - RW */ +#define E1000_PCS_CFG0 0x04200 /* PCS Configuration 0 - RW */ +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_CBTMPC 0x0402C /* Circuit Breaker Tx Packet Count */ +#define E1000_HTDPMC 0x0403C /* Host Transmit Discarded Packets */ +#define E1000_CBRDPC 0x04044 /* Circuit Breaker Rx Dropped Count */ +#define E1000_CBRMPC 0x040FC /* Circuit Breaker Rx Packet Count */ +#define E1000_RPTHC 0x04104 /* Rx Packets To Host */ +#define E1000_HGPTC 0x04118 /* Host Good Packets Tx Count */ +#define E1000_HTCBDPC 0x04124 /* Host Tx Circuit Breaker Dropped Count */ +#define E1000_HGORCL 0x04128 /* Host Good Octets Received Count Low */ +#define E1000_HGORCH 0x0412C /* Host Good Octets Received Count High */ +#define E1000_HGOTCL 0x04130 /* Host Good Octets Transmit Count Low */ +#define E1000_HGOTCH 0x04134 /* Host Good Octets Transmit Count High */ +#define E1000_LENERRS 0x04138 /* Length Errors Count */ +#define E1000_SCVPC 0x04228 /* SerDes/SGMII Code Violation Pkt Count */ +#define E1000_HRMPC 0x0A018 /* Header Redirection Missed Packet Count */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_PCS_NPTX 0x04220 /* AN Next Page Transmit - RW */ +#define E1000_PCS_LPABNP 0x04224 /* Link Partner Ability Next Pg - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RLPML 0x05004 /* Rx Long Packet Max Length */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control*/ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ +#define E1000_CIAA 0x05B88 /* Config Indirect Access Address - RW */ +#define E1000_CIAD 0x05B8C /* Config Indirect Access Data - RW */ +#define E1000_VFQA0 0x0B000 /* VLAN Filter Queue Array 0 - RW Array */ +#define E1000_VFQA1 0x0B200 /* VLAN Filter Queue Array 1 - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ 
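The E1000_RA/E1000_RA2 blocks above back the per-entry E1000_RAL()/E1000_RAH() macros defined earlier: each Receive Address Register pair holds one exact-match MAC filter, the low dword carrying the first four bytes of the address and the high word the last two plus control bits. A sketch of programming one entry follows; the address-valid bit (bit 31 of RAH) is defined elsewhere in the driver rather than in this header, so it is open-coded here, and the helper itself is hypothetical.

/* Sketch: load a MAC address into Receive Address Register pair idx
 * and mark the entry valid so the MAC starts matching on it. */
static void example_write_rar(u8 __iomem *csr, int idx, const u8 mac[6])
{
	u32 ral = mac[0] | (mac[1] << 8) | (mac[2] << 16) |
		  ((u32)mac[3] << 24);
	u32 rah = mac[4] | (mac[5] << 8) | BIT(31);	/* address valid */

	writel(ral, csr + E1000_RAL(idx));
	writel(rah, csr + E1000_RAH(idx));
}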
+#define E1000_PBACL 0x05B68 /* MSIx PBA Clear - Read/Write 1's to clear */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_HIBBA 0x8F40 /* Host Interface Buffer Base Address */ +/* Flexible Host Filter Table */ +#define E1000_FHFT(_n) (0x09000 + ((_n) * 0x100)) +/* Ext Flexible Host Filter Table */ +#define E1000_FHFT_EXT(_n) (0x09A00 + ((_n) * 0x100)) + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_CCMCTL 0x05B48 /* CCM Control Register */ +#define E1000_GIOCTL 0x05B44 /* GIO Analog Control Register */ +#define E1000_SCCTL 0x05B4C /* PCIc PLL Configuration Register */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_DCA_ID 0x05B70 /* DCA Requester ID Information - RO */ +#define E1000_DCA_CTRL 0x05B74 /* DCA Control - RW */ +#define E1000_UFUSE 0x05B78 /* UFUSE - RO */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ +#define E1000_FWSTS 0x08F0C /* FW Status */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_IMIR(_i) (0x05A80 + ((_i) * 4)) /* Immediate Interrupt */ +#define E1000_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* Immediate INTR Ext*/ +#define E1000_IMIRVP 0x05AC0 /* Immediate INT Rx VLAN Priority -RW */ +#define E1000_MSIXBM(_i) (0x01600 + ((_i) * 4)) /* MSI-X Alloc Reg -RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* VT Registers */ +#define E1000_SWPBS 0x03004 /* Switch Packet Buffer Size - RW */ +#define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ +#define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ +#define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ +#define E1000_VFRE 0x00C8C /* VF Receive Enables */ +#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ +#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ +#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ +#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */ +#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ +#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ +#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ +#define E1000_VMRCTL 0X05D80 /* Virtual Mirror Rule Control */ +#define E1000_VMRVLAN 0x05D90 /* Virtual Mirror Rule VLAN */ +#define E1000_VMRVM 0x05DA0 /* Virtual Mirror Rule VM */ +#define E1000_MDFB 0x03558 /* Malicious Driver free block */ +#define E1000_LVMMC 
0x03548 /* Last VM Misbehavior cause */ +#define E1000_TXSWC 0x05ACC /* Tx Switch Control */ +#define E1000_SCCRL 0x05DB0 /* Storm Control Control */ +#define E1000_BSCTRH 0x05DB8 /* Broadcast Storm Control Threshold */ +#define E1000_MSCTRH 0x05DBC /* Multicast Storm Control Threshold */ +/* These act per VF so an array friendly macro is used */ +#define E1000_V2PMAILBOX(_n) (0x00C40 + (4 * (_n))) +#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) +#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) +#define E1000_VFVMBMEM(_n) (0x00800 + (_n)) +#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) +/* VLAN Virtual Machine Filter - RW */ +#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) +#define E1000_VMVIR(_n) (0x03700 + (4 * (_n))) +#define E1000_DVMOLR(_n) (0x0C038 + (0x40 * (_n))) /* DMA VM offload */ +#define E1000_VTCTRL(_n) (0x10000 + (0x100 * (_n))) /* VT Control */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */ +#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_TIMADJL 0x0B60C /* Time sync time adjustment offset Low - RW */ +#define E1000_TIMADJH 0x0B610 /* Time sync time adjustment offset High - RW */ +#define E1000_TSAUXC 0x0B640 /* Timesync Auxiliary Control register */ +#define E1000_TRGTTIML0 0x0B644 /* Target Time Register 0 Low - RW */ +#define E1000_TRGTTIMH0 0x0B648 /* Target Time Register 0 High - RW */ +#define E1000_TRGTTIML1 0x0B64C /* Target Time Register 1 Low - RW */ +#define E1000_TRGTTIMH1 0x0B650 /* Target Time Register 1 High - RW */ +#define E1000_FREQOUT0 0x0B654 /* Frequency Out 0 Control Register - RW */ +#define E1000_FREQOUT1 0x0B658 /* Frequency Out 1 Control Register - RW */ +#define E1000_AUXSTMPL0 0x0B65C /* Auxiliary Time Stamp 0 Register Low - RO */ +#define E1000_AUXSTMPH0 0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */ +#define E1000_AUXSTMPL1 0x0B664 /* Auxiliary Time Stamp 1 Register Low - RO */ +#define E1000_AUXSTMPH1 0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */ +#define E1000_SYSTIMR 0x0B6F8 /* System time register Residue */ +#define E1000_TSICR 0x0B66C /* Interrupt Cause Register */ +#define E1000_TSIM 0x0B674 /* Interrupt Mask Register */ + +/* Filtering Registers */ +#define E1000_SAQF(_n) (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */ +#define E1000_DAQF(_n) (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */ +#define E1000_SPQF(_n) (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */ +#define E1000_FTQF(_n) (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */ +#define E1000_TTQF(_n) (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */ +#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */ +#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ + +#define E1000_RTTDCS 0x3600 /* Reedtown Tx Desc plane control and status */ +#define E1000_RTTPCS 0x3474 /* Reedtown Tx Packet 
Plane control and status */ +#define E1000_RTRPCS 0x2474 /* Rx packet plane control and status */ +#define E1000_RTRUP2TC 0x05AC4 /* Rx User Priority to Traffic Class */ +#define E1000_RTTUP2TC 0x0418 /* Transmit User Priority to Traffic Class */ +/* Tx Desc plane TC Rate-scheduler config */ +#define E1000_RTTDTCRC(_n) (0x3610 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTTPTCRC(_n) (0x3480 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Config */ +#define E1000_RTRPTCRC(_n) (0x2480 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler Status */ +#define E1000_RTTDTCRS(_n) (0x3630 + ((_n) * 4)) +/* Tx Desc Plane TC Rate-Scheduler MMW */ +#define E1000_RTTDTCRM(_n) (0x3650 + ((_n) * 4)) +/* Tx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTTPTCRS(_n) (0x34A0 + ((_n) * 4)) +/* Tx Packet plane TC Rate-scheduler MMW */ +#define E1000_RTTPTCRM(_n) (0x34C0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler Status */ +#define E1000_RTRPTCRS(_n) (0x24A0 + ((_n) * 4)) +/* Rx Packet plane TC Rate-Scheduler MMW */ +#define E1000_RTRPTCRM(_n) (0x24C0 + ((_n) * 4)) +/* Tx Desc plane VM Rate-Scheduler MMW*/ +#define E1000_RTTDVMRM(_n) (0x3670 + ((_n) * 4)) +/* Tx BCN Rate-Scheduler MMW */ +#define E1000_RTTBCNRM(_n) (0x3690 + ((_n) * 4)) +#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select */ +#define E1000_RTTDVMRC 0x3608 /* Tx Desc Plane VM Rate-Scheduler Config */ +#define E1000_RTTDVMRS 0x360C /* Tx Desc Plane VM Rate-Scheduler Status */ +#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config */ +#define E1000_RTTBCNRS 0x36B4 /* Tx BCN Rate-Scheduler Status */ +#define E1000_RTTBCNCR 0xB200 /* Tx BCN Control Register */ +#define E1000_RTTBCNTG 0x35A4 /* Tx BCN Tagging */ +#define E1000_RTTBCNCP 0xB208 /* Tx BCN Congestion point */ +#define E1000_RTRBCNCR 0xB20C /* Rx BCN Control Register */ +#define E1000_RTTBCNRD 0x36B8 /* Tx BCN Rate Drift */ +#define E1000_PFCTOP 0x1080 /* Priority Flow Control Type and Opcode */ +#define E1000_RTTBCNIDX 0xB204 /* Tx BCN Congestion Point */ +#define E1000_RTTBCNACH 0x0B214 /* Tx BCN Control High */ +#define E1000_RTTBCNACL 0x0B210 /* Tx BCN Control Low */ + +/* DMA Coalescing registers */ +#define E1000_DMACR 0x02508 /* Control Register */ +#define E1000_DMCTXTH 0x03550 /* Transmit Threshold */ +#define E1000_DMCTLX 0x02514 /* Time to Lx Request */ +#define E1000_DMCRTRH 0x05DD0 /* Receive Packet Rate Threshold */ +#define E1000_DMCCNT 0x05DD4 /* Current Rx Count */ +#define E1000_FCRTC 0x02170 /* Flow Control Rx high watermark */ +#define E1000_PCIEMISC 0x05BB8 /* PCIE misc config register */ + +/* PCIe Parity Status Register */ +#define E1000_PCIEERRSTS 0x05BA8 + +#define E1000_PROXYS 0x5F64 /* Proxying Status */ +#define E1000_PROXYFC 0x5F60 /* Proxying Filter Control */ +/* Thermal sensor configuration and status registers */ +#define E1000_THMJT 0x08100 /* Junction Temperature */ +#define E1000_THLOWTC 0x08104 /* Low Threshold Control */ +#define E1000_THMIDTC 0x08108 /* Mid Threshold Control */ +#define E1000_THHIGHTC 0x0810C /* High Threshold Control */ +#define E1000_THSTAT 0x08110 /* Thermal Sensor Status */ + +/* Energy Efficient Ethernet "EEE" registers */ +#define E1000_IPCNFG 0x0E38 /* Internal PHY Configuration */ +#define E1000_LTRC 0x01A0 /* Latency Tolerance Reporting Control */ +#define E1000_EEER 0x0E30 /* Energy Efficient Ethernet "EEE"*/ +#define E1000_EEE_SU 0x0E34 /* EEE Setup */ +#define E1000_TLPIC 0x4148 /* EEE Tx LPI Count - TLPIC */ +#define E1000_RLPIC 0x414C /* EEE Rx LPI 
Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC 0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC 0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC 0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC 0x0415C /* OS2BMC packets transmitted by host */
+
+#endif
diff --git a/drivers/staging/igb_avb/igb.h b/drivers/staging/igb_avb/igb.h
new file mode 100644
index 000000000000..c8bbf307f908
--- /dev/null
+++ b/drivers/staging/igb_avb/igb.h
@@ -0,0 +1,937 @@
+/*******************************************************************************
+
+  Intel(R) Gigabit Ethernet Linux driver
+  Copyright(c) 2007-2016 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS
+  e1000-devel Mailing List
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include
+
+#ifndef IGB_NO_LRO
+#include <net/tcp.h>
+#endif
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+struct igb_adapter;
+
+struct igb_user_page;
+
+struct igb_user_page {
+	struct igb_user_page *prev;
+	struct igb_user_page *next;
+	struct page *page;
+	dma_addr_t page_dma;
+};
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+#define IGB_DCA
+#endif
+#ifdef IGB_DCA
+#include <linux/dca.h>
+#endif
+
+#include "kcompat.h"
+
+#ifdef HAVE_SCTP
+#include <linux/sctp.h>
+#endif
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+#define IGB_ERR(args...) pr_err("igb: " args)
+
+#define PFX "igb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__func__ , ## args))
+
+#ifdef HAVE_PTP_1588_CLOCK
+#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H
+#include <linux/timecounter.h>
+#else
+#include <linux/clocksource.h>
+#endif /* HAVE_INCLUDE_LINUX_TIMECOUNTER_H */
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#endif /* HAVE_PTP_1588_CLOCK */
+
+#ifdef HAVE_I2C_SUPPORT
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#endif /* HAVE_I2C_SUPPORT */
+
+#include
+typedef u64 cycle_t;
+
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define IGB_INT_MODE_LEGACY 0
+#define IGB_INT_MODE_MSI 1
+#define IGB_INT_MODE_MSIX 2
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
+#define IGB_MIN_TXD 80
+#define IGB_MAX_TXD 4096
+
+#define IGB_DEFAULT_RXD 256
+#define IGB_MIN_RXD 80
+#define IGB_MAX_RXD 4096
+
+#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
+#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
+
+#define NON_Q_VECTORS 1
+#define MAX_Q_VECTORS 10
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES 16
+#define IGB_MAX_RX_QUEUES_82575 4
+#define IGB_MAX_RX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 16
+
+#define IGB_MAX_VF_MC_ENTRIES 30
+#define IGB_MAX_VF_FUNCTIONS 8
+#define IGB_82576_VF_DEV_ID 0x10CA
+#define IGB_I350_VF_DEV_ID 0x1520
+#define IGB_MAX_UTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define OUI_LEN 3
+#define IGB_MAX_VMDQ_QUEUES 8
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 default_vf_vlan_id;
+	u16 vlans_enabled;
+	unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
+	u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];
+	u32 flags;
+	unsigned long last_nack;
+#ifdef IFLA_VF_MAX
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 pf_qos;
+	u16 tx_rate;
+#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
+	bool spoofchk_enabled;
+#endif
+#endif
+};
+
+#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH 8
+#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH 1
+#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
+	adapter->msix_entries) ?
1 : 4) + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we + * reserve 2 more, and skb_shared_info adds an additional 384 more, + * this adds roughly 448 bytes of extra data meaning the smallest + * allocation we could have is 1K. + * i.e. RXBUFFER_512 --> size-1024 slab + */ +/* Supported Rx Buffer Sizes */ +#define IGB_RXBUFFER_256 256 +#define IGB_RXBUFFER_2048 2048 +#define IGB_RXBUFFER_16384 16384 +#define IGB_RX_HDR_LEN IGB_RXBUFFER_256 +#if MAX_SKB_FRAGS < 8 +#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024) +#else +#define IGB_RX_BUFSZ IGB_RXBUFFER_2048 +#endif + + +/* Packet Buffer allocations */ +#define IGB_PBA_BYTES_SHIFT 0xA +#define IGB_TX_HEAD_ADDR_SHIFT 7 +#define IGB_PBA_TX_MASK 0xFFFF0000 + +#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define IGB_EEPROM_APME 0x0400 +#define AUTO_ALL_MODES 0 + +#ifndef IGB_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define IGB_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define IGB_MNG_VLAN_NONE -1 + +#ifndef IGB_NO_LRO +#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/ +struct igb_lro_stats { + u32 flushed; + u32 coal; +}; + +/* + * igb_lro_header - header format to be aggregated by LRO + * @iph: IP header without options + * @tcp: TCP header + * @ts: Optional TCP timestamp data in TCP options + * + * This structure relies on the check above that verifies that the header + * is IPv4 and does not contain any options. + */ +struct igb_lrohdr { + struct iphdr iph; + struct tcphdr th; + __be32 ts[0]; +}; + +struct igb_lro_list { + struct sk_buff_head active; + struct igb_lro_stats stats; +}; + +#endif /* IGB_NO_LRO */ +struct igb_cb { +#ifndef IGB_NO_LRO +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; +#endif + __be32 tsecr; /* timestamp echo response */ + u32 tsval; /* timestamp value in host order */ + u32 next_seq; /* next expected sequence number */ + u16 free; /* 65521 minus total size */ + u16 mss; /* size of data portion of packet */ + u16 append_cnt; /* number of skb's appended */ +#endif /* IGB_NO_LRO */ +#ifdef HAVE_VLAN_RX_REGISTER + u16 vid; /* VLAN tag */ +#endif +}; +#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb) + +enum igb_tx_flags { + /* cmd_type flags */ + IGB_TX_FLAGS_VLAN = 0x01, + IGB_TX_FLAGS_TSO = 0x02, + IGB_TX_FLAGS_TSTAMP = 0x04, + + /* olinfo flags */ + IGB_TX_FLAGS_IPV4 = 0x10, + IGB_TX_FLAGS_CSUM = 0x20, +}; + +/* VLAN info */ +#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 +#define IGB_TX_FLAGS_VLAN_SHIFT 16 + +/* + * The largest size we can write to the descriptor is 65535. In order to + * maintain a power of two alignment we have to limit ourselves to 32K. 
+ */ +#define IGB_MAX_TXD_PWR 15 +#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR) + +/* Tx Descriptors needed, worst case */ +#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD) +#ifndef MAX_SKB_FRAGS +#define DESC_NEEDED 4 +#elif (MAX_SKB_FRAGS < 16) +#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) +#else +#define DESC_NEEDED (MAX_SKB_FRAGS + 4) +#endif + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +struct igb_tx_buffer { + union e1000_adv_tx_desc *next_to_watch; + unsigned long time_stamp; + struct sk_buff *skb; + unsigned int bytecount; + u16 gso_segs; + __be16 protocol; + + DEFINE_DMA_UNMAP_ADDR(dma); + DEFINE_DMA_UNMAP_LEN(len); + u32 tx_flags; +}; + +struct igb_rx_buffer { + dma_addr_t dma; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + struct sk_buff *skb; +#else + struct page *page; + u32 page_offset; +#endif +}; + +struct igb_tx_queue_stats { + u64 packets; + u64 bytes; + u64 restart_queue; +}; + +struct igb_rx_queue_stats { + u64 packets; + u64 bytes; + u64 drops; + u64 csum_err; + u64 alloc_failed; +}; + +struct igb_rx_packet_stats { + u64 ipv4_packets; /* IPv4 headers processed */ + u64 ipv4e_packets; /* IPv4E headers with extensions processed */ + u64 ipv6_packets; /* IPv6 headers processed */ + u64 ipv6e_packets; /* IPv6E headers with extensions processed */ + u64 tcp_packets; /* TCP headers processed */ + u64 udp_packets; /* UDP headers processed */ + u64 sctp_packets; /* SCTP headers processed */ + u64 nfs_packets; /* NFS headers processe */ + u64 other_packets; +}; + +struct igb_ring_container { + struct igb_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ + unsigned int total_packets; /* total packets processed this int */ + u16 work_limit; /* total work allowed per interrupt */ + u8 count; /* total number of rings in vector */ + u8 itr; /* current ITR setting for ring */ +}; + +struct igb_ring { + struct igb_q_vector *q_vector; /* backlink to q_vector */ + struct net_device *netdev; /* back pointer to net_device */ + struct device *dev; /* device for dma mapping */ + union { /* array of buffer info structs */ + struct igb_tx_buffer *tx_buffer_info; + struct igb_rx_buffer *rx_buffer_info; + }; + void *desc; /* descriptor ring memory */ + unsigned long flags; /* ring specific flags */ + void __iomem *tail; /* pointer to ring tail register */ + dma_addr_t dma; /* phys address of the ring */ + unsigned int size; /* length of desc. ring in bytes */ + + u16 count; /* number of desc. 
in the ring */ + u8 queue_index; /* logical index of the ring*/ + u8 reg_idx; /* physical index of the ring */ + + /* everything past this point are written often */ + u16 next_to_clean; + u16 next_to_use; + u16 next_to_alloc; + + union { + /* TX */ + struct { + struct igb_tx_queue_stats tx_stats; + }; + /* RX */ + struct { + struct igb_rx_queue_stats rx_stats; + struct igb_rx_packet_stats pkt_stats; +#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT + u16 rx_buffer_len; +#else + struct sk_buff *skb; +#endif + }; + }; +#ifdef CONFIG_IGB_VMDQ_NETDEV + struct net_device *vmdq_netdev; + int vqueue_index; /* queue index for virtual netdev */ +#endif +} ____cacheline_internodealigned_in_smp; + +struct igb_q_vector { + struct igb_adapter *adapter; /* backlink */ + int cpu; /* CPU for DCA */ + u32 eims_value; /* EIMS mask value */ + + u16 itr_val; + u8 set_itr; + void __iomem *itr_register; + + struct igb_ring_container rx, tx; + + struct napi_struct napi; +#ifndef IGB_NO_LRO + struct igb_lro_list lrolist; /* LRO list for queue vector*/ +#endif + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; +#ifndef HAVE_NETDEV_NAPI_LIST + struct net_device poll_dev; +#endif + + /* for dynamic allocation of rings associated with this q_vector */ + struct igb_ring ring[0] ____cacheline_internodealigned_in_smp; +}; + +enum e1000_ring_flags_t { +#ifndef HAVE_NDO_SET_FEATURES + IGB_RING_FLAG_RX_CSUM, +#endif + IGB_RING_FLAG_RX_SCTP_CSUM, + IGB_RING_FLAG_RX_LB_VLAN_BSWAP, + IGB_RING_FLAG_TX_CTX_IDX, + IGB_RING_FLAG_TX_DETECT_HANG, +}; + +struct igb_mac_addr { + u8 addr[ETH_ALEN]; + u16 queue; + u16 state; /* bitmask */ +}; +#define IGB_MAC_STATE_DEFAULT 0x1 +#define IGB_MAC_STATE_MODIFIED 0x2 +#define IGB_MAC_STATE_IN_USE 0x4 + +#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS) + +#define IGB_RX_DESC(R, i) \ + (&(((union e1000_adv_rx_desc *)((R)->desc))[i])) +#define IGB_TX_DESC(R, i) \ + (&(((union e1000_adv_tx_desc *)((R)->desc))[i])) +#define IGB_TX_CTXTDESC(R, i) \ + (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i])) + +#ifdef CONFIG_IGB_VMDQ_NETDEV +#define netdev_ring(ring) \ + ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev)) +#define ring_queue_index(ring) \ + ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index)) +#else +#define netdev_ring(ring) (ring->netdev) +#define ring_queue_index(ring) (ring->queue_index) +#endif /* CONFIG_IGB_VMDQ_NETDEV */ + +/* igb_test_staterr - tests bits within Rx descriptor status and error fields */ +static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc, + const u32 stat_err_bits) +{ + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); +} + +/* igb_desc_unused - calculate if we have unused descriptors */ +static inline u16 igb_desc_unused(const struct igb_ring *ring) +{ + u16 ntc = ring->next_to_clean; + u16 ntu = ring->next_to_use; + + return ((ntc > ntu) ? 
0 : ring->count) + ntc - ntu - 1; +} + +#ifdef CONFIG_BQL +static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) +{ + return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); +} +#endif /* CONFIG_BQL */ + +struct igb_therm_proc_data { + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor_data; +}; + +#ifdef IGB_HWMON +#define IGB_HWMON_TYPE_LOC 0 +#define IGB_HWMON_TYPE_TEMP 1 +#define IGB_HWMON_TYPE_CAUTION 2 +#define IGB_HWMON_TYPE_MAX 3 + +struct hwmon_attr { + struct device_attribute dev_attr; + struct e1000_hw *hw; + struct e1000_thermal_diode_data *sensor; + char name[12]; + }; + +struct hwmon_buff { + struct device *device; + struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +#endif /* IGB_HWMON */ +#define IGB_N_EXTTS 2 +#define IGB_N_PEROUT 2 +#define IGB_N_SDP 4 +#ifdef ETHTOOL_GRXFHINDIR +#define IGB_RETA_SIZE 128 +#endif /* ETHTOOL_GRXFHINDIR */ + +/* board specific private data structure */ +struct igb_adapter { +#ifdef HAVE_VLAN_RX_REGISTER + /* vlgrp must be first member of structure */ + struct vlan_group *vlgrp; +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + struct net_device *netdev; + + unsigned long state; + unsigned int flags; + + unsigned int num_q_vectors; + struct msix_entry *msix_entries; + + + /* TX */ + u16 tx_work_limit; + u32 tx_timeout_count; + int num_tx_queues; + struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES]; + + /* RX */ + int num_rx_queues; + struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES]; + + struct timer_list watchdog_timer; + struct timer_list dma_err_timer; + struct timer_list phy_info_timer; + u16 mng_vlan_id; + u32 bd_number; + u32 wol; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + u8 port_num; + + /* Interrupt Throttle Rate */ + u32 rx_itr_setting; + u32 tx_itr_setting; + + struct work_struct reset_task; + struct work_struct watchdog_task; + struct work_struct dma_err_task; + bool fc_autoneg; + u8 tx_timeout_factor; + +#ifdef DEBUG + bool tx_hang_detected; + bool disable_hw_reset; +#endif + u32 max_frame_size; + + /* OS defined structs */ + struct pci_dev *pdev; + /* user-dma specific variables */ + u32 uring_tx_init; + u32 uring_rx_init; +#ifndef HAVE_NETDEV_STATS_IN_NETDEV + struct net_device_stats net_stats; +#endif +#ifndef IGB_NO_LRO + struct igb_lro_stats lro_stats; +#endif + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + +#ifdef ETHTOOL_TEST + u32 test_icr; + struct igb_ring test_tx_ring; + struct igb_ring test_rx_ring; +#endif + + int msg_enable; + + struct igb_q_vector *q_vector[MAX_Q_VECTORS]; + u32 eims_enable_mask; + u32 eims_other; + + /* to not mess up cache alignment, always add to the bottom */ + u32 *config_space; + u16 tx_ring_count; + u16 rx_ring_count; + struct vf_data_storage *vf_data; +#ifdef IFLA_VF_MAX + int vf_rate_link_speed; +#endif + u32 lli_port; + u32 lli_size; + unsigned int vfs_allocated_count; + /* Malicious Driver Detection flag. 
Valid only when SR-IOV is enabled */ + bool mdd; + int int_mode; + u32 rss_queues; + u32 tss_queues; + u32 vmdq_pools; + char fw_version[32]; + u32 wvbr; + struct igb_mac_addr *mac_table; +#ifdef CONFIG_IGB_VMDQ_NETDEV + struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES]; +#endif + int vferr_refcount; + int dmac; + u32 *shadow_vfta; + + /* External Thermal Sensor support flag */ + bool ets; +#ifdef IGB_HWMON + struct hwmon_buff igb_hwmon_buff; +#else /* IGB_HWMON */ +#ifdef IGB_PROCFS + struct proc_dir_entry *eth_dir; + struct proc_dir_entry *info_dir; + struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS]; + struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS]; + bool old_lsc; +#endif /* IGB_PROCFS */ +#endif /* IGB_HWMON */ + u32 etrack_id; + +#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct delayed_work ptp_overflow_work; + struct work_struct ptp_tx_work; + struct sk_buff *ptp_tx_skb; + struct hwtstamp_config tstamp_config; + unsigned long ptp_tx_start; + unsigned long last_rx_ptp_check; + unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; + struct cyclecounter cc; + struct timecounter tc; + u32 tx_hwtstamp_timeouts; + u32 rx_hwtstamp_cleared; + +#ifdef HAVE_PTP_1588_CLOCK_PINS + struct ptp_pin_desc sdp_config[IGB_N_SDP]; +#endif /* HAVE_PTP_1588_CLOCK_PINS */ + struct { + struct timespec64 start; + struct timespec64 period; + } perout[IGB_N_PEROUT]; +#endif /* HAVE_PTP_1588_CLOCK */ + +#ifdef HAVE_I2C_SUPPORT + struct i2c_algo_bit_data i2c_algo; + struct i2c_adapter i2c_adap; + struct i2c_client *i2c_client; +#endif /* HAVE_I2C_SUPPORT */ + unsigned long link_check_timeout; + + int devrc; + + int copper_tries; + u16 eee_advert; +#ifdef ETHTOOL_GRXFHINDIR + u32 rss_indir_tbl_init; + u8 rss_indir_tbl[IGB_RETA_SIZE]; +#endif + struct mutex lock; +}; + +#ifdef CONFIG_IGB_VMDQ_NETDEV +struct igb_vmdq_adapter { +#ifdef HAVE_VLAN_RX_REGISTER + /* vlgrp must be first member of structure */ + struct vlan_group *vlgrp; +#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +#endif + struct igb_adapter *real_adapter; + struct net_device *vnetdev; + struct net_device_stats net_stats; + struct igb_ring *tx_ring; + struct igb_ring *rx_ring; +}; +#endif + +#define IGB_FLAG_HAS_MSI (1 << 0) +#define IGB_FLAG_DCA_ENABLED (1 << 1) +#define IGB_FLAG_LLI_PUSH (1 << 2) +#define IGB_FLAG_QUAD_PORT_A (1 << 3) +#define IGB_FLAG_QUEUE_PAIRS (1 << 4) +#define IGB_FLAG_EEE (1 << 5) +#define IGB_FLAG_DMAC (1 << 6) +#define IGB_FLAG_DETECT_BAD_DMA (1 << 7) +#define IGB_FLAG_PTP (1 << 8) +#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9) +#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10) +#define IGB_FLAG_WOL_SUPPORTED (1 << 11) +#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12) +#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13) +#define IGB_FLAG_MEDIA_RESET (1 << 14) +#define IGB_FLAG_MAS_ENABLE (1 << 15) + +/* Media Auto Sense */ +#define IGB_MAS_ENABLE_0 0X0001 +#define IGB_MAS_ENABLE_1 0X0002 +#define IGB_MAS_ENABLE_2 0X0004 +#define IGB_MAS_ENABLE_3 0X0008 + +#define IGB_MIN_TXPBSIZE 20408 +#define IGB_TX_BUF_4096 4096 + +#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ + +/* DMA Coalescing defines */ +#define IGB_DMAC_DISABLE 0 +#define IGB_DMAC_MIN 250 +#define IGB_DMAC_500 500 +#define IGB_DMAC_EN_DEFAULT 1000 +#define IGB_DMAC_2000 2000 +#define IGB_DMAC_3000 3000 +#define IGB_DMAC_4000 4000 +#define IGB_DMAC_5000 5000 +#define IGB_DMAC_6000 6000 +#define IGB_DMAC_7000 7000 +#define IGB_DMAC_8000 8000 +#define IGB_DMAC_9000 9000 
+
+enum e1000_state_t {
+	__IGB_TESTING,
+	__IGB_RESETTING,
+	__IGB_DOWN,
+	__IGB_PTP_TX_IN_PROGRESS,
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+extern void igb_up(struct igb_adapter *);
+extern void igb_down(struct igb_adapter *);
+extern void igb_reinit_locked(struct igb_adapter *);
+extern void igb_reset(struct igb_adapter *);
+#ifdef ETHTOOL_SRXFHINDIR
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
+#endif
+extern int igb_set_spd_dplx(struct igb_adapter *, u16);
+extern int igb_setup_tx_resources(struct igb_ring *);
+extern int igb_setup_rx_resources(struct igb_ring *);
+extern void igb_free_tx_resources(struct igb_ring *);
+extern void igb_free_rx_resources(struct igb_ring *);
+extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+extern void igb_setup_tctl(struct igb_adapter *);
+extern void igb_setup_rctl(struct igb_adapter *);
+extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
+extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
+					   struct igb_tx_buffer *);
+extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
+extern void igb_clean_rx_ring(struct igb_ring *);
+extern int igb_setup_queues(struct igb_adapter *adapter);
+extern void igb_update_stats(struct igb_adapter *);
+extern bool igb_has_link(struct igb_adapter *adapter);
+extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_check_options(struct igb_adapter *);
+extern void igb_power_up_link(struct igb_adapter *);
+#ifdef HAVE_PTP_1588_CLOCK
+extern void igb_ptp_init(struct igb_adapter *adapter);
+extern void igb_ptp_stop(struct igb_adapter *adapter);
+extern void igb_ptp_reset(struct igb_adapter *adapter);
+extern void igb_ptp_tx_work(struct work_struct *work);
+extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
+extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+				struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+				unsigned char *va, struct sk_buff *skb);
+extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
+				  struct ifreq *ifr, int cmd);
+#endif /* HAVE_PTP_1588_CLOCK */
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *);
+#endif
+extern int igb_write_mc_addr_list(struct net_device *netdev);
+extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_del_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
+extern int igb_available_rars(struct igb_adapter *adapter);
+extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
+extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
+extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
+#ifndef HAVE_VLAN_RX_REGISTER
+extern void igb_vlan_mode(struct net_device *, u32);
+#endif
+
+#define E1000_PCS_CFG_IGN_SD 1
+
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
+#ifdef IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#else
+#ifdef IGB_PROCFS
+int igb_procfs_init(struct igb_adapter *adapter);
+void igb_procfs_exit(struct igb_adapter *adapter);
+int igb_procfs_topdir_init(void);
+void igb_procfs_topdir_exit(void);
+#endif /* IGB_PROCFS */
+#endif /* IGB_HWMON */
+
+#define IGB_BIND _IOW('E', 200, int)
+#define IGB_UNBIND _IOW('E', 201, int)
+#define IGB_MAPRING _IOW('E', 202, int)
+#define IGB_MAP_TX_RING IGB_MAPRING
+#define IGB_UNMAPRING _IOW('E', 203, int)
+#define IGB_UNMAP_TX_RING IGB_UNMAPRING
+#define IGB_MAPBUF _IOW('E', 204, int)
+#define IGB_UNMAPBUF _IOW('E', 205, int)
+#define IGB_LINKSPEED _IOW('E', 206, int)
+#define IGB_MAP_RX_RING _IOW('E', 207, int)
+#define IGB_UNMAP_RX_RING _IOW('E', 208, int)
+
+/* Set of newly defined ioctl calls for new libigb compatibility.
+ * Each of them is an equivalent of the old ioctl with a changed
+ * numbering convention: new_ioctl = old_ioctl + 100.
+ */
+#define IGB_IOCTL_MAPRING _IOW('E', 302, int)
+#define IGB_IOCTL_MAP_TX_RING IGB_IOCTL_MAPRING
+#define IGB_IOCTL_UNMAPRING _IOW('E', 303, int)
+#define IGB_IOCTL_UNMAP_TX_RING IGB_IOCTL_UNMAPRING
+#define IGB_IOCTL_MAPBUF _IOW('E', 304, int)
+#define IGB_IOCTL_UNMAPBUF _IOW('E', 305, int)
+#define IGB_IOCTL_MAP_RX_RING _IOW('E', 307, int)
+#define IGB_IOCTL_UNMAP_RX_RING _IOW('E', 308, int)
+/* END */
+
+#define IGB_BIND_NAMESZ 24
+
+struct igb_bind_cmd {
+	char iface[IGB_BIND_NAMESZ];
+	u32 mmap_size;
+};
+
+struct igb_pci_lookup {
+	struct igb_adapter *adapter;
+	char *pci_info;
+};
+
+/* used with both map/unmap ring & buf ioctls */
+struct igb_buf_cmd {
+	u64 physaddr;
+	u32 queue;
+	u32 mmap_size;
+	u64 pa;
+};
+
+struct igb_link_cmd {
+	u32 up;
+	u32 speed;
+	u32 duplex;
+};
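+
+/* Hypothetical user-space sketch, not part of this header: a client
+ * that already holds an open, bound descriptor fd could map TX ring 0
+ * through the new-style ioctls.  Only struct igb_buf_cmd and the
+ * command codes above come from this header; fd, the error handling
+ * and the follow-up mmap() are assumptions about the caller.
+ */
+#if 0
+	struct igb_buf_cmd cmd = { .queue = 0 };
+
+	if (ioctl(fd, IGB_IOCTL_MAP_TX_RING, &cmd) == 0) {
+		/* cmd.physaddr and cmd.mmap_size now describe the ring
+		 * backing store, typically handed to mmap() next.
+		 */
+	}
+#endif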
+
+struct igb_private_data {
+	struct igb_adapter *adapter;
+	/* user-dma specific variable for buffer */
+	struct igb_user_page *userpages;
+	/* user-dma specific variables for TX and RX */
+	u32 uring_tx_init;
+	u32 uring_rx_init;
+};
+
+#endif /* _IGB_H_ */
diff --git a/drivers/staging/igb_avb/igb_avb.7 b/drivers/staging/igb_avb/igb_avb.7
new file mode 100755
index 000000000000..d17b3de3c61e
--- /dev/null
+++ b/drivers/staging/igb_avb/igb_avb.7
@@ -0,0 +1,253 @@
+.\" LICENSE
+.\"
+.\" This software program is released under the terms of a license agreement between you ('Licensee') and Intel. Do not use or load this software or any associated materials (collectively, the 'Software') until you have carefully read the full terms and conditions of the LICENSE located in this software package. By loading or using the Software, you agree to the terms of this Agreement. If you do not agree with the terms of this Agreement, do not install or use the Software.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.TH igb 7 "January 5, 2012"
+
+.SH NAME
+igb \- This file describes the Linux* Base Driver for the Gigabit Family of Adapters.
+.SH SYNOPSIS
+.PD 0.4v
+modprobe igb [