diff mbox

Add Nvlink documentation including device tree bindings

Message ID 1446088350-7858-1-git-send-email-alistair@popple.id.au
State Accepted
Headers show

Commit Message

Alistair Popple Oct. 29, 2015, 3:12 a.m. UTC
Signed-off-by: Alistair Popple <alistair@popple.id.au>
---
 doc/device-tree/nvlink.txt | 137 ++++++++++++++++++++++++++++++++++++++++++
 doc/nvlink.txt             | 146 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 283 insertions(+)
 create mode 100644 doc/device-tree/nvlink.txt
 create mode 100644 doc/nvlink.txt

Comments

Stewart Smith Nov. 12, 2015, 6:11 a.m. UTC | #1
Alistair Popple <alistair@popple.id.au> writes:
> Signed-off-by: Alistair Popple <alistair@popple.id.au>
> ---
>  doc/device-tree/nvlink.txt | 137 ++++++++++++++++++++++++++++++++++++++++++
>  doc/nvlink.txt             | 146
>  +++++++++++++++++++++++++++++++++++++++++++++

Thanks, merged!
as of 03c6c2d.
diff mbox

Patch

diff --git a/doc/device-tree/nvlink.txt b/doc/device-tree/nvlink.txt
new file mode 100644
index 0000000..70e9545
--- /dev/null
+++ b/doc/device-tree/nvlink.txt
@@ -0,0 +1,137 @@ 
+===========================
+Nvlink Device Tree Bindings
+===========================
+
+See doc/nvlink.txt for general Nvlink information.
+
+NPU bindings:
+
+xscom@3fc0000000000 {
+        npu@8013c00 {
+			reg = <0x8013c00 0x2c>;
+                        compatible = "ibm,power8-npu";
+                        ibm,npu-index = <0x0>;
+                        ibm,npu-links = <0x4>;
+
+; Number of links wired up to this npu.
+
+                        phandle = <0x100002bc>;
+                        linux,phandle = <0x100002bc>;
+
+                        link@0 {
+                                ibm,npu-pbcq = <0x1000000b>;
+
+; phandle to the pbcq which connects to the GPU.
+
+				ibm,npu-phy = <0x80000000 0x8010c3f>;
+
+; SCOM address of the IBM PHY controlling this link.
+
+				compatible = "ibm,npu-link";
+                                ibm,npu-lane-mask = <0xff>;
+
+; Mask specifying which IBM PHY lanes are used for this link.
+
+				phandle = <0x100002bd>;
+                                ibm,npu-link-index = <0x0>;
+
+; Hardware link index. Naples systems contain links at index 0,1,4 & 5.
+; Used to calculate various address offsets.
+
+				linux,phandle = <0x100002bd>;
+                        };
+
+                        link@1 {
+                                ibm,npu-pbcq = <0x1000000b>;
+                                ibm,npu-phy = <0x80000000 0x8010c3f>;
+                                compatible = "ibm,npu-link";
+                                ibm,npu-lane-mask = <0xff00>;
+                                phandle = <0x100002be>;
+                                ibm,npu-link-index = <0x1>;
+                                linux,phandle = <0x100002be>;
+                        };
+
+                        link@4 {
+                                ibm,npu-pbcq = <0x1000000a>;
+                                ibm,npu-phy = <0x80000000 0x8010c7f>;
+                                compatible = "ibm,npu-link";
+                                ibm,npu-lane-mask = <0xff00>;
+                                phandle = <0x100002bf>;
+                                ibm,npu-link-index = <0x4>;
+                                linux,phandle = <0x100002bf>;
+			};
+
+			link@5 {
+                                ibm,npu-pbcq = <0x1000000a>;
+                                ibm,npu-phy = <0x80000000 0x8010c7f>;
+                                compatible = "ibm,npu-link";
+                                ibm,npu-lane-mask = <0xff>;
+                                phandle = <0x100002c0>;
+                                ibm,npu-link-index = <0x5>;
+                                linux,phandle = <0x100002c0>;
+                        };
+	};
+};
+
+Emulated PCI device bindings:
+
+       pciex@3fff000400000 {
+                ibm,npcq = <0x100002bc>;
+
+; phandle to the NPU node. Used to find associated PCI GPU devices.
+
+                compatible = "ibm,power8-npu-pciex", "ibm,ioda2-npu-phb";
+
+		pci@0 {
+                        reg = <0x0 0x0 0x0 0x0 0x0>;
+                        revision-id = <0x0>;
+                        interrupts = <0x1>;
+                        device-id = <0x4ea>;
+                        ibm,pci-config-space-type = <0x1>;
+                        vendor-id = <0x1014>;
+                        ibm,gpu = <0x100002f7>;
+
+; phandle pointing to the associated GPU PCI device node
+
+  	  	        phandle = <0x100002fc>;
+                };
+
+                pci@1 {
+                        reg = <0x800 0x0 0x0 0x0 0x0>;
+                        revision-id = <0x0>;
+                        interrupts = <0x1>;
+                        device-id = <0x4ea>;
+                        ibm,pci-config-space-type = <0x1>;
+                        vendor-id = <0x1014>;
+                        ibm,gpu = <0x100002f5>;
+                        phandle = <0x100002fe>;
+                        class-code = <0x60400>;
+                        linux,phandle = <0x100002fe>;
+                };
+
+                pci@0,1 {
+                        reg = <0x100 0x0 0x0 0x0 0x0>;
+                        revision-id = <0x0>;
+                        interrupts = <0x2>;
+                        device-id = <0x4ea>;
+                        ibm,pci-config-space-type = <0x1>;
+                        vendor-id = <0x1014>;
+                        ibm,gpu = <0x100002f7>;
+                        phandle = <0x100002fd>;
+                        class-code = <0x60400>;
+                        linux,phandle = <0x100002fd>;
+                };
+
+                pci@1,1 {
+                       reg = <0x900 0x0 0x0 0x0 0x0>;
+                        revision-id = <0x0>;
+                        interrupts = <0x2>;
+                        device-id = <0x4ea>;
+                        ibm,pci-config-space-type = <0x1>;
+                        vendor-id = <0x1014>;
+                        ibm,gpu = <0x100002f5>;
+                        phandle = <0x100002ff>;
+                        class-code = <0x60400>;
+                        linux,phandle = <0x100002ff>;
+                };
+        };
diff --git a/doc/nvlink.txt b/doc/nvlink.txt
new file mode 100644
index 0000000..5673de4
--- /dev/null
+++ b/doc/nvlink.txt
@@ -0,0 +1,146 @@ 
+OPAL/Skiboot Nvlink Interface Documentation
+----------------------------------------------------------------------
+
+========
+Overview
+========
+
+NV-Link is a high speed interconnect that is used in conjunction with
+a PCI-E connection to create an interface between chips that provides
+very high data bandwidth. The PCI-E connection is used as the control
+path to initiate and report status of large data transfers. The data
+transfers themselves are sent over the NV-Link.
+
+On IBM Power systems the NV-Link hardware is similar to our standard
+PCI hardware so to maximise code reuse the NV-Link is exposed as an
+emulated PCI device through system firmware (OPAL/skiboot). Thus each
+NV-Link capable device will appear as two devices on a system, the
+real PCI-E device and at least one emulated PCI device used for the
+NV-Link.
+
+Presently the NV-Link is only capable of data transfers initiated by
+the target, thus the emulated PCI device will only handle registers
+for link initialisation, DMA transfers and error reporting (EEH).
+
+====================
+Emulated PCI Devices
+====================
+
+Each link will be exported as an emulated PCI device with a minimum of
+two emulated PCI devices per GPU. Emulated PCI devices are grouped per
+GPU.
+
+The emulated PCI device will be exported as a standard PCI device by
+the Linux kernel. It has a standard PCI configuration space to expose
+necessary device parameters. The only functionality available is
+related to the setup of DMA windows.
+
+Configuration Space Parameters
+-----------------------------
+
+Vendor ID = 0x1014         (IBM)
+Device ID = 0x04ea
+Revision ID = 0x00
+Class = 0x068000	   (Bridge Device Other, ProgIf = 0x0)
+BAR0/1 = TL/DL Registers
+
+TL/DL Registers
+---------------
+
+Each link has 128KB of TL/DL registers. These will always be mapped
+to 64-bit BAR#0 of the emulated PCI device configuration space.
+
+BAR#0 + 128K +-----------+
+      	     | NTL (64K) |
+BAR#0 + 64K  +-----------+
+      	     | DL (64K)  |
+BAR#0	     +-----------+
+
+Vendor Specific Capabilities
+----------------------------
+
++-----------------+----------------+----------------+----------------+
+|     Reserved    |   Cap Length   |  Next Cap Ptr  |  Cap ID (0x09) |
++-----------------+----------------+----------------+----------------+
+|                      Procedure Status Register                     |
++--------------------------------------------------------------------+
+|                      Procedure Control Register                    |
++---------------------------------------------------+----------------+
+|                      Reserved                     |   Link Number  |
++---------------------------------------------------+----------------+
+
+Procedure Control Register
+
+   Used to start hardware procedures.
+
+   Writes will start the corresponding procedure and set bit 31 in the
+   procedure status register. This register must not be written while
+   bit 31 is set in the status register. Performing a write while
+   another procedure is already in progress will abort that procedure.
+
+   Reads will return the in progress procedure or the last completed
+   procedure number depending on the procedure status field.
+
+   Procedure Numbers:
+    0  - Abort in-progress procedure
+    1  - NOP
+    2  - Unsupported procedure
+    3  - Unsupported procedure
+    4  - Naples PHY - RESET
+    5  - Naples PHY - TX_ZCAL
+    6  - Naples PHY - RX_DCCAL
+    7  - Naples PHY - TX_RXCAL_ENABLE
+    8  - Naples PHY - TX_RXCAL_DISABLE
+    9  - Naples PHY - RX_TRAINING
+    10 - Naples NPU - RESET
+    11 - Naples PHY - PHY preterminate
+    12 - Naples PHY - PHY terminated
+
+   Procedure 5 (TX_ZCAL) should only be run once. System firmware will
+   ensure this so device drivers may call this procedure multiple
+   times.
+
+Procedure Status Register
+
+   The procedure status register is used to determine when execution
+   of the procedure number in the control register is complete and if
+   it completed successfully.
+
+   This register must be polled frequently to allow system firmware to
+   execute the procedures.
+
+   Fields:
+       Bit 31 - Procedure in progress
+       Bit 30 - Procedure complete
+       Bit 3-0 - Procedure completion code
+
+   Procedure completion codes:
+       0 - Procedure completed successfully.
+       1 - Transient failure. Procedure should be rerun.
+       2 - Permanent failure. Procedure will never complete successfully.
+       3 - Procedure aborted.
+       4 - Unsupported procedure.
+
+Link Number
+
+   Physical link number this emulated PCI device is associated
+   with. One of 0, 1, 4 or 5 (links 2 & 3 do not exist on Naples).
+
+Reserved
+
+   These fields must be ignored and no value should be assumed.
+
+Interrupts
+----------
+
+Each link has a single DL/TL interrupt assigned to it. These will be
+exposed as an LSI via the emulated PCI device. There are 4 links
+consuming 4 LSI interrupts. The 4 remaining interrupts supported by the
+corresponding PHB will be routed to OS platform for the purpose of error
+reporting.
+
+====================
+Device Tree Bindings
+====================
+
+See doc/device-tree/nvlink.txt