From patchwork Tue Feb 13 23:26:48 2018
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Bryan Whitehead
X-Patchwork-Id: 873199
X-Patchwork-Delegate: davem@davemloft.net
From: Bryan Whitehead
To:
CC:
Subject: [PATCH v1 net-next 1/3] lan743x: Add main source file for new lan743x driver
Date: Tue, 13 Feb 2018 18:26:48 -0500
Message-ID: <1518564410-8020-2-git-send-email-Bryan.Whitehead@microchip.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1518564410-8020-1-git-send-email-Bryan.Whitehead@microchip.com>
References: <1518564410-8020-1-git-send-email-Bryan.Whitehead@microchip.com>
X-Mailing-List: netdev@vger.kernel.org

Add main source files for new lan743x driver

Signed-off-by: Bryan Whitehead
---
 drivers/net/ethernet/microchip/lan743x_main.c | 2964 +++++++++++++++++++++++++
 drivers/net/ethernet/microchip/lan743x_main.h | 1331 +++++++++++
 2 files changed, 4295 insertions(+)
 create mode 100644 drivers/net/ethernet/microchip/lan743x_main.c
 create mode 100644 drivers/net/ethernet/microchip/lan743x_main.h

diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
new file mode 100644
index 0000000..6cfb439
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -0,0 +1,2964 @@
+/*
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "lan743x_main.h" + +#define LAN743X_COMPONENT_FLAG_PCI BIT(0) +#define LAN743X_COMPONENT_FLAG_CSR BIT(1) +#define LAN743X_COMPONENT_FLAG_INTR BIT(2) +#define LAN743X_COMPONENT_FLAG_DP BIT(3) +#define LAN743X_COMPONENT_FLAG_MAC BIT(4) +#define LAN743X_COMPONENT_FLAG_PHY BIT(5) +#define LAN743X_COMPONENT_FLAG_RFE BIT(6) +#define LAN743X_COMPONENT_FLAG_FCT BIT(7) +#define LAN743X_COMPONENT_FLAG_TX(channel) BIT(16 + (channel)) +#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel)) + +#define LAN743X_INIT_FLAG_NETDEV_REGISTERED BIT(24) +#define LAN743X_INIT_FLAG_MDIOBUS_ALLOCATED BIT(25) +#define LAN743X_INIT_FLAG_MDIOBUS_REGISTERED BIT(26) + +/* PCI */ +#define INIT_FLAG_PCI_DEVICE_ENABLED BIT(0) +#define INIT_FLAG_PCI_REGIONS_REQUESTED BIT(1) +#define INIT_FLAG_CSR_MAPPED BIT(2) + +static void lan743x_pci_cleanup(struct lan743x_adapter *adapter) +{ + struct lan743x_pci *pci = &adapter->pci; + + if (pci->init_flags & INIT_FLAG_PCI_REGIONS_REQUESTED) { + pci_release_selected_regions(pci->pdev, + pci_select_bars(pci->pdev, + IORESOURCE_MEM)); + pci->init_flags &= ~INIT_FLAG_PCI_REGIONS_REQUESTED; + } + if (pci->init_flags & INIT_FLAG_PCI_DEVICE_ENABLED) { + pci_disable_device(pci->pdev); + pci->init_flags &= ~INIT_FLAG_PCI_DEVICE_ENABLED; + } +} + +static int lan743x_pci_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + struct lan743x_pci *pci = &adapter->pci; + unsigned long bars = 0; + int ret; + + pci->pdev = pdev; + ret = pci_enable_device_mem(pdev); + if (ret) + goto clean_up; + pci->init_flags |= INIT_FLAG_PCI_DEVICE_ENABLED; + + netif_info(adapter, probe, adapter->netdev, + "PCI: Vendor ID = 0x%04X, Device ID = 0x%04X\n", + pdev->vendor, pdev->device); + bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (!test_bit(0, &bars)) + goto clean_up; + + ret = pci_request_selected_regions(pdev, bars, DRIVER_NAME); + if (ret) + goto clean_up; + pci->init_flags |= INIT_FLAG_PCI_REGIONS_REQUESTED; + + pci_set_master(pdev); + return 0; +clean_up: + lan743x_pci_cleanup(adapter); + return ret; +} + +u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset) +{ + return ioread32(&adapter->csr.csr_address[offset]); +} + +void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data) +{ + iowrite32(data, &adapter->csr.csr_address[offset]); +} + +static int lan743x_csr_light_reset(struct lan743x_adapter *adapter) +{ + unsigned long timeout; + u32 data; + + data = lan743x_csr_read(adapter, HW_CFG); + data |= HW_CFG_LRST_; + lan743x_csr_write(adapter, HW_CFG, data); + timeout = jiffies + (10 * HZ); + do { + if (time_after(jiffies, timeout)) + return -EIO; + msleep(100); + data = lan743x_csr_read(adapter, HW_CFG); + } while (data & HW_CFG_LRST_); + + return 0; +} + +static int lan743x_csr_wait_for_bit(struct lan743x_adapter *adapter, + int offset, u32 bit_mask, + int target_value, int usleep_min, + int usleep_max, int count) +{ + int timeout = count; + int current_value; + int ret = -EIO; + + while (timeout) { + current_value = (lan743x_csr_read(adapter, offset) & bit_mask) + ? 
1 : 0; + if (target_value == current_value) { + ret = 0; + break; + } + usleep_range(usleep_min, usleep_max); + timeout--; + } + return ret; +} + +static int lan743x_csr_init(struct lan743x_adapter *adapter) +{ + struct lan743x_csr *csr = &adapter->csr; + resource_size_t bar_start, bar_length; + int result; + + bar_start = pci_resource_start(adapter->pci.pdev, 0); + bar_length = pci_resource_len(adapter->pci.pdev, 0); + csr->csr_address = ioremap(bar_start, bar_length); + if (!csr->csr_address) { + result = -ENOMEM; + goto clean_up; + } + + csr->id_rev = lan743x_csr_read(adapter, ID_REV); + csr->fpga_rev = lan743x_csr_read(adapter, FPGA_REV); + netif_info(adapter, probe, adapter->netdev, + "ID_REV = 0x%08X, FPGA_REV = %d.%d\n", + csr->id_rev, (csr->fpga_rev) & 0x000000FF, + ((csr->fpga_rev) >> 8) & 0x000000FF); + if ((csr->id_rev & 0xFFF00000) != 0x74300000) { + result = -ENODEV; + goto clean_up; + } + + csr->flags = LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + switch (csr->id_rev & ID_REV_CHIP_REV_MASK_) { + case ID_REV_CHIP_REV_A0_: + csr->flags |= LAN743X_CSR_FLAG_IS_A0; + csr->flags &= ~LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR; + break; + case ID_REV_CHIP_REV_B0_: + csr->flags |= LAN743X_CSR_FLAG_IS_B0; + break; + } + + result = lan743x_csr_light_reset(adapter); + if (result) + goto clean_up; + return 0; +clean_up: + if (csr->csr_address) + iounmap(csr->csr_address); + return result; +} + +#define INTR_FLAG_IRQ_REQUESTED(vector_index) BIT(0 + vector_index) +#define INTR_FLAG_MSI_ENABLED BIT(8) +#define INTR_FLAG_MSIX_ENABLED BIT(9) +#define INTR_FLAG_OPENED BIT(10) + +static void lan743x_intr_software_isr(void *context) +{ + struct lan743x_adapter *adapter = context; + struct lan743x_intr *intr = &adapter->intr; + u32 int_sts; + + int_sts = lan743x_csr_read(adapter, INT_STS); + if (int_sts & INT_BIT_SW_GP_) { + lan743x_csr_write(adapter, INT_STS, INT_BIT_SW_GP_); + intr->software_isr_flag = 1; + } +} + +static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_tx *tx = context; + struct lan743x_adapter *adapter = tx->adapter; + int enable_flag = 1; + u32 int_en = 0; + + int_en = lan743x_csr_read(adapter, INT_EN_SET); + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + } + if (int_sts & INT_BIT_DMA_TX_(tx->channel_number)) { + u32 ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + u32 dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = ioc_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + dmac_int_en = ioc_bit; + + dmac_int_en &= ioc_bit; + dmac_int_sts &= dmac_int_en; + if (dmac_int_sts & ioc_bit) { + tasklet_schedule(&tx->tx_isr_bottom_half); + enable_flag = 0;/* tasklet will re-enable later */ + } + } + if (enable_flag) + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); +} + +static void lan743x_rx_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_rx *rx = context; + struct lan743x_adapter *adapter = rx->adapter; + int enable_flag = 1; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) { + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + } + if (int_sts & INT_BIT_DMA_RX_(rx->channel_number)) { + u32 rx_frame_bit = DMAC_INT_BIT_RXFRM_(rx->channel_number); + u32 
dmac_int_sts; + u32 dmac_int_en; + + if (flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) + dmac_int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + else + dmac_int_sts = rx_frame_bit; + if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) + dmac_int_en = lan743x_csr_read(adapter, + DMAC_INT_EN_SET); + else + dmac_int_en = rx_frame_bit; + + dmac_int_en &= rx_frame_bit; + dmac_int_sts &= dmac_int_en; + if (dmac_int_sts & rx_frame_bit) { + napi_schedule(&rx->napi); + enable_flag = 0;/* poll function will re-enable later */ + } + } + if (enable_flag) { + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + } +} + +static void lan743x_intr_shared_isr(void *context, u32 int_sts, u32 flags) +{ + struct lan743x_adapter *adapter = context; + int channel; + + if (int_sts & INT_BIT_ALL_RX_) { + for (channel = 0; channel < LAN743X_USED_RX_CHANNELS; + channel++) { + u32 int_bit = INT_BIT_DMA_RX_(channel); + + if (int_sts & int_bit) { + lan743x_rx_isr(&adapter->rx[channel], + int_bit, flags); + int_sts &= ~int_bit; + } + } + } + if (int_sts & INT_BIT_ALL_TX_) { + for (channel = 0; channel < LAN743X_USED_TX_CHANNELS; + channel++) { + u32 int_bit = INT_BIT_DMA_TX_(channel); + + if (int_sts & int_bit) { + lan743x_tx_isr(&adapter->tx[channel], + int_bit, flags); + int_sts &= ~int_bit; + } + } + } + if (int_sts & INT_BIT_ALL_OTHER_) { + if (int_sts & INT_BIT_SW_GP_) { + lan743x_intr_software_isr(adapter); + int_sts &= ~INT_BIT_SW_GP_; + } + } + if (int_sts) + lan743x_csr_write(adapter, INT_EN_CLR, int_sts); +} + +static irqreturn_t lan743x_intr_entry_isr(int irq, void *ptr) +{ + struct lan743x_vector *vector = ptr; + struct lan743x_adapter *adapter = vector->adapter; + irqreturn_t result = IRQ_NONE; + u32 int_enables; + u32 int_sts; + + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ) { + int_sts = lan743x_csr_read(adapter, INT_STS); + } else if (vector->flags & + (LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C)) { + int_sts = lan743x_csr_read(adapter, INT_STS_R2C); + } else { + /* use mask as implied status */ + int_sts = vector->int_mask | INT_BIT_MAS_; + } + if (!(int_sts & INT_BIT_MAS_)) + goto irq_done; + if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR) + /* disable vector interrupt */ + lan743x_csr_write(adapter, + INT_VEC_EN_CLR, + INT_VEC_EN_(vector->vector_index)); + if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR) + /* disable master interrupt */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); + if (vector->flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK) { + int_enables = lan743x_csr_read(adapter, INT_EN_SET); + } else { + /* use vector mask as implied enable mask */ + int_enables = vector->int_mask; + } + + int_sts &= int_enables; + int_sts &= vector->int_mask; + if (int_sts) { + if (vector->handler) { + vector->handler(vector->context, + int_sts, vector->flags); + } else { + /* disable interrupts on this vector */ + lan743x_csr_write(adapter, INT_EN_CLR, + vector->int_mask); + } + result = IRQ_HANDLED; + } + if (vector->flags & LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET) + /* enable master interrupt */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); + if (vector->flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET) + /* enable vector interrupt */ + lan743x_csr_write(adapter, + INT_VEC_EN_SET, + INT_VEC_EN_(vector->vector_index)); +irq_done: + return result; +} + +static int lan743x_intr_test_isr(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = 
&adapter->intr; + int result = -ENODEV; + int timeout = 10; + + intr->software_isr_flag = 0; + /* enable interrupt */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_SW_GP_); + /* activate interrupt here */ + lan743x_csr_write(adapter, INT_SET, INT_BIT_SW_GP_); + while ((timeout > 0) && (!(intr->software_isr_flag))) { + usleep_range(1000, 20000); + timeout--; + } + if (intr->software_isr_flag) + result = 0; + /* disable interrupts */ + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_SW_GP_); + return result; +} + +static int lan743x_intr_register_isr(struct lan743x_adapter *adapter, + int vector_index, u32 flags, + u32 int_mask, + lan743x_vector_handler handler, + void *context) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + int ret; + + vector->adapter = adapter; + vector->flags = flags; + vector->vector_index = vector_index; + vector->int_mask = int_mask; + vector->handler = handler; + vector->context = context; + ret = request_irq(vector->irq, + lan743x_intr_entry_isr, + (flags & LAN743X_VECTOR_FLAG_IRQ_SHARED) ? + IRQF_SHARED : 0, + DRIVER_NAME, + vector); + if (ret) { + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; + } + return ret; +} + +static void lan743x_intr_unregister_isr(struct lan743x_adapter *adapter, + int vector_index) +{ + struct lan743x_vector *vector = &adapter->intr.vector_list + [vector_index]; + + free_irq(vector->irq, vector); + vector->handler = NULL; + vector->context = NULL; + vector->int_mask = 0; + vector->flags = 0; +} + +static u32 lan743x_intr_get_vector_flags(struct lan743x_adapter *adapter, + u32 int_mask) +{ + int index; + + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + if (adapter->intr.vector_list[index].int_mask & int_mask) + return adapter->intr.vector_list[index].flags; + } + return 0; +} + +static void lan743x_intr_close(struct lan743x_adapter *adapter) +{ + struct lan743x_intr *intr = &adapter->intr; + int index = 0; + + intr->flags &= ~INTR_FLAG_OPENED; + lan743x_csr_write(adapter, INT_EN_CLR, INT_BIT_MAS_); + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0x000000FF); + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) { + if (intr->flags & INTR_FLAG_IRQ_REQUESTED(index)) { + lan743x_intr_unregister_isr(adapter, index); + intr->flags &= ~INTR_FLAG_IRQ_REQUESTED(index); + } + } + if (intr->flags & INTR_FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pci.pdev); + intr->flags &= ~INTR_FLAG_MSI_ENABLED; + } + if (intr->flags & INTR_FLAG_MSIX_ENABLED) { + pci_disable_msix(adapter->pci.pdev); + intr->flags &= ~INTR_FLAG_MSIX_ENABLED; + } +} + +static int lan743x_intr_open(struct lan743x_adapter *adapter) +{ +#if LAN743X_TRY_MSIX + struct msix_entry msix_entries[LAN743X_MAX_VECTOR_COUNT]; +#endif + struct lan743x_intr *intr = &adapter->intr; + u32 int_vec_en_auto_clr = 0; + u32 int_vec_map0 = 0; + u32 int_vec_map1 = 0; + int ret = -ENODEV; + int index = 0; + u32 flags = 0; + + intr->number_of_vectors = 0; +#if LAN743X_TRY_MSIX + memset(&msix_entries[0], 0, + sizeof(struct msix_entry) * LAN743X_MAX_VECTOR_COUNT); + for (index = 0; index < LAN743X_MAX_VECTOR_COUNT; index++) + msix_entries[index].entry = index; + ret = pci_enable_msix_range(adapter->pci.pdev, + msix_entries, 1, + 1 + LAN743X_USED_TX_CHANNELS + + LAN743X_USED_RX_CHANNELS); + if (ret > 0) { + intr->flags |= INTR_FLAG_MSIX_ENABLED; + intr->number_of_vectors = ret; + intr->using_vectors = true; + for (index = 0; index < intr->number_of_vectors; index++) + intr->vector_list[index].irq = 
msix_entries + [index].vector; + netif_info(adapter, ifup, adapter->netdev, + "using MSIX interrupts, number of vectors = %d\n", + intr->number_of_vectors); + } +#endif +#if LAN743X_TRY_MSI + if (!intr->number_of_vectors) { + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + if (!pci_enable_msi(adapter->pci.pdev)) { + intr->flags |= INTR_FLAG_MSI_ENABLED; + intr->number_of_vectors = 1; + intr->using_vectors = true; + intr->vector_list[0].irq = + adapter->pci.pdev->irq; + netif_info(adapter, ifup, adapter->netdev, + "using MSI interrupts, number of vectors = %d\n", + intr->number_of_vectors); + } + } + } +#endif + if (!intr->number_of_vectors) { + intr->number_of_vectors = 1; + intr->using_vectors = false; + intr->vector_list[0].irq = intr->irq; + netif_info(adapter, ifup, adapter->netdev, + "using legacy interrupts\n"); + } + /* At this point we must have at least one irq */ + lan743x_csr_write(adapter, INT_VEC_EN_CLR, 0xFFFFFFFF); + /* map all interrupts to vector 0 */ + lan743x_csr_write(adapter, INT_VEC_MAP0, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP1, 0x00000000); + lan743x_csr_write(adapter, INT_VEC_MAP2, 0x00000000); + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + if (intr->using_vectors) { + flags |= LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + } else { + flags |= LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET | + LAN743X_VECTOR_FLAG_IRQ_SHARED; + } + if (adapter->csr.flags & LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR; + flags &= ~LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK; + flags |= LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C; + flags |= LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C; + } + ret = lan743x_intr_register_isr(adapter, 0, flags, + INT_BIT_ALL_RX_ | INT_BIT_ALL_TX_ | + INT_BIT_ALL_OTHER_, + lan743x_intr_shared_isr, adapter); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(0); + if (intr->using_vectors) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(0)); + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + lan743x_csr_write(adapter, INT_MOD_CFG0, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG1, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG2, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG3, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG4, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG5, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG6, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_CFG7, LAN743X_INT_MOD); + lan743x_csr_write(adapter, INT_MOD_MAP0, 0x00005432); + lan743x_csr_write(adapter, INT_MOD_MAP1, 0x00000001); + lan743x_csr_write(adapter, INT_MOD_MAP2, 0x00FFFFFF); + } + /* enable interrupts */ + lan743x_csr_write(adapter, INT_EN_SET, INT_BIT_MAS_); + ret = lan743x_intr_test_isr(adapter); + if (ret) + goto clean_up; + if (intr->number_of_vectors > 1) { + int number_of_tx_vectors = intr->number_of_vectors - 1; + + if (number_of_tx_vectors > LAN743X_USED_TX_CHANNELS) + number_of_tx_vectors = LAN743X_USED_TX_CHANNELS; + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + 
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + for (index = 0; index < number_of_tx_vectors; index++) { + u32 int_bit = INT_BIT_DMA_TX_(index); + int vector = index + 1; + + /* map TX interrupt to vector */ + int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1); + if (flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { + int_vec_en_auto_clr |= INT_VEC_EN_(vector); + lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, + int_vec_en_auto_clr); + } + /* Remove TX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_tx_isr, + &adapter->tx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + if (!(flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + if ((intr->number_of_vectors - LAN743X_USED_TX_CHANNELS) > 1) { + int number_of_rx_vectors = intr->number_of_vectors - + LAN743X_USED_TX_CHANNELS - 1; + + if (number_of_rx_vectors > LAN743X_USED_RX_CHANNELS) + number_of_rx_vectors = LAN743X_USED_RX_CHANNELS; + flags = LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET; + if (adapter->csr.flags & + LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) { + flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET | + LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR | + LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR; + } + for (index = 0; index < number_of_rx_vectors; index++) { + int vector = index + 1 + LAN743X_USED_TX_CHANNELS; + u32 int_bit = INT_BIT_DMA_RX_(index); + + /* map RX interrupt to vector */ + int_vec_map0 |= INT_VEC_MAP0_RX_VEC_(index, vector); + lan743x_csr_write(adapter, INT_VEC_MAP0, int_vec_map0); + if (flags & + LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) { + int_vec_en_auto_clr |= INT_VEC_EN_(vector); + lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR, + int_vec_en_auto_clr); + } + /* Remove RX interrupt from shared mask */ + intr->vector_list[0].int_mask &= ~int_bit; + ret = lan743x_intr_register_isr(adapter, vector, flags, + int_bit, lan743x_rx_isr, + &adapter->rx[index]); + if (ret) + goto clean_up; + intr->flags |= INTR_FLAG_IRQ_REQUESTED(vector); + lan743x_csr_write(adapter, INT_VEC_EN_SET, + INT_VEC_EN_(vector)); + } + } + intr->flags |= INTR_FLAG_OPENED; + return 0; +clean_up: + lan743x_intr_close(adapter); + return ret; +} + +static int lan743x_dp_write(struct lan743x_adapter *adapter, + u32 select, u32 addr, u32 length, u32 *buf) +{ + struct lan743x_dp *dp = &adapter->dp; + int ret = -EIO; + u32 dp_sel; + int i; + + mutex_lock(&dp->lock); + if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, + 1, 40, 100, 100)) + goto unlock; + dp_sel = lan743x_csr_read(adapter, DP_SEL); + dp_sel &= ~DP_SEL_MASK_; + 
dp_sel |= select; + lan743x_csr_write(adapter, DP_SEL, dp_sel); + for (i = 0; i < length; i++) { + lan743x_csr_write(adapter, DP_ADDR, addr + i); + lan743x_csr_write(adapter, DP_DATA_0, buf[i]); + lan743x_csr_write(adapter, DP_CMD, DP_CMD_WRITE_); + if (lan743x_csr_wait_for_bit(adapter, DP_SEL, DP_SEL_DPRDY_, + 1, 40, 100, 100)) + goto unlock; + } + ret = 0; +unlock: + mutex_unlock(&dp->lock); + return ret; +} + +#define MAC_MII_READ 1 +#define MAC_MII_WRITE 0 +static u32 lan743x_mac_mii_access(u16 id, u16 index, int read) +{ + u32 ret; + + ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) & + MAC_MII_ACC_PHY_ADDR_MASK_; + ret |= (index << MAC_MII_ACC_MIIRINDA_SHIFT_) & + MAC_MII_ACC_MIIRINDA_MASK_; + if (read) + ret |= MAC_MII_ACC_MII_READ_; + else + ret |= MAC_MII_ACC_MII_WRITE_; + ret |= MAC_MII_ACC_MII_BUSY_; + + return ret; +} + +static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter) +{ + unsigned long start_time = jiffies; + u32 data; + + do { + data = lan743x_csr_read(adapter, MAC_MII_ACC); + if (!(data & MAC_MII_ACC_MII_BUSY_)) + return 0; + } while (!time_after(jiffies, start_time + HZ)); + return -EIO; +} + +static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 val, mii_access; + int ret; + + /* comfirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + /* set the address, index & direction (read from PHY) */ + mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_READ); + lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + + val = lan743x_csr_read(adapter, MAC_MII_DATA); + return (int)(val & 0xFFFF); +} + +static int lan743x_mdiobus_write(struct mii_bus *bus, + int phy_id, int index, u16 regval) +{ + struct lan743x_adapter *adapter = bus->priv; + u32 val, mii_access; + int ret; + + /* confirm MII not busy */ + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + if (ret < 0) + return ret; + val = (u32)regval; + lan743x_csr_write(adapter, MAC_MII_DATA, val); + + /* set the address, index & direction (write to PHY) */ + mii_access = lan743x_mac_mii_access(phy_id, index, MAC_MII_WRITE); + lan743x_csr_write(adapter, MAC_MII_ACC, mii_access); + ret = lan743x_mac_mii_wait_till_not_busy(adapter); + return ret; +} + +static void lan743x_mac_set_address(struct lan743x_adapter *adapter, + u8 *addr) +{ + u32 addr_lo, addr_hi; + + addr_lo = addr[0] | + addr[1] << 8 | + addr[2] << 16 | + addr[3] << 24; + addr_hi = addr[4] | + addr[5] << 8; + lan743x_csr_write(adapter, MAC_RX_ADDRL, addr_lo); + lan743x_csr_write(adapter, MAC_RX_ADDRH, addr_hi); + ether_addr_copy(adapter->mac.mac_address, addr); + netif_info(adapter, drv, adapter->netdev, + "MAC address set to %02X:%02X:%02X:%02X:%02X:%02X\n", + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); +} + +static int lan743x_mac_init(struct lan743x_adapter *adapter) +{ + struct lan743x_mac *mac = &adapter->mac; + bool mac_address_valid = true; + struct net_device *netdev; + u32 mac_addr_hi = 0; + u32 mac_addr_lo = 0; + u32 data; + int ret; + + netdev = adapter->netdev; + lan743x_csr_write(adapter, MAC_CR, MAC_CR_RST_); + ret = lan743x_csr_wait_for_bit(adapter, MAC_CR, MAC_CR_RST_, + 0, 1000, 20000, 100); + if (ret) + return ret; + + /* setup auto duplex, and speed detection */ + data = lan743x_csr_read(adapter, MAC_CR); + data |= MAC_CR_ADD_ | MAC_CR_ASD_; + data |= MAC_CR_CNTR_RST_; + lan743x_csr_write(adapter, 
MAC_CR, data); + mac_addr_hi = lan743x_csr_read(adapter, MAC_RX_ADDRH); + mac_addr_lo = lan743x_csr_read(adapter, MAC_RX_ADDRL); + mac->mac_address[0] = mac_addr_lo & 0xFF; + mac->mac_address[1] = (mac_addr_lo >> 8) & 0xFF; + mac->mac_address[2] = (mac_addr_lo >> 16) & 0xFF; + mac->mac_address[3] = (mac_addr_lo >> 24) & 0xFF; + mac->mac_address[4] = mac_addr_hi & 0xFF; + mac->mac_address[5] = (mac_addr_hi >> 8) & 0xFF; + if (((mac_addr_hi & 0x0000FFFF) == 0x0000FFFF) && + mac_addr_lo == 0xFFFFFFFF) { + mac_address_valid = false; + } else if (!is_valid_ether_addr(mac->mac_address)) { + mac_address_valid = false; + } + if (!mac_address_valid) + random_ether_addr(mac->mac_address); + lan743x_mac_set_address(adapter, mac->mac_address); + netif_info(adapter, probe, adapter->netdev, + "MAC Address = %02X:%02X:%02X:%02X:%02X:%02X\n", + mac->mac_address[0], mac->mac_address[1], + mac->mac_address[2], mac->mac_address[3], + mac->mac_address[4], mac->mac_address[5]); + ether_addr_copy(netdev->dev_addr, mac->mac_address); + return 0; +} + +static int lan743x_mac_open(struct lan743x_adapter *adapter) +{ + int ret = 0; + u32 temp; + + temp = lan743x_csr_read(adapter, MAC_RX); + lan743x_csr_write(adapter, MAC_RX, temp | MAC_RX_RXEN_); + temp = lan743x_csr_read(adapter, MAC_TX); + lan743x_csr_write(adapter, MAC_TX, temp | MAC_TX_TXEN_); + return ret; +} + +static void lan743x_mac_close(struct lan743x_adapter *adapter) +{ + u32 temp; + + temp = lan743x_csr_read(adapter, MAC_TX); + temp &= ~MAC_TX_TXEN_; + lan743x_csr_write(adapter, MAC_TX, temp); + lan743x_csr_wait_for_bit(adapter, MAC_TX, MAC_TX_TXD_, + 1, 1000, 20000, 100); + temp = lan743x_csr_read(adapter, MAC_RX); + temp &= ~MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, temp); + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, + 1, 1000, 20000, 100); +} + +static void lan743x_mac_flow_ctrl_set_enables(struct lan743x_adapter *adapter, + bool tx_enable, bool rx_enable) +{ + u32 flow_setting = 0; + + /* set maximum pause time because when fifo space frees + * up a zero value pause frame will be sent to release the pause + */ + flow_setting = MAC_FLOW_CR_FCPT_MASK_; + if (tx_enable) + flow_setting |= MAC_FLOW_CR_TX_FCEN_; + if (rx_enable) + flow_setting |= MAC_FLOW_CR_RX_FCEN_; + lan743x_csr_write(adapter, MAC_FLOW, flow_setting); +} + +static int lan743x_mac_set_mtu(struct lan743x_adapter *adapter, int new_mtu) +{ + int enabled = 0; + u32 mac_rx = 0; + + mac_rx = lan743x_csr_read(adapter, MAC_RX); + if (mac_rx & MAC_RX_RXEN_) { + enabled = 1; + if (mac_rx & MAC_RX_RXD_) { + lan743x_csr_write(adapter, MAC_RX, mac_rx); + mac_rx &= ~MAC_RX_RXD_; + } + mac_rx &= ~MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, mac_rx); + lan743x_csr_wait_for_bit(adapter, MAC_RX, MAC_RX_RXD_, + 1, 1000, 20000, 100); + lan743x_csr_write(adapter, MAC_RX, mac_rx | MAC_RX_RXD_); + } + mac_rx &= ~(MAC_RX_MAX_SIZE_MASK_); + mac_rx |= (((new_mtu + ETH_HLEN + 4) << MAC_RX_MAX_SIZE_SHIFT_) & + MAC_RX_MAX_SIZE_MASK_); + lan743x_csr_write(adapter, MAC_RX, mac_rx); + if (enabled) { + mac_rx |= MAC_RX_RXEN_; + lan743x_csr_write(adapter, MAC_RX, mac_rx); + } + return 0; +} + +/* PHY */ +#define PHY_FLAG_OPENED BIT(0) +#define PHY_FLAG_ATTACHED BIT(1) +static int lan743x_phy_reset(struct lan743x_adapter *adapter) +{ + unsigned long timeout; + u32 data; + + data = lan743x_csr_read(adapter, PMT_CTL); + data |= PMT_CTL_ETH_PHY_RST_; + lan743x_csr_write(adapter, PMT_CTL, data); + timeout = jiffies + HZ; + do { + if (time_after(jiffies, timeout)) + return -EIO; + msleep(50); + 
data = lan743x_csr_read(adapter, PMT_CTL); + } while ((data & PMT_CTL_ETH_PHY_RST_) || !(data & PMT_CTL_READY_)); + return 0; +} + +static void lan743x_phy_update_flowcontrol(struct lan743x_adapter *adapter, + u8 duplex, u16 local_adv, + u16 remote_adv) +{ + struct lan743x_phy *phy = &adapter->phy; + u8 cap; + + if (phy->fc_autoneg) + cap = mii_resolve_flowctrl_fdx(local_adv, remote_adv); + else + cap = phy->fc_request_control; + lan743x_mac_flow_ctrl_set_enables(adapter, + cap & FLOW_CTRL_TX, + cap & FLOW_CTRL_RX); +} + +static int lan743x_phy_init(struct lan743x_adapter *adapter) +{ + struct net_device *netdev; + int ret; + + netdev = adapter->netdev; + ret = lan743x_phy_reset(adapter); + if (ret) + return ret; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + return 0; +} + +static void lan743x_phy_link_status_change(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = netdev->phydev; + + if (phydev) { + if (phydev->state == PHY_RUNNING) { + struct ethtool_link_ksettings ksettings; + struct lan743x_phy *phy = NULL; + int remote_advertisement = 0; + int local_advertisement = 0; + + phy = &adapter->phy; + memset(&ksettings, 0, sizeof(ksettings)); + phy_ethtool_get_link_ksettings(netdev, &ksettings); + local_advertisement = phy_read(phydev, MII_ADVERTISE); + if (local_advertisement < 0) + goto done; + remote_advertisement = phy_read(phydev, MII_LPA); + if (remote_advertisement < 0) + goto done; + netif_info(adapter, link, adapter->netdev, + "link UP: speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x\n", + ksettings.base.speed, ksettings.base.duplex, + local_advertisement, remote_advertisement); + lan743x_phy_update_flowcontrol(adapter, + ksettings.base.duplex, + local_advertisement, + remote_advertisement); + } else if (phydev->state == PHY_NOLINK) { + netif_info(adapter, link, adapter->netdev, + "link DOWN\n"); + } + } +done: + return; +} + +static void lan743x_phy_close(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct lan743x_phy *phy = &adapter->phy; + + if (phy->flags & PHY_FLAG_OPENED) { + netif_carrier_off(netdev); + phy_stop(netdev->phydev); + phy->flags &= ~PHY_FLAG_OPENED; + } + if (phy->flags & PHY_FLAG_ATTACHED) { + phy_disconnect(netdev->phydev); + netdev->phydev = NULL; + phy->flags &= ~PHY_FLAG_ATTACHED; + } +} + +static int lan743x_phy_open(struct lan743x_adapter *adapter) +{ + struct lan743x_phy *phy = &adapter->phy; + struct phy_device *phydev; + struct net_device *netdev; + int ret = -EIO; + u32 mii_adv; + + netdev = adapter->netdev; + phydev = phy_find_first(adapter->mdiobus); + if (!phydev) { + ret = -EIO; + goto clean_up; + } + ret = phy_connect_direct(netdev, phydev, + lan743x_phy_link_status_change, + PHY_INTERFACE_MODE_GMII); + if (ret) { + ret = -EIO; + goto clean_up; + } + phy->flags |= PHY_FLAG_ATTACHED; + + /* MAC doesn't support 1000T Half */ + phydev->supported &= ~SUPPORTED_1000baseT_Half; + + /* support both flow controls */ + phy->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + mii_adv = (u32)mii_advertise_flowctrl(phy->fc_request_control); + phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + phy->fc_autoneg = phydev->autoneg; + + /* PHY interrupt enabled here */ + phy_start(phydev); + phy_start_aneg(phydev); + phy->flags |= PHY_FLAG_OPENED; + return 0; +clean_up: + lan743x_phy_close(adapter); + return ret; +} + 
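The receive filter code below (lan743x_rfe_set_multicast) places the first 32 multicast addresses into perfect filters and hashes any remaining ones into a 512-bit vector, using bits 31..23 of ether_crc() over the address as the bit index. A minimal standalone sketch of that index arithmetic follows; it is not part of the patch, and the crc value is a made-up stand-in for an ether_crc() result:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: mirrors the hash arithmetic in lan743x_rfe_set_multicast().
 * The 512-bit multicast hash table is held as 16 x 32-bit words; bits
 * 31..23 of the CRC select one of the 512 bit positions.
 */
static void hash_set_bit(uint32_t hash_table[16], uint32_t crc)
{
	uint32_t bitnum = (crc >> 23) & 0x1FF;	/* 9-bit index, 0..511 */

	hash_table[bitnum / 32] |= 1u << (bitnum % 32);
}

int main(void)
{
	uint32_t hash_table[16] = { 0 };
	uint32_t crc = 0x9ABCDEF0; /* placeholder; the driver uses ether_crc(ETH_ALEN, addr) */
	uint32_t bitnum = (crc >> 23) & 0x1FF;

	hash_set_bit(hash_table, crc);
	printf("crc 0x%08X -> bit %u -> hash_table[%u] |= 0x%08X\n",
	       crc, bitnum, bitnum / 32, 1u << (bitnum % 32));
	return 0;
}
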
+static void lan743x_rfe_update_mac_address(struct lan743x_adapter *adapter) +{ + u8 mac_addr[ETH_ALEN]; + u32 mac_addr_hi = 0; + u32 mac_addr_lo = 0; + + /* Add mac address to perfect Filter */ + ether_addr_copy(mac_addr, adapter->mac.mac_address); + mac_addr_lo = ((((u32)(mac_addr[0])) << 0) | + (((u32)(mac_addr[1])) << 8) | + (((u32)(mac_addr[2])) << 16) | + (((u32)(mac_addr[3])) << 24)); + mac_addr_hi = ((((u32)(mac_addr[4])) << 0) | + (((u32)(mac_addr[5])) << 8)); + lan743x_csr_write(adapter, RFE_ADDR_FILT_LO(0), mac_addr_lo); + lan743x_csr_write(adapter, RFE_ADDR_FILT_HI(0), + mac_addr_hi | RFE_ADDR_FILT_HI_VALID_); +} + +static void lan743x_rfe_set_multicast(struct lan743x_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u32 hash_table[DP_SEL_VHF_HASH_LEN]; + u32 rfctl; + u32 data; + + rfctl = lan743x_csr_read(adapter, RFE_CTL); + rfctl &= ~(RFE_CTL_AU_ | RFE_CTL_AM_ | + RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_); + rfctl |= RFE_CTL_AB_; + if (netdev->flags & IFF_PROMISC) { + rfctl |= RFE_CTL_AM_ | RFE_CTL_AU_; + } else { + if (netdev->flags & IFF_ALLMULTI) + rfctl |= RFE_CTL_AM_; + } + memset(hash_table, 0, DP_SEL_VHF_HASH_LEN * sizeof(u32)); + if (netdev_mc_count(netdev)) { + struct netdev_hw_addr *ha; + int i; + + rfctl |= RFE_CTL_DA_PERFECT_; + i = 1; + netdev_for_each_mc_addr(ha, netdev) { + /* set first 32 into Perfect Filter */ + if (i < 33) { + lan743x_csr_write(adapter, + RFE_ADDR_FILT_HI(i), 0); + data = ha->addr[3]; + data = ha->addr[2] | (data << 8); + data = ha->addr[1] | (data << 8); + data = ha->addr[0] | (data << 8); + lan743x_csr_write(adapter, + RFE_ADDR_FILT_LO(i), data); + data = ha->addr[5]; + data = ha->addr[4] | (data << 8); + data |= RFE_ADDR_FILT_HI_VALID_; + lan743x_csr_write(adapter, + RFE_ADDR_FILT_HI(i), data); + } else { + u32 bitnum = (ether_crc(ETH_ALEN, ha->addr) >> + 23) & 0x1FF; + hash_table[bitnum / 32] |= (1 << (bitnum % 32)); + rfctl |= RFE_CTL_MCAST_HASH_; + } + i++; + } + } + lan743x_dp_write(adapter, DP_SEL_RFE_RAM, + DP_SEL_VHF_VLAN_LEN, + DP_SEL_VHF_HASH_LEN, hash_table); + lan743x_csr_write(adapter, RFE_CTL, rfctl); +} + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) +#else +#define DMA_ADDR_HIGH32(dma_addr) ((u32)(0)) +#endif +#define DMA_ADDR_LOW32(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) +#define DMA_DESCRIPTOR_SPACING_16 (16) +#define DMA_DESCRIPTOR_SPACING_32 (32) +#define DMA_DESCRIPTOR_SPACING_64 (64) +#define DMA_DESCRIPTOR_SPACING_128 (128) +#define DEFAULT_DMA_DESCRIPTOR_SPACING (L1_CACHE_BYTES) + +static int lan743x_dmac_init(struct lan743x_adapter *adapter) +{ + struct lan743x_dmac *dmac = &adapter->dmac; + u32 data = 0; + + dmac->flags = 0; + dmac->descriptor_spacing = DEFAULT_DMA_DESCRIPTOR_SPACING; + lan743x_csr_write(adapter, DMAC_CMD, DMAC_CMD_SWR_); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, DMAC_CMD_SWR_, + 0, 1000, 20000, 100); + switch (dmac->descriptor_spacing) { + case DMA_DESCRIPTOR_SPACING_16: + data = DMAC_CFG_MAX_DSPACE_16_; + break; + case DMA_DESCRIPTOR_SPACING_32: + data = DMAC_CFG_MAX_DSPACE_32_; + break; + case DMA_DESCRIPTOR_SPACING_64: + data = DMAC_CFG_MAX_DSPACE_64_; + break; + case DMA_DESCRIPTOR_SPACING_128: + data = DMAC_CFG_MAX_DSPACE_128_; + break; + default: + return -EPERM; + } + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= DMAC_CFG_COAL_EN_; + data |= DMAC_CFG_CH_ARB_SEL_RX_HIGH_; + data |= DMAC_CFG_MAX_READ_REQ_SET_(6); + lan743x_csr_write(adapter, DMAC_CFG, data); + data = 
DMAC_COAL_CFG_TIMER_LIMIT_SET_(1); + data |= DMAC_COAL_CFG_TIMER_TX_START_; + data |= DMAC_COAL_CFG_FLUSH_INTS_; + data |= DMAC_COAL_CFG_INT_EXIT_COAL_; + data |= DMAC_COAL_CFG_CSR_EXIT_COAL_; + data |= DMAC_COAL_CFG_TX_THRES_SET_(0x0A); + data |= DMAC_COAL_CFG_RX_THRES_SET_(0x0C); + lan743x_csr_write(adapter, DMAC_COAL_CFG, data); + data = DMAC_OBFF_TX_THRES_SET_(0x08); + data |= DMAC_OBFF_RX_THRES_SET_(0x0A); + lan743x_csr_write(adapter, DMAC_OBFF_CFG, data); + return 0; +} + +#define DMAC_CHANNEL_STATE_SET(start_bit, stop_bit) \ + (((start_bit) ? 2 : 0) | ((stop_bit) ? 1 : 0)) +#define DMAC_CHANNEL_STATE_INITIAL DMAC_CHANNEL_STATE_SET(0, 0) +#define DMAC_CHANNEL_STATE_STARTED DMAC_CHANNEL_STATE_SET(1, 0) +#define DMAC_CHANNEL_STATE_STOP_PENDING DMAC_CHANNEL_STATE_SET(1, 1) +#define DMAC_CHANNEL_STATE_STOPPED DMAC_CHANNEL_STATE_SET(0, 1) + +static int lan743x_dmac_tx_get_state(struct lan743x_adapter *adapter, + int tx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_T_(tx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_T_(tx_channel))); +} + +static int lan743x_dmac_tx_wait_till_stopped(struct lan743x_adapter *adapter, + int tx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_tx_get_state(adapter, tx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +static int lan743x_dmac_rx_get_state(struct lan743x_adapter *adapter, + int rx_channel) +{ + u32 dmac_cmd = 0; + + dmac_cmd = lan743x_csr_read(adapter, DMAC_CMD); + return DMAC_CHANNEL_STATE_SET((dmac_cmd & + DMAC_CMD_START_R_(rx_channel)), + (dmac_cmd & + DMAC_CMD_STOP_R_(rx_channel))); +} + +static int lan743x_dmac_rx_wait_till_stopped(struct lan743x_adapter *adapter, + int rx_channel) +{ + int timeout = 100; + int result = 0; + + while (timeout && + ((result = lan743x_dmac_rx_get_state(adapter, rx_channel)) == + DMAC_CHANNEL_STATE_STOP_PENDING)) { + usleep_range(1000, 20000); + timeout--; + } + if (result == DMAC_CHANNEL_STATE_STOP_PENDING) + result = -ENODEV; + return result; +} + +/* TX Descriptor bits */ +#define TX_DESC_DATA0_DTYPE_MASK_ (0xC0000000) +#define TX_DESC_DATA0_DTYPE_DATA_ (0x00000000) +#define TX_DESC_DATA0_DTYPE_EXT_ (0x40000000) +#define TX_DESC_DATA0_FS_ (0x20000000) +#define TX_DESC_DATA0_LS_ (0x10000000) +#define TX_DESC_DATA0_EXT_ (0x08000000) +#define TX_DESC_DATA0_IOC_ (0x04000000) +#define TX_DESC_DATA0_DTI_ (0x02000000) +#define TX_DESC_DATA0_TSI_ (0x01000000) +#define TX_DESC_DATA0_IGE_ (0x00800000) +#define TX_DESC_DATA0_ICE_ (0x00400000) +#define TX_DESC_DATA0_IPE_ (0x00200000) +#define TX_DESC_DATA0_TPE_ (0x00100000) +#define TX_DESC_DATA0_IVTG_ (0x00080000) +#define TX_DESC_DATA0_RVTG_ (0x00040000) +#define TX_DESC_DATA0_FCS_ (0x00020000) +#define TX_DESC_DATA0_TSE_ (0x00010000) +#define TX_DESC_DATA0_BUF_LENGTH_MASK_ (0x0000FFFF) +#define TX_DESC_DATA0_EXT_LSO_ (0x00200000) +#define TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_ (0x000FFFFF) +#define TX_DESC_DATA1_TADDRL_MASK_ (0xFFFFFFFF) +#define TX_DESC_DATA2_TADDRH_MASK_ (0xFFFFFFFF) +#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000) +#define TX_DESC_DATA3_VTAG_MASK_ (0x0000FFFF) + +struct lan743x_tx_descriptor { + u32 data0; + u32 data1; + u32 data2; + u32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +#define 
TX_BUFFER_INFO_FLAG_IGNORE_SYNC BIT(2) +#define TX_BUFFER_INFO_FLAG_SKB_FRAGMENT BIT(3) +struct lan743x_tx_buffer_info { + int flags; + struct sk_buff *skb; + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_TX_RING_SIZE (50) + +static void lan743x_tx_release_desc(struct lan743x_tx *tx, + int descriptor_index, bool cleanup) +{ + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_tx_descriptor *descriptor = NULL; + u32 descriptor_type = 0; + + descriptor = &tx->ring_cpu_ptr[descriptor_index]; + buffer_info = &tx->buffer_info[descriptor_index]; + if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE)) + goto done; + + descriptor_type = (descriptor->data0) & + TX_DESC_DATA0_DTYPE_MASK_; + if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_) + goto clean_up_data_descriptor; + else + goto clear_active; + +clean_up_data_descriptor: + if (buffer_info->dma_ptr) { + if (buffer_info->flags & + TX_BUFFER_INFO_FLAG_SKB_FRAGMENT) { + dma_unmap_page(&tx->adapter->pci.pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } else { + dma_unmap_single(&tx->adapter->pci.pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_TO_DEVICE); + } + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + +clear_active: + buffer_info->flags &= ~TX_BUFFER_INFO_FLAG_ACTIVE; + +done: + memset(buffer_info, 0, sizeof(*buffer_info)); + memset(descriptor, 0, sizeof(*descriptor)); +} + +static int lan743x_tx_next_index(struct lan743x_tx *tx, int index) +{ + return ((++index) % tx->ring_size); +} + +static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx) +{ + while ((*tx->head_cpu_ptr) != (tx->last_head)) { + lan743x_tx_release_desc(tx, tx->last_head, false); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } +} + +static void lan743x_tx_release_all_descriptors(struct lan743x_tx *tx) +{ + u32 original_head = 0; + + original_head = tx->last_head; + do { + lan743x_tx_release_desc(tx, tx->last_head, true); + tx->last_head = lan743x_tx_next_index(tx, tx->last_head); + } while (tx->last_head != original_head); + memset(tx->ring_cpu_ptr, 0, + sizeof(*tx->ring_cpu_ptr) * (tx->ring_size)); + memset(tx->buffer_info, 0, + sizeof(*tx->buffer_info) * (tx->ring_size)); +} + +static int lan743x_tx_get_desc_cnt(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int result = 1;/* 1 for the main skb buffer */ + int nr_frags = 0; + + if (skb_is_gso(skb)) + result++;/* requires an extension descriptor */ + nr_frags = skb_shinfo(skb)->nr_frags; + result += nr_frags; /* 1 for each fragment buffer */ + return result; +} + +static int lan743x_tx_get_avail_desc(struct lan743x_tx *tx) +{ + int last_head = tx->last_head; + int last_tail = tx->last_tail; + + if (last_tail >= last_head) + return tx->ring_size - last_tail + last_head - 1; + else + return last_head - last_tail - 1; +} + +static int lan743x_tx_frame_start(struct lan743x_tx *tx, + unsigned char *first_buffer, + unsigned int first_buffer_length, + unsigned int frame_length, + bool check_sum) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired. 
+ */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pci.pdev->dev; + dma_addr_t dma_ptr; + + tx->frame_flags |= TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = tx->last_tail; + tx->frame_tail = tx->frame_first; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = dma_map_single(dev, first_buffer, first_buffer_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) + return -ENOMEM; + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); + tx_descriptor->data3 = (frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = first_buffer_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + tx->frame_data0 = (first_buffer_length & + TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FS_ | + TX_DESC_DATA0_FCS_; + if (check_sum) + tx->frame_data0 |= TX_DESC_DATA0_ICE_ | + TX_DESC_DATA0_IPE_ | + TX_DESC_DATA0_TPE_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx, + unsigned int frame_length) +{ + /* called only from within lan743x_tx_xmit_frame. + * assuming tx->ring_lock has already been acquired. + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + + /* wrap up previous descriptor */ + tx->frame_data0 |= TX_DESC_DATA0_EXT_; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = tx->frame_data0; + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + + /* add extension descriptor */ + tx_descriptor->data1 = 0; + tx_descriptor->data2 = 0; + tx_descriptor->data3 = 0; + buffer_info->skb = NULL; + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + tx->frame_data0 = (frame_length & TX_DESC_DATA0_EXT_PAY_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_EXT_ | + TX_DESC_DATA0_EXT_LSO_; + + /* data0 will be programmed in one of other frame assembler functions */ +} + +static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx, + const struct skb_frag_struct *fragment, + unsigned int frame_length) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + struct device *dev = &adapter->pci.pdev->dev; + unsigned int fragment_length = 0; + dma_addr_t dma_ptr; + + fragment_length = skb_frag_size(fragment); + if (!fragment_length) + return 0; + + /* wrap up previous descriptor */ + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + tx_descriptor->data0 = tx->frame_data0; + + /* move to next descriptor */ + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + dma_ptr = skb_frag_dma_map(dev, fragment, + 0, fragment_length, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, dma_ptr)) { + int desc_index; + + /* cleanup all previously 
setup descriptors */ + desc_index = tx->frame_first; + while (desc_index != tx->frame_tail) { + lan743x_tx_release_desc(tx, desc_index, true); + desc_index = lan743x_tx_next_index(tx, desc_index); + } + dma_wmb(); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; + tx->frame_first = 0; + tx->frame_data0 = 0; + tx->frame_tail = 0; + return -ENOMEM; + } + tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr); + tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr); + tx_descriptor->data3 = (frame_length << 16) & + TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_; + buffer_info->skb = NULL; + buffer_info->dma_ptr = dma_ptr; + buffer_info->buffer_length = fragment_length; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_ACTIVE; + buffer_info->flags |= TX_BUFFER_INFO_FLAG_SKB_FRAGMENT; + tx->frame_data0 = (fragment_length & TX_DESC_DATA0_BUF_LENGTH_MASK_) | + TX_DESC_DATA0_DTYPE_DATA_ | + TX_DESC_DATA0_FCS_; + + /* data0 will be programmed in one of other frame assembler functions */ + return 0; +} + +static void lan743x_tx_frame_end(struct lan743x_tx *tx, + struct sk_buff *skb, + bool ignore_sync) +{ + /* called only from within lan743x_tx_xmit_frame + * assuming tx->ring_lock has already been acquired + */ + struct lan743x_tx_descriptor *tx_descriptor = NULL; + struct lan743x_tx_buffer_info *buffer_info = NULL; + struct lan743x_adapter *adapter = tx->adapter; + u32 tx_tail_flags = 0; + + /* wrap up previous descriptor */ + tx->frame_data0 |= TX_DESC_DATA0_LS_; + tx->frame_data0 |= TX_DESC_DATA0_IOC_; + tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail]; + buffer_info = &tx->buffer_info[tx->frame_tail]; + buffer_info->skb = skb; + if (ignore_sync) + buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC; + tx_descriptor->data0 = tx->frame_data0; + tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail); + tx->last_tail = tx->frame_tail; + dma_wmb(); + if (tx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_TOP_INT_VEC_EN_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) + tx_tail_flags |= TX_TAIL_SET_DMAC_INT_EN_ | + TX_TAIL_SET_TOP_INT_EN_; + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + tx_tail_flags | tx->frame_tail); + tx->frame_flags &= ~TX_FRAME_FLAG_IN_PROGRESS; +} + +static netdev_tx_t lan743x_tx_xmit_frame(struct lan743x_tx *tx, + struct sk_buff *skb) +{ + int required_number_of_descriptors = 0; + unsigned int start_frame_length = 0; + unsigned int frame_length = 0; + unsigned int head_length = 0; + unsigned long irq_flags = 0; + bool ignore_sync = false; + int nr_frags = 0; + bool gso = false; + int j; + + spin_lock_irqsave(&tx->ring_lock, irq_flags); + required_number_of_descriptors = lan743x_tx_get_desc_cnt(tx, skb); + if (required_number_of_descriptors > + lan743x_tx_get_avail_desc(tx)) { + if (required_number_of_descriptors > (tx->ring_size - 1)) { + dev_kfree_skb(skb); + } else { + /* save to overflow buffer */ + tx->overflow_skb = skb; + netif_stop_queue(tx->adapter->netdev); + } + goto unlock; + } + + /* space available, transmit skb */ + head_length = skb_headlen(skb); + frame_length = skb_pagelen(skb); + nr_frags = skb_shinfo(skb)->nr_frags; + start_frame_length = frame_length; + gso = skb_is_gso(skb); + if (gso) { + start_frame_length = max(skb_shinfo(skb)->gso_size, + (unsigned short)8); + } + if (lan743x_tx_frame_start(tx, + skb->data, head_length, + start_frame_length, + skb->ip_summed == CHECKSUM_PARTIAL)) { + dev_kfree_skb(skb); + goto unlock; + } + if (gso) + lan743x_tx_frame_add_lso(tx, frame_length); + if (nr_frags <= 0) 
+ goto finish; + for (j = 0; j < nr_frags; j++) { + const struct skb_frag_struct *frag; + + frag = &(skb_shinfo(skb)->frags[j]); + if (lan743x_tx_frame_add_fragment(tx, frag, frame_length)) { + /* upon error no need to call + * lan743x_tx_frame_end + * frame assembler clean up was performed inside + * lan743x_tx_frame_add_fragment + */ + dev_kfree_skb(skb); + goto unlock; + } + } +finish: + lan743x_tx_frame_end(tx, skb, ignore_sync); +unlock: + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + return NETDEV_TX_OK; +} + +static void lan743x_tx_isr_bottom_half(unsigned long param) +{ + struct lan743x_adapter *adapter = NULL; + bool start_transmitter = false; + struct lan743x_tx *tx = NULL; + unsigned long irq_flags = 0; + u32 ioc_bit = 0; + u32 int_sts = 0; + + tx = (struct lan743x_tx *)param; + adapter = tx->adapter; + ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number); + int_sts = lan743x_csr_read(adapter, DMAC_INT_STS); + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) + lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit); + spin_lock_irqsave(&tx->ring_lock, irq_flags); + /* clean up tx ring */ + lan743x_tx_release_completed_descriptors(tx); + if (netif_queue_stopped(adapter->netdev)) { + if (tx->overflow_skb) { + if (lan743x_tx_get_desc_cnt(tx, tx->overflow_skb) <= + lan743x_tx_get_avail_desc(tx)) + start_transmitter = true; + } else { + netif_wake_queue(adapter->netdev); + } + } + spin_unlock_irqrestore(&tx->ring_lock, irq_flags); + + if (start_transmitter) { + /* space is now available, transmit overflow skb */ + lan743x_tx_xmit_frame(tx, tx->overflow_skb); + tx->overflow_skb = NULL; + netif_wake_queue(adapter->netdev); + } + + /* enable isr */ + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_read(adapter, INT_STS); +} + +static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx) +{ + if (tx->head_cpu_ptr) { + pci_free_consistent(tx->adapter->pci.pdev, + sizeof(*tx->head_cpu_ptr), + (void *)(tx->head_cpu_ptr), + tx->head_dma_ptr); + tx->head_cpu_ptr = NULL; + tx->head_dma_ptr = 0; + } + kfree(tx->buffer_info); + tx->buffer_info = NULL; + if (tx->ring_cpu_ptr) { + pci_free_consistent(tx->adapter->pci.pdev, + tx->ring_allocation_size, + tx->ring_cpu_ptr, + tx->ring_dma_ptr); + tx->ring_allocation_size = 0; + tx->ring_cpu_ptr = NULL; + tx->ring_dma_ptr = 0; + } + tx->ring_size = 0; +} + +static int lan743x_tx_ring_init(struct lan743x_tx *tx) +{ + size_t ring_allocation_size = 0; + int descriptor_spacing = 0; + void *cpu_ptr = NULL; + dma_addr_t dma_ptr; + int ret = -ENOMEM; + + descriptor_spacing = tx->adapter->dmac.descriptor_spacing; + if (sizeof(struct lan743x_tx_descriptor) != descriptor_spacing) { + ret = -EPERM; + goto cleanup; + } + tx->ring_size = LAN743X_TX_RING_SIZE; + if (tx->ring_size & ~TX_CFG_B_TX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + ring_allocation_size = ALIGN(tx->ring_size * descriptor_spacing, + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(tx->adapter->pci.pdev, + ring_allocation_size, &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + tx->ring_allocation_size = ring_allocation_size; + tx->ring_cpu_ptr = (struct lan743x_tx_descriptor *)cpu_ptr; + tx->ring_dma_ptr = dma_ptr; + + cpu_ptr = kcalloc(tx->ring_size, sizeof(*tx->buffer_info), GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + tx->buffer_info = (struct lan743x_tx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(tx->adapter->pci.pdev, + 
sizeof(*tx->head_cpu_ptr), &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + tx->head_cpu_ptr = cpu_ptr; + tx->head_dma_ptr = dma_ptr; + if (tx->head_dma_ptr & 0x3) { + ret = -ENOMEM; + goto cleanup; + } + return 0; +cleanup: + lan743x_tx_ring_cleanup(tx); + return ret; +} + +static void lan743x_tx_close(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = tx->adapter; + + if (tx->flags & TX_FLAG_DMAC_STARTED) { + lan743x_csr_write(adapter, + DMAC_CMD, + DMAC_CMD_STOP_T_(tx->channel_number)); + lan743x_dmac_tx_wait_till_stopped(adapter, tx->channel_number); + tx->flags &= ~TX_FLAG_DMAC_STARTED; + } + if (tx->flags & TX_FLAG_ISR_ENABLED) { + lan743x_csr_write(adapter, + DMAC_INT_EN_CLR, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_TX_(tx->channel_number)); + tasklet_disable(&tx->tx_isr_bottom_half); + tx->flags &= ~TX_FLAG_ISR_ENABLED; + } + if (tx->flags & TX_FLAG_FIFO_ENABLED) { + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_DIS_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number), + 0, 1000, 20000, 100); + tx->flags &= ~TX_FLAG_FIFO_ENABLED; + } + lan743x_tx_release_all_descriptors(tx); + if (tx->overflow_skb) { + dev_kfree_skb(tx->overflow_skb); + tx->overflow_skb = NULL; + } + if (tx->flags & TX_FLAG_RING_ALLOCATED) { + lan743x_tx_ring_cleanup(tx); + tx->flags &= ~TX_FLAG_RING_ALLOCATED; + } +} + +static int lan743x_tx_open(struct lan743x_tx *tx) +{ + struct lan743x_adapter *adapter = NULL; + int ret = -ENODEV; + u32 data = 0; + + adapter = tx->adapter; + ret = lan743x_tx_ring_init(tx); + if (ret) { + ret = -ENOMEM; + goto cleanup; + } + tx->flags |= TX_FLAG_RING_ALLOCATED; + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_TX_CTL, + FCT_TX_CTL_RESET_(tx->channel_number), + 0, 1000, 20000, 100); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_TX_CTL, + FCT_TX_CTL_EN_(tx->channel_number)); + tx->flags |= TX_FLAG_FIFO_ENABLED; + + /* reset tx channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_TX_SWR_(tx->channel_number), + 0, 1000, 20000, 100); + + /* Write TX_BASE_ADDR */ + lan743x_csr_write(adapter, + TX_BASE_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->ring_dma_ptr)); + lan743x_csr_write(adapter, + TX_BASE_ADDRL(tx->channel_number), + DMA_ADDR_LOW32(tx->ring_dma_ptr)); + + /* Write TX_CFG_B */ + data = lan743x_csr_read(adapter, TX_CFG_B(tx->channel_number)); + data &= ~TX_CFG_B_TX_RING_LEN_MASK_; + data |= ((tx->ring_size) & TX_CFG_B_TX_RING_LEN_MASK_); + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= TX_CFG_B_TDMABL_512_; + lan743x_csr_write(adapter, TX_CFG_B(tx->channel_number), data); + + /* Write TX_CFG_A */ + data = TX_CFG_A_TX_TMR_HPWB_SEL_IOC_ | TX_CFG_A_TX_HP_WB_EN_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= TX_CFG_A_TX_HP_WB_ON_INT_TMR_; + data |= TX_CFG_A_TX_PF_THRES_SET_(0x10); + data |= TX_CFG_A_TX_PF_PRI_THRES_SET_(0x04); + data |= TX_CFG_A_TX_HP_WB_THRES_SET_(0x07); + } + lan743x_csr_write(adapter, TX_CFG_A(tx->channel_number), data); + + /* Write TX_HEAD_WRITEBACK_ADDR */ + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRH(tx->channel_number), + DMA_ADDR_HIGH32(tx->head_dma_ptr)); + lan743x_csr_write(adapter, + TX_HEAD_WRITEBACK_ADDRL(tx->channel_number), + 
DMA_ADDR_LOW32(tx->head_dma_ptr)); + + /* set last head */ + tx->last_head = lan743x_csr_read(adapter, TX_HEAD(tx->channel_number)); + + /* write TX_TAIL */ + tx->last_tail = 0; + lan743x_csr_write(adapter, TX_TAIL(tx->channel_number), + (u32)(tx->last_tail)); + tx->vector_flags = lan743x_intr_get_vector_flags(adapter, + INT_BIT_DMA_TX_ + (tx->channel_number)); + tasklet_enable(&tx->tx_isr_bottom_half); + data = 0; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_; + if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= TX_CFG_C_TX_INT_EN_R2C_; + lan743x_csr_write(adapter, TX_CFG_C(tx->channel_number), data); + if (!(tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET)) + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_TX_(tx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_TX_IOC_(tx->channel_number)); + tx->flags |= TX_FLAG_ISR_ENABLED; + + /* start dmac channel */ + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_T_(tx->channel_number)); + tx->flags |= TX_FLAG_DMAC_STARTED; + return 0; +cleanup: + lan743x_tx_close(tx); + return ret; +} + +/* OWN bit is set. ie, Descs are owned by RX DMAC */ +#define RX_DESC_DATA0_OWN_ (0x00008000) +#define RX_DESC_DATA0_LENGTH_MASK_ (0x00003FFF) +#define RX_DESC_DATA1_RADDRL_MASK_ (0xFFFFFFFF) +#define RX_DESC_DATA2_RADDRH_MASK_ (0xFFFFFFFF) +/* OWN bit is clear. ie, Descs are owned by host */ +#define RX_DESC_DATA0_FS_ (0x80000000) +#define RX_DESC_DATA0_LS_ (0x40000000) +#define RX_DESC_DATA0_FRAME_LENGTH_MASK_ (0x3FFF0000) +#define RX_DESC_DATA0_FRAME_LENGTH_GET_(data0) \ + (((data0) & RX_DESC_DATA0_FRAME_LENGTH_MASK_) >> 16) +#define RX_DESC_DATA0_EXT_ (0x00004000) +#define RX_DESC_DATA0_BUF_LENGTH_MASK_ (0x00003FFF) +#define RX_DESC_DATA1_RSS_TYPE_MASK_ (0xF0000000) +#define RX_DESC_DATA1_RSS_TYPE_GET_(data1) (((data1) >> 28) & 0x0F) +#define RX_DESC_DATA1_RX_STATUS_MASK_ (0x00FFFFFF) +#define RX_DESC_DATA1_RX_STATUS_PRI_ (0x00800000) +#define RX_DESC_DATA1_RX_STATUS_LEN_ERR_ (0x00400000) +#define RX_DESC_DATA1_RX_STATUS_TS_ (0x00200000) +#define RX_DESC_DATA1_RX_STATUS_1588_ (0x00100000) +#define RX_DESC_DATA1_RX_STATUS_WAKE_ (0x00080000) +#define RX_DESC_DATA1_RX_STATUS_RFE_FAIL_ (0x00040000) +#define RX_DESC_DATA1_RX_STATUS_ICE_ (0x00020000) +#define RX_DESC_DATA1_RX_STATUS_TCE_ (0x00010000) +#define RX_DESC_DATA1_RX_STATUS_IPV_ (0x00008000) +#define RX_DESC_DATA1_RX_STATUS_PID_MASK_ (0x00006000) +#define RX_DESC_DATA1_RX_STATUS_PFF_ (0x00001000) +#define RX_DESC_DATA1_RX_STATUS_BAM_ (0x00000800) +#define RX_DESC_DATA1_RX_STATUS_MAM_ (0x00000400) +#define RX_DESC_DATA1_RX_STATUS_FVTG_ (0x00000200) +#define RX_DESC_DATA1_RX_STATUS_RED_ (0x00000100) +#define RX_DESC_DATA1_RX_STATUS_RWT_ (0x00000080) +#define RX_DESC_DATA1_RX_STATUS_RUNT_ (0x00000040) +#define RX_DESC_DATA1_RX_STATUS_LONG_ (0x00000020) +#define RX_DESC_DATA1_RX_STATUS_RXE_ (0x00000010) +#define RX_DESC_DATA1_RX_STATUS_ALN_ (0x00000008) +#define RX_DESC_DATA1_RX_STATUS_FCS_ (0x00000004) +#define RX_DESC_DATA1_RX_STATUS_UAM_ (0x00000002) +#define RX_DESC_DATA1_RX_STATUS_ICSM_ (0x00000001) +#define RX_DESC_DATA2_CSUM_MASK_ (0xFFFF0000) +#define RX_DESC_DATA2_VTAG_MASK_ (0x0000FFFF) +#define RX_DESC_DATA2_TS_NS_MASK_ (0x3FFFFFFF) 
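+/* data3 carries the 32 bit RSS hash computed for the received frame */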
+#define RX_DESC_DATA3_RSSHASH_MASK_ (0xFFFFFFFF) +#if ((NET_IP_ALIGN != 0) && (NET_IP_ALIGN != 2)) +#error NET_IP_ALIGN must be 0 or 2 +#endif +#define RX_HEAD_PADDING NET_IP_ALIGN + +struct lan743x_rx_descriptor { + u32 data0; + u32 data1; + u32 data2; + u32 data3; +} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING); + +#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0) +struct lan743x_rx_buffer_info { + int flags; + struct sk_buff *skb; + + dma_addr_t dma_ptr; + unsigned int buffer_length; +}; + +#define LAN743X_RX_RING_SIZE (65) + +static int lan743x_rx_next_index(struct lan743x_rx *rx, int index) +{ + return ((++index) % rx->ring_size); +} + +static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + int length = 0; + + length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING); + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev, + length, + GFP_ATOMIC | GFP_DMA); + if (!(buffer_info->skb)) + return -ENOMEM; + buffer_info->dma_ptr = dma_map_single(&rx->adapter->pci.pdev->dev, + buffer_info->skb->data, + length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&rx->adapter->pci.pdev->dev, + buffer_info->dma_ptr)) { + buffer_info->dma_ptr = 0; + return -ENOMEM; + } + buffer_info->buffer_length = length; + descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); + descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr); + descriptor->data3 = 0; + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + (length & RX_DESC_DATA0_BUF_LENGTH_MASK_)); + skb_reserve(buffer_info->skb, RX_HEAD_PADDING); + return 0; +} + +static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr); + descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr); + descriptor->data3 = 0; + descriptor->data0 = (RX_DESC_DATA0_OWN_ | + ((buffer_info->buffer_length) & + RX_DESC_DATA0_BUF_LENGTH_MASK_)); +} + +static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index) +{ + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + + descriptor = &rx->ring_cpu_ptr[index]; + buffer_info = &rx->buffer_info[index]; + memset(descriptor, 0, sizeof(*descriptor)); + if (buffer_info->dma_ptr) { + dma_unmap_single(&rx->adapter->pci.pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE); + buffer_info->dma_ptr = 0; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + memset(buffer_info, 0, sizeof(*buffer_info)); +} + +#define RX_PROCESS_RESULT_NOTHING_TO_DO (0) +#define RX_PROCESS_RESULT_PACKET_RECEIVED (1) +#define RX_PROCESS_RESULT_PACKET_DROPPED (2) +static int lan743x_rx_process_packet(struct lan743x_rx *rx) +{ + struct skb_shared_hwtstamps *hwtstamps = NULL; + int result = RX_PROCESS_RESULT_NOTHING_TO_DO; + struct lan743x_rx_buffer_info *buffer_info; + struct lan743x_rx_descriptor *descriptor; + int current_head_index = -1; + int extension_index = -1; + int first_index = -1; + int last_index = -1; + + current_head_index = *rx->head_cpu_ptr; + if (current_head_index < 0 || current_head_index >= rx->ring_size) + goto done; + if (rx->last_head < 0 || rx->last_head >= rx->ring_size) + 
goto done; + if (rx->last_head != current_head_index) { + descriptor = &rx->ring_cpu_ptr[rx->last_head]; + if (descriptor->data0 & RX_DESC_DATA0_OWN_) + goto done; + if (!(descriptor->data0 & RX_DESC_DATA0_FS_)) + goto done; + first_index = rx->last_head; + if (descriptor->data0 & RX_DESC_DATA0_LS_) { + last_index = rx->last_head; + } else { + int index; + + index = lan743x_rx_next_index(rx, first_index); + while (index != current_head_index) { + descriptor = &rx->ring_cpu_ptr[index]; + if (descriptor->data0 & RX_DESC_DATA0_OWN_) + goto done; + if (descriptor->data0 & RX_DESC_DATA0_LS_) { + last_index = index; + break; + } + index = lan743x_rx_next_index(rx, index); + } + } + if (last_index >= 0) { + descriptor = &rx->ring_cpu_ptr[last_index]; + if (descriptor->data0 & RX_DESC_DATA0_EXT_) { + /* extension is expected to follow */ + int index = lan743x_rx_next_index(rx, + last_index); + if (index != current_head_index) { + descriptor = &rx->ring_cpu_ptr[index]; + if (descriptor->data0 & + RX_DESC_DATA0_OWN_) { + goto done; + } + if (descriptor->data0 & + RX_DESC_DATA0_EXT_) { + extension_index = index; + } else { + goto done; + } + } else { + /* extension is not yet available */ + /* prevent processing of this packet */ + first_index = -1; + last_index = -1; + } + } + } + } + if (first_index >= 0 && last_index >= 0) { + int real_last_index = last_index; + struct sk_buff *skb = NULL; + u32 ts_sec = 0; + u32 ts_nsec = 0; + + /* packet is available */ + if (first_index == last_index) { + /* single buffer packet */ + int packet_length; + + buffer_info = &rx->buffer_info[first_index]; + skb = buffer_info->skb; + descriptor = &rx->ring_cpu_ptr[first_index]; + + /* unmap from dma */ + if (buffer_info->dma_ptr) { + dma_unmap_single(&rx->adapter->pci.pdev->dev, + buffer_info->dma_ptr, + buffer_info->buffer_length, + DMA_FROM_DEVICE); + buffer_info->dma_ptr = 0; + buffer_info->buffer_length = 0; + } + buffer_info->skb = NULL; + packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_ + (descriptor->data0); + skb_put(skb, packet_length - 4); + skb->protocol = eth_type_trans(skb, + rx->adapter->netdev); + lan743x_rx_allocate_ring_element(rx, first_index); + } else { + int index = first_index; + + /* multi buffer packet not supported */ + /* this should not happen since + * buffers are allocated to be at least jumbo size + */ + + /* clean up buffers */ + if (first_index <= last_index) { + while ((index >= first_index) && + (index <= last_index)) { + lan743x_rx_release_ring_element(rx, + index); + lan743x_rx_allocate_ring_element(rx, + index); + index = lan743x_rx_next_index(rx, + index); + } + } else { + while ((index >= first_index) || + (index <= last_index)) { + lan743x_rx_release_ring_element(rx, + index); + lan743x_rx_allocate_ring_element(rx, + index); + index = lan743x_rx_next_index(rx, + index); + } + } + } + if (extension_index >= 0) { + descriptor = &rx->ring_cpu_ptr[extension_index]; + buffer_info = &rx->buffer_info[extension_index]; + ts_sec = descriptor->data1; + ts_nsec = (descriptor->data2 & + RX_DESC_DATA2_TS_NS_MASK_); + lan743x_rx_reuse_ring_element(rx, extension_index); + real_last_index = extension_index; + } + if (!skb) { + result = RX_PROCESS_RESULT_PACKET_DROPPED; + goto move_forward; + } + if (extension_index < 0) + goto pass_packet_to_os; + hwtstamps = skb_hwtstamps(skb); + if (hwtstamps) + hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec); + +pass_packet_to_os: + /* pass packet to OS */ + napi_gro_receive(&rx->napi, skb); + result = RX_PROCESS_RESULT_PACKET_RECEIVED; + +move_forward: + 
/* push tail and head forward */ + rx->last_tail = real_last_index; + rx->last_head = lan743x_rx_next_index(rx, real_last_index); + } +done: + return result; +} + +static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight) +{ + struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi); + struct lan743x_adapter *adapter = rx->adapter; + u32 rx_tail_flags = 0; + int count; + + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C) { + /* clear int status bit before reading packet */ + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + } + count = 0; + while (count < weight) { + int rx_process_result = -1; + + rx_process_result = lan743x_rx_process_packet(rx); + if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) { + count++; + } else if (rx_process_result == + RX_PROCESS_RESULT_NOTHING_TO_DO) { + break; + } else if (rx_process_result == + RX_PROCESS_RESULT_PACKET_DROPPED) { + continue; + } + } + rx->frame_count += count; + if (count == weight) + goto done; + if (!napi_complete_done(napi, count)) + goto done; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET) + rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) { + rx_tail_flags |= RX_TAIL_SET_TOP_INT_EN_; + } else { + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + } + + /* update RX_TAIL */ + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx_tail_flags | rx->last_tail); +done: + return count; +} + +static void lan743x_rx_ring_cleanup(struct lan743x_rx *rx) +{ + if (rx->buffer_info && rx->ring_cpu_ptr) { + int index; + + for (index = 0; index < rx->ring_size; index++) + lan743x_rx_release_ring_element(rx, index); + } + if (rx->head_cpu_ptr) { + pci_free_consistent(rx->adapter->pci.pdev, + sizeof(*rx->head_cpu_ptr), + rx->head_cpu_ptr, + rx->head_dma_ptr); + rx->head_cpu_ptr = NULL; + rx->head_dma_ptr = 0; + } + kfree(rx->buffer_info); + rx->buffer_info = NULL; + if (rx->ring_cpu_ptr) { + pci_free_consistent(rx->adapter->pci.pdev, + rx->ring_allocation_size, + rx->ring_cpu_ptr, + rx->ring_dma_ptr); + rx->ring_allocation_size = 0; + rx->ring_cpu_ptr = NULL; + rx->ring_dma_ptr = 0; + } + rx->ring_size = 0; + rx->last_head = 0; +} + +static int lan743x_rx_ring_init(struct lan743x_rx *rx) +{ + size_t ring_allocation_size = 0; + int descriptor_spacing = 0; + dma_addr_t dma_ptr = 0; + int index = 0; + void *cpu_ptr = NULL; + int ret = -ENOMEM; + + descriptor_spacing = rx->adapter->dmac.descriptor_spacing; + if (sizeof(struct lan743x_rx_descriptor) != descriptor_spacing) { + ret = -EPERM; + goto cleanup; + } + rx->ring_size = LAN743X_RX_RING_SIZE; + if (rx->ring_size <= 1) { + ret = -EINVAL; + goto cleanup; + } + if (rx->ring_size & ~RX_CFG_B_RX_RING_LEN_MASK_) { + ret = -EINVAL; + goto cleanup; + } + ring_allocation_size = ALIGN(rx->ring_size * descriptor_spacing, + PAGE_SIZE); + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(rx->adapter->pci.pdev, + ring_allocation_size, &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->ring_allocation_size = ring_allocation_size; + rx->ring_cpu_ptr = (struct lan743x_rx_descriptor *)cpu_ptr; + rx->ring_dma_ptr = dma_ptr; + cpu_ptr = kcalloc(rx->ring_size, sizeof(*rx->buffer_info), + GFP_KERNEL); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->buffer_info = (struct lan743x_rx_buffer_info *)cpu_ptr; + dma_ptr = 0; + cpu_ptr = pci_zalloc_consistent(rx->adapter->pci.pdev, + 
sizeof(*rx->head_cpu_ptr), &dma_ptr); + if (!cpu_ptr) { + ret = -ENOMEM; + goto cleanup; + } + rx->head_cpu_ptr = cpu_ptr; + rx->head_dma_ptr = dma_ptr; + if (rx->head_dma_ptr & 0x3) { + ret = -ENOMEM; + goto cleanup; + } + rx->last_head = 0; + for (index = 0; index < rx->ring_size; index++) { + ret = lan743x_rx_allocate_ring_element(rx, index); + if (ret) + goto cleanup; + } + return 0; +cleanup: + lan743x_rx_ring_cleanup(rx); + return ret; +} + +static void lan743x_rx_close(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + + if (rx->flags & RX_FLAG_FIFO_ENABLED) { + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_DIS_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number), + 0, 1000, 20000, 100); + rx->flags &= ~RX_FLAG_FIFO_ENABLED; + } + if (rx->flags & RX_FLAG_DMAC_STARTED) { + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_STOP_R_(rx->channel_number)); + lan743x_dmac_rx_wait_till_stopped(adapter, rx->channel_number); + rx->flags &= ~RX_FLAG_DMAC_STARTED; + } + if (rx->flags & RX_FLAG_ISR_ENABLED) { + lan743x_csr_write(adapter, DMAC_INT_EN_CLR, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, INT_EN_CLR, + INT_BIT_DMA_RX_(rx->channel_number)); + napi_disable(&rx->napi); + rx->flags &= ~RX_FLAG_ISR_ENABLED; + } + if (rx->flags & RX_FLAG_NAPI_ADDED) { + netif_napi_del(&rx->napi); + rx->flags &= ~RX_FLAG_NAPI_ADDED; + } + if (rx->flags & RX_FLAG_RING_ALLOCATED) { + lan743x_rx_ring_cleanup(rx); + rx->flags &= ~RX_FLAG_RING_ALLOCATED; + } +} + +static int lan743x_rx_open(struct lan743x_rx *rx) +{ + struct lan743x_adapter *adapter = rx->adapter; + int ret = -ENODEV; + u32 data = 0; + + rx->frame_count = 0; + ret = lan743x_rx_ring_init(rx); + if (ret) + goto cleanup; + rx->flags |= RX_FLAG_RING_ALLOCATED; + netif_napi_add(adapter->netdev, + &rx->napi, lan743x_rx_napi_poll, + rx->ring_size - 1); + rx->flags |= RX_FLAG_NAPI_ADDED; + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, DMAC_CMD, + DMAC_CMD_RX_SWR_(rx->channel_number), + 0, 1000, 20000, 100); + + /* set ring base address */ + lan743x_csr_write(adapter, + RX_BASE_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->ring_dma_ptr)); + lan743x_csr_write(adapter, + RX_BASE_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->ring_dma_ptr)); + + /* set rx write back address */ + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRH(rx->channel_number), + DMA_ADDR_HIGH32(rx->head_dma_ptr)); + lan743x_csr_write(adapter, + RX_HEAD_WRITEBACK_ADDRL(rx->channel_number), + DMA_ADDR_LOW32(rx->head_dma_ptr)); + data = RX_CFG_A_RX_HP_WB_EN_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) { + data |= (RX_CFG_A_RX_WB_ON_INT_TMR_ | + RX_CFG_A_RX_WB_THRES_SET_(0x7) | + RX_CFG_A_RX_PF_THRES_SET_(16) | + RX_CFG_A_RX_PF_PRI_THRES_SET_(4)); + } + + /* set RX_CFG_A */ + lan743x_csr_write(adapter, + RX_CFG_A(rx->channel_number), data); + + /* set RX_CFG_B */ + data = lan743x_csr_read(adapter, RX_CFG_B(rx->channel_number)); + data &= ~RX_CFG_B_RX_PAD_MASK_; + if (!RX_HEAD_PADDING) + data |= RX_CFG_B_RX_PAD_0_; + else + data |= RX_CFG_B_RX_PAD_2_; + data &= ~RX_CFG_B_RX_RING_LEN_MASK_; + data |= ((rx->ring_size) & RX_CFG_B_RX_RING_LEN_MASK_); + data |= RX_CFG_B_TS_ALL_RX_; + if (!(adapter->csr.flags & LAN743X_CSR_FLAG_IS_A0)) + data |= RX_CFG_B_RDMABL_512_; + lan743x_csr_write(adapter, RX_CFG_B(rx->channel_number), data); + rx->vector_flags = lan743x_intr_get_vector_flags(adapter, + 
INT_BIT_DMA_RX_ + (rx->channel_number)); + + /* set RX_CFG_C */ + data = 0; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR) + data |= RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR) + data |= RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C) + data |= RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_; + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C) + data |= RX_CFG_C_RX_INT_EN_R2C_; + lan743x_csr_write(adapter, RX_CFG_C(rx->channel_number), data); + rx->last_tail = ((u32)(rx->ring_size - 1)); + lan743x_csr_write(adapter, RX_TAIL(rx->channel_number), + rx->last_tail); + rx->last_head = lan743x_csr_read(adapter, RX_HEAD(rx->channel_number)); + if (rx->last_head) { + ret = -EIO; + goto cleanup; + } + napi_enable(&rx->napi); + lan743x_csr_write(adapter, INT_EN_SET, + INT_BIT_DMA_RX_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_STS, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + lan743x_csr_write(adapter, DMAC_INT_EN_SET, + DMAC_INT_BIT_RXFRM_(rx->channel_number)); + rx->flags |= RX_FLAG_ISR_ENABLED; + lan743x_csr_write(adapter, DMAC_CMD, + DMAC_CMD_START_R_(rx->channel_number)); + rx->flags |= RX_FLAG_DMAC_STARTED; + + /* initialize fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number)); + lan743x_csr_wait_for_bit(adapter, FCT_RX_CTL, + FCT_RX_CTL_RESET_(rx->channel_number), + 0, 1000, 20000, 100); + lan743x_csr_write(adapter, FCT_FLOW(rx->channel_number), + FCT_FLOW_CTL_REQ_EN_ | + FCT_FLOW_CTL_ON_THRESHOLD_SET_(0x2A) | + FCT_FLOW_CTL_OFF_THRESHOLD_SET_(0xA)); + + /* enable fifo */ + lan743x_csr_write(adapter, FCT_RX_CTL, + FCT_RX_CTL_EN_(rx->channel_number)); + rx->flags |= RX_FLAG_FIFO_ENABLED; + return 0; +cleanup: + lan743x_rx_close(rx); + return ret; +} + +static int lan743x_netdev_close(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + + if (adapter->open_flags & LAN743X_COMPONENT_FLAG_TX(0)) { + lan743x_tx_close(&adapter->tx[0]); + adapter->open_flags &= ~LAN743X_COMPONENT_FLAG_TX(0); + } + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + if (adapter->open_flags & LAN743X_COMPONENT_FLAG_RX(index)) { + lan743x_rx_close(&adapter->rx[index]); + adapter->open_flags &= + ~LAN743X_COMPONENT_FLAG_RX(index); + } + } + if (adapter->open_flags & LAN743X_COMPONENT_FLAG_PHY) { + lan743x_phy_close(adapter); + adapter->open_flags &= ~LAN743X_COMPONENT_FLAG_PHY; + } + if (adapter->open_flags & LAN743X_COMPONENT_FLAG_MAC) { + lan743x_mac_close(adapter); + adapter->open_flags &= ~LAN743X_COMPONENT_FLAG_MAC; + } + if (adapter->open_flags & LAN743X_COMPONENT_FLAG_INTR) { + lan743x_intr_close(adapter); + adapter->open_flags &= ~LAN743X_COMPONENT_FLAG_INTR; + } + return 0; +} + +static int lan743x_netdev_open(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int index; + int ret; + + ret = lan743x_intr_open(adapter); + if (ret) + goto clean_up; + adapter->open_flags |= LAN743X_COMPONENT_FLAG_INTR; + ret = lan743x_mac_open(adapter); + if (ret) + goto clean_up; + adapter->open_flags |= LAN743X_COMPONENT_FLAG_MAC; + ret = lan743x_phy_open(adapter); + if (ret) + goto clean_up; + adapter->open_flags |= LAN743X_COMPONENT_FLAG_PHY; + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + ret = lan743x_rx_open(&adapter->rx[index]); + if (ret) + goto clean_up; + adapter->open_flags |= LAN743X_COMPONENT_FLAG_RX(index); + } + ret 
= lan743x_tx_open(&adapter->tx[0]); + if (ret) + goto clean_up; + adapter->open_flags |= LAN743X_COMPONENT_FLAG_TX(0); + netif_info(adapter, ifup, adapter->netdev, + "LAN743x opened successfully\n"); + return 0; +clean_up: + netif_warn(adapter, ifup, adapter->netdev, + "Error opening LAN743x, performing cleanup\n"); + lan743x_netdev_close(netdev); + return ret; +} + +static netdev_tx_t lan743x_netdev_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + return lan743x_tx_xmit_frame(&adapter->tx[0], skb); +} + +static int lan743x_netdev_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + if (!netif_running(netdev)) + return -EINVAL; + return phy_mii_ioctl(netdev->phydev, ifr, cmd); +} + +static void lan743x_netdev_set_multicast(struct net_device *netdev) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_rfe_set_multicast(adapter); +} + +static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + int ret = 0; + + ret = lan743x_mac_set_mtu(adapter, new_mtu); + if (!ret) + netdev->mtu = new_mtu; + return ret; +} + +static struct net_device_stats *lan743x_netdev_get_stats(struct net_device *nd) +{ + struct lan743x_adapter *adapter = netdev_priv(nd); + + memset(&nd->stats, 0, sizeof(nd->stats)); + nd->stats.rx_packets = lan743x_csr_read(adapter, STAT_RX_TOTAL_FRAMES); + nd->stats.tx_packets = lan743x_csr_read(adapter, STAT_TX_TOTAL_FRAMES); + nd->stats.rx_bytes = lan743x_csr_read(adapter, + STAT_RX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_RX_MULTICAST_BYTE_COUNT); + nd->stats.tx_bytes = lan743x_csr_read(adapter, + STAT_TX_UNICAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_BROADCAST_BYTE_COUNT) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_BYTE_COUNT); + nd->stats.rx_errors = lan743x_csr_read(adapter, STAT_RX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_ALIGNMENT_ERRORS) + + lan743x_csr_read(adapter, STAT_RX_JABBER_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_UNDERSIZE_FRAME_ERRORS) + + lan743x_csr_read(adapter, + STAT_RX_OVERSIZE_FRAME_ERRORS); + nd->stats.tx_errors = lan743x_csr_read(adapter, STAT_TX_FCS_ERRORS) + + lan743x_csr_read(adapter, + STAT_TX_EXCESS_DEFERRAL_ERRORS) + + lan743x_csr_read(adapter, STAT_TX_CARRIER_ERRORS); + nd->stats.rx_dropped = lan743x_csr_read(adapter, + STAT_RX_DROPPED_FRAMES); + nd->stats.tx_dropped = lan743x_csr_read(adapter, + STAT_TX_EXCESSIVE_COLLISION); + nd->stats.multicast = lan743x_csr_read(adapter, + STAT_RX_MULTICAST_FRAMES) + + lan743x_csr_read(adapter, + STAT_TX_MULTICAST_FRAMES); + nd->stats.collisions = lan743x_csr_read(adapter, + STAT_TX_SINGLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_MULTIPLE_COLLISIONS) + + lan743x_csr_read(adapter, + STAT_TX_LATE_COLLISIONS); + return &nd->stats; +} + +static int lan743x_netdev_set_mac_address(struct net_device *netdev, + void *addr) +{ + struct lan743x_adapter *adapter = netdev_priv(netdev); + struct sockaddr *sock_addr = addr; + int ret; + + ret = eth_prepare_mac_addr_change(netdev, sock_addr); + if (ret) + return ret; + ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); + lan743x_mac_set_address(adapter, sock_addr->sa_data); + lan743x_rfe_update_mac_address(adapter); + return 0; +} + +static const struct net_device_ops lan743x_netdev_ops = { + .ndo_open = lan743x_netdev_open, + .ndo_stop = lan743x_netdev_close, + 
.ndo_start_xmit = lan743x_netdev_xmit_frame, + .ndo_do_ioctl = lan743x_netdev_ioctl, + .ndo_set_rx_mode = lan743x_netdev_set_multicast, + .ndo_change_mtu = lan743x_netdev_change_mtu, + .ndo_get_stats = lan743x_netdev_get_stats, + .ndo_set_mac_address = lan743x_netdev_set_mac_address, +}; + +static void lan743x_hardware_cleanup(struct lan743x_adapter *adapter) +{ + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); +} + +static void lan743x_mdiobus_cleanup(struct lan743x_adapter *adapter) +{ + if (adapter->init_flags & LAN743X_INIT_FLAG_MDIOBUS_REGISTERED) { + mdiobus_unregister(adapter->mdiobus); + adapter->init_flags &= ~LAN743X_INIT_FLAG_MDIOBUS_REGISTERED; + } + if (adapter->init_flags & LAN743X_INIT_FLAG_MDIOBUS_ALLOCATED) { + mdiobus_free(adapter->mdiobus); + adapter->mdiobus = NULL; + adapter->init_flags &= ~LAN743X_INIT_FLAG_MDIOBUS_ALLOCATED; + } +} + +static void lan743x_full_cleanup(struct lan743x_adapter *adapter) +{ + if (adapter->init_flags & LAN743X_INIT_FLAG_NETDEV_REGISTERED) { + unregister_netdev(adapter->netdev); + adapter->init_flags &= ~LAN743X_INIT_FLAG_NETDEV_REGISTERED; + } + lan743x_mdiobus_cleanup(adapter); + lan743x_hardware_cleanup(adapter); + if (adapter->init_flags & LAN743X_COMPONENT_FLAG_CSR) { + if (adapter->csr.csr_address) + iounmap(adapter->csr.csr_address); + adapter->init_flags &= ~LAN743X_COMPONENT_FLAG_CSR; + } + if (adapter->init_flags & LAN743X_COMPONENT_FLAG_PCI) { + lan743x_pci_cleanup(adapter); + adapter->init_flags &= ~LAN743X_COMPONENT_FLAG_PCI; + } + free_netdev(adapter->netdev); +} + +static int lan743x_hardware_init(struct lan743x_adapter *adapter, + struct pci_dev *pdev) +{ + struct lan743x_tx *tx; + int index; + int ret; + + adapter->intr.irq = adapter->pci.pdev->irq; + lan743x_csr_write(adapter, INT_EN_CLR, 0xFFFFFFFF); + mutex_init(&adapter->dp.lock); + ret = lan743x_mac_init(adapter); + if (ret) + return ret; + ret = lan743x_phy_init(adapter); + if (ret) + return ret; + lan743x_rfe_update_mac_address(adapter); + ret = lan743x_dmac_init(adapter); + if (ret) + return ret; + for (index = 0; index < LAN743X_USED_RX_CHANNELS; index++) { + adapter->rx[index].adapter = adapter; + adapter->rx[index].channel_number = index; + } + tx = &adapter->tx[0]; + tx->adapter = adapter; + tx->channel_number = 0; + spin_lock_init(&tx->ring_lock); + tasklet_init(&tx->tx_isr_bottom_half, + lan743x_tx_isr_bottom_half, (unsigned long)tx); + tasklet_disable(&tx->tx_isr_bottom_half); + return 0; +} + +static int lan743x_mdiobus_init(struct lan743x_adapter *adapter) +{ + int ret; + + adapter->mdiobus = mdiobus_alloc(); + if (!(adapter->mdiobus)) { + ret = -ENOMEM; + goto clean_up; + } + adapter->init_flags |= LAN743X_INIT_FLAG_MDIOBUS_ALLOCATED; + adapter->mdiobus->priv = (void *)adapter; + adapter->mdiobus->read = lan743x_mdiobus_read; + adapter->mdiobus->write = lan743x_mdiobus_write; + adapter->mdiobus->name = "lan743x-mdiobus"; + snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, + "pci-%s", pci_name(adapter->pci.pdev)); + + /* set to internal PHY id */ + adapter->mdiobus->phy_mask = ~(int)BIT(1); + + /* register mdiobus */ + ret = mdiobus_register(adapter->mdiobus); + if (ret < 0) + goto clean_up; + adapter->init_flags |= LAN743X_INIT_FLAG_MDIOBUS_REGISTERED; + return 0; +clean_up: + if (adapter->mdiobus) + mdiobus_free(adapter->mdiobus); + adapter->init_flags &= ~(LAN743X_INIT_FLAG_MDIOBUS_REGISTERED | + LAN743X_INIT_FLAG_MDIOBUS_ALLOCATED); + return ret; +} + +/* lan743x_pcidev_probe - Device Initialization Routine + * @pdev: PCI device information struct + * 
@id: entry in lan743x_pci_tbl + * + * Returns 0 on success, negative on failure + * + * initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int lan743x_pcidev_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct lan743x_adapter *adapter = NULL; + struct net_device *netdev = NULL; + int ret = -ENODEV; + + netdev = alloc_etherdev(sizeof(struct lan743x_adapter)); + if (!netdev) + goto clean_up; + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; + netdev->max_mtu = LAN743X_MAX_FRAME_SIZE; + ret = lan743x_pci_init(adapter, pdev); + if (ret) + goto clean_up; + adapter->init_flags |= LAN743X_COMPONENT_FLAG_PCI; + ret = lan743x_csr_init(adapter); + if (ret) + goto clean_up; + adapter->init_flags |= LAN743X_COMPONENT_FLAG_CSR; + ret = lan743x_hardware_init(adapter, pdev); + if (ret) + goto clean_up; + ret = lan743x_mdiobus_init(adapter); + if (ret) + goto clean_up; + adapter->netdev->netdev_ops = &lan743x_netdev_ops; + adapter->netdev->features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; + adapter->netdev->hw_features = adapter->netdev->features; + ret = register_netdev(adapter->netdev); + if (ret < 0) + goto clean_up; + adapter->init_flags |= LAN743X_INIT_FLAG_NETDEV_REGISTERED; + netif_info(adapter, probe, adapter->netdev, "Probe succeeded\n"); + return 0; + +clean_up: + if (adapter) { + netif_warn(adapter, probe, adapter->netdev, + "Incomplete initialization, performing clean up\n"); + lan743x_full_cleanup(adapter); + } + return ret; +} + +/** + * lan743x_pcidev_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * this is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void lan743x_pcidev_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + lan743x_full_cleanup(adapter); +} + +static void lan743x_pcidev_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct lan743x_adapter *adapter = netdev_priv(netdev); + + rtnl_lock(); + netif_device_detach(netdev); + + /* close netdev when netdev is at running state. 
+ * For instance, it is true when system goes to sleep by pm-suspend + * However, it is false when system goes to sleep by suspend GUI menu + */ + if (netif_running(netdev)) + lan743x_netdev_close(netdev); + rtnl_unlock(); + + /* clean up lan743x portion */ + lan743x_hardware_cleanup(adapter); +} + +static const struct pci_device_id lan743x_pcidev_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, + { 0, } +}; + +static struct pci_driver lan743x_pcidev_driver = { + .name = DRIVER_NAME, + .id_table = lan743x_pcidev_tbl, + .probe = lan743x_pcidev_probe, + .remove = lan743x_pcidev_remove, + .shutdown = lan743x_pcidev_shutdown, +}; + +static int __init lan743x_module_init(void) +{ + int result = -EINVAL; + + pr_info(DRIVER_DESC "\n"); + result = pci_register_driver(&lan743x_pcidev_driver); + if (result) + pr_warn("pci_register_driver returned error code, %d\n", + result); + return result; +} + +module_init(lan743x_module_init); + +static void __exit lan743x_module_exit(void) +{ + pci_unregister_driver(&lan743x_pcidev_driver); +} + +module_exit(lan743x_module_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h new file mode 100644 index 0000000..51e0361 --- /dev/null +++ b/drivers/net/ethernet/microchip/lan743x_main.h @@ -0,0 +1,1331 @@ +/* + * Copyright (C) 2018 Microchip Technology + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . 
+ */ +#ifndef _LAN743X_H +#define _LAN743X_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_AUTHOR "Bryan Whitehead " +#define DRIVER_DESC "LAN743x PCIe Gigabit Ethernet Driver" +#define DRIVER_NAME "lan743x" + +/* Register Definitions */ +#define ID_REV (0x00) +#define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) +#define ID_REV_CHIP_ID_7430_ (0x7430) +#define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) +#define ID_REV_CHIP_REV_A0_ (0x00000000) +#define ID_REV_CHIP_REV_B0_ (0x00000010) + +#define FPGA_REV (0x04) +#define FPGA_REV_MINOR_MASK_ (0x0000FF00) +#define FPGA_REV_MAJOR_MASK_ (0x000000FF) + +#define HW_CFG (0x010) +#define HW_CFG_INVERT_LED3_POLARITY_ BIT(31) +#define HW_CFG_INVERT_LED2_POLARITY_ BIT(30) +#define HW_CFG_INVERT_LED1_POLARITY_ BIT(29) +#define HW_CFG_INVERT_LED0_POLARITY_ BIT(28) +#define HW_CFG_CLK125_EN_ BIT(25) +#define HW_CFG_REFCLK25_EN_ BIT(24) +#define HW_CFG_LED3_EN_ BIT(23) +#define HW_CFG_LED2_EN_ BIT(22) +#define HW_CFG_LED1_EN_ BIT(21) +#define HW_CFG_LED0_EN_ BIT(20) +#define HW_CFG_EEE_PHY_LUSU_ BIT(17) +#define HW_CFG_EEE_TSU_ BIT(16) +#define HW_CFG_RST_PROTECT_ BIT(12) +#define HW_CFG_RL_TYPE_EEPROM_UIT_CSR_ BIT(11) +#define HW_CFG_RL_TYPE_EEPROM_UIT_PCIE_ BIT(10) +#define HW_CFG_RL_TYPE_LED_CONFIG_ BIT(9) +#define HW_CFG_RL_TYPE_MAC_CONFIG_ BIT(8) +#define HW_CFG_RL_TYPE_PCI_CONFIG_ BIT(7) +#define HW_CFG_RL_TYPE_MAC_ADDR_ BIT(6) +#define HW_CFG_EE_OTP_DL_ BIT(5) +#define HW_CFG_EE_OTP_RELOAD_ BIT(4) +#define HW_CFG_ETC_ BIT(3) +#define HW_CFG_EEP_GPIO_LED_PIN_DIS_ BIT(2) +#define HW_CFG_LRST_ BIT(1) +#define HW_CFG_SRST_ BIT(0) + +#define PMT_CTL (0x014) +#define PMT_CTL_ETH_PHY_D3_COLD_OVR_ BIT(27) +#define PMT_CTL_MAC_D3_TX_CLK_OVR_ BIT(26) +#define PMT_CTL_MAC_D3_RX_CLK_OVR_ BIT(25) +#define PMT_CTL_ETH_PHY_EDPD_PLL_CTL_ BIT(24) +#define PMT_CTL_ETH_PHY_D3_OVR_ BIT(23) +#define PMT_CTL_INT_D3_CLK_OVR_ BIT(22) +#define PMT_CTL_DMAC_D3_CLK_OVR_ BIT(21) +#define PMT_CTL_1588_D3_CLK_OVR_ BIT(20) +#define PMT_CTL_MAC_D3_CLK_OVR_ BIT(19) +#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18) +#define PMT_CTL_TX_FCT_LSO_D3_CLK_OVR_ BIT(17) +#define PMT_CTL_OTP_EEPROM_D3_CLK_OVR_ BIT(16) +#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15) +#define PMT_CTL_GPIO_WUPS_ BIT(14) +#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13) +#define PMT_CTL_EEE_WUPS_ BIT(12) +#define PMT_CTL_RES_CLR_WKP_MASK_ (0x00000300) +#define PMT_CTL_RES_CLR_WKP_STS_ BIT(9) +#define PMT_CTL_RES_CLR_WKP_EN_ BIT(8) +#define PMT_CTL_READY_ BIT(7) +#define PMT_CTL_EXT_PHY_RDY_EN_ BIT(5) +#define PMT_CTL_ETH_PHY_RST_ BIT(4) +#define PMT_CTL_WOL_EN_ BIT(3) +#define PMT_CTL_ETH_PHY_WAKE_EN_ BIT(2) +#define PMT_CTL_WUPS_MASK_ (0x00000003) +#define PMT_CTL_WUPS_MLT_ (0x00000003) +#define PMT_CTL_WUPS_MAC_ (0x00000002) +#define PMT_CTL_WUPS_PHY_ (0x00000001) + +#define DP_SEL (0x024) +#define DP_SEL_DPRDY_ BIT(31) +#define DP_SEL_MASK_ (0x0000001F) +#define DP_SEL_PCIE_DRCV_RAM (0x00000016) +#define DP_SEL_PCIE_HRCV_RAM (0x00000015) +#define DP_SEL_PCIE_SOT_RAM (0x00000014) +#define DP_SEL_PCIE_RETRY_RAM (0x00000013) +#define DP_SEL_DMAC_TX_RAM_0 (0x0000000F) +#define DP_SEL_DMAC_RX_RAM_3 (0x0000000E) +#define DP_SEL_DMAC_RX_RAM_2 (0x0000000D) +#define DP_SEL_DMAC_RX_RAM_1 (0x0000000C) +#define DP_SEL_DMAC_RX_RAM_0 (0x0000000B) +#define DP_SEL_DMAC_REORDER_BUFFER (0x0000000A) +#define DP_SEL_FCT_TX_RAM_0 (0x00000006) +#define DP_SEL_FCT_RX_RAM_3 (0x00000005) +#define DP_SEL_FCT_RX_RAM_2 (0x00000004) +#define DP_SEL_FCT_RX_RAM_1 (0x00000003) +#define DP_SEL_FCT_RX_RAM_0 
(0x00000002) +#define DP_SEL_RFE_RAM (0x00000001) +#define DP_SEL_LSO_RAM (0x00000000) + +#define DP_SEL_VHF_HASH_LEN (16) +#define DP_SEL_VHF_VLAN_LEN (128) + +#define DP_CMD (0x028) +#define DP_CMD_WRITE_ (0x00000001) +#define DP_CMD_READ_ (0x00000000) + +#define DP_ADDR (0x02C) +#define DP_ADDR_MASK_ (0x00003FFF) + +#define DP_DATA_0 (0x030) +#define DP_DATA_1 (0x034) +#define DP_DATA_2 (0x038) +#define DP_DATA_3 (0x03C) + +#define E2P_CMD (0x040) +#define E2P_CMD_EPC_BUSY_ BIT(31) +#define E2P_CMD_EPC_CMD_MASK_ (0x70000000) +#define E2P_CMD_EPC_CMD_ERAL_ (0x60000000) +#define E2P_CMD_EPC_CMD_ERASE_ (0x50000000) +#define E2P_CMD_EPC_CMD_WRAL_ (0x40000000) +#define E2P_CMD_EPC_CMD_WRITE_ (0x30000000) +#define E2P_CMD_EPC_CMD_EWEN_ (0x20000000) +#define E2P_CMD_EPC_CMD_EWDS_ (0x10000000) +#define E2P_CMD_EPC_CMD_READ_ (0x00000000) +#define E2P_CMD_EPC_TIMEOUT_ BIT(10) +#define E2P_CMD_EPC_ADDR_MASK_ (0x000001FF) + +#define E2P_DATA (0x044) +#define E2P_DATA_EEPROM_DATA_MASK_ (0x000000FF) + +#define GPIO_CFG0 (0x050) +#define GPIO_CFG0_GPIO_DIR_MASK_ (0x0FFF0000) +#define GPIO_CFG0_GPIO_DIR_M(bit_mask) \ + ((((u32)(bit_mask)) << 16) & GPIO_CFG0_GPIO_DIR_MASK_) +#define GPIO_CFG0_GPIO_DIR_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG0_GPIO_DATA_MASK_ (0x00000FFF) +#define GPIO_CFG0_GPIO_DATA_M(bit_mask) \ + (((u32)(bit_mask)) & GPIO_CFG0_GPIO_DATA_MASK_) +#define GPIO_CFG0_GPIO_DATA_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG1 (0x054) +#define GPIO_CFG1_GPIOEN_MASK_ (0x0FFF0000) +#define GPIO_CFG1_GPIOEN_M(bit_mask) \ + ((((u32)(bit_mask)) << 16) & GPIO_CFG1_GPIOEN_MASK_) +#define GPIO_CFG1_GPIOEN_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG1_GPIOBUF_MASK_ (0x00000FFF) +#define GPIO_CFG1_GPIOBUF_M(bit_mask) \ + (((u32)(bit_mask)) & GPIO_CFG1_GPIOBUF_MASK_) +#define GPIO_CFG1_GPIOBUF_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG2 (0x058) +#define GPIO_CFG2_1588_POL_MASK_ (0x00000FFF) +#define GPIO_CFG2_1588_POL_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_CFG3 (0x05C) +#define GPIO_CFG3_1588_CH_SEL_MASK_ (0x0FFF0000) +#define GPIO_CFG3_1588_CH_SEL_BIT_(bit) BIT(16 + (bit)) +#define GPIO_CFG3_1588_OE_MASK_ (0x00000FFF) +#define GPIO_CFG3_1588_OE_BIT_(bit) BIT(0 + (bit)) + +#define GPIO_WAKE (0x060) +#define GPIO_WAKE_GPIOPOL_MASK_ (0x0FFF0000) +#define GPIO_WAKE_GPIOPOL_(bit) BIT(16 + (bit)) +#define GPIO_WAKE_GPIOWK_MASK_ (0x00000FFF) +#define GPIO_WAKE_GPIOWK_(bit) BIT(0 + (bit)) + +#define GPIO_INT_STS (0x64) +#define GPIO_INT_EN_SET (0x68) +#define GPIO_INT_EN_CLR (0x6C) +#define GPIO_INT_BIT_(bit) BIT(0 + (bit)) + +#define FCT_INT_STS (0xA0) +#define FCT_INT_EN_SET (0xA4) +#define FCT_INT_EN_CLR (0xA8) +#define FCT_INT_MASK_RDFPA_ (0xF0000000) +#define FCT_INT_BIT_RDFPA_3_ BIT(31) +#define FCT_INT_BIT_RDFPA_2_ BIT(30) +#define FCT_INT_BIT_RDFPA_1_ BIT(29) +#define FCT_INT_BIT_RDFPA_0_ BIT(28) +#define FCT_INT_MASK_RDFO_ (0x0F000000) +#define FCT_INT_BIT_RDFO_3_ BIT(27) +#define FCT_INT_BIT_RDFO_2_ BIT(26) +#define FCT_INT_BIT_RDFO_1_ BIT(25) +#define FCT_INT_BIT_RDFO_0_ BIT(24) +#define FCT_INT_MASK_RXDF_ (0x00F00000) +#define FCT_INT_BIT_RXDF_3_ BIT(23) +#define FCT_INT_BIT_RXDF_2_ BIT(22) +#define FCT_INT_BIT_RXDF_1_ BIT(21) +#define FCT_INT_BIT_RXDF_0_ BIT(20) +#define FCT_INT_BIT_TXE_ BIT(16) +#define FCT_INT_BIT_TDFO_ BIT(12) +#define FCT_INT_BIT_TDFU_ BIT(8) +#define FCT_INT_BIT_RX_DIS_3_ BIT(7) +#define FCT_INT_BIT_RX_DIS_2_ BIT(6) +#define FCT_INT_BIT_RX_DIS_1_ BIT(5) +#define FCT_INT_BIT_RX_DIS_0_ BIT(4) +#define FCT_INT_BIT_TX_DIS_ BIT(0) +#define FCT_INT_MASK_ERRORS_ \ + 
(FCT_INT_MASK_RDFO_ | \ + FCT_INT_MASK_RXDF_ | \ + FCT_INT_BIT_TXE_ | \ + FCT_INT_BIT_TDFO_ | \ + FCT_INT_BIT_TDFU_) + +#define FCT_RX_CTL (0xAC) +#define FCT_RX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_RX_CTL_DIS_(channel) BIT(24 + (channel)) +#define FCT_RX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_RX_FIFO_END (0xB0) +#define FCT_RX_FIFO_END_3_ (0x3F000000) +#define FCT_RX_FIFO_END_2_ (0x003F0000) +#define FCT_RX_FIFO_END_1_ (0x00003F00) +#define FCT_RX_FIFO_END_0_ (0x0000003F) + +#define FCT_RX_USED_0 (0xB4) +#define FCT_RX_USED_1 (0xB8) +#define FCT_RX_USED_2 (0xBC) +#define FCT_RX_USED_3 (0xC0) +#define FCT_RX_USED_MASK_ (0x0000FFFF) + +#define FCT_TX_CTL (0xC4) +#define FCT_TX_CTL_EN_(channel) BIT(28 + (channel)) +#define FCT_TX_CTL_DIS_(channel) BIT(24 + (channel)) +#define FCT_TX_CTL_RESET_(channel) BIT(20 + (channel)) + +#define FCT_TX_FIFO_END (0xC8) +#define FCT_TX_FIFO_END_0_ (0x0000003F) + +#define FCT_TX_USED_0 (0xCC) +#define FCT_TX_USED_0_MASK_ (0x0000FFFF) + +#define FCT_CFG (0xDC) +#define FCT_CFG_ENABLE_OTHER_ROUTING_HEADERS_ BIT(4) +#define FCT_CFG_STORE_BAD_FRAMES_ BIT(0) + +#define FCT_FLOW(rx_channel) (0xE0 + ((rx_channel) << 2)) +#define FCT_FLOW_CTL_OFF_THRESHOLD_ (0x00007F00) +#define FCT_FLOW_CTL_OFF_THRESHOLD_SET_(value) \ + ((value << 8) & FCT_FLOW_CTL_OFF_THRESHOLD_) +#define FCT_FLOW_CTL_REQ_EN_ BIT(7) +#define FCT_FLOW_CTL_ON_THRESHOLD_ (0x0000007F) +#define FCT_FLOW_CTL_ON_THRESHOLD_SET_(value) \ + ((value << 0) & FCT_FLOW_CTL_ON_THRESHOLD_) + +#define MAC_CR (0x100) +#define MAC_CR_MII_EN_ BIT(19) +#define MAC_CR_EEE_TX_CLK_STOP_EN_ BIT(18) +#define MAC_CR_EEE_EN_ BIT(17) +#define MAC_CR_EEE_TLAR_EN_ BIT(16) +#define MAC_CR_ADP_ BIT(13) +#define MAC_CR_ADD_ BIT(12) +#define MAC_CR_ASD_ BIT(11) +#define MAC_CR_INT_LOOP_ BIT(10) +#define MAC_CR_BOLMT_MASK_ (0x000000C0) +#define MAC_CR_CNTR_RST_ BIT(5) +#define MAC_CR_CNTR_WEN_ BIT(4) +#define MAC_CR_DPX_ BIT(3) +#define MAC_CR_SPEED_MASK_ (0x00000006) +#define MAC_CR_SPEED_1000_ (0x00000004) +#define MAC_CR_SPEED_100_ (0x00000002) +#define MAC_CR_SPEED_10_ (0x00000000) +#define MAC_CR_RST_ BIT(0) + +#define MAC_RX (0x104) +#define MAC_RX_MAX_SIZE_SHIFT_ (16) +#define MAC_RX_MAX_SIZE_MASK_ (0x3FFF0000) +#define MAC_RX_LEN_FLD_LT_CHK_ BIT(6) +#define MAC_RX_WTL_ BIT(5) +#define MAC_RX_FCS_STRIP_ BIT(4) +#define MAC_RX_LFCD_ BIT(3) +#define MAC_RX_VLAN_FSE_ BIT(2) +#define MAC_RX_RXD_ BIT(1) +#define MAC_RX_RXEN_ BIT(0) + +#define MAC_TX (0x108) +#define MAC_TX_BAD_FCS_ BIT(2) +#define MAC_TX_TXD_ BIT(1) +#define MAC_TX_TXEN_ BIT(0) + +#define MAC_FLOW (0x10C) +#define MAC_FLOW_CR_FORCE_FC_ BIT(31) +#define MAC_FLOW_CR_TX_FCEN_ BIT(30) +#define MAC_FLOW_CR_RX_FCEN_ BIT(29) +#define MAC_FLOW_CR_FPF_ BIT(28) +#define MAC_FLOW_CR_FCPT_MASK_ (0x0000FFFF) + +#define MAC_RAND_SEED (0x110) +#define MAC_RAND_SEED_MASK_ (0x0000FFFF) + +#define MAC_ERR_STS (0x114) +#define MAC_ERR_STS_RESERVED_ (0xFFFFF803) +#define MAC_ERR_STS_LEN_ERR_ BIT(10) +#define MAC_ERR_STS_RXERR_ BIT(9) +#define MAC_ERR_STS_FERR_ BIT(8) +#define MAC_ERR_STS_LFERR_ BIT(7) +#define MAC_ERR_STS_RFERR_ BIT(6) +#define MAC_ERR_STS_RWTERR_ BIT(5) +#define MAC_ERR_STS_ECERR_ BIT(4) +#define MAC_ERR_STS_ALERR_ BIT(3) +#define MAC_ERR_STS_URERR_ BIT(2) + +#define MAC_RX_ADDRH (0x118) +#define MAC_RX_ADDRH_MASK_ (0x0000FFFF) + +#define MAC_RX_ADDRL (0x11C) +#define MAC_RX_ADDRL_MASK_ (0xFFFFFFFF) + +#define MAC_MII_ACC (0x120) +#define MAC_MII_ACC_PHY_ADDR_SHIFT_ (11) +#define MAC_MII_ACC_PHY_ADDR_MASK_ (0x0000F800) +#define 
MAC_MII_ACC_MIIRINDA_SHIFT_ (6) +#define MAC_MII_ACC_MIIRINDA_MASK_ (0x000007C0) +#define MAC_MII_ACC_MII_READ_ (0x00000000) +#define MAC_MII_ACC_MII_WRITE_ (0x00000002) +#define MAC_MII_ACC_MII_BUSY_ BIT(0) + +#define MAC_MII_DATA (0x124) +#define MAC_MII_DATA_MASK_ (0x0000FFFF) + +#define MAC_RGMII_ID (0x128) +#define MAC_RGMII_ID_TXC_DELAY_EN_ BIT(1) +#define MAC_RGMII_ID_RXC_DELAY_EN_ BIT(0) + +#define MAC_EEE_TX_LPI_REQ_DLY_CNT (0x130) +#define MAC_EEE_TX_LPI_REQ_DLY_CNT_MASK_ (0xFFFFFFFF) + +#define MAC_EEE_TW_TX_SYS (0x134) +#define MAC_EEE_TW_TX_SYS_CNT1G_MASK_ (0xFFFF0000) +#define MAC_EEE_TW_TX_SYS_CNT100M_MASK_ (0x0000FFFF) + +#define MAC_EEE_TX_LPI_AUTO_REM_DLY (0x138) +#define MAC_EEE_TX_LPI_AUTO_REM_DLY_CNT_ (0x00FFFFFF) + +#define MAC_WUCSR (0x140) +#define MAC_WUCSR_TESTMODE_ BIT(31) +#define MAC_WUCSR_IGNORE_WU_ BIT(20) +#define MAC_WUCSR_IGNORE_WU_TIME_ (0x000F0000) +#define MAC_WUCSR_DISCARD_FRAMES_D0A_ BIT(15) +#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14) +#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13) +#define MAC_WUCSR_EEE_TX_WAKE_EN_ BIT(12) +#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11) +#define MAC_WUCSR_EEE_RX_WAKE_EN_ BIT(10) +#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9) +#define MAC_WUCSR_STORE_WAKE_ BIT(8) +#define MAC_WUCSR_PFDA_FR_ BIT(7) +#define MAC_WUCSR_WUFR_ BIT(6) +#define MAC_WUCSR_MPR_ BIT(5) +#define MAC_WUCSR_BCST_FR_ BIT(4) +#define MAC_WUCSR_PFDA_EN_ BIT(3) +#define MAC_WUCSR_WAKE_EN_ BIT(2) +#define MAC_WUCSR_MPEN_ BIT(1) +#define MAC_WUCSR_BCST_EN_ BIT(0) + +#define MAC_WK_SRC (0x144) +#define MAC_WK_SRC_GPIOX_INT_WK_SHIFT_ (20) +#define MAC_WK_SRC_GPIOX_INT_WK_MASK_ (0xFFF00000) +#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17) +#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16) +#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15) +#define MAC_WK_SRC_EEE_TX_WK_ BIT(14) +#define MAC_WK_SRC_EEE_RX_WK_ BIT(13) +#define MAC_WK_SRC_RFE_FR_WK_ BIT(12) +#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11) +#define MAC_WK_SRC_MP_FR_WK_ BIT(10) +#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9) +#define MAC_WK_SRC_WU_FR_WK_ BIT(8) +#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7) +#define MAC_WK_SRC_WK_FR_SAVE_RX_CH_ (0x00000060) +#define MAC_WK_SRC_WUFF_MATCH_MASK_ (0x0000001F) + +#define MAC_WUF_CFG0 (0x150) +#define MAC_NUM_OF_WUF_CFG (32) +#define MAC_WUF_CFG_BEGIN (MAC_WUF_CFG0) +#define MAC_WUF_CFG(index) (MAC_WUF_CFG_BEGIN + (4 * (index))) +#define MAC_WUF_CFG_EN_ BIT(31) +#define MAC_WUF_CFG_TYPE_MASK_ (0x03000000) +#define MAC_WUF_CFG_TYPE_MCAST_ (0x02000000) +#define MAC_WUF_CFG_TYPE_ALL_ (0x01000000) +#define MAC_WUF_CFG_TYPE_UCAST_ (0x00000000) +#define MAC_WUF_CFG_OFFSET_SHIFT_ (16) +#define MAC_WUF_CFG_OFFSET_MASK_ (0x00FF0000) +#define MAC_WUF_CFG_CRC16_MASK_ (0x0000FFFF) + +#define MAC_WUF_MASK0_0 (0x200) +#define MAC_WUF_MASK0_1 (0x204) +#define MAC_WUF_MASK0_2 (0x208) +#define MAC_WUF_MASK0_3 (0x20C) +#define MAC_NUM_OF_WUF_MASK (32) +#define MAC_WUF_MASK0_BEGIN (MAC_WUF_MASK0_0) +#define MAC_WUF_MASK1_BEGIN (MAC_WUF_MASK0_1) +#define MAC_WUF_MASK2_BEGIN (MAC_WUF_MASK0_2) +#define MAC_WUF_MASK3_BEGIN (MAC_WUF_MASK0_3) +#define MAC_WUF_MASK0(index) \ + (MAC_WUF_MASK0_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK1(index) \ + (MAC_WUF_MASK1_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK2(index) \ + (MAC_WUF_MASK2_BEGIN + (0x10 * (index))) +#define MAC_WUF_MASK3(index) \ + (MAC_WUF_MASK3_BEGIN + (0x10 * (index))) + +/* offset 0x400 - 0x500, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_HI(x) (0x400 + (8 * (x))) +#define RFE_ADDR_FILT_HI_VALID_ BIT(31) +#define 
RFE_ADDR_FILT_HI_TYPE_MASK_ (0x40000000) +#define RFE_ADDR_FILT_HI_TYPE_SRC_ (0x40000000) +#define RFE_ADDR_FILT_HI_TYPE_DST_ (0x00000000) +#define RFE_ADDR_FILT_HI_PRI_FRM_ BIT(20) +#define RFE_ADDR_FILT_HI_RSS_EN_ BIT(19) +#define RFE_ADDR_FILT_HI_CH_EN_ BIT(18) +#define RFE_ADDR_FILT_HI_CH_NUM_MASK_ (0x00030000) +#define RFE_ADDR_FILT_HI_ADDR_MASK_ (0x0000FFFF) + +/* offset 0x404 - 0x504, x may range from 0 to 32, for a total of 33 entries */ +#define RFE_ADDR_FILT_LO(x) (0x404 + (8 * (x))) +#define RFE_ADDR_FILT_LO_ADDR_MASK_ (0xFFFFFFFF) + +#define RFE_CTL (0x508) +#define RFE_CTL_EN_OTHER_RT_HEADER_ BIT(18) +#define RFE_CTL_DEFAULT_RX_CH_0_ (0x00000000) +#define RFE_CTL_DEFAULT_RX_CH_1_ (0x00010000) +#define RFE_CTL_DEFAULT_RX_CH_2_ (0x00020000) +#define RFE_CTL_DEFAULT_RX_CH_3_ (0x00030000) +#define RFE_CTL_DEFAULT_RX_CH_MASK_ (0x00030000) +#define RFE_CTL_DEFAULT_RX_CH_(ch) \ + (((ch) << 16) & RFE_CTL_DEFAULT_RX_CH_MASK_) +#define RFE_CTL_PASS_WKP_ BIT(15) +#define RFE_CTL_IGMP_COE_ BIT(14) +#define RFE_CTL_ICMP_COE_ BIT(13) +#define RFE_CTL_TCPUDP_COE_ BIT(12) +#define RFE_CTL_IP_COE_ BIT(11) +#define RFE_CTL_AB_ BIT(10) +#define RFE_CTL_AM_ BIT(9) +#define RFE_CTL_AU_ BIT(8) +#define RFE_CTL_VLAN_STRIP_ BIT(7) +#define RFE_CTL_DISCARD_UNTAGGED_ BIT(6) +#define RFE_CTL_VLAN_FILTER_ BIT(5) +#define RFE_CTL_SA_FILTER_ BIT(4) +#define RFE_CTL_MCAST_HASH_ BIT(3) +#define RFE_CTL_DA_HASH_ BIT(2) +#define RFE_CTL_DA_PERFECT_ BIT(1) +#define RFE_CTL_RST_ BIT(0) + +#define RFE_PRI_SEL (0x50C) +#define RFE_PRI_SEL_CH_NUM_PRI_7_ (0xC0000000) +#define RFE_PRI_SEL_CH_NUM_PRI_6_ (0x30000000) +#define RFE_PRI_SEL_CH_NUM_PRI_5_ (0x0C000000) +#define RFE_PRI_SEL_CH_NUM_PRI_4_ (0x03000000) +#define RFE_PRI_SEL_CH_NUM_PRI_3_ (0x00C00000) +#define RFE_PRI_SEL_CH_NUM_PRI_2_ (0x00300000) +#define RFE_PRI_SEL_CH_NUM_PRI_1_ (0x000C0000) +#define RFE_PRI_SEL_CH_NUM_PRI_0_ (0x00030000) +#define RFE_PRI_SEL_RSS_EN_PRI_7_ BIT(15) +#define RFE_PRI_SEL_RSS_EN_PRI_6_ BIT(14) +#define RFE_PRI_SEL_RSS_EN_PRI_5_ BIT(13) +#define RFE_PRI_SEL_RSS_EN_PRI_4_ BIT(12) +#define RFE_PRI_SEL_RSS_EN_PRI_3_ BIT(11) +#define RFE_PRI_SEL_RSS_EN_PRI_2_ BIT(10) +#define RFE_PRI_SEL_RSS_EN_PRI_1_ BIT(9) +#define RFE_PRI_SEL_RSS_EN_PRI_0_ BIT(8) +#define RFE_PRI_SEL_FM_PRI_EN_ BIT(7) +#define RFE_PRI_SEL_FM_PRI_THRESH_ (0x00000070) +#define RFE_PRI_SEL_USE_PRECEDENCE_ BIT(3) +#define RFE_PRI_SEL_USE_IP_ BIT(2) +#define RFE_PRI_SEL_USE_TAG_ BIT(1) +#define RFE_PRI_SEL_VL_HIGHER_PRI_ BIT(0) + +#define RFE_DIFFSERV0 (0x510) +#define RFE_DIFFSERV1 (0x514) +#define RFE_DIFFSERV2 (0x518) +#define RFE_DIFFSERV3 (0x51C) +#define RFE_DIFFSERV4 (0x520) +#define RFE_DIFFSERV5 (0x524) +#define RFE_DIFFSERV6 (0x528) +#define RFE_DIFFSERV7 (0x52C) + +#define RFE_RSS_CFG (0x554) +#define RFE_RSS_CFG_UDP_IPV6_EX_ BIT(16) +#define RFE_RSS_CFG_TCP_IPV6_EX_ BIT(15) +#define RFE_RSS_CFG_IPV6_EX_ BIT(14) +#define RFE_RSS_CFG_UDP_IPV6_ BIT(13) +#define RFE_RSS_CFG_TCP_IPV6_ BIT(12) +#define RFE_RSS_CFG_IPV6_ BIT(11) +#define RFE_RSS_CFG_UDP_IPV4_ BIT(10) +#define RFE_RSS_CFG_TCP_IPV4_ BIT(9) +#define RFE_RSS_CFG_IPV4_ BIT(8) +#define RFE_RSS_CFG_VALID_HASH_BITS_ (0x000000E0) +#define RFE_RSS_CFG_RSS_QUEUE_ENABLE_ BIT(2) +#define RFE_RSS_CFG_RSS_HASH_STORE_ BIT(1) +#define RFE_RSS_CFG_RSS_ENABLE_ BIT(0) + +#define RFE_HASH_KEY0 (0x558) +#define RFE_HASH_KEY1 (0x55C) +#define RFE_HASH_KEY2 (0x560) +#define RFE_HASH_KEY3 (0x564) +#define RFE_HASH_KEY4 (0x568) +#define RFE_HASH_KEY5 (0x56C) +#define RFE_HASH_KEY6 (0x570) +#define RFE_HASH_KEY7 (0x574) 
+#define RFE_HASH_KEY8 (0x578) +#define RFE_HASH_KEY9 (0x57C) +#define RFE_HASH_KEY(index) (0x558 + (index << 2)) + +#define RFE_INDX(index) (0x580 + (index << 2)) + +#define MAC_WUCSR2 (0x600) +#define MAC_WUCSR2_CSUM_DISABLE_ BIT(31) +#define MAC_WUCSR2_EN_OTHER_RT_HDRS_ BIT(30) +#define MAC_WUCSR2_FARP_FR_ BIT(10) +#define MAC_WUCSR2_FNS_FR_ BIT(9) +#define MAC_WUCSR2_NA_SA_SEL_ BIT(8) +#define MAC_WUCSR2_NS_RCD_ BIT(7) +#define MAC_WUCSR2_ARP_RCD_ BIT(6) +#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5) +#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4) +#define MAC_WUCSR2_NS_OFFLOAD_EN_ BIT(3) +#define MAC_WUCSR2_ARP_OFFLOAD_EN_ BIT(2) +#define MAC_WUCSR2_IPV6_TCPSYN_WAKE_EN_ BIT(1) +#define MAC_WUCSR2_IPV4_TCPSYN_WAKE_EN_ BIT(0) + +#define MAC_INT_STS (0x604) +#define MAC_INT_EN_SET (0x608) +#define MAC_INT_EN_CLR (0x60C) +#define MAC_INT_BIT_EEE_START_TX_LPI_ BIT(26) +#define MAC_INT_BIT_EEE_STOP_TX_LPI_ BIT(25) +#define MAC_INT_BIT_EEE_RX_LPI_ BIT(24) +#define MAC_INT_BIT_MACRTO_ BIT(23) +#define MAC_INT_BIT_MAC_TX_DIS_ BIT(19) +#define MAC_INT_BIT_MAC_RX_DIS_ BIT(18) +#define MAC_INT_BIT_MAC_ERR_ BIT(15) +#define MAC_INT_BIT_MAC_RX_CNT_ROLL_ BIT(14) +#define MAC_INT_BIT_MAC_TX_CNT_ROLL_ BIT(13) + +#define INT_STS (0x780) +#define INT_BIT_RESERVED_ (0xF0FEF000) +#define INT_BIT_DMA_RX_(channel) BIT(24 + (channel)) +#define INT_BIT_ALL_RX_ (0x0F000000) +#define INT_BIT_DMA_TX_(channel) BIT(16 + (channel)) +#define INT_BIT_ALL_TX_ (0x000F0000) +#define INT_BIT_GPIO_ BIT(11) +#define INT_BIT_DMA_GEN_ BIT(10) +#define INT_BIT_SW_GP_ BIT(9) +#define INT_BIT_PCIE_ BIT(8) +#define INT_BIT_1588_ BIT(7) +#define INT_BIT_OTP_RDY_ BIT(6) +#define INT_BIT_PHY_ BIT(5) +#define INT_BIT_DP_ BIT(4) +#define INT_BIT_MAC_ BIT(3) +#define INT_BIT_FCT_ BIT(2) +#define INT_BIT_GPT_ BIT(1) +#define INT_BIT_ALL_OTHER_ (0x00000280) +#define INT_BIT_MAS_ BIT(0) + +#define INT_SET (0x784) + +#define INT_EN_SET (0x788) + +#define INT_EN_CLR (0x78C) + +#define INT_STS_R2C (0x790) + +#define INT_VEC_EN_SET (0x794) +#define INT_VEC_EN_CLR (0x798) +#define INT_VEC_EN_AUTO_CLR (0x79C) +#define INT_VEC_EN_(vector_index) BIT(0 + vector_index) + +#define INT_VEC_MAP0 (0x7A0) +#define INT_VMAP0_DMA_RX3_VEC_MASK_ (0x0000F000) +#define INT_VMAP0_DMA_RX2_VEC_MASK_ (0x00000F00) +#define INT_VMAP0_DMA_RX1_VEC_MASK_ (0x000000F0) +#define INT_VMAP0_DMA_RX0_VEC_MASK_ (0x0000000F) +#define INT_VEC_MAP0_RX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP1 (0x7A4) +#define INT_VMAP1_DMA_TX0_VEC_MASK_ (0x0000000F) +#define INT_VEC_MAP1_TX_VEC_(channel, vector) \ + (((u32)(vector)) << ((channel) << 2)) + +#define INT_VEC_MAP2 (0x7A8) +#define INT_VMAP2_FCT_VEC_MASK_ (0x00F00000) +#define INT_VMAP2_DMA_GEN_VEC_MASK_ (0x000F0000) +#define INT_VMAP2_SW_GP_VEC_MASK_ (0x0000F000) +#define INT_VMAP2_1588_VEC_MASK_ (0x00000F00) +#define INT_VMAP2_GPT_VEC_MASK_ (0x000000F0) +#define INT_VMAP2_OTHER_VEC_MASK_ (0x0000000F) + +#define INT_MOD_MAP0 (0x7B0) +#define INT_MMAP0_DMA_RX3_MASK_ (0x0000F000) +#define INT_MMAP0_DMA_RX2_MASK_ (0x00000F00) +#define INT_MMAP0_DMA_RX1_MASK_ (0x000000F0) +#define INT_MMAP0_DMA_RX0_MASK_ (0x0000000F) + +#define INT_MOD_MAP1 (0x7B4) +#define INT_MMAP1_DMA_TX0_MASK_ (0x0000000F) + +#define INT_MOD_MAP2 (0x7B8) +#define INT_MMAP2_FCT_MOD_MASK_ (0x00F00000) +#define INT_MMAP2_DMA_GEN_MASK_ (0x000F0000) +#define INT_MMAP2_SW_GP_MASK_ (0x0000F000) +#define INT_MMAP2_1588_MASK_ (0x00000F00) +#define INT_MMAP2_GPT_MASK_ (0x000000F0) +#define INT_MMAP2_OTHER_MASK_ (0x0000000F) + +#define 
INT_MOD_CFG0 (0x7C0) +#define INT_MOD_CFG1 (0x7C4) +#define INT_MOD_CFG2 (0x7C8) +#define INT_MOD_CFG3 (0x7CC) +#define INT_MOD_CFG4 (0x7D0) +#define INT_MOD_CFG5 (0x7D4) +#define INT_MOD_CFG6 (0x7D8) +#define INT_MOD_CFG7 (0x7DC) +#define INT_MOD_CFG_STATUS_ BIT(18) +#define INT_MOD_CFG_START_ BIT(17) +#define INT_MOD_CFG_TMODE_MASK_ (0x00010000) +#define INT_MOD_CFG_TMODE_ABS_ (0x00000000) +#define INT_MOD_CFG_TMODE_CREDIT_ (0x00010000) +#define INT_MOD_CFG_INTERVAL_MASK_ (0x00001FFF) + +#define PTP_CMD_CTL (0x0A00) +#define PTP_CMD_CTL_PTP_CLOCK_TARGET_READ_ BIT(13) +#define PTP_CMD_CTL_PTP_MANUAL_CAPTURE_SEL_MASK_ (0x00001E00) +#define PTP_CMD_CTL_PTP_MANUAL_CAPTURE_ BIT(8) +#define PTP_CMD_CTL_PTP_CLOCK_TEMP_RATE_ BIT(7) +#define PTP_CMD_CTL_PTP_CLK_STP_NSEC_ BIT(6) +#define PTP_CMD_CTL_PTP_CLOCK_STEP_SEC_ BIT(5) +#define PTP_CMD_CTL_PTP_CLOCK_LOAD_ BIT(4) +#define PTP_CMD_CTL_PTP_CLOCK_READ_ BIT(3) +#define PTP_CMD_CTL_PTP_ENABLE_ BIT(2) +#define PTP_CMD_CTL_PTP_DISABLE_ BIT(1) +#define PTP_CMD_CTL_PTP_RESET_ BIT(0) +#define PTP_GENERAL_CONFIG (0x0A04) +#define PTP_GENERAL_CONFIG_TSU_ENABLE_ BIT(31) +#define PTP_GENERAL_CONFIG_GPIO_FECR_ BIT(25) +#define PTP_GENERAL_CONFIG_GPIO_RECR_ BIT(24) +#define PTP_GENERAL_CONFIG_GPIO_PTP_TIMER_INT_X_CLEAR_EN_(channel) \ + (BIT(12 + ((channel) << 3))) +#define PTP_GENERAL_CONFIG_GPIO_PTP_TIMER_INT_X_CLEAR_SEL_SET_(channel, value) \ + (((value) & 0xF) << (8 + ((channel) << 3))) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_MASK_(channel) \ + (0x7 << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_ (0) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_ (1) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ (2) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_INT_ (7) +#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \ + (((value) & 0x7) << (1 + ((channel) << 2))) +#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2)) + +#define PTP_INT_STS (0x0A08) +#define PTP_INT_EN_SET (0x0A0C) +#define PTP_INT_EN_CLR (0x0A10) +#define PTP_INT_BIT_GPIO_FE(gpio_num) BIT(24 + (gpio_num)) +#define PTP_INT_BIT_GPIO_RE(gpio_num) BIT(16 + (gpio_num)) +#define PTP_INT_BIT_TX_SWTS_ERR_ BIT(13) +#define PTP_INT_BIT_TX_TS_ BIT(12) +#define PTP_INT_BIT_RX_TS_ BIT(8) +#define PTP_INT_BIT_TIMER_B_ BIT(1) +#define PTP_INT_BIT_TIMER_A_ BIT(0) +#define PTP_INT_BIT_TIMER_(channel) BIT(channel) + +#define PTP_CLOCK_SEC (0x0A14) +#define PTP_CLOCK_NS (0x0A18) +#define PTP_CLOCK_SUBNS (0x0A1C) +#define PTP_CLOCK_RATE_ADJ (0x0A20) +#define PTP_CLOCK_RATE_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_RATE_ADJ_VALUE_MASK_ (0x3FFFFFFF) +#define PTP_CLOCK_TEMP_RATE_ADJ (0x0A24) +#define PTP_CLOCK_TEMP_RATE_DURATION (0x0A28) +#define PTP_CLOCK_STEP_ADJ (0x0A2C) +#define PTP_CLOCK_STEP_ADJ_DIR_ BIT(31) +#define PTP_CLOCK_STEP_ADJ_VALUE_MASK_ (0x3FFFFFFF) +#define PTP_CLOCK_TARGET_SEC_X(channel) (0x0A30 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_NS_X(channel) (0x0A34 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_SEC_X(channel) (0x0A38 + ((channel) << 4)) +#define PTP_CLOCK_TARGET_RELOAD_NS_X(channel) (0x0A3C + ((channel) << 4)) +#define PTP_USER_MAC_HI (0x0A50) +#define PTP_USER_MAC_LO (0x0A54) +#define PTP_GPIO_SEL (0x0A58) +#define PTP_LATENCY (0x0A5C) +#define PTP_CAP_INFO (0x0A60) +#define PTP_CAP_INFO_TX_TS_CNT_GET_(reg_val) ((reg_val & 0x00000070) >> 4) 
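+/* PTP RX/TX packet parsing and timestamp capture registers */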
+#define PTP_RX_PARSE_CONFIG	(0x0A64)
+#define PTP_RX_TIMESTAMP_CONFIG	(0x0A68)
+
+#define PTP_RX_INGRESS_SEC	(0x0A78)
+#define PTP_RX_INGRESS_NS	(0x0A7C)
+#define PTP_RX_MSG_HEADER	(0x0A80)
+#define PTP_TX_PARSE_CONFIG	(0x0A9C)
+#define PTP_TX_TIMESTAMP_CONFIG	(0x0AA0)
+#define PTP_TX_MOD	(0x0AA4)
+#define PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_	(0x10000000)
+#define PTP_TX_MOD_TX_SW_TS_INS_OFFSET_MASK_	(0x00001FFF)
+#define PTP_TX_MOD2	(0x0AA8)
+#define PTP_TX_MOD2_TX_PTP_CLR_UDPV4_CHKSUM_	(0x00000001)
+#define PTP_TX_EGRESS_SEC	(0x0AAC)
+#define PTP_TX_EGRESS_NS	(0x0AB0)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_MASK_	(0xC0000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_AUTO_	(0x00000000)
+#define PTP_TX_EGRESS_NS_CAPTURE_CAUSE_SW_	(0x40000000)
+#define PTP_TX_EGRESS_NS_TS_NS_MASK_	(0x3FFFFFFF)
+#define PTP_TX_MSG_HEADER	(0x0AB4)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_	(0x000F0000)
+#define PTP_TX_MSG_HEADER_MSG_TYPE_SYNC_	(0x00000000)
+#define PTP_TX_ONE_STEP_SYNC_SEC	(0x0AC0)
+#define PTP_GPIO_CAP_CONFIG	(0x0AC4)
+#define PTP_GPIO_RE_CLOCK_SEC_CAP	(0x0AC8)
+#define PTP_GPIO_RE_CLOCK_NS_CAP	(0x0ACC)
+#define PTP_GPIO_FE_CLOCK_SEC_CAP	(0x0AD0)
+#define PTP_GPIO_FE_CLOCK_NS_CAP	(0x0AD4)
+
+#define DMAC_CFG	(0xC00)
+#define DMAC_CFG_INTR_DSCR_RD_EN_	BIT(18)
+#define DMAC_CFG_INTR_DSCR_WR_EN_	BIT(17)
+#define DMAC_CFG_COAL_EN_	BIT(16)
+#define DMAC_CFG_CMPL_RETRY_CNT_MASK_	(0x00006000)
+#define DMAC_CFG_CMPL_RETRY_EN_	BIT(12)
+#define DMAC_CFG_CH_ARB_SEL_MASK_	(0x00000C00)
+#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_	(0x00000000)
+#define DMAC_CFG_CH_ARB_SEL_CH_ORDER_	BIT(10)
+#define DMAC_CFG_CH_ARB_SEL_RX_HIGH_RR_	BIT(11)
+#define DMAC_CFG_CH_ARB_SEL_RR_	(0x00000C00)
+#define DMAC_CFG_MAX_READ_REQ_MASK_	(0x00000070)
+#define DMAC_CFG_MAX_READ_REQ_SET_(val) \
+	((((u32)(val)) << 4) & DMAC_CFG_MAX_READ_REQ_MASK_)
+#define DMAC_CFG_MAX_DSPACE_MASK_	(0x00000003)
+#define DMAC_CFG_MAX_DSPACE_16_	(0x00000000)
+#define DMAC_CFG_MAX_DSPACE_32_	(0x00000001)
+#define DMAC_CFG_MAX_DSPACE_64_	BIT(1)
+#define DMAC_CFG_MAX_DSPACE_128_	(0x00000003)
+
+#define DMAC_COAL_CFG	(0xC04)
+#define DMAC_COAL_CFG_TIMER_LIMIT_MASK_	(0xFFF00000)
+#define DMAC_COAL_CFG_TIMER_LIMIT_SET_(val) \
+	((((u32)(val)) << 20) & DMAC_COAL_CFG_TIMER_LIMIT_MASK_)
+#define DMAC_COAL_CFG_TIMER_TX_START_	BIT(19)
+#define DMAC_COAL_CFG_FLUSH_INTS_	BIT(18)
+#define DMAC_COAL_CFG_INT_EXIT_COAL_	BIT(17)
+#define DMAC_COAL_CFG_CSR_EXIT_COAL_	BIT(16)
+#define DMAC_COAL_CFG_TX_THRES_MASK_	(0x0000FF00)
+#define DMAC_COAL_CFG_TX_THRES_SET_(val) \
+	((((u32)(val)) << 8) & DMAC_COAL_CFG_TX_THRES_MASK_)
+#define DMAC_COAL_CFG_RX_THRES_MASK_	(0x000000FF)
+#define DMAC_COAL_CFG_RX_THRES_SET_(val) \
+	(((u32)(val)) & DMAC_COAL_CFG_RX_THRES_MASK_)
+
+#define DMAC_OBFF_CFG	(0xC08)
+#define DMAC_OBFF_TX_THRES_MASK_	(0x0000FF00)
+#define DMAC_OBFF_TX_THRES_SET_(val) \
+	((((u32)(val)) << 8) & DMAC_OBFF_TX_THRES_MASK_)
+#define DMAC_OBFF_RX_THRES_MASK_	(0x000000FF)
+#define DMAC_OBFF_RX_THRES_SET_(val) \
+	(((u32)(val)) & DMAC_OBFF_RX_THRES_MASK_)
+
+#define DMAC_CMD	(0xC0C)
+#define DMAC_CMD_SWR_	BIT(31)
+#define DMAC_CMD_COAL_EXIT_	BIT(28)
+#define DMAC_CMD_TX_SWR_(channel)	BIT(24 + (channel))
+#define DMAC_CMD_START_T_(channel)	BIT(20 + (channel))
+#define DMAC_CMD_STOP_T_(channel)	BIT(16 + (channel))
+#define DMAC_CMD_RX_SWR_(channel)	BIT(8 + (channel))
+#define DMAC_CMD_START_R_(channel)	BIT(4 + (channel))
+#define DMAC_CMD_STOP_R_(channel)	BIT(0 + (channel))
+
+#define DMAC_INT_STS	(0xC10)
+#define DMAC_INT_EN_SET	(0xC14)
+#define DMAC_INT_EN_CLR	(0xC18)
+#define DMAC_INT_BIT_RXPRI_(channel)	BIT(24 + (channel))
+#define DMAC_INT_BIT_ERR_	BIT(21)
+#define DMAC_INT_BIT_RXFRM_(channel)	BIT(16 + (channel))
+#define DMAC_INT_BIT_RX_STOP_(channel)	BIT(12 + (channel))
+#define DMAC_INT_BIT_TX_STOP_(channel)	BIT(8 + (channel))
+#define DMAC_INT_BIT_TX_IOC_(channel)	BIT(0 + (channel))
+
+#define DMAC_RX_ABS_TIMER_CFG	(0xC1C)
+#define DMAC_RX_ABS_TIMER_CFG_SHARE_MASK_	(0x00F00000)
+#define DMAC_RX_ABS_TIMER_CFG_SHARE_3_	BIT(23)
+#define DMAC_RX_ABS_TIMER_CFG_SHARE_2_	BIT(22)
+#define DMAC_RX_ABS_TIMER_CFG_SHARE_1_	BIT(21)
+#define DMAC_RX_ABS_TIMER_CFG_SHARE_0_	BIT(20)
+#define DMAC_RX_ABS_TIMER_CFG_WR_	BIT(19)
+#define DMAC_RX_ABS_TIMER_CFG_SEL_MASK_	(0x00070000)
+#define DMAC_RX_ABS_TIMER_CFG_CNT_MASK_	(0x0000FFFF)
+
+#define DMAC_RX_TIMER_CFG	(0xC20)
+#define DMAC_RX_TIMER_CFG_TMR_MODE_MASK_	(0x1F000000)
+#define DMAC_RX_TIMER_CFG_TMR_SHARED_FRAME_MODE_	BIT(28)
+#define DMAC_RX_TIMER_CFG_TMR_TIMER3_FRAME_MODE_	BIT(27)
+#define DMAC_RX_TIMER_CFG_TMR_TIMER2_FRAME_MODE_	BIT(26)
+#define DMAC_RX_TIMER_CFG_TMR_TIMER1_FRAME_MODE_	BIT(25)
+#define DMAC_RX_TIMER_CFG_TMR_TIMER0_FRAME_MODE_	BIT(24)
+#define DMAC_RX_TIMER_CFG_SHARE_MAP_MASK_	(0x00F00000)
+#define DMAC_RX_TIMER_CFG_SHARE_MAP_TIMER3_	BIT(23)
+#define DMAC_RX_TIMER_CFG_SHARE_MAP_TIMER2_	BIT(22)
+#define DMAC_RX_TIMER_CFG_SHARE_MAP_TIMER1_	BIT(21)
+#define DMAC_RX_TIMER_CFG_SHARE_MAP_TIMER0_	BIT(20)
+#define DMAC_RX_TIMER_CFG_WR_	BIT(19)
+#define DMAC_RX_TIMER_CFG_CH_SEL_MASK_	(0x00070000)
+#define DMAC_RX_TIMER_CFG_CH_SEL_TIMER0_	(0x00000000)
+#define DMAC_RX_TIMER_CFG_CH_SEL_TIMER1_	(0x00010000)
+#define DMAC_RX_TIMER_CFG_CH_SEL_TIMER2_	(0x00020000)
+#define DMAC_RX_TIMER_CFG_CH_SEL_TIMER3_	(0x00030000)
+#define DMAC_RX_TIMER_CFG_CH_SEL_SHARED_	(0x00040000)
+#define DMAC_RX_TIMER_CFG_CNT_MASK_	(0x0000FFFF)
+
+#define DMAC_TXTMR_CFG	(0xC24)
+#define DMAC_TXTMR_CFG_TX_DELAY_WR_	BIT(23)
+#define DMAC_TXTMR_CFG_TX_DELAY_CNT_	(0x0000FFFF)
+
+#define DMAC_TX_ABSTMR_CFG	(0xC28)
+#define DMAC_TX_ABSTMR_WR_	BIT(23)
+#define DMAC_TX_ABSTMR_CNT_	(0x0000FFFF)
+
+#define RX_CFG_A(channel)	(0xC40 + ((channel) << 6))
+#define RX_CFG_A_RX_WB_SWFLUSH_	BIT(31)
+#define RX_CFG_A_RX_WB_ON_INT_TMR_	BIT(30)
+#define RX_CFG_A_RX_WB_THRES_MASK_	(0x1F000000)
+#define RX_CFG_A_RX_WB_THRES_SET_(val) \
+	((((u32)(val)) << 24) & RX_CFG_A_RX_WB_THRES_MASK_)
+#define RX_CFG_A_RX_PF_THRES_MASK_	(0x001F0000)
+#define RX_CFG_A_RX_PF_THRES_SET_(val) \
+	((((u32)(val)) << 16) & RX_CFG_A_RX_PF_THRES_MASK_)
+#define RX_CFG_A_RX_PF_PRI_THRES_MASK_	(0x00001F00)
+#define RX_CFG_A_RX_PF_PRI_THRES_SET_(val) \
+	((((u32)(val)) << 8) & RX_CFG_A_RX_PF_PRI_THRES_MASK_)
+#define RX_CFG_A_RX_HP_WB_EN_	BIT(5)
+#define RX_CFG_A_RX_HP_WB_THRES_MASK_	(0x0000000F)
+#define RX_CFG_A_RX_HP_WB_THRES_SET_(val) \
+	(((u32)(val)) & RX_CFG_A_RX_HP_WB_THRES_MASK_)
+
+#define RX_CFG_B(channel)	(0xC44 + ((channel) << 6))
+#define RX_CFG_B_TS_ALL_RX_	BIT(29)
+#define RX_CFG_B_TS_DECR_EN_	BIT(28)
+#define RX_CFG_B_RX_PAD_MASK_	(0x03000000)
+#define RX_CFG_B_RX_PAD_0_	(0x00000000)
+#define RX_CFG_B_RX_PAD_2_	(0x02000000)
+#define RX_CFG_B_RX_COAL_DIS_	BIT(23)
+#define RX_CFG_B_RX_DESCR_RO_EN_	BIT(21)
+#define RX_CFG_B_RX_DATA_RO_EN_	BIT(20)
+#define RX_CFG_B_RDMABL_MASK_	(0x00070000)
+#define RX_CFG_B_RDMABL_32_	(0x00000000)
+#define RX_CFG_B_RDMABL_64_	(0x00010000)
+#define RX_CFG_B_RDMABL_128_	(0x00020000)
+#define RX_CFG_B_RDMABL_256_	(0x00030000)
+#define RX_CFG_B_RDMABL_512_	(0x00040000)
+#define RX_CFG_B_RDMABL_1024_	(0x00050000)
+#define RX_CFG_B_RDMABL_2048_	(0x00060000)
+#define RX_CFG_B_RDMABL_4096_	(0x00070000)
+#define RX_CFG_B_RX_RING_LEN_MASK_	(0x0000FFFF)
+
+#define RX_BASE_ADDRH(channel)	(0xC48 + ((channel) << 6))
+#define RX_BASE_ADDRH_MASK_	(0xFFFFFFFF)
+
+#define RX_BASE_ADDRL(channel)	(0xC4C + ((channel) << 6))
+#define RX_BASE_ADDRL_MASK_	(0xFFFFFFFC)
+
+#define RX_HEAD_WRITEBACK_ADDRH(channel)	(0xC50 + ((channel) << 6))
+
+#define RX_HEAD_WRITEBACK_ADDRL(channel)	(0xC54 + ((channel) << 6))
+
+#define RX_HEAD(channel)	(0xC58 + ((channel) << 6))
+#define RX_HEAD_MASK_	(0x0000FFFF)
+
+#define RX_TAIL(channel)	(0xC5C + ((channel) << 6))
+#define RX_TAIL_MASK_	(0x0000FFFF)
+#define RX_TAIL_SET_DMAC_INT_EN_	BIT(31)
+#define RX_TAIL_SET_TOP_INT_EN_	BIT(30)
+#define RX_TAIL_SET_TOP_INT_VEC_EN_	BIT(29)
+
+#define DMAC_RX_ERR_STS(channel)	(0xC60 + ((channel) << 6))
+#define DMAC_RX_ERR_STS_RESERVED_	(0xFFDFFF9F)
+#define DMAC_RX_ERR_STS_RX_DESC_TAIL_ERR_EN_	BIT(21)
+#define DMAC_RX_ERR_STS_RX_DESC_READ_ERR_	BIT(6)
+#define DMAC_RX_ERR_STS_RX_DESC_TAIL_ERR_	BIT(5)
+
+#define RX_CFG_C(channel)	(0xC64 + ((channel) << 6))
+#define RX_CFG_C_RX_TOP_INT_EN_AUTO_CLR_	BIT(6)
+#define RX_CFG_C_RX_DMA_INT_EN_AUTO_CLR_	BIT(5)
+#define RX_CFG_C_RX_INT_EN_R2C_	BIT(4)
+#define RX_CFG_C_RX_DMA_INT_STS_AUTO_CLR_	BIT(3)
+#define RX_CFG_C_RX_INT_STS_R2C_MODE_MASK_	(0x00000007)
+
+#define TX_CFG_A(channel)	(0xD40 + ((channel) << 6))
+#define TX_CFG_A_TX_HP_WB_SWFLUSH_	BIT(31)
+#define TX_CFG_A_TX_HP_WB_ON_INT_TMR_	BIT(30)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_MASK_	(0x30000000)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_DIS_	(0x00000000)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_IOC_	(0x10000000)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_LS_	(0x20000000)
+#define TX_CFG_A_TX_TMR_HPWB_SEL_IOC_LS_	(0x30000000)
+#define TX_CFG_A_TX_PF_THRES_MASK_	(0x001F0000)
+#define TX_CFG_A_TX_PF_THRES_SET_(value) \
+	((((u32)(value)) << 16) & TX_CFG_A_TX_PF_THRES_MASK_)
+#define TX_CFG_A_TX_PF_PRI_THRES_MASK_	(0x00001F00)
+#define TX_CFG_A_TX_PF_PRI_THRES_SET_(value) \
+	((((u32)(value)) << 8) & TX_CFG_A_TX_PF_PRI_THRES_MASK_)
+#define TX_CFG_A_TX_STOP_TXE_	BIT(7)
+#define TX_CFG_A_TX_HP_WB_EN_	BIT(5)
+#define TX_CFG_A_TX_HP_WB_ON_TXTMR_	BIT(4)
+#define TX_CFG_A_TX_HP_WB_THRES_MASK_	(0x0000000F)
+#define TX_CFG_A_TX_HP_WB_THRES_SET_(value) \
+	(((u32)(value)) & TX_CFG_A_TX_HP_WB_THRES_MASK_)
+
+#define TX_CFG_B(channel)	(0xD44 + ((channel) << 6))
+#define TX_CFG_B_TX_COAL_DIS_	BIT(23)
+#define TX_CFG_B_TX_DESC_RO_EN_	BIT(22)
+#define TX_CFG_B_TX_DATA_RO_EN_	BIT(21)
+#define TX_CFG_B_TX_HEAD_RO_EN_	BIT(20)
+#define TX_CFG_B_TDMABL_MASK_	(0x00070000)
+#define TX_CFG_B_TDMABL_32_	(0x00000000)
+#define TX_CFG_B_TDMABL_64_	(0x00010000)
+#define TX_CFG_B_TDMABL_128_	(0x00020000)
+#define TX_CFG_B_TDMABL_256_	(0x00030000)
+#define TX_CFG_B_TDMABL_512_	(0x00040000)
+#define TX_CFG_B_TX_RING_LEN_MASK_	(0x0000FFFF)
+
+#define TX_BASE_ADDRH(channel)	(0xD48 + ((channel) << 6))
+#define TX_BASE_ADDRH_MASK_	(0xFFFFFFFF)
+
+#define TX_BASE_ADDRL(channel)	(0xD4C + ((channel) << 6))
+#define TX_BASE_ADDRL_MASK_	(0xFFFFFFFC)
+
+#define TX_HEAD_WRITEBACK_ADDRH(channel)	(0xD50 + ((channel) << 6))
+#define TX_HEAD_WRITEBACK_ADDRH_MASK_	(0xFFFFFFFF)
+
+#define TX_HEAD_WRITEBACK_ADDRL(channel)	(0xD54 + ((channel) << 6))
+#define TX_HEAD_WRITEBACK_ADDRL_MASK_	(0xFFFFFFFC)
+
+#define TX_HEAD(channel)	(0xD58 + ((channel) << 6))
+#define TX_HEAD_MASK_	(0x0000FFFF)
+
+#define TX_TAIL(channel)	(0xD5C + ((channel) << 6))
+#define TX_TAIL_MASK_	(0x0000FFFF)
+#define TX_TAIL_SET_DMAC_INT_EN_	BIT(31)
+#define TX_TAIL_SET_TOP_INT_EN_	BIT(30)
+#define TX_TAIL_SET_TOP_INT_VEC_EN_	BIT(29)
+
+#define DMAC_TX_ERR_STS(channel)	(0xD60 + ((channel) << 6))
+#define DMAC_TX_ERR_STS_RESERVED_	(0xFFDEFF00)
+#define DMAC_TX_ERR_STS_TX_DESC_TAIL_ERR_EN_	BIT(21)
+#define DMAC_TX_ERR_STS_TX_DESC_SEQ_ERR_EN_	BIT(16)
+#define DMAC_TX_ERR_STS_TX_DATA_READ_ERR_	BIT(7)
+#define DMAC_TX_ERR_STS_TX_DESC_READ_ERR_	BIT(6)
+#define DMAC_TX_ERR_STS_TX_DESC_TAIL_ERR_	BIT(5)
+#define DMAC_TX_ERR_STS_TX_FCT_TXE_	BIT(4)
+#define DMAC_TX_ERR_STS_TX_DESC_DATATYPE_ERR_	BIT(3)
+#define DMAC_TX_ERR_STS_TX_DESC_EXTNTYPE_ERR_	BIT(2)
+#define DMAC_TX_ERR_STS_TX_DESC_EXTRAFS_ERR_	BIT(1)
+#define DMAC_TX_ERR_STS_TX_DESC_NOFS_ERR_	BIT(0)
+
+#define TX_CFG_C(channel)	(0xD64 + ((channel) << 6))
+#define TX_CFG_C_TX_TOP_INT_EN_AUTO_CLR_	BIT(6)
+#define TX_CFG_C_TX_DMA_INT_EN_AUTO_CLR_	BIT(5)
+#define TX_CFG_C_TX_INT_EN_R2C_	BIT(4)
+#define TX_CFG_C_TX_DMA_INT_STS_AUTO_CLR_	BIT(3)
+#define TX_CFG_C_TX_INT_STS_R2C_MODE_MASK_	(0x00000007)
+
+#define DMAC_DEBUG_0	(0xFF0)
+#define DMAC_DEBUG_1	(0xFF4)
+#define DMAC_DEBUG_2	(0xFF8)
+
+/* OTP */
+#define OTP_PWR_DN	(0x1000)
+#define OTP_PWR_DN_PWRDN_N_	BIT(0)
+
+#define OTP_ADDR1	(0x1004)
+#define OTP_ADDR1_15_11_MASK_	(0x1F)
+
+#define OTP_ADDR2	(0x1008)
+#define OTP_ADDR2_10_3_MASK_	(0xFF)
+
+#define OTP_ADDR3	(0x100C)
+#define OTP_ADDR3_2_0	(0x03)
+
+#define OTP_PRGM_DATA	(0x1010)
+
+#define OTP_PRGM_MODE	(0x1014)
+#define OTP_PRGM_MODE_BYTE_	BIT(0)
+
+#define OTP_RD_DATA	(0x1018)
+#define OTP_RD_DATA_MASK_	(0x000000FF)
+
+#define OTP_FUNC_CMD	(0x1020)
+#define OTP_FUNC_CMD_RESET_	BIT(2)
+#define OTP_FUNC_CMD_PROGRAM_	BIT(1)
+#define OTP_FUNC_CMD_READ_	BIT(0)
+
+#define OTP_TST_CMD	(0x1024)
+#define OTP_TST_CMD_TEST_DEC_SEL_	BIT(4)
+#define OTP_TST_CMD_PRGVRFY_	BIT(3)
+#define OTP_TST_CMD_WRTEST_	BIT(2)
+#define OTP_TST_CMD_TESTDEC_	BIT(1)
+#define OTP_TST_CMD_BLANKCHECK_	BIT(0)
+
+#define OTP_CMD_GO	(0x1028)
+#define OTP_CMD_GO_GO_	BIT(0)
+
+#define OTP_PASS_FAIL	(0x102C)
+#define OTP_PASS_FAIL_PASS_	BIT(1)
+#define OTP_PASS_FAIL_FAIL_	BIT(0)
+
+#define OTP_STATUS	(0x1030)
+#define OTP_STATUS_OTP_LOCK_	BIT(4)
+#define OTP_STATUS_WEB_	BIT(3)
+#define OTP_STATUS_PGMEN	BIT(2)
+#define OTP_STATUS_CPUMPEN_	BIT(1)
+#define OTP_STATUS_BUSY_	BIT(0)
+
+/* MAC statistics registers */
+#define STAT_RX_FCS_ERRORS	(0x1200)
+#define STAT_RX_ALIGNMENT_ERRORS	(0x1204)
+#define STAT_RX_FRAGMENT_ERRORS	(0x1208)
+#define STAT_RX_JABBER_ERRORS	(0x120C)
+#define STAT_RX_UNDERSIZE_FRAME_ERRORS	(0x1210)
+#define STAT_RX_OVERSIZE_FRAME_ERRORS	(0x1214)
+#define STAT_RX_DROPPED_FRAMES	(0x1218)
+#define STAT_RX_UNICAST_BYTE_COUNT	(0x121C)
+#define STAT_RX_BROADCAST_BYTE_COUNT	(0x1220)
+#define STAT_RX_MULTICAST_BYTE_COUNT	(0x1224)
+#define STAT_RX_UNICAST_FRAMES	(0x1228)
+#define STAT_RX_BROADCAST_FRAMES	(0x122C)
+#define STAT_RX_MULTICAST_FRAMES	(0x1230)
+#define STAT_RX_PAUSE_FRAMES	(0x1234)
+#define STAT_RX_64_BYTE_FRAMES	(0x1238)
+#define STAT_RX_65_127_BYTE_FRAMES	(0x123C)
+#define STAT_RX_128_255_BYTE_FRAMES	(0x1240)
+#define STAT_RX_256_511_BYTES_FRAMES	(0x1244)
+#define STAT_RX_512_1023_BYTE_FRAMES	(0x1248)
+#define STAT_RX_1024_1518_BYTE_FRAMES	(0x124C)
+#define STAT_RX_GREATER_1518_BYTE_FRAMES	(0x1250)
+#define STAT_RX_TOTAL_FRAMES	(0x1254)
+#define STAT_EEE_RX_LPI_TRANSITIONS	(0x1258)
+#define STAT_EEE_RX_LPI_TIME	(0x125C)
+#define STAT_RX_COUNTER_ROLLOVER_STATUS	(0x127C)
+
+#define STAT_TX_FCS_ERRORS	(0x1280)
+#define STAT_TX_EXCESS_DEFERRAL_ERRORS	(0x1284)
+#define STAT_TX_CARRIER_ERRORS	(0x1288)
+#define STAT_TX_BAD_BYTE_COUNT	(0x128C)
+#define STAT_TX_SINGLE_COLLISIONS	(0x1290)
+#define STAT_TX_MULTIPLE_COLLISIONS	(0x1294)
+#define STAT_TX_EXCESSIVE_COLLISION	(0x1298)
+#define STAT_TX_LATE_COLLISIONS	(0x129C)
+#define STAT_TX_UNICAST_BYTE_COUNT	(0x12A0)
+#define STAT_TX_BROADCAST_BYTE_COUNT	(0x12A4)
+#define STAT_TX_MULTICAST_BYTE_COUNT	(0x12A8)
+#define STAT_TX_UNICAST_FRAMES	(0x12AC)
+#define STAT_TX_BROADCAST_FRAMES	(0x12B0)
+#define STAT_TX_MULTICAST_FRAMES	(0x12B4)
+#define STAT_TX_PAUSE_FRAMES	(0x12B8)
+#define STAT_TX_64_BYTE_FRAMES	(0x12BC)
+#define STAT_TX_65_127_BYTE_FRAMES	(0x12C0)
+#define STAT_TX_128_255_BYTE_FRAMES	(0x12C4)
+#define STAT_TX_256_511_BYTES_FRAMES	(0x12C8)
+#define STAT_TX_512_1023_BYTE_FRAMES	(0x12CC)
+#define STAT_TX_1024_1518_BYTE_FRAMES	(0x12D0)
+#define STAT_TX_GREATER_1518_BYTE_FRAMES	(0x12D4)
+#define STAT_TX_TOTAL_FRAMES	(0x12D8)
+#define STAT_EEE_TX_LPI_TRANSITIONS	(0x12DC)
+#define STAT_EEE_TX_LPI_TIME	(0x12E0)
+#define STAT_TX_COUNTER_ROLLOVER_STATUS	(0x12FC)
+
+/* End of Register definitions */
+
+#define LAN743X_MAX_RX_CHANNELS	(4)
+#define LAN743X_MAX_TX_CHANNELS	(1)
+struct lan743x_adapter;
+
+/* The following settings are available for easy testing
+ * of different interrupt modes.
+ * By default the driver first tries to use MSI-X mode;
+ * if that fails it falls back to MSI mode, and if that
+ * also fails it falls back to legacy (INTx) mode.
+ * To test MSI mode, set
+ * LAN743X_TRY_MSIX (0)
+ * To test legacy mode, set
+ * LAN743X_TRY_MSIX (0)
+ * LAN743X_TRY_MSI (0)
+ * (an illustrative fallback sketch follows this header listing)
+ */
+#define LAN743X_TRY_MSIX	(1)
+#define LAN743X_TRY_MSI	(1)
+
+#define LAN743X_USED_RX_CHANNELS	(4)
+#define LAN743X_USED_TX_CHANNELS	(1)
+#define LAN743X_INT_MOD	(400)
+
+#if (LAN743X_USED_RX_CHANNELS > LAN743X_MAX_RX_CHANNELS)
+#error Invalid LAN743X_USED_RX_CHANNELS
+#endif
+#if (LAN743X_USED_TX_CHANNELS > LAN743X_MAX_TX_CHANNELS)
+#error Invalid LAN743X_USED_TX_CHANNELS
+#endif
+
+/* PCI */
+/* SMSC acquired EFAR in the late 1990s; MCHP acquired SMSC in 2012 */
+#define PCI_VENDOR_ID_SMSC	PCI_VENDOR_ID_EFAR
+#define PCI_DEVICE_ID_SMSC_LAN7430	(0x7430)
+
+#define PCI_CONFIG_LENGTH	(0x1000)
+
+struct lan743x_pci {
+	struct pci_dev *pdev;
+	int init_flags;
+};
+
+/* CSR */
+#define CSR_LENGTH	(0x2000)
+
+#define LAN743X_CSR_FLAG_IS_A0	BIT(0)
+#define LAN743X_CSR_FLAG_IS_B0	BIT(1)
+#define LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR	BIT(8)
+
+struct lan743x_csr {
+	u32 flags;
+	u8 __iomem *csr_address;
+	u32 id_rev;
+	u32 fpga_rev;
+};
+
+u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
+void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
+
+/* INTERRUPTS */
+typedef void (*lan743x_vector_handler)(void *context, u32 int_sts, u32 flags);
+
+#define LAN743X_VECTOR_FLAG_IRQ_SHARED	BIT(0)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_READ	BIT(1)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_R2C	BIT(2)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C	BIT(3)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CHECK	BIT(4)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR	BIT(5)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_R2C	BIT(6)
+#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_CLEAR	BIT(7)
+#define LAN743X_VECTOR_FLAG_MASTER_ENABLE_SET	BIT(8)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_CLEAR	BIT(9)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_ISR_SET	BIT(10)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR	BIT(11)
+#define LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET	BIT(12)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR	BIT(13)
+#define LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET	BIT(14)
+#define LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR	BIT(15)
+
+struct lan743x_vector {
+	int irq;
+	u32 flags;
+	struct lan743x_adapter *adapter;
+	int vector_index;
+	u32 int_mask;
+	lan743x_vector_handler handler;
+	void *context;
+};
+
+#define LAN743X_MAX_VECTOR_COUNT	(8)
+
+struct lan743x_intr {
+	int flags;
+
+	unsigned int irq;
+
+	struct lan743x_vector vector_list[LAN743X_MAX_VECTOR_COUNT];
+	int number_of_vectors;
+	bool using_vectors;
+
+	int software_isr_flag;
+};
+
+/* DP */
+struct lan743x_dp {
+	int flags;
+
+	/* lock, used to prevent concurrent access to data port */
+	struct mutex lock;
+};
+
+/* MAC */
+struct lan743x_mac {
+	int flags;
+
+	u8 mac_address[ETH_ALEN];
+};
+
+#define LAN743X_MAX_FRAME_SIZE	(9 * 1024)
+
+/* PHY */
+struct lan743x_phy {
+	int flags;
+
+	bool fc_autoneg;
+	u8 fc_request_control;
+};
+
+/* RFE */
+struct lan743x_rfe {
+	int flags;
+	u8 rss_hash_key[40];
+};
+
+/* DMAC */
+struct lan743x_dmac {
+	int flags;
+
+	int descriptor_spacing;
+};
+
+/* TX */
+struct lan743x_tx_descriptor;
+struct lan743x_tx_buffer_info;
+
+#define TX_FLAG_FIFO_ENABLED	BIT(0)
+#define TX_FLAG_ISR_ENABLED	BIT(1)
+#define TX_FLAG_DMAC_STARTED	BIT(2)
+#define TX_FLAG_RING_ALLOCATED	BIT(3)
+
+#define GPIO_QUEUE_STARTED	(0)
+#define GPIO_TX_FUNCTION	(1)
+#define GPIO_TX_COMPLETION	(2)
+#define GPIO_TX_FRAGMENT	(3)
+
+#define TX_FRAME_FLAG_IN_PROGRESS	BIT(0)
+
+struct lan743x_tx {
+	struct lan743x_adapter *adapter;
+	int flags;
+	u32 vector_flags;
+	int channel_number;
+
+	int ring_size;
+	size_t ring_allocation_size;
+	struct lan743x_tx_descriptor *ring_cpu_ptr;
+	dma_addr_t ring_dma_ptr;
+	/* ring_lock: used to prevent concurrent access to tx ring */
+	spinlock_t ring_lock;
+	u32 frame_flags;
+	u32 frame_first;
+	u32 frame_data0;
+	u32 frame_tail;
+
+	struct lan743x_tx_buffer_info *buffer_info;
+
+	u32 *head_cpu_ptr;
+	dma_addr_t head_dma_ptr;
+	int last_head;
+	int last_tail;
+
+	struct tasklet_struct tx_isr_bottom_half;
+
+	struct sk_buff *overflow_skb;
+};
+
+/* RX */
+struct lan743x_rx_descriptor;
+struct lan743x_rx_buffer_info;
+
+#define RX_FLAG_NAPI_ADDED	BIT(0)
+#define RX_FLAG_DMAC_STARTED	BIT(1)
+#define RX_FLAG_ISR_ENABLED	BIT(2)
+#define RX_FLAG_FIFO_ENABLED	BIT(3)
+#define RX_FLAG_RING_ALLOCATED	BIT(4)
+
+struct lan743x_rx {
+	struct lan743x_adapter *adapter;
+	int flags;
+	u32 vector_flags;
+	int channel_number;
+
+	int ring_size;
+	size_t ring_allocation_size;
+	struct lan743x_rx_descriptor *ring_cpu_ptr;
+	dma_addr_t ring_dma_ptr;
+
+	struct lan743x_rx_buffer_info *buffer_info;
+
+	u32 *head_cpu_ptr;
+	dma_addr_t head_dma_ptr;
+	u32 last_head;
+	u32 last_tail;
+
+	struct napi_struct napi;
+
+	u32 frame_count;
+};
+
+struct lan743x_adapter {
+	struct net_device *netdev;
+	struct mii_bus *mdiobus;
+	int init_flags;
+	int open_flags;
+	int msg_enable;
+	struct lan743x_pci pci;
+	struct lan743x_csr csr;
+	struct lan743x_intr intr;
+	struct lan743x_dp dp;
+	struct lan743x_mac mac;
+	struct lan743x_phy phy;
+	struct lan743x_rfe rfe;
+	struct lan743x_dmac dmac;
+	struct lan743x_tx tx[LAN743X_MAX_TX_CHANNELS];
+	struct lan743x_rx rx[LAN743X_MAX_RX_CHANNELS];
+};
+
+#endif /* _LAN743X_H */
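For reference, the LAN743X_TRY_MSIX/LAN743X_TRY_MSI knobs in the header above describe an MSI-X, then MSI, then legacy INTx fallback order. The sketch below is only an illustration of such a fallback using the generic PCI helpers (pci_alloc_irq_vectors()); it is not the driver's actual interrupt setup path, and the function name and vector-count parameter are assumptions made up for the example.

#include <linux/pci.h>

/* Illustrative sketch only: request up to max_vectors interrupt vectors,
 * preferring MSI-X, then MSI, then a single legacy INTx line, mirroring
 * the fallback order described by LAN743X_TRY_MSIX/LAN743X_TRY_MSI.
 */
static int example_alloc_irq_vectors(struct pci_dev *pdev, int max_vectors)
{
	int ret = -ENOSPC;

	if (LAN743X_TRY_MSIX)
		ret = pci_alloc_irq_vectors(pdev, 1, max_vectors,
					    PCI_IRQ_MSIX);
	if (ret < 0 && LAN743X_TRY_MSI)
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);

	return ret; /* number of vectors granted, or a negative errno */
}

The IRQ number for each granted vector could then be looked up with pci_irq_vector() before calling request_irq().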