diff --git a/drivers/net/ethernet/motorcomm/yt6801/Makefile b/drivers/net/ethernet/motorcomm/yt6801/Makefile index 93b5c4510eb05..904db987b7cee 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/Makefile +++ b/drivers/net/ethernet/motorcomm/yt6801/Makefile @@ -12,4 +12,4 @@ yt6801-objs := fuxi-gmac-common.o \ fuxi-gmac-pci.o \ fuxi-gmac-phy.o \ fuxi-efuse.o \ - fuxi-gmac-debugfs.o + fuxi-gmac-ioctl.o diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c index ae4ca3d59ac4c..1d2c4b9644fbf 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-efuse.c @@ -13,7 +13,7 @@ bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, u32 regval = 0; bool succeed = false; - if (index >= FUXI_EFUSE_MAX_ENTRY) { + if (index >= FXGMAC_EFUSE_MAX_ENTRY) { FXGMAC_PR("Reading efuse out of range, index %d\n", index); return false; } @@ -105,8 +105,91 @@ bool fxgmac_read_patch_from_efuse_per_index(struct fxgmac_pdata *pdata, return succeed; } +bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, + u32 *subsys, u32 *revid) +{ + u32 offset = 0, value = 0; + u32 machr = 0, maclr = 0; + bool succeed = true; + u8 index = 0; + + for (index = 0; index < FXGMAC_EFUSE_MAX_ENTRY; index++) { + if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, + &offset, &value)) { + succeed = false; + break; /* reach the last item. */ + } + if (offset == 0x00) + break; /* reach the blank. 
*/ + + if (offset == MACA0LR_FROM_EFUSE) + maclr = value; + + if (offset == MACA0HR_FROM_EFUSE) + machr = value; + + if (offset == 0x08 && revid) + *revid = value; + + if (offset == 0x2C && subsys) + *subsys = value; + } + if (mac_addr) { + mac_addr[5] = (u8)(maclr & 0xFF); + mac_addr[4] = (u8)((maclr >> 8) & 0xFF); + mac_addr[3] = (u8)((maclr >> 16) & 0xFF); + mac_addr[2] = (u8)((maclr >> 24) & 0xFF); + mac_addr[1] = (u8)(machr & 0xFF); + mac_addr[0] = (u8)((machr >> 8) & 0xFF); + } + + return succeed; +} + +bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value) +{ + bool succeed = false; + unsigned int wait; + u32 reg_val = 0; + + if (value) + *value = 0; + + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, + EFUSE_OP_ADDR_LEN, offset); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, + EFUSE_OP_START_LEN, 1); + reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, + EFUSE_OP_MODE_LEN, + EFUSE_OP_MODE_ROW_READ); + writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); + wait = 1000; + while (wait--) { + usleep_range_ex(pdata->pAdapter, 20, 50); + reg_val = readreg(pdata->pAdapter, + pdata->base_mem + EFUSE_OP_CTRL_1); + if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, + EFUSE_OP_DONE_LEN)) { + succeed = true; + break; + } + } + + if (succeed) { + if (value) { + *value = FXGMAC_GET_REG_BITS(reg_val, + EFUSE_OP_RD_DATA_POS, + EFUSE_OP_RD_DATA_LEN); + } + } else { + FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); + } + + return succeed; +} + bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, - u32 *value) /* read patch per index. 
*/ + u32 *value) { u32 reg_offset, reg_val; u32 cur_val = 0; @@ -120,7 +203,7 @@ bool fxgmac_read_patch_from_efuse(struct fxgmac_pdata *pdata, u32 offset, return false; } - for (index = 0; index < FUXI_EFUSE_MAX_ENTRY; index++) { + for (index = 0; index < FXGMAC_EFUSE_MAX_ENTRY; index++) { if (!fxgmac_read_patch_from_efuse_per_index( pdata, index, ®_offset, ®_val)) { succeed = false; @@ -146,7 +229,7 @@ bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, u32 reg_val; bool succeed = false; u32 cur_reg, cur_val; - u8 max_index = FUXI_EFUSE_MAX_ENTRY; + u8 max_index = FXGMAC_EFUSE_MAX_ENTRY; if (offset >> 16) { FXGMAC_PR( @@ -157,7 +240,7 @@ bool fxgmac_write_patch_to_efuse_per_index(struct fxgmac_pdata *pdata, u8 index, fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, ®_val); if (EFUSE_LED_COMMON_SOLUTION == reg_val) { - max_index = FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; + max_index = FXGMAC_EFUSE_MAX_ENTRY_UNDER_LED_COMMON; } if (index >= max_index) { @@ -270,7 +353,7 @@ bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, return false; } - for (index = 0;; index++) { + for (index = 0; index < FXGMAC_EFUSE_MAX_ENTRY; index++) { if (!fxgmac_read_patch_from_efuse_per_index( pdata, index, ®_offset, ®_val)) { return false; @@ -370,55 +453,17 @@ bool fxgmac_write_patch_to_efuse(struct fxgmac_pdata *pdata, u32 offset, return succeed; } -bool fxgmac_read_mac_subsys_from_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, - u32 *subsys, u32 *revid) -{ - u32 offset = 0, value = 0; - u32 machr = 0, maclr = 0; - bool succeed = true; - u8 index = 0; - - for (index = 0;; index++) { - if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, - &offset, &value)) { - succeed = false; - break; /* reach the last item. */ - } - if (0x00 == offset) { - break; /* reach the blank. 
*/ - } - if (MACA0LR_FROM_EFUSE == offset) { - maclr = value; - } - if (MACA0HR_FROM_EFUSE == offset) { - machr = value; - } - - if ((0x08 == offset) && revid) { - *revid = value; - } - if ((0x2C == offset) && subsys) { - *subsys = value; - } - } - if (mac_addr) { - mac_addr[5] = (u8)(maclr & 0xFF); - mac_addr[4] = (u8)((maclr >> 8) & 0xFF); - mac_addr[3] = (u8)((maclr >> 16) & 0xFF); - mac_addr[2] = (u8)((maclr >> 24) & 0xFF); - mac_addr[1] = (u8)(machr & 0xFF); - mac_addr[0] = (u8)((machr >> 8) & 0xFF); - } - - return succeed; -} - bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid) { - u32 machr = 0, maclr = 0, pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; - bool succeed = true; +#ifdef DBG + u32 machr = 0, maclr = 0; +#endif + u32 cur_subsysid = 0; + u32 pcie_cfg_ctrl = PCIE_CFG_CTRL_DEFAULT_VAL; + if (mac_addr) { +#ifdef DBG machr = readreg(pdata->pAdapter, pdata->base_mem + MACA0HR_FROM_EFUSE); maclr = readreg(pdata->pAdapter, @@ -427,56 +472,64 @@ bool fxgmac_write_mac_subsys_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr, (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, maclr & 0xFF); - +#endif if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, (((u32)mac_addr[0]) << 8) | mac_addr[1])) { - succeed = false; + return false; } if (!fxgmac_write_patch_to_efuse( pdata, MACA0LR_FROM_EFUSE, (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[4]) << 8) | mac_addr[5])) { - succeed = false; + return false; } } if (revid) { if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, *revid)) { - succeed = false; + return false; } } if (subsys) { - pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( - pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, - MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 1); - if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, - pcie_cfg_ctrl)) { - succeed = false; - } - if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, - 
*subsys)) { - succeed = false; - } - pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( - pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, - MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 0); - if (!fxgmac_write_patch_to_efuse(pdata, MGMT_PCIE_CFG_CTRL, - pcie_cfg_ctrl)) { - succeed = false; + if (!fxgmac_read_mac_subsys_from_efuse(pdata, NULL, + &cur_subsysid, NULL)) + return false; + + if (cur_subsysid != *subsys) { + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 1); + if (!fxgmac_write_patch_to_efuse( + pdata, MGMT_PCIE_CFG_CTRL, pcie_cfg_ctrl)) { + return false; + } + if (!fxgmac_write_patch_to_efuse( + pdata, EFUSE_SUBSYS_REGISTER, *subsys)) { + return false; + } + pcie_cfg_ctrl = FXGMAC_SET_REG_BITS( + pcie_cfg_ctrl, MGMT_PCIE_CFG_CTRL_CS_EN_POS, + MGMT_PCIE_CFG_CTRL_CS_EN_LEN, 0); + if (!fxgmac_write_patch_to_efuse( + pdata, MGMT_PCIE_CFG_CTRL, pcie_cfg_ctrl)) { + return false; + } } } - return succeed; + return true; } bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) { +#ifdef DBG u32 machr = 0, maclr = 0; - bool succeed = true; +#endif if (mac_addr) { +#ifdef DBG machr = readreg(pdata->pAdapter, pdata->base_mem + MACA0HR_FROM_EFUSE); maclr = readreg(pdata->pAdapter, @@ -485,22 +538,22 @@ bool fxgmac_write_mac_addr_to_efuse(struct fxgmac_pdata *pdata, u8 *mac_addr) (machr >> 8) & 0xFF, machr & 0xFF, (maclr >> 24) & 0xFF, (maclr >> 16) & 0xFF, (maclr >> 8) & 0xFF, maclr & 0xFF); - +#endif if (!fxgmac_write_patch_to_efuse(pdata, MACA0HR_FROM_EFUSE, (((u32)mac_addr[0]) << 8) | mac_addr[1])) { - succeed = false; + return false; } if (!fxgmac_write_patch_to_efuse( pdata, MACA0LR_FROM_EFUSE, (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) | (((u32)mac_addr[4]) << 8) | mac_addr[5])) { - succeed = false; + return false; } } - return succeed; + return true; } bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, @@ -510,7 +563,7 @@ bool fxgmac_read_subsys_from_efuse(struct 
fxgmac_pdata *pdata, u32 *subsys, u8 index; bool succeed = true; - for (index = 0;; index++) { + for (index = 0; index < FXGMAC_EFUSE_MAX_ENTRY; index++) { if (!fxgmac_read_patch_from_efuse_per_index(pdata, index, &offset, &value)) { succeed = false; @@ -538,22 +591,20 @@ bool fxgmac_read_subsys_from_efuse(struct fxgmac_pdata *pdata, u32 *subsys, bool fxgmac_write_subsys_to_efuse(struct fxgmac_pdata *pdata, u32 *subsys, u32 *revid) { - bool succeed = true; - /* write subsys info */ if (revid) { if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_REVID_REGISTER, *revid)) { - succeed = false; + return false; } } if (subsys) { if (!fxgmac_write_patch_to_efuse(pdata, EFUSE_SUBSYS_REGISTER, *subsys)) { - succeed = false; + return false; } } - return succeed; + return true; } bool fxgmac_efuse_load(struct fxgmac_pdata *pdata) @@ -585,49 +636,6 @@ bool fxgmac_efuse_load(struct fxgmac_pdata *pdata) return succeed; } -bool fxgmac_efuse_read_data(struct fxgmac_pdata *pdata, u32 offset, u32 *value) -{ - bool succeed = false; - unsigned int wait; - u32 reg_val = 0; - - if (value) { - *value = 0; - } - - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_ADDR_POS, - EFUSE_OP_ADDR_LEN, offset); - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_START_POS, - EFUSE_OP_START_LEN, 1); - reg_val = FXGMAC_SET_REG_BITS(reg_val, EFUSE_OP_MODE_POS, - EFUSE_OP_MODE_LEN, - EFUSE_OP_MODE_ROW_READ); - writereg(pdata->pAdapter, reg_val, pdata->base_mem + EFUSE_OP_CTRL_0); - wait = 1000; - while (wait--) { - usleep_range_ex(pdata->pAdapter, 20, 50); - reg_val = readreg(pdata->pAdapter, - pdata->base_mem + EFUSE_OP_CTRL_1); - if (FXGMAC_GET_REG_BITS(reg_val, EFUSE_OP_DONE_POS, - EFUSE_OP_DONE_LEN)) { - succeed = true; - break; - } - } - - if (succeed) { - if (value) { - *value = FXGMAC_GET_REG_BITS(reg_val, - EFUSE_OP_RD_DATA_POS, - EFUSE_OP_RD_DATA_LEN); - } - } else { - FXGMAC_PR("Fail to reading efuse Byte%d\n", offset); - } - - return succeed; -} - bool fxgmac_efuse_write_oob(struct fxgmac_pdata 
*pdata) { bool succeed = false; @@ -1341,4 +1349,4 @@ bool fxgmac_read_led_setting_from_efuse(struct fxgmac_pdata *pdata) } return bsucceed; -} \ No newline at end of file +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c index 63cbf948cbfa2..2c1ab733dfd04 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-common.c @@ -1,44 +1,46 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#include -#include - -#include "fuxi-os.h" #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -MODULE_LICENSE("Dual BSD/GPL"); +MODULE_LICENSE("GPL"); static int debug = 16; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "FUXI ethernet debug level (0=none,...,16=all)"); -static unsigned char dev_addr[6] = { 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7 }; - -static void fxgmac_read_mac_addr(struct fxgmac_pdata *pdata) +static int fxgmac_read_mac_addr(struct fxgmac_pdata *pdata) { struct net_device *netdev = pdata->netdev; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + unsigned char dev_addr[6] = { 0, 0x55, 0x7b, 0xb5, 0x7d, 0xf7 }; + int ret; + /* DPRINTK("read mac from eFuse\n"); */ + + /* if efuse have mac addr,use it.if not,use static mac address. */ + ret = hw_ops->read_mac_subsys_from_efuse(pdata, pdata->mac_addr, NULL, + NULL); + if (!ret) { + DPRINTK("eFuse mac addr err\n"); + return -1; + } - DPRINTK("read mac from eFuse\n"); - - /* if efuse have mac addr, use it.if not, use static mac address. 
*/ - hw_ops->read_mac_subsys_from_efuse(pdata, pdata->mac_addr, NULL, NULL); if (ETH_IS_ZEROADDRESS(pdata->mac_addr)) { /* Currently it uses a static mac address for test */ memcpy(pdata->mac_addr, dev_addr, netdev->addr_len); } + return 0; } static void fxgmac_default_config(struct fxgmac_pdata *pdata) { pdata->tx_osp_mode = DMA_OSP_ENABLE; pdata->tx_sf_mode = MTL_TSF_ENABLE; - pdata->rx_sf_mode = MTL_RSF_DISABLE; /* MTL_RSF_DISABLE 20210514 */ + pdata->rx_sf_mode = MTL_RSF_ENABLE; /* MTL_RSF_DISABLE 20210514 */ pdata->pblx8 = DMA_PBL_X8_ENABLE; /* DMA_PBL_X8_ENABLE 20210514 */ - pdata->tx_pbl = DMA_PBL_32; - pdata->rx_pbl = DMA_PBL_32; /* DMA_PBL_32 20210514 */ + pdata->tx_pbl = DMA_PBL_16; + pdata->rx_pbl = DMA_PBL_4; /* DMA_PBL_32 20210514 */ pdata->tx_threshold = MTL_TX_THRESHOLD_128; pdata->rx_threshold = MTL_RX_THRESHOLD_128; pdata->tx_pause = 1; @@ -53,21 +55,35 @@ static void fxgmac_default_config(struct fxgmac_pdata *pdata) pdata->intr_mod = 1; pdata->crc_check = 1; - /* set based on phy status. 
pdata->phy_speed = SPEED_1000; */ pdata->sysclk_rate = FXGMAC_SYSCLOCK; pdata->phy_autoeng = AUTONEG_ENABLE; /* default to autoneg */ pdata->phy_duplex = DUPLEX_FULL; pdata->expansion.phy_link = false; pdata->phy_speed = SPEED_1000; - - /* default to magic */ + pdata->support_10m_link = true; + pdata->expansion.pre_phy_speed = pdata->phy_speed; + pdata->expansion.pre_phy_duplex = pdata->phy_duplex; + pdata->expansion.pre_phy_autoneg = pdata->phy_autoeng; + pdata->expansion.recover_phy_state = 0; + // default to magic pdata->expansion.wol = WAKE_MAGIC; +#ifdef FXGMAC_ASPM_ENABLED + pdata->expansion.recover_from_aspm = false; + pdata->expansion.aspm_en = false; + pdata->expansion.aspm_work_active = false; +#endif + +#ifdef FXGMAC_SMART_SPEED_DISABLE + pdata->phy_disablesmartspeed = 1; +#else + pdata->phy_disablesmartspeed = 0; +#endif + strscpy(pdata->drv_name, FXGMAC_DRV_NAME, sizeof(pdata->drv_name)); strscpy(pdata->drv_ver, FXGMAC_DRV_VERSION, sizeof(pdata->drv_ver)); - - printk("FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", FXGMAC_DRV_NAME, - FXGMAC_DRV_VERSION); + dev_info(pdata->dev, "FXGMAC_DRV_NAME:%s, FXGMAC_DRV_VERSION:%s\n", + FXGMAC_DRV_NAME, FXGMAC_DRV_VERSION); } static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) @@ -75,14 +91,14 @@ static void fxgmac_init_all_ops(struct fxgmac_pdata *pdata) fxgmac_init_desc_ops(&pdata->desc_ops); fxgmac_init_hw_ops(&pdata->hw_ops); - DPRINTK("register desc_ops and hw ops\n"); + /* DPRINTK("register desc_ops and hw ops\n"); */ } int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct net_device *netdev = pdata->netdev; - unsigned int i, dma_width; + unsigned int i; int ret; /* Set all the function pointers */ @@ -94,7 +110,10 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) /* Set irq, base_addr, MAC address, */ netdev->irq = pdata->dev_irq; netdev->base_addr = (unsigned long)pdata->base_mem; - fxgmac_read_mac_addr(pdata); + 
ret = fxgmac_read_mac_addr(pdata); + if (ret < 0) + return ret; + eth_hw_addr_set(netdev, pdata->mac_addr); if (save_private_reg) { @@ -108,18 +127,17 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) fxgmac_get_all_hw_features(pdata); fxgmac_print_all_hw_features(pdata); - /* TODO: Set the PHY mode to XLGMII */ - /* Set the DMA mask */ -#ifdef CONFIG_ARM64 - dma_width = FUXI_DMA_BIT_MASK; -#else - dma_width = pdata->hw_feat.dma_width; -#endif - ret = dma_set_mask_and_coherent(pdata->dev, DMA_BIT_MASK(dma_width)); + ret = dma_set_mask_and_coherent(pdata->dev, + DMA_BIT_MASK(FXGMAC_DMA_BIT_MASK64)); if (ret) { - dev_err(pdata->dev, "dma_set_mask_and_coherent failed\n"); - return ret; + ret = dma_set_mask_and_coherent( + pdata->dev, DMA_BIT_MASK(FXGMAC_DMA_BIT_MASK32)); + if (ret) { + dev_err(pdata->dev, + "dma_set_mask_and_coherent failed\n"); + return ret; + } } /* Channel and ring params initializtion @@ -152,13 +170,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) pdata->hw_feat.tx_q_cnt); pdata->tx_q_count = pdata->tx_ring_count; -#if !(FXGMAC_NUM_OF_TX_Q_USED) ret = netif_set_real_num_tx_queues(netdev, pdata->tx_q_count); -#else - ret = netif_set_real_num_tx_queues( - netdev, FXGMAC_NUM_OF_TX_Q_USED /*pdata->tx_q_count*/); -#endif - DPRINTK("num_online_cpus:%u, tx_ch_cnt:%u, tx_q_cnt:%u, tx_ring_count:%u\n", num_online_cpus(), pdata->hw_feat.tx_ch_cnt, pdata->hw_feat.tx_q_cnt, pdata->tx_ring_count); @@ -192,7 +204,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) netif_get_num_default_rss_queues(), pdata->hw_feat.rx_ch_cnt, pdata->hw_feat.rx_q_cnt, pdata->rx_ring_count); DPRINTK("channel_count:%u, netdev tx channel_num=%u\n", - pdata->channel_count, netdev->num_tx_queues); + pdata->channel_count, netdev->real_num_tx_queues); /* Initialize RSS hash key and lookup table */ #if FXGMAC_RSS_HASH_KEY_LINUX @@ -202,7 +214,7 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) 
hw_ops->get_rss_hash_key(pdata, (u8 *)pdata->rss_key); #endif -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED for (i = 0; i < FXGMAC_RSS_MAX_TABLE_SIZE; i++) { pdata->rss_table[i] = FXGMAC_SET_REG_BITS( pdata->rss_table[i], MAC_RSSDR_DMCH_POS, @@ -294,7 +306,8 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) /* Use default watchdog timeout */ netdev->watchdog_timeo = msecs_to_jiffies(5000); /* refer to sunxi-gmac, 5s */ - netdev->gso_max_size = NIC_MAX_TCP_OFFLOAD_SIZE; + + netif_set_tso_max_size(netdev, NIC_MAX_TCP_OFFLOAD_SIZE); /* Tx coalesce parameters initialization */ pdata->tx_usecs = FXGMAC_INIT_DMA_TX_USECS; @@ -306,6 +319,8 @@ int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg) pdata->rx_usecs = FXGMAC_INIT_DMA_RX_USECS; pdata->rx_frames = FXGMAC_INIT_DMA_RX_FRAMES; + mutex_init(&pdata->expansion.mutex); + DPRINTK("fxgmac_init callout, ok.\n"); return 0; @@ -320,14 +335,14 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) * otherwise, just roll back to legacy */ vectors = num_online_cpus(); - DPRINTK("num of cpu=%d\n", vectors); if (vectors >= FXGMAC_MAX_DMA_CHANNELS) { - /* 0-3 for rx, 4 for tx, 5 for phy */ + /* 0-3 for rx, 4 for tx, 5 for misc */ req_vectors = FXGMAC_MSIX_INT_NUMS; pdata->expansion.msix_entries = kcalloc( req_vectors, sizeof(struct msix_entry), GFP_KERNEL); if (!pdata->expansion.msix_entries) { - DPRINTK("MSIx, kcalloc err for msix entries, rollback to MSI..\n"); + dev_err(pdata->dev, + "MSIx, kcalloc err for msix entries, rollback to MSI\n"); goto enable_msi_interrupt; } else { for (i = 0; i < req_vectors; i++) @@ -337,15 +352,17 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) pdata->pdev, pdata->expansion.msix_entries, req_vectors, req_vectors); if (rc < 0) { - DPRINTK("enable MSIx failed,%d.\n", rc); - req_vectors = 0; /* indicate failure */ + dev_err(pdata->dev, "enable MSIx failed,%d.\n", + rc); + req_vectors = 0; /* indicate failure 
*/ } else { req_vectors = rc; } if (req_vectors >= FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX) { - DPRINTK("enable MSIx ok, cpu=%d, vectors=%d.\n", - vectors, req_vectors); + dev_info(pdata->dev, + "enable MSIx ok, cpu=%d, vectors=%d.\n", + vectors, req_vectors); pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, @@ -353,13 +370,16 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSIX_ENABLED); pdata->per_channel_irq = 1; - pdata->expansion.phy_irq = +#ifdef FXGMAC_MISC_ENABLED + pdata->expansion.misc_irq = pdata->expansion .msix_entries[MSI_ID_PHY_OTHER] .vector; +#endif return; } else if (req_vectors) { - DPRINTK("enable MSIx with only %d vector, while we need %d, rollback to MSI.\n", + dev_err(pdata->dev, + "enable MSIx with only %d vector, while we need %d, rollback to MSI.\n", req_vectors, vectors); /* roll back to msi */ pci_disable_msix(pdata->pdev); @@ -367,7 +387,8 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) pdata->expansion.msix_entries = NULL; req_vectors = 0; } else { - DPRINTK("enable MSIx failure and clear msix entries.\n"); + dev_err(pdata->dev, + "enable MSIx failure and clear msix entries.\n"); /* roll back to msi */ kfree(pdata->expansion.msix_entries); pdata->expansion.msix_entries = NULL; @@ -382,16 +403,18 @@ static void fxgmac_init_interrupt_scheme(struct fxgmac_pdata *pdata) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); - DPRINTK("enable MSI failure, rollback to LEGACY.\n"); + dev_err(pdata->dev, + "enable MSI failure, rollback to LEGACY.\n"); } else { pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_MSI_ENABLED); pdata->dev_irq = pdata->pdev->irq; - DPRINTK("enable MSI ok, irq=%d.\n", pdata->pdev->irq); + dev_info(pdata->dev, 
"enable MSI ok, cpu=%d, irq=%d.\n", + vectors, pdata->pdev->irq); } #else - pdata = pdata; + (void)pdata; #endif } @@ -417,23 +440,20 @@ int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) pdata->netdev = netdev; pdata->dev_irq = res->irq; - + pdata->msg_enable = NETIF_MSG_DRV; + pdata->expansion.dev_state = FXGMAC_DEV_PROBE; /* default to legacy interrupt */ pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_INTERRUPT_POS, FXGMAC_FLAG_INTERRUPT_LEN, FXGMAC_FLAG_LEGACY_ENABLED); - pdata->expansion.phy_irq = pdata->dev_irq; - +#ifdef FXGMAC_MISC_ENABLED + pdata->expansion.misc_irq = pdata->dev_irq; +#endif fxgmac_init_interrupt_scheme(pdata); - pdata->expansion.current_state = CURRENT_STATE_INIT; - - pdata->msg_enable = NETIF_MSG_DRV; - DPRINTK("netif msg_enable init to %08x\n", pdata->msg_enable); - pdata->mac_regs = res->addr; pdata->base_mem = res->addr; - pdata->mac_regs = pdata->mac_regs + FUXI_MAC_REGS_OFFSET; + pdata->mac_regs = pdata->mac_regs + FXGMAC_MAC_REGS_OFFSET; ret = fxgmac_init(pdata, true); if (ret) { @@ -451,12 +471,7 @@ int fxgmac_drv_probe(struct device *dev, struct fxgmac_resources *res) } if (netif_msg_drv(pdata)) DPRINTK("fxgamc_drv_prob callout, netdev num_tx_q=%u\n", - netdev->num_tx_queues); - -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_init(pdata); - fxgmac_dbg_adapter_init(pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ + netdev->real_num_tx_queues); return 0; @@ -473,9 +488,6 @@ int fxgmac_drv_remove(struct device *dev) struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_adapter_exit(pdata); -#endif /*HAVE_FXGMAC_DEBUG_FS */ hw_ops->led_under_shutdown(pdata); unregister_netdev(netdev); @@ -557,9 +569,20 @@ void fxgmac_dbg_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) { +#ifdef FXGMAC_DEBUG + struct 
ethhdr *eth = (struct ethhdr *)skb->data; +#endif unsigned char buffer[128]; unsigned int i; + DPRINTK("\n************** SKB dump ****************\n"); + DPRINTK("%s packet of %d bytes\n", (tx_rx ? "TX" : "RX"), skb->len); + +#ifdef FXGMAC_DEBUG + DPRINTK("Dst MAC addr: %pM\n", eth->h_dest); + DPRINTK("Src MAC addr: %pM\n", eth->h_source); + DPRINTK("Protocol: %#06hx\n", ntohs(eth->h_proto)); +#endif for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); @@ -567,6 +590,8 @@ void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, sizeof(buffer), false); DPRINTK(" %#06x: %s\n", i, buffer); } + + DPRINTK("\n************** SKB dump ****************\n"); } void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) @@ -583,8 +608,7 @@ void fxgmac_get_all_hw_features(struct fxgmac_pdata *pdata) hw_feat->version = readl(pdata->mac_regs + MAC_VR); if (netif_msg_drv(pdata)) - DPRINTK("get offset 0x110, ver=%#x\n", - readl(pdata->mac_regs + 0x110)); + DPRINTK("Mac ver=%#x\n", hw_feat->version); /* Hardware feature register 0 */ hw_feat->phyifsel = FXGMAC_GET_REG_BITS( diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c index 969d84eb44e2a..efd7e542c24a8 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-desc.c @@ -18,12 +18,33 @@ static void fxgmac_unmap_desc_data(struct fxgmac_pdata *pdata, desc_data->skb_dma = 0; desc_data->skb_dma_len = 0; } - +#ifdef FXGMAC_NOT_USE_PAGE_MAPPING if (desc_data->rx.buf.dma_base) { dma_unmap_single(pdata->dev, desc_data->rx.buf.dma_base, pdata->rx_buf_size, DMA_FROM_DEVICE); desc_data->rx.buf.dma_base = 0; } +#else + if (desc_data->rx.hdr.pa.pages) + put_page(desc_data->rx.hdr.pa.pages); + + if (desc_data->rx.hdr.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.hdr.pa_unmap.pages_dma, + desc_data->rx.hdr.pa_unmap.pages_len, + DMA_FROM_DEVICE); + 
put_page(desc_data->rx.hdr.pa_unmap.pages); + } + + if (desc_data->rx.buf.pa.pages) + put_page(desc_data->rx.buf.pa.pages); + + if (desc_data->rx.buf.pa_unmap.pages) { + dma_unmap_page(pdata->dev, desc_data->rx.buf.pa_unmap.pages_dma, + desc_data->rx.buf.pa_unmap.pages_len, + DMA_FROM_DEVICE); + put_page(desc_data->rx.buf.pa_unmap.pages); + } +#endif if (desc_data->skb) { dev_kfree_skb_any(desc_data->skb); @@ -34,13 +55,6 @@ static void fxgmac_unmap_desc_data(struct fxgmac_pdata *pdata, memset(&desc_data->rx, 0, sizeof(desc_data->rx)); desc_data->mapped_as_page = 0; - - if (desc_data->state_saved) { - desc_data->state_saved = 0; - desc_data->state.skb = NULL; - desc_data->state.len = 0; - desc_data->state.error = 0; - } } static void fxgmac_free_ring(struct fxgmac_pdata *pdata, @@ -52,15 +66,42 @@ static void fxgmac_free_ring(struct fxgmac_pdata *pdata, if (!ring) return; +#ifndef FXGMAC_USE_STATIC_ALLOC if (ring->desc_data_head) { +#endif for (i = 0; i < ring->dma_desc_count; i++) { desc_data = FXGMAC_GET_DESC_DATA(ring, i); fxgmac_unmap_desc_data(pdata, desc_data); } - +#ifndef FXGMAC_USE_STATIC_ALLOC kfree(ring->desc_data_head); ring->desc_data_head = NULL; } +#endif + +#ifndef FXGMAC_NOT_USE_PAGE_MAPPING + if (ring->rx_hdr_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma, + ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_hdr_pa.pages); + + ring->rx_hdr_pa.pages = NULL; + ring->rx_hdr_pa.pages_len = 0; + ring->rx_hdr_pa.pages_offset = 0; + ring->rx_hdr_pa.pages_dma = 0; + } + + if (ring->rx_buf_pa.pages) { + dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma, + ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE); + put_page(ring->rx_buf_pa.pages); + + ring->rx_buf_pa.pages = NULL; + ring->rx_buf_pa.pages_len = 0; + ring->rx_buf_pa.pages_offset = 0; + ring->rx_buf_pa.pages_dma = 0; + } +#endif if (ring->dma_desc_head) { dma_free_coherent( @@ -85,11 +126,13 @@ static int fxgmac_init_ring(struct fxgmac_pdata *pdata, if 
(!ring->dma_desc_head) return -ENOMEM; +#ifndef FXGMAC_USE_STATIC_ALLOC /* Array of descriptor data */ ring->desc_data_head = kcalloc( dma_desc_count, sizeof(struct fxgmac_desc_data), GFP_KERNEL); if (!ring->desc_data_head) return -ENOMEM; +#endif netif_dbg( pdata, drv, pdata->netdev, @@ -147,12 +190,17 @@ static int fxgmac_alloc_rings(struct fxgmac_pdata *pdata) "error initializing Rx ring\n"); goto err_init_ring; } - if (netif_msg_drv(pdata)) - DPRINTK("fxgmac_alloc_ring..ch=%u, tx_desc_cnt=%u, rx_desc_cnt=%u\n", - i, pdata->tx_desc_count, pdata->rx_desc_count); + if (netif_msg_drv(pdata)) { + DPRINTK("fxgmac_alloc_ring..ch=%u,", i); + if (i < pdata->tx_ring_count) + DPRINTK(" tx_desc_cnt=%u,", + pdata->tx_desc_count); + + DPRINTK(" rx_desc_cnt=%u.\n", pdata->rx_desc_count); + } } if (netif_msg_drv(pdata)) - DPRINTK("alloc_rings callout ok\n"); + DPRINTK("alloc_rings callout ok ch=%u\n", i); return 0; @@ -168,27 +216,22 @@ static void fxgmac_free_channels(struct fxgmac_pdata *pdata) if (!pdata->channel_head) return; if (netif_msg_drv(pdata)) - DPRINTK("free_channels, tx_ring=%p\n", - pdata->channel_head->tx_ring); + DPRINTK("free_channels,tx_ring=%p ,rx_ring=%p ,channel=%p\n", + pdata->channel_head->tx_ring, + pdata->channel_head->rx_ring, pdata->channel_head); +#ifndef FXGMAC_USE_STATIC_ALLOC kfree(pdata->channel_head->tx_ring); - pdata->channel_head->tx_ring = NULL; - - if (netif_msg_drv(pdata)) - DPRINTK("free_channels, rx_ring=%p\n", - pdata->channel_head->rx_ring); kfree(pdata->channel_head->rx_ring); - pdata->channel_head->rx_ring = NULL; - - if (netif_msg_drv(pdata)) - DPRINTK("free_channels, channel=%p\n", pdata->channel_head); + pdata->channel_head->tx_ring = NULL; + pdata->channel_head->rx_ring = NULL; kfree(pdata->channel_head); - +#endif pdata->channel_head = NULL; } static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) { - struct fxgmac_channel *channel_head, *channel; + struct fxgmac_channel *channel, *channel_head; struct fxgmac_ring 
*tx_ring, *rx_ring; int ret = -ENOMEM; unsigned int i; @@ -199,6 +242,14 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) FXGMAC_FLAG_MSIX_LEN); #endif +#ifdef FXGMAC_USE_STATIC_ALLOC + channel_head = pdata->channel; + tx_ring = &pdata->ring[0]; + rx_ring = &pdata->ring[1]; + if (netif_msg_drv(pdata)) + DPRINTK("static_alloc_channels,channel_head=%p,tx_ring=%p,rx_ring=%p\n", + channel_head, tx_ring, rx_ring); +#else channel_head = kcalloc(pdata->channel_count, sizeof(struct fxgmac_channel), GFP_KERNEL); if (netif_msg_drv(pdata)) @@ -209,8 +260,6 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) if (!channel_head) return ret; - netif_dbg(pdata, drv, pdata->netdev, "channel_head=%p\n", channel_head); - tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct fxgmac_ring), GFP_KERNEL); if (!tx_ring) @@ -227,7 +276,7 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) if (netif_msg_drv(pdata)) DPRINTK("alloc_channels, rx_ring=%p, size=%d*%ld\n", rx_ring, pdata->rx_ring_count, sizeof(struct fxgmac_ring)); - +#endif for (i = 0, channel = channel_head; i < pdata->channel_count; i++, channel++) { snprintf(channel->name, sizeof(channel->name), "channel-%u", i); @@ -249,17 +298,6 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) .msix_entries [FXGMAC_MAX_DMA_CHANNELS] .vector; - - if (pdata->channel_irq - [FXGMAC_MAX_DMA_CHANNELS] < - 0) { - netdev_err( - pdata->netdev, - "get_irq %u for tx failed\n", - i + 1); - goto err_irq; - } - channel->expansion.dma_irq_tx = pdata->channel_irq [FXGMAC_MAX_DMA_CHANNELS]; @@ -285,11 +323,6 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) if (i < pdata->rx_ring_count) channel->rx_ring = rx_ring++; - - netif_dbg(pdata, drv, pdata->netdev, - "%s: dma_regs=%p, tx_ring=%p, rx_ring=%p\n", - channel->name, channel->dma_regs, channel->tx_ring, - channel->rx_ring); } pdata->channel_head = channel_head; @@ -299,6 +332,7 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata 
*pdata) return 0; err_irq: +#ifndef FXGMAC_USE_STATIC_ALLOC kfree(rx_ring); err_rx_ring: @@ -306,7 +340,7 @@ static int fxgmac_alloc_channels(struct fxgmac_pdata *pdata) err_tx_ring: kfree(channel_head); - +#endif DPRINTK("fxgmac alloc_channels callout err,%d\n", ret); return ret; } @@ -338,10 +372,70 @@ static int fxgmac_alloc_channels_and_rings(struct fxgmac_pdata *pdata) return ret; } +static void fxgmac_set_buffer_data(struct fxgmac_buffer_data *bd, + struct fxgmac_page_alloc *pa, + unsigned int len) +{ + get_page(pa->pages); + bd->pa = *pa; + + bd->dma_base = pa->pages_dma; + bd->dma_off = pa->pages_offset; + bd->dma_len = len; + + pa->pages_offset += len; + if ((pa->pages_offset + len) > pa->pages_len) { + /* This data descriptor is responsible for unmapping page(s) */ + bd->pa_unmap = *pa; + + /* Get a new allocation next time */ + pa->pages = NULL; + pa->pages_len = 0; + pa->pages_offset = 0; + pa->pages_dma = 0; + } +} + +static int fxgmac_alloc_pages(struct fxgmac_pdata *pdata, + struct fxgmac_page_alloc *pa, gfp_t gfp, + int order) +{ + struct page *pages = NULL; + dma_addr_t pages_dma; + + /* Try to obtain pages, decreasing order if necessary */ + gfp |= __GFP_COMP | __GFP_NOWARN; + while (order >= 0) { + pages = alloc_pages(gfp, order); + if (pages) + break; + + order--; + } + if (!pages) + return -ENOMEM; + + /* Map the pages */ + pages_dma = dma_map_page(pdata->dev, pages, 0, PAGE_SIZE << order, + DMA_FROM_DEVICE); + if (dma_mapping_error(pdata->dev, pages_dma)) { + put_page(pages); + return -ENOMEM; + } + + pa->pages = pages; + pa->pages_len = PAGE_SIZE << order; + pa->pages_offset = 0; + pa->pages_dma = pages_dma; + + return 0; +} + static int fxgmac_map_rx_buffer(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, struct fxgmac_desc_data *desc_data) { +#ifdef FXGMAC_NOT_USE_PAGE_MAPPING struct sk_buff *skb; skb = __netdev_alloc_skb_ip_align(pdata->netdev, pdata->rx_buf_size, GFP_ATOMIC); @@ -359,13 +453,79 @@ static int 
fxgmac_map_rx_buffer(struct fxgmac_pdata *pdata, dev_kfree_skb_any(skb); return -EINVAL; } +#else + int ret; + int order; + + if (!ring->rx_hdr_pa.pages) { + if (pdata->jumbo) + order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); + else + order = 0; + ret = fxgmac_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, + order); + if (ret) + return ret; + } + /* Set up the header page info */ + fxgmac_set_buffer_data(&desc_data->rx.hdr, &ring->rx_hdr_pa, + pdata->rx_buf_size); + +#endif return 0; } +static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + + /* Reset the Tx descriptor + * Set buffer 1 (lo) address to zero + * Set buffer 1 (hi) address to zero + * Reset all other control bits (IC, TTSE, B2L & B1L) + * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) + */ + dma_desc->desc0 = 0; + dma_desc->desc1 = 0; + dma_desc->desc2 = 0; + dma_desc->desc3 = 0; + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + +static void fxgmac_tx_desc_init_channel(struct fxgmac_channel *channel) +{ + struct fxgmac_ring *ring = channel->tx_ring; + struct fxgmac_desc_data *desc_data; + int start_index = ring->cur; + unsigned int i; + (void)start_index; + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Tx descriptor */ + fxgmac_tx_desc_reset(desc_data); + } + + /* Update the total number of Tx descriptors */ + writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1, + FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(channel->pdata->pAdapter, + upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); + writereg(channel->pdata->pAdapter, + lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); +} + 
static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) { - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct fxgmac_desc_data *desc_data; struct fxgmac_dma_desc *dma_desc; struct fxgmac_channel *channel; @@ -399,19 +559,100 @@ static void fxgmac_tx_desc_init(struct fxgmac_pdata *pdata) ring->dirty = 0; memset(&ring->tx, 0, sizeof(ring->tx)); - hw_ops->tx_desc_init(channel); + fxgmac_tx_desc_init_channel(channel); + } +} + +static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index) +{ + struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; + dma_addr_t buf_dma; + + /* Reset the Rx descriptor + * Set buffer 1 (lo) address to header dma address (lo) + * Set buffer 1 (hi) address to header dma address (hi) + * Set buffer 2 (lo) address to buffer dma address (lo) + * Set buffer 2 (hi) address to buffer dma address (hi) and + * set control bits OWN and INTE + */ +#ifdef FXGMAC_NOT_USE_PAGE_MAPPING + buf_dma = desc_data->rx.buf.dma_base; +#else + buf_dma = desc_data->rx.hdr.dma_base + desc_data->rx.hdr.dma_off; +#endif + dma_desc->desc0 = cpu_to_le32(lower_32_bits(buf_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(buf_dma)); + dma_desc->desc2 = 0; + dma_desc->desc3 = 0; + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_INTE_POS, + RX_NORMAL_DESC3_INTE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_BUF2V_POS, + RX_NORMAL_DESC3_BUF2V_LEN, 0); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_BUF1V_POS, + RX_NORMAL_DESC3_BUF1V_LEN, 1); + + /* Since the Rx DMA engine is likely running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the descriptor + */ + dma_wmb(); + + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN, 1); + + /* Make sure ownership is written to the descriptor */ + dma_wmb(); +} + 
+static void fxgmac_rx_desc_init_channel(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + unsigned int start_index = ring->cur; + struct fxgmac_desc_data *desc_data; + unsigned int i; + + /* Initialize all descriptors */ + for (i = 0; i < ring->dma_desc_count; i++) { + desc_data = FXGMAC_GET_DESC_DATA(ring, i); + + /* Initialize Rx descriptor */ + fxgmac_rx_desc_reset(pdata, desc_data, i); } + + /* Update the total number of Rx descriptors */ + writereg(pdata->pAdapter, ring->dma_desc_count - 1, + FXGMAC_DMA_REG(channel, DMA_CH_RDRLR)); + + /* Update the starting address of descriptor ring */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); + + /* Update the Rx Descriptor Tail Pointer */ + desc_data = FXGMAC_GET_DESC_DATA( + ring, start_index + ring->dma_desc_count - 1); + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); } -static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) +static int fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) { - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; struct fxgmac_desc_data *desc_data; struct fxgmac_dma_desc *dma_desc; struct fxgmac_channel *channel; struct fxgmac_ring *ring; dma_addr_t dma_desc_addr; unsigned int i, j; + int ret; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -428,8 +669,9 @@ static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) desc_data->dma_desc = dma_desc; desc_data->dma_desc_addr = dma_desc_addr; - if (fxgmac_map_rx_buffer(pdata, ring, desc_data)) - break; + ret = fxgmac_map_rx_buffer(pdata, ring, desc_data); + if (ret) + return ret; dma_desc++; dma_desc_addr += sizeof(struct 
fxgmac_dma_desc); @@ -438,8 +680,9 @@ static void fxgmac_rx_desc_init(struct fxgmac_pdata *pdata) ring->cur = 0; ring->dirty = 0; - hw_ops->rx_desc_init(channel); + fxgmac_rx_desc_init_channel(channel); } + return 0; } static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, @@ -455,6 +698,10 @@ static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, unsigned int tso, vlan; dma_addr_t skb_dma; unsigned int i; +#ifdef FXGMAC_TX_DMA_MAP_SINGLE + void *addr; + struct skb_shared_info *info = skb_shinfo(skb); +#endif offset = 0; start_index = ring->cur; @@ -528,16 +775,26 @@ static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { netif_dbg(pdata, tx_queued, pdata->netdev, "mapping frag %u\n", i); +#ifdef FXGMAC_TX_DMA_MAP_SINGLE + frag = info->frags + i; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); +#else frag = &skb_shinfo(skb)->frags[i]; +#endif offset = 0; for (datalen = skb_frag_size(frag); datalen;) { len = min_t(unsigned int, datalen, FXGMAC_TX_MAX_BUF_SIZE); +#ifdef FXGMAC_TX_DMA_MAP_SINGLE + skb_dma = dma_map_single(pdata->dev, addr + offset, len, + DMA_TO_DEVICE); +#else skb_dma = skb_frag_dma_map(pdata->dev, frag, offset, len, DMA_TO_DEVICE); - +#endif if (dma_mapping_error(pdata->dev, skb_dma)) { netdev_alert(pdata->netdev, "skb_frag_dma_map failed\n"); @@ -545,7 +802,11 @@ static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, } desc_data->skb_dma = skb_dma; desc_data->skb_dma_len = len; +#ifdef FXGMAC_TX_DMA_MAP_SINGLE + desc_data->mapped_as_page = 0; +#else desc_data->mapped_as_page = 1; +#endif netif_dbg(pdata, tx_queued, pdata->netdev, "skb frag: index=%u, dma=%pad, len=%u\n", cur_index, &skb_dma, len); @@ -591,11 +852,15 @@ static int fxgmac_map_tx_skb(struct fxgmac_channel *channel, void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops) { - desc_ops->alloc_channles_and_rings = fxgmac_alloc_channels_and_rings; + desc_ops->alloc_channels_and_rings = 
fxgmac_alloc_channels_and_rings; desc_ops->free_channels_and_rings = fxgmac_free_channels_and_rings; desc_ops->map_tx_skb = fxgmac_map_tx_skb; desc_ops->map_rx_buffer = fxgmac_map_rx_buffer; desc_ops->unmap_desc_data = fxgmac_unmap_desc_data; desc_ops->tx_desc_init = fxgmac_tx_desc_init; desc_ops->rx_desc_init = fxgmac_rx_desc_init; + desc_ops->tx_desc_init_channel = fxgmac_tx_desc_init_channel; + desc_ops->rx_desc_init_channel = fxgmac_rx_desc_init_channel; + desc_ops->tx_desc_reset = fxgmac_tx_desc_reset; + desc_ops->rx_desc_reset = fxgmac_rx_desc_reset; } diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c index 05aa42f90ad83..e1822facea56d 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ethtool.c @@ -1,10 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#include -#include -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" @@ -115,7 +111,6 @@ static void fxgmac_ethtool_get_drvinfo(struct net_device *netdev, devid = FXGMAC_GET_REG_BITS(ver, MAC_VR_DEVID_POS, MAC_VR_DEVID_LEN); userver = FXGMAC_GET_REG_BITS(ver, MAC_VR_USERVER_POS, MAC_VR_USERVER_LEN); - /*DPRINTK("xlgma: No userver (%x) here, sver (%x) should be 0x51\n", userver, sver);*/ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "S.D.U: %x.%x.%x", sver, devid, userver); } @@ -251,7 +246,7 @@ static void fxgmac_get_reta(struct fxgmac_pdata *pdata, u32 *indir) { int i, reta_size = FXGMAC_RSS_MAX_TABLE_SIZE; u16 rss_m; -#ifdef FXGMAC_ONE_CHANNLE +#ifdef FXGMAC_ONE_CHANNEL rss_m = FXGMAC_MAX_DMA_CHANNELS; #else rss_m = FXGMAC_MAX_DMA_CHANNELS - @@ -267,10 +262,6 @@ static int fxgmac_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, { struct fxgmac_pdata *pdata = netdev_priv(netdev); - /* ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP) - * ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR) - * 
ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32) - */ if (hfunc) { *hfunc = ETH_RSS_HASH_TOP; DPRINTK("fxmac, get_rxfh for hash function\n"); @@ -294,7 +285,7 @@ static int fxgmac_set_rxfh(struct net_device *netdev, const u32 *indir, { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - int i; + int i = 0; u32 reta_entries = fxgmac_rss_indir_size(netdev); int max_queues = FXGMAC_MAX_DMA_CHANNELS; @@ -306,7 +297,7 @@ static int fxgmac_set_rxfh(struct net_device *netdev, const u32 *indir, /* Fill out the redirection table */ if (indir) { -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED max_queues = max_queues; reta_entries = reta_entries; i = i; @@ -444,9 +435,6 @@ static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLALL: cmd->rule_cnt = 0; ret = 0; - /*ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, - (u32 *)rule_locs); - */ DPRINTK("fxmac, get_rxnfc for classify both cnt and rules\n"); break; case ETHTOOL_GRXFH: @@ -460,7 +448,6 @@ static int fxgmac_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | BIT(MAC_RSSCR_UDP6TE_POS)) static int fxgmac_set_rss_hash_opt(struct fxgmac_pdata *pdata, struct ethtool_rxnfc *nfc) { @@ -578,11 +565,9 @@ static int fxgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - /* no support. rx classifier rule insert */ DPRINTK("set_rxnfc for rx cls rule insert-n\\a\n"); break; case ETHTOOL_SRXCLSRLDEL: - /* no support. 
rx classifier rule delete */ DPRINTK("set_rxnfc for rx cls rule del-n\\a\n"); break; case ETHTOOL_SRXFH: @@ -597,16 +582,10 @@ static int fxgmac_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) } #endif /* FXGMAC_RSS_FEATURE_ENABLED */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) static void fxgmac_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *exact) - -#else -static void fxgmac_get_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -#endif { struct fxgmac_pdata *pdata = netdev_priv(netdev); @@ -622,20 +601,18 @@ static void fxgmac_get_ringparam(struct net_device *netdev, ring->rx_jumbo_pending = 0; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) static int fxgmac_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *exact) - -#else -static int fxgmac_set_ringparam(struct net_device *netdev, - struct ethtool_ringparam *ring) -#endif { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + return 0; + + fxgmac_lock(pdata); DPRINTK("fxmac, set_ringparam callin\n"); pdata->tx_desc_count = ring->tx_pending; @@ -644,8 +621,9 @@ static int fxgmac_set_ringparam(struct net_device *netdev, fxgmac_stop(pdata); fxgmac_free_tx_data(pdata); fxgmac_free_rx_data(pdata); - desc_ops->alloc_channles_and_rings(pdata); + desc_ops->alloc_channels_and_rings(pdata); fxgmac_start(pdata); + fxgmac_unlock(pdata); return 0; } @@ -656,11 +634,6 @@ static void fxgmac_get_wol(struct net_device *netdev, { struct fxgmac_pdata *pdata = netdev_priv(netdev); - /* for further feature implementation - * wol->supported = WAKE_PHY | WAKE_UCAST | WAKE_MCAST | - * WAKE_BCAST | WAKE_MAGIC; - */ - wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC | WAKE_ARP; 
#if FXGMAC_WOL_UPON_EPHY_LINK @@ -674,30 +647,141 @@ static void fxgmac_get_wol(struct net_device *netdev, return; } wol->wolopts = pdata->expansion.wol; - DPRINTK("fxmac, get_wol, 0x%x, 0x%x\n", wol->wolopts, - pdata->expansion.wol); +} + +// only supports four patterns, and patterns will be cleared on every call +static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 ip_addr, i = 0; + u8 type_offset, op_offset, tip_offset; + struct pattern_packet packet; + struct wol_bitmap_pattern + pattern[4]; // for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. + + memset(pattern, 0, sizeof(struct wol_bitmap_pattern) * 4); + + //config ucast + if (pdata->expansion.wol & WAKE_UCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + // config bcast + if (pdata->expansion.wol & WAKE_BCAST) { + pattern[i].mask_info[0] = 0x3F; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); + pattern[i].pattern_offset = 0; + i++; + } + + // config mcast + if (pdata->expansion.wol & WAKE_MCAST) { + pattern[i].mask_info[0] = 0x7; + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_info[0] = 0x1; + pattern[i].pattern_info[1] = 0x0; + pattern[i].pattern_info[2] = 0x5E; + pattern[i].pattern_offset = 0; + i++; + } + + // config arp + if (pdata->expansion.wol & WAKE_ARP) { + memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); + type_offset = offsetof(struct pattern_packet, ar_pro); + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + type_offset++; + pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; + op_offset = offsetof(struct pattern_packet, ar_op); + pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; + op_offset++; + pattern[i].mask_info[op_offset / 8] |= 1 << 
op_offset % 8; + tip_offset = offsetof(struct pattern_packet, ar_tip); + pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + tip_offset++; + pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; + + /* arp type is 0x0800 (big endian) */ + packet.ar_pro = 0x0 << 8 | 0x08; + /* 1 is arp request,2 is arp replay, 3 is rarp request, + * 4 is rarp replay + */ + packet.ar_op = 0x1 << 8; + ip_addr = fxgmac_get_netdev_ip4addr(pdata); + packet.ar_tip[0] = ip_addr & 0xFF; + packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; + packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; + packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; + memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); + pattern[i].mask_size = sizeof(pattern[0].mask_info); + pattern[i].pattern_offset = 0; + i++; + } + + hw_ops->set_wake_pattern(pdata, pattern, i); +} + +void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) +{ + /* enable or disable WOL. this function only set wake-up type, and + * power related configure will be in other place, see power management. 
+ */ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + if (!pdata->hw_feat.rwk) { + netdev_err(pdata->netdev, + "error configuring WOL - not supported.\n"); + return; + } + + hw_ops->disable_wake_magic_pattern(pdata); + hw_ops->disable_wake_pattern(pdata); + hw_ops->disable_wake_link_change(pdata); + + if (en) { + /* config mac address for rx of magic or ucast */ + hw_ops->set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); + + /* Enable Magic packet */ + if (pdata->expansion.wol & WAKE_MAGIC) + hw_ops->enable_wake_magic_pattern(pdata); + + /* Enable global unicast packet */ + if (pdata->expansion.wol & WAKE_UCAST || + pdata->expansion.wol & WAKE_MCAST || + pdata->expansion.wol & WAKE_BCAST || + pdata->expansion.wol & WAKE_ARP) + hw_ops->enable_wake_pattern(pdata); + + /* Enable ephy link change */ + if ((FXGMAC_WOL_UPON_EPHY_LINK) && + (pdata->expansion.wol & WAKE_PHY)) + hw_ops->enable_wake_link_change(pdata); + } + device_set_wakeup_enable(pdata->dev, en); + + DPRINTK("config_wol callout\n"); } static int fxgmac_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - int ret; /* currently, we do not support these options */ #if FXGMAC_WOL_UPON_EPHY_LINK -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) if (wol->wolopts & (WAKE_MAGICSECURE | WAKE_FILTER)) { #else - if (wol->wolopts & WAKE_MAGICSECURE) { -#endif -#else -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE | WAKE_FILTER)) { -#else - if (wol->wolopts & (WAKE_PHY | WAKE_MAGICSECURE)) { -#endif #endif DPRINTK("fxmac, set_wol, not supported wol options, 0x%x\n", wol->wolopts); @@ -706,8 +790,7 @@ static int fxgmac_set_wol(struct net_device *netdev, if (!(pdata->hw_feat.rwk)) { DPRINTK("fxmac, set_wol, hw wol feature is n/a\n"); - ret = (wol->wolopts ? -EOPNOTSUPP : 0); - return ret; + return wol->wolopts ? 
-EOPNOTSUPP : 0; } pdata->expansion.wol = 0; @@ -729,10 +812,9 @@ static int fxgmac_set_wol(struct net_device *netdev, if (wol->wolopts & WAKE_ARP) pdata->expansion.wol |= WAKE_ARP; - hw_ops->set_pattern_data(pdata); - - hw_ops->config_wol(pdata, (!!(pdata->expansion.wol))); + fxgmac_set_pattern_data(pdata); + fxgmac_config_wol(pdata, (!!(pdata->expansion.wol))); DPRINTK("fxmac, set_wol, opt=0x%x, 0x%x\n", wol->wolopts, pdata->expansion.wol); @@ -762,7 +844,6 @@ static void fxgmac_get_regs(struct net_device *netdev, regs_buff[REG_MII_PHYSID2]; } -#if FXGMAC_PAUSE_FEATURE_ENABLED static int fxgmac_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd) { @@ -770,10 +851,7 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 duplex, regval, link_status; u32 adv = 0xFFFFFFFF; - - regval = fxgmac_ephy_autoneg_ability_get(pdata, &adv); - if (regval) - return -ETIMEDOUT; + int ret; ethtool_link_ksettings_zero_link_mode(cmd, supported); ethtool_link_ksettings_zero_link_mode(cmd, advertising); @@ -788,14 +866,26 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, /* Indicate pause support */ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); - ethtool_link_ksettings_add_link_mode(cmd, advertising, Asym_Pause); + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + if (ret < 0) + return ret; + + if (FXGMAC_GET_REG_BITS(regval, PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + if (FXGMAC_GET_REG_BITS(regval, PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN)) + ethtool_link_ksettings_add_link_mode(cmd, advertising, + Asym_Pause); ethtool_link_ksettings_add_link_mode(cmd, supported, MII); cmd->base.port = PORT_MII; 
ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); - hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + if (ret < 0) + return ret; + regval = FXGMAC_GET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN); if (regval) { @@ -803,8 +893,11 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); else - clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, - cmd->link_modes.advertising); + goto FORCE_MODE; + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); + if (ret < 0) + return ret; if (adv & FXGMAC_ADVERTISE_10HALF) ethtool_link_ksettings_add_link_mode(cmd, advertising, @@ -818,10 +911,16 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, if (adv & FXGMAC_ADVERTISE_100FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &adv); + if (ret < 0) + return ret; + if (adv & FXGMAC_ADVERTISE_1000FULL) ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); } else { +FORCE_MODE: clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, cmd->link_modes.advertising); switch (pdata->phy_speed) { @@ -855,8 +954,12 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, } cmd->base.autoneg = pdata->phy_autoeng ? 
regval : 0; - hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, ®val); - link_status = regval & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + regval = 0; + ret = hw_ops->read_ephy_reg(pdata, REG_MII_SPEC_STATUS, ®val); + if (ret < 0) + return ret; + + link_status = regval & (BIT(FXGMAC_EPHY_LINK_STATUS_BIT)); if (link_status) { duplex = FXGMAC_GET_REG_BITS(regval, PHY_MII_SPEC_DUPLEX_POS, PHY_MII_SPEC_DUPLEX_LEN); @@ -873,16 +976,17 @@ static int fxgmac_get_link_ksettings(struct net_device *netdev, static int fxgmac_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { - u32 advertising, support, adv; - int ret; - struct fxphy_ag_adv; + u32 advertising, support; struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int ret; if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) return -EINVAL; pdata->phy_autoeng = cmd->base.autoneg; + pdata->phy_duplex = cmd->base.duplex; + pdata->phy_speed = cmd->base.speed; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); @@ -892,63 +996,28 @@ static int fxgmac_set_link_ksettings(struct net_device *netdev, if (pdata->phy_autoeng || (!pdata->phy_autoeng && cmd->base.speed == SPEED_1000)) { - ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); - if (ret < 0) - return -ETIMEDOUT; - adv &= ~REG_BIT_ADVERTISE_100_10_CAP; - adv |= ethtool_adv_to_mii_adv_t(advertising); - ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv); - if (ret < 0) - return -ETIMEDOUT; - ret = hw_ops->read_ephy_reg(pdata, REG_MII_CTRL1000, &adv); - if (ret < 0) - return -ETIMEDOUT; - adv &= ~REG_BIT_ADVERTISE_1000_CAP; - adv |= ethtool_adv_to_mii_ctrl1000_t(advertising); - ret = hw_ops->write_ephy_reg(pdata, REG_MII_CTRL1000, adv); - if (ret < 0) - return -ETIMEDOUT; - - ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); - if (ret < 0) - return -ETIMEDOUT; - adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_AUTOENG_POS, - 
PHY_CR_AUTOENG_LEN, 1); - ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); - if (ret < 0) - return -ETIMEDOUT; - - ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); - if (ret < 0) - return -ETIMEDOUT; - adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS, - PHY_CR_RE_AUTOENG_LEN, 1); - ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); + ret = hw_ops->phy_config(pdata); if (ret < 0) - return -ETIMEDOUT; + return ret; } else { - pdata->phy_duplex = cmd->base.duplex; - pdata->phy_speed = cmd->base.speed; - fxgmac_phy_force_speed(pdata, pdata->phy_speed); - fxgmac_phy_force_duplex(pdata, pdata->phy_duplex); - fxgmac_phy_force_autoneg(pdata, pdata->phy_autoeng); + fxgmac_phy_force_mode(pdata); } - ret = fxgmac_ephy_soft_reset(pdata); - if (ret) { - printk("%s: ephy soft reset timeout.\n", __func__); - return -ETIMEDOUT; - } + /* Save speed is used to restore it when resuming */ + pdata->expansion.pre_phy_speed = cmd->base.speed; + pdata->expansion.pre_phy_autoneg = cmd->base.autoneg; + pdata->expansion.pre_phy_duplex = cmd->base.duplex; return 0; } +#if FXGMAC_PAUSE_FEATURE_ENABLED static void fxgmac_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - pause->autoneg = 1; + pause->autoneg = pdata->phy_autoeng; pause->rx_pause = pdata->rx_pause; pause->tx_pause = pdata->tx_pause; @@ -963,10 +1032,16 @@ static int fxgmac_set_pauseparam(struct net_device *netdev, struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; unsigned int pre_rx_pause = pdata->rx_pause; unsigned int pre_tx_pause = pdata->tx_pause; + u32 adv; + int ret; + int enable_pause = 0; pdata->rx_pause = pause->rx_pause; pdata->tx_pause = pause->tx_pause; + if (pdata->rx_pause || pdata->tx_pause) + enable_pause = 1; + if (pre_rx_pause != pdata->rx_pause) { hw_ops->config_rx_flow_control(pdata); DPRINTK("fxgmac set pause parameter, rx from %d to %d\n", @@ -978,6 +1053,32 @@ static int fxgmac_set_pauseparam(struct 
net_device *netdev, pre_tx_pause, pdata->tx_pause); } + if (pause->autoneg) { + ret = hw_ops->read_ephy_reg(pdata, REG_MII_ADVERTISE, &adv); + if (ret < 0) + return ret; + adv = FXGMAC_SET_REG_BITS(adv, PHY_MII_ADVERTISE_PAUSE_POS, + PHY_MII_ADVERTISE_PAUSE_LEN, + enable_pause); + adv = FXGMAC_SET_REG_BITS(adv, PHY_MII_ADVERTISE_ASYPAUSE_POS, + PHY_MII_ADVERTISE_ASYPAUSE_LEN, + enable_pause); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_ADVERTISE, adv); + if (ret < 0) + return ret; + + ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &adv); + if (ret < 0) + return ret; + adv = FXGMAC_SET_REG_BITS(adv, PHY_CR_RE_AUTOENG_POS, + PHY_CR_RE_AUTOENG_LEN, 1); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, adv); + if (ret < 0) + return ret; + } else { + DPRINTK("Can't set phy pause because autoneg is off.\n"); + } + DPRINTK("fxgmac set pause parameter, autoneg=%d, rx=%d, tx=%d\n", pause->autoneg, pause->rx_pause, pause->tx_pause); @@ -985,14 +1086,6 @@ static int fxgmac_set_pauseparam(struct net_device *netdev, } #endif /*FXGMAC_PAUSE_FEATURE_ENABLED*/ -/* yzhang added for debug sake. 
descriptors status checking - * 2021.03.29 - */ -#define FXGMAC_ETH_GSTRING_LEN 32 - -#define FXGMAC_TEST_LEN (sizeof(fxgmac_gstrings_test) / FXGMAC_ETH_GSTRING_LEN) -#define DBG_ETHTOOL_CHECK_NUM_OF_DESC 5 - static void fxgmac_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { @@ -1038,7 +1131,6 @@ static void fxgmac_ethtool_get_ethtool_stats(struct net_device *netdev, int i; #if FXGMAC_PM_FEATURE_ENABLED - /* 20210709 for net power down */ if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) #endif { @@ -1051,11 +1143,50 @@ static void fxgmac_ethtool_get_ethtool_stats(struct net_device *netdev, } } -static inline bool fxgmac_removed(void __iomem *addr) +static int fxgmac_ethtool_reset(struct net_device *netdev, u32 *flag) { - return unlikely(!addr); + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 val; + int ret = 0; + + val = (*flag & ETH_RESET_ALL) || (*flag & ETH_RESET_PHY); + if (!val) { + DPRINTK("Operation not support.\n"); + return -EINVAL; + } + + switch (*flag) { + case ETH_RESET_ALL: + fxgmac_restart_dev(pdata); + *flag = 0; + break; + case ETH_RESET_PHY: + /* power off and on the phy in order to properly + * configure the MAC timing + */ + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, &val); + val = FXGMAC_SET_REG_BITS(val, PHY_CR_POWER_POS, + PHY_CR_POWER_LEN, PHY_POWER_DOWN); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, val); + if (ret < 0) + return ret; + + usleep_range_ex(pdata->pAdapter, 9000, 10000); + val = FXGMAC_SET_REG_BITS(val, PHY_CR_POWER_POS, + PHY_CR_POWER_LEN, PHY_POWER_UP); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, val); + if (ret < 0) + return ret; + + *flag = 0; + break; + default: + break; + } + + return 0; } -#define FXGMAC_REMOVED(a) fxgmac_removed(a) static const struct ethtool_ops fxgmac_ethtool_ops = { .get_drvinfo = fxgmac_ethtool_get_drvinfo, @@ -1065,7 +1196,7 @@ static const struct ethtool_ops fxgmac_ethtool_ops = 
{ .get_channels = fxgmac_ethtool_get_channels, .get_coalesce = fxgmac_ethtool_get_coalesce, .set_coalesce = fxgmac_ethtool_set_coalesce, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)) + .reset = fxgmac_ethtool_reset, /* The process of set is to get first and then set, * and the result of get is preserved for values that have not been modified. @@ -1077,7 +1208,6 @@ static const struct ethtool_ops fxgmac_ethtool_ops = { */ #ifdef ETHTOOL_COALESCE_USECS .supported_coalesce_params = ETHTOOL_COALESCE_USECS, -#endif #endif .get_strings = fxgmac_ethtool_get_strings, .get_sset_count = fxgmac_ethtool_get_sset_count, @@ -1099,10 +1229,8 @@ static const struct ethtool_ops fxgmac_ethtool_ops = { .set_wol = fxgmac_set_wol, #endif #if (FXGMAC_PAUSE_FEATURE_ENABLED) -#ifdef ETHTOOL_GLINKSETTINGS .get_link_ksettings = fxgmac_get_link_ksettings, .set_link_ksettings = fxgmac_set_link_ksettings, -#endif /* ETHTOOL_GLINKSETTINGS */ .get_pauseparam = fxgmac_get_pauseparam, .set_pauseparam = fxgmac_set_pauseparam, #endif diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c index ba0d1d86ddaaf..319749890871f 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-hw.c @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include "fuxi-os.h" #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" #include "fuxi-efuse.h" @@ -12,9 +11,6 @@ static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata); static int fxgmac_tx_complete(struct fxgmac_dma_desc *dma_desc) { -#if (FXGMAC_DUMMY_TX_DEBUG) - return 1; -#endif return !FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, TX_NORMAL_DESC3_OWN_LEN); } @@ -27,7 +23,7 @@ static int fxgmac_disable_rx_csum(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - DPRINTK("fxgmac disable rx checksum.\n"); + DPRINTK("fxgmac disable rx checksum, set val = %x.\n", regval); return 0; } @@ -39,7 +35,7 @@ static int fxgmac_enable_rx_csum(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_IPC_POS, MAC_CR_IPC_LEN, 1); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - DPRINTK("fxgmac enable rx checksum.\n"); + DPRINTK("fxgmac enable rx checksum, set val = %x.\n", regval); return 0; } @@ -61,7 +57,7 @@ static int fxgmac_set_mac_address(struct fxgmac_pdata *pdata, u8 *addr) static void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, struct netdev_hw_addr *ha, unsigned int *mac_reg) { - unsigned int mac_addr_hi, mac_addr_lo; + u32 mac_addr_hi, mac_addr_lo; u8 *mac_addr; mac_addr_lo = 0; @@ -77,9 +73,10 @@ static void fxgmac_set_mac_reg(struct fxgmac_pdata *pdata, mac_addr[0] = ha->addr[4]; mac_addr[1] = ha->addr[5]; + netif_dbg(pdata, drv, pdata->netdev, "adding mac address %pM\n", + ha->addr); netif_dbg(pdata, drv, pdata->netdev, - "adding mac address %pM at %#x\n", ha->addr, - *mac_reg); + "adding mac addredd at %#x\n", *mac_reg); mac_addr_hi = FXGMAC_SET_REG_BITS( mac_addr_hi, MAC_MACA1HR_AE_POS, MAC_MACA1HR_AE_LEN, 1); @@ -162,7 +159,7 @@ static int fxgmac_enable_rx_vlan_stripping(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0x3); 
writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); - DPRINTK("fxgmac enable MAC rx vlan stripping.\n"); + DPRINTK("fxgmac enable MAC rx vlan stripping , set val = %x\n", regval); return 0; } @@ -175,7 +172,7 @@ static int fxgmac_disable_rx_vlan_stripping(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_VLANTR_EVLS_POS, MAC_VLANTR_EVLS_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_VLANTR); - DPRINTK("fxgmac disable MAC rx vlan stripping.\n"); + DPRINTK("fxgmac disable MAC rx vlan stripping, set val = %x\n", regval); return 0; } @@ -305,21 +302,17 @@ static int fxgmac_set_promiscuous_mode(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); - DbgPrintF(MP_TRACE, "" STR_FORMAT " - promiscuous mode=%d, reg=%x.", - __FUNCTION__, enable, regval); - DbgPrintF( - MP_TRACE, - "" STR_FORMAT - " - note, vlan filter is called when set promiscuous mode=%d.", - __FUNCTION__, enable); + DPRINTK("promiscuous mode=%d", enable); + DPRINTK("set val = %x", regval); + DPRINTK("note, vlan filter is called when set promiscuous mode=%d", + enable); /* Hardware will still perform VLAN filtering in promiscuous mode */ if (enable) { fxgmac_disable_rx_vlan_filtering(pdata); } else { - if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (FXGMAC_RX_VLAN_FILTERING_ENABLED) fxgmac_enable_rx_vlan_filtering(pdata); - } } DPRINTK("fxgmac set promisc mode=%d\n", enable); @@ -344,8 +337,9 @@ static int fxgmac_enable_rx_broadcast(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); - DbgPrintF(MP_TRACE, "%s - bcast en=%d, bit-val=%d, reg=%x.", - __FUNCTION__, enable, val, regval); + DPRINTK("bcast en=%d", enable); + DPRINTK("bit-val=%d", val); + DPRINTK("reg=%x", regval); return 0; } @@ -367,9 +361,8 @@ static int fxgmac_set_all_multicast_mode(struct fxgmac_pdata *pdata, val); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_PFR); - 
DbgPrintF(MP_TRACE, - "" STR_FORMAT " - Enable all Multicast=%d, regval=%#x.", - __FUNCTION__, enable, regval); + DPRINTK("Enable all Multicast=%d", enable); + DPRINTK("set val = %#x.", regval); return 0; } @@ -381,7 +374,7 @@ static void fxgmac_set_mac_addn_addrs(struct fxgmac_pdata *pdata) struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; #endif - unsigned int addn_macs; + u32 addn_macs; unsigned int mac_reg; mac_reg = MAC_MACA1HR; @@ -421,25 +414,28 @@ static void fxgmac_set_mac_addn_addrs(struct fxgmac_pdata *pdata) bitOut = ((reversalval) & 0x1f); \ } while (0) +#ifndef CRC32_POLY_LE +#define CRC32_POLY_LE 0xedb88320 +#endif static u32 fxgmac_crc32(unsigned char *Data, int Length) { - u32 Crc = (u32)~0; /* Initial value. 0xFFFFFFFF */ + u32 crc = (u32)~0; /* Initial value. 0xFFFFFFFF */ while (--Length >= 0) { unsigned char Byte = *Data++; int Bit; for (Bit = 8; --Bit >= 0; Byte >>= 1) { - if ((Crc ^ Byte) & 1) { - Crc >>= 1; - Crc ^= 0xedb88320; + if ((crc ^ Byte) & 1) { + crc >>= 1; + crc ^= CRC32_POLY_LE; } else { - Crc >>= 1; + crc >>= 1; } } } - return ~Crc; + return ~crc; } /* @@ -451,7 +447,7 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, unsigned char *pmc_mac, int b_add) { - unsigned int hash_reg, reg_bit; + u32 hash_reg, reg_bit; unsigned int j; u32 crc, reversal_crc, regval; @@ -462,9 +458,8 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, writereg(pdata->pAdapter, 0, pdata->mac_regs + hash_reg); } - DBGPRINT( - MP_TRACE, - ("> 24), hash_reg, reg_bit); @@ -486,25 +481,26 @@ static void fxgmac_config_multicast_mac_hash_table(struct fxgmac_pdata *pdata, writereg(pdata->pAdapter, regval, pdata->mac_regs + hash_reg); } -#ifndef DPDK -#if FUXI_MAC_HASH_TABLE static void fxgmac_set_mac_hash_table(struct fxgmac_pdata *pdata) { +#ifndef DPDK +#if FXGMAC_MAC_HASH_TABLE struct net_device *netdev = pdata->netdev; struct netdev_hw_addr *ha; + 
fxgmac_config_multicast_mac_hash_table(pdata, (unsigned char *)0, 1); netdev_for_each_mc_addr(ha, netdev) { fxgmac_config_multicast_mac_hash_table(pdata, ha->addr, 1); } -} #else -static inline void fxgmac_set_mac_hash_table(void) -{ + (void)pdata; +#endif +#else + (void)pdata; +#endif } -#endif /* FUXI_MAC_HASH_TABLE */ -#endif /* DPDK */ -static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata) +static int fxgmac_set_mc_addresses(struct fxgmac_pdata *pdata) { if (pdata->hw_feat.hash_table_size) fxgmac_set_mac_hash_table(pdata); @@ -514,6 +510,16 @@ static int fxgmac_add_mac_addresses(struct fxgmac_pdata *pdata) return 0; } +static void fxgmac_set_multicast_mode(struct fxgmac_pdata *pdata, + unsigned int enable) +{ + if (enable) + fxgmac_set_mc_addresses(pdata); + else + fxgmac_config_multicast_mac_hash_table(pdata, + (unsigned char *)0, 1); +} + static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata) { u32 regval; @@ -524,7 +530,7 @@ static void fxgmac_config_mac_address(struct fxgmac_pdata *pdata) regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PFR); regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HPF_POS, MAC_PFR_HPF_LEN, 1); -#if FUXI_MAC_HASH_TABLE +#if FXGMAC_MAC_HASH_TABLE regval = FXGMAC_SET_REG_BITS(regval, MAC_PFR_HUC_POS, MAC_PFR_HUC_LEN, 1); #endif @@ -560,7 +566,7 @@ static int fxgmac_config_jumbo(struct fxgmac_pdata *pdata) static void fxgmac_config_checksum_offload(struct fxgmac_pdata *pdata) { - if (pdata->netdev->features & NETIF_F_RXCSUM) + if (FXGMAC_RX_CHECKSUM_ENABLED) fxgmac_enable_rx_csum(pdata); else fxgmac_disable_rx_csum(pdata); @@ -587,62 +593,29 @@ static void fxgmac_config_vlan_support(struct fxgmac_pdata *pdata) static int fxgmac_config_rx_mode(struct fxgmac_pdata *pdata) { - struct net_device *netdev = pdata->netdev; - unsigned int pr_mode, am_mode; + unsigned int pr_mode, am_mode, mu_mode, bd_mode; + +#ifndef FXGMAC_NETDEV_MU_MODE_ENABLED +#define FXGMAC_NETDEV_MU_MODE_ENABLED 0 +#endif + +#ifndef 
FXGMAC_NETDEV_BD_MODE_ENABLED +#define FXGMAC_NETDEV_BD_MODE_ENABLED 0 +#endif - pr_mode = ((netdev->flags & IFF_PROMISC) != 0); - am_mode = ((netdev->flags & IFF_ALLMULTI) != 0); + pr_mode = FXGMAC_NETDEV_PR_MODE_ENABLED; + am_mode = FXGMAC_NETDEV_AM_MODE_ENABLED; + mu_mode = FXGMAC_NETDEV_MU_MODE_ENABLED; + bd_mode = FXGMAC_NETDEV_BD_MODE_ENABLED; + fxgmac_enable_rx_broadcast(pdata, bd_mode); fxgmac_set_promiscuous_mode(pdata, pr_mode); fxgmac_set_all_multicast_mode(pdata, am_mode); - - fxgmac_add_mac_addresses(pdata); + fxgmac_set_multicast_mode(pdata, mu_mode); return 0; } -static void fxgmac_prepare_tx_stop(struct fxgmac_pdata *pdata, - struct fxgmac_channel *channel) -{ - unsigned int tx_dsr, tx_pos, tx_qidx; - unsigned long tx_timeout; - unsigned int tx_status; - - /* Calculate the status register to read and the position within */ - if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) { - tx_dsr = DMA_DSR0; - tx_pos = (channel->queue_index * DMA_DSR_Q_LEN) + - DMA_DSR0_TPS_START; - } else { - tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE; - - tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC); - tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_LEN) + - DMA_DSRX_TPS_START; - } - -#if FXGMAC_TX_HANG_TIMER_EN - tx_timeout = jiffies + msecs_to_jiffies(100); /* 100ms */ -#else - tx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); -#endif - while (time_before(jiffies, tx_timeout)) { - tx_status = readreg(pdata->pAdapter, pdata->mac_regs + tx_dsr); - tx_status = - FXGMAC_GET_REG_BITS(tx_status, tx_pos, DMA_DSR_TPS_LEN); - if ((tx_status == DMA_TPS_STOPPED) || - (tx_status == DMA_TPS_SUSPENDED)) - break; - - usleep_range_ex(pdata->pAdapter, 500, 1000); - } - - if (!time_before(jiffies, tx_timeout)) - netdev_info(pdata->netdev, - "timed out waiting for Tx DMA channel %u to stop\n", - channel->queue_index); -} - static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) { #ifndef DPDK @@ -651,7 +624,7 @@ static void fxgmac_enable_tx(struct fxgmac_pdata 
*pdata) unsigned int i; u32 regval; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED pdata->tx_hang_restart_queuing = 0; #endif @@ -659,6 +632,8 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) #ifndef DPDK channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->tx_ring) + break; regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_ST_POS, @@ -673,6 +648,12 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) for (i = 0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } + /* Enable Tx DMA channel */ FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1); } @@ -680,6 +661,11 @@ static void fxgmac_enable_tx(struct fxgmac_pdata *pdata) /* Enable each Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + if (i > 0) + break; +#endif + regval = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TXQEN_POS, @@ -703,31 +689,10 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) unsigned int i; u32 regval; - /* Prepare for Tx DMA channel stop */ -#ifndef DPDK - channel = pdata->channel_head; - if (channel != NULL) { - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) - break; - - fxgmac_prepare_tx_stop(pdata, channel); - -#if FXGMAC_TX_HANG_TIMER_EN - pdata->tx_hang_restart_queuing = 0; -#endif - } - } - -#else +#ifdef DPDK PMD_INIT_FUNC_TRACE(); struct fxgmac_tx_queue *txq; struct rte_eth_dev *dev = pdata->expansion.eth_dev; - - for (i = 0; i < pdata->tx_q_count; i++) { - txq = dev->data->tx_queues[i]; - fxgmac_txq_prepare_tx_stop(pdata, i); - } #endif /* Disable MAC Tx */ @@ -764,57 +729,31 @@ static void fxgmac_disable_tx(struct fxgmac_pdata *pdata) #else for (i = 
0; i < dev->data->nb_tx_queues; i++) { txq = dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + dev->data->port_id); + return; + } + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0); } #endif } -static void fxgmac_prepare_rx_stop(struct fxgmac_pdata *pdata, - unsigned int queue) -{ - unsigned int rx_status, prxq; - unsigned int rxqsts; - unsigned long rx_timeout; - /* The Rx engine cannot be stopped if it is actively processing - * packets. Wait for the Rx queue to empty the Rx fifo. Don't - * wait forever though... - */ -#if FXGMAC_TX_HANG_TIMER_EN - rx_timeout = - jiffies + msecs_to_jiffies(500); /* 500ms, larger is better */ -#else - rx_timeout = jiffies + (FXGMAC_DMA_STOP_TIMEOUT * HZ); -#endif - while (time_before(jiffies, rx_timeout)) { - rx_status = readreg(pdata->pAdapter, - FXGMAC_MTL_REG(pdata, queue, MTL_Q_RQDR)); - prxq = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_PRXQ_POS, - MTL_Q_RQDR_PRXQ_LEN); - rxqsts = FXGMAC_GET_REG_BITS(rx_status, MTL_Q_RQDR_RXQSTS_POS, - MTL_Q_RQDR_RXQSTS_LEN); - if ((prxq == 0) && (rxqsts == 0)) - break; - - usleep_range_ex(pdata->pAdapter, 500, 1000); - } - - if (!time_before(jiffies, rx_timeout)) - netdev_info(pdata->netdev, - "timed out waiting for Rx queue %u to empty\n", - queue); -} - static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) { #ifndef DPDK struct fxgmac_channel *channel; #endif - unsigned int regval, i; + unsigned int i; + u32 regval; /* Enable each Rx DMA channel */ #ifndef DPDK channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { + if (!channel->rx_ring) + break; regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_RCR_SR_POS, @@ -830,6 +769,11 @@ static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + dev->data->port_id); + return; + 
} /* Enable Rx DMA channel */ FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1); } @@ -850,7 +794,6 @@ static void fxgmac_enable_rx(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); #else /* Enable MAC Rx */ - FXGMAC_IOWRITE_BITS(pdata, MAC_ECR, DCRCC, 1); /* Frame is forwarded after stripping CRC to application*/ if (pdata->expansion.crc_strip_enable) { @@ -864,7 +807,7 @@ static void fxgmac_enable_channel_rx(struct fxgmac_pdata *pdata, unsigned int queue) { struct fxgmac_channel *channel; - unsigned int regval; + u32 regval; /* Enable Rx DMA channel */ channel = pdata->channel_head + queue; @@ -909,19 +852,10 @@ static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - /* Prepare for Rx DMA channel stop */ -#ifndef DPDK - for (i = 0; i < pdata->rx_q_count; i++) - fxgmac_prepare_rx_stop(pdata, i); -#else +#ifdef DPDK PMD_INIT_FUNC_TRACE(); struct fxgmac_rx_queue *rxq; struct rte_eth_dev *dev = pdata->expansion.eth_dev; - - for (i = 0; i < dev->data->nb_rx_queues; i++) { - rxq = dev->data->rx_queues[i]; - fxgmac_prepare_rx_stop(pdata, i); - } #endif /* Disable each Rx queue */ @@ -946,494 +880,44 @@ static void fxgmac_disable_rx(struct fxgmac_pdata *pdata) #else for (i = 0; i < dev->data->nb_rx_queues; i++) { rxq = dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + dev->data->port_id); + return; + } FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0); } #endif } -static void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, - struct fxgmac_ring *ring) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_desc_data *desc_data; - - /* Make sure everything is written before the register write */ - wmb(); - - /* Issue a poll command to Tx DMA by writing address - * of next immediate free descriptor - */ - desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); 
- -#if !(FXGMAC_DUMMY_TX_DEBUG) - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); -#else - DPRINTK("dummy tx, fxgmac_tx_start_xmit, tail reg=0x%lx, val=%08x\n", - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO) - pdata->mac_regs, - (u32)lower_32_bits(desc_data->dma_desc_addr)); -#endif - if (netif_msg_tx_done(pdata)) - DPRINTK("tx_start_xmit: dump before wr reg, dma base=0x%016llx, reg=0x%08x, tx timer usecs=%u, tx_timer_active=%u\n", - desc_data->dma_desc_addr, - readreg(pdata->pAdapter, - FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), - pdata->tx_usecs, channel->tx_timer_active); - - ring->tx.xmit_more = 0; -} - -static void fxgmac_dev_xmit(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->tx_ring; - unsigned int tso_context, vlan_context; - struct fxgmac_desc_data *desc_data; - struct fxgmac_dma_desc *dma_desc; - struct fxgmac_pkt_info *pkt_info; - unsigned int csum, tso, vlan; - int start_index = ring->cur; - int cur_index = ring->cur; - int i; - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); - - pkt_info = &ring->pkt_info; - csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, - TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); - tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, - TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); - vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, - TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); - - if (tso && (pkt_info->mss != ring->tx.cur_mss)) - tso_context = 1; - else - tso_context = 0; - - if ((tso_context) && (netif_msg_tx_done(pdata))) { - /* tso is initialized to start... */ - DPRINTK("fxgmac_dev_xmit, tso_%s tso=0x%x, pkt_mss=%d, cur_mss=%d\n", - (pkt_info->mss) ? 
"start" : "stop", tso, pkt_info->mss, - ring->tx.cur_mss); - } - - if (vlan && (pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag)) - vlan_context = 1; - else - vlan_context = 0; - - if (vlan && (netif_msg_tx_done(pdata))) - DPRINTK("fxgmac_dev_xmi:pkt vlan=%d, ring vlan=%d, vlan_context=%d\n", - pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag, - vlan_context); - - desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - - /* Create a context descriptor if this is a TSO pkt_info */ - if (tso_context || vlan_context) { - if (tso_context) { - if (netif_msg_tx_done(pdata)) - DPRINTK("xlgamc dev xmit, construct tso context descriptor, mss=%u\n", - pkt_info->mss); - - /* Set the MSS size */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, - TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); - - /* Mark it as a CONTEXT descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, - TX_CONTEXT_DESC3_CTXT_LEN, 1); - - /* Indicate this descriptor contains the MSS */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, - TX_CONTEXT_DESC3_TCMSSV_LEN, 1); - - ring->tx.cur_mss = pkt_info->mss; - } - - if (vlan_context) { - netif_dbg(pdata, tx_queued, pdata->netdev, - "VLAN context descriptor, ctag=%u\n", - pkt_info->vlan_ctag); - - /* Mark it as a CONTEXT descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, - TX_CONTEXT_DESC3_CTXT_LEN, 1); - - /* Set the VLAN tag */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, - TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); - - /* Indicate this descriptor contains the VLAN tag */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, - TX_CONTEXT_DESC3_VLTV_LEN, 1); - - ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; - } - - cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); 
- desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - } - - /* Update buffer address (for TSO this is the header) */ - dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); - dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); - - /* Update the buffer length */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, - TX_NORMAL_DESC2_HL_B1L_POS, - TX_NORMAL_DESC2_HL_B1L_LEN, - desc_data->skb_dma_len); - - /* VLAN tag insertion check */ - if (vlan) { - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, - TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); - pdata->stats.tx_vlan_packets++; - } - - /* Timestamp enablement check */ - if (FXGMAC_GET_REG_BITS(pkt_info->attributes, - TX_PACKET_ATTRIBUTES_PTP_POS, - TX_PACKET_ATTRIBUTES_PTP_LEN)) - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, - TX_NORMAL_DESC2_TTSE_LEN, 1); - - /* Mark it as First Descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_FD_POS, - TX_NORMAL_DESC3_FD_LEN, 1); - - /* Mark it as a NORMAL descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN, 0); - - /* Set OWN bit if not the first descriptor */ - if (cur_index != start_index) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - if (tso) { - /* Enable TSO */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, - TX_NORMAL_DESC3_TSE_LEN, 1); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, - TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, - TX_NORMAL_DESC3_TCPHDRLEN_LEN, - pkt_info->tcp_header_len / 4); - - pdata->stats.tx_tso_packets++; - } else { - 
/* Enable CRC and Pad Insertion */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, - TX_NORMAL_DESC3_CPC_LEN, 0); - - /* Enable HW CSUM */ - if (csum) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, - TX_NORMAL_DESC3_CIC_LEN, 0x3); - - /* Set the total length to be transmitted */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_FL_POS, - TX_NORMAL_DESC3_FL_LEN, - pkt_info->length); - } - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", - cur_index, start_index, dma_desc->desc0, - dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); - - if (start_index <= cur_index) - i = cur_index - start_index + 1; - else - i = ring->dma_desc_count - start_index + cur_index; - - for (; i < pkt_info->desc_count; i++) { - cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); - - desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); - dma_desc = desc_data->dma_desc; - - /* Update buffer address */ - dma_desc->desc0 = - cpu_to_le32(lower_32_bits(desc_data->skb_dma)); - dma_desc->desc1 = - cpu_to_le32(upper_32_bits(desc_data->skb_dma)); - - /* Update the buffer length */ - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, - TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); - - /* Set OWN bit */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - /* Mark it as NORMAL descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN, 0); - - /* Enable HW CSUM */ - if (csum) - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( - dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, - TX_NORMAL_DESC3_CIC_LEN, 0x3); - } - - /* Set LAST bit for the last descriptor */ - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_LD_POS, - 
TX_NORMAL_DESC3_LD_LEN, 1); - - dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, - TX_NORMAL_DESC2_IC_POS, - TX_NORMAL_DESC2_IC_LEN, 1); - - /* Save the Tx info to report back during cleanup */ - desc_data->tx.packets = pkt_info->tx_packets; - desc_data->tx.bytes = pkt_info->tx_bytes; - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", - cur_index, dma_desc->desc0, dma_desc->desc1, - dma_desc->desc2, dma_desc->desc3); - - /* In case the Tx DMA engine is running, make sure everything - * is written to the descriptor(s) before setting the OWN bit - * for the first descriptor - */ - dma_wmb(); - - /* Set OWN bit for the first descriptor */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - dma_desc = desc_data->dma_desc; - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - TX_NORMAL_DESC3_OWN_POS, - TX_NORMAL_DESC3_OWN_LEN, 1); - - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n", - start_index, dma_desc->desc0, dma_desc->desc1, - dma_desc->desc2, dma_desc->desc3); - - if (netif_msg_tx_queued(pdata)) - fxgmac_dump_tx_desc(pdata, ring, start_index, - pkt_info->desc_count, 1); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit about to call tx_start_xmit, ring xmit_more=%d, txq_stopped=%x\n", - ring->tx.xmit_more, - netif_xmit_stopped(netdev_get_tx_queue( - pdata->netdev, channel->queue_index))); -#else /* ( LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,165))*/ - if (netif_msg_tx_done(pdata)) - DPRINTK("dev_xmit about to call tx_start_xmit, pkt xmit_more=%d, txq_stopped=%x\n", - pkt_info->skb->xmit_more, - netif_xmit_stopped(netdev_get_tx_queue( - pdata->netdev, channel->queue_index))); -#endif - - /* Make sure ownership is written to the descriptor */ - smp_wmb(); - - ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); - - fxgmac_tx_start_xmit(channel, ring); - - /* yzhang for reduce 
debug output */ - if (netif_msg_tx_done(pdata)) { - DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n", - channel->name, start_index & (ring->dma_desc_count - 1), - (ring->cur - 1) & (ring->dma_desc_count - 1)); - } -} - -static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info, - struct fxgmac_dma_desc *dma_desc) -{ - u64 nsec; - - nsec = le32_to_cpu(dma_desc->desc1); - nsec <<= 32; - nsec |= le32_to_cpu(dma_desc->desc0); - if (nsec != 0xffffffffffffffffULL) { - pkt_info->rx_tstamp = nsec; - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, - RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); - } -} - -static void fxgmac_tx_desc_reset(struct fxgmac_desc_data *desc_data) -{ - struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; - - /* Reset the Tx descriptor - * Set buffer 1 (lo) address to zero - * Set buffer 1 (hi) address to zero - * Reset all other control bits (IC, TTSE, B2L & B1L) - * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc) - */ - dma_desc->desc0 = 0; - dma_desc->desc1 = 0; - dma_desc->desc2 = 0; - dma_desc->desc3 = 0; - - /* Make sure ownership is written to the descriptor */ - dma_wmb(); -} - -static void fxgmac_tx_desc_init(struct fxgmac_channel *channel) -{ - struct fxgmac_ring *ring = channel->tx_ring; - struct fxgmac_desc_data *desc_data; - int start_index = ring->cur; - unsigned int i; - - /* Initialize all descriptors */ - for (i = 0; i < ring->dma_desc_count; i++) { - desc_data = FXGMAC_GET_DESC_DATA(ring, i); - - /* Initialize Tx descriptor */ - fxgmac_tx_desc_reset(desc_data); - } - - writereg(channel->pdata->pAdapter, channel->pdata->tx_desc_count - 1, - FXGMAC_DMA_REG(channel, DMA_CH_TDRLR)); - - /* Update the starting address of descriptor ring */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - writereg(channel->pdata->pAdapter, - upper_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDLR_HI)); - 
writereg(channel->pdata->pAdapter, - lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_TDLR_LO)); -} - -static void fxgmac_rx_desc_reset(struct fxgmac_pdata *pdata, - struct fxgmac_desc_data *desc_data, - unsigned int index) -{ - struct fxgmac_dma_desc *dma_desc = desc_data->dma_desc; - - /* Reset the Rx descriptor - * Set buffer 1 (lo) address to header dma address (lo) - * Set buffer 1 (hi) address to header dma address (hi) - * Set buffer 2 (lo) address to buffer dma address (lo) - * Set buffer 2 (hi) address to buffer dma address (hi) and - * set control bits OWN and INTE - */ - dma_desc->desc0 = - cpu_to_le32(lower_32_bits(desc_data->rx.buf.dma_base)); - dma_desc->desc1 = - cpu_to_le32(upper_32_bits(desc_data->rx.buf.dma_base)); - dma_desc->desc2 = 0; - dma_desc->desc3 = 0; - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_INTE_POS, - RX_NORMAL_DESC3_INTE_LEN, 1); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_BUF2V_POS, - RX_NORMAL_DESC3_BUF2V_LEN, 0); - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_BUF1V_POS, - RX_NORMAL_DESC3_BUF1V_LEN, 1); - - /* Since the Rx DMA engine is likely running, make sure everything - * is written to the descriptor(s) before setting the OWN bit - * for the descriptor - */ - dma_wmb(); - - dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_OWN_POS, - RX_NORMAL_DESC3_OWN_LEN, 1); - - /* Make sure ownership is written to the descriptor */ - dma_wmb(); -} - -static void fxgmac_rx_desc_init(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->rx_ring; - unsigned int start_index = ring->cur; - struct fxgmac_desc_data *desc_data; - unsigned int i; - - /* Initialize all descriptors */ - for (i = 0; i < ring->dma_desc_count; i++) { - desc_data = FXGMAC_GET_DESC_DATA(ring, i); - - /* Initialize Rx descriptor */ - 
fxgmac_rx_desc_reset(pdata, desc_data, i); - } - - /* Update the total number of Rx descriptors */ - writereg(pdata->pAdapter, ring->dma_desc_count - 1, - FXGMAC_DMA_REG(channel, DMA_CH_RDRLR)); - - /* Update the starting address of descriptor ring */ - desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); - writereg(pdata->pAdapter, upper_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDLR_HI)); - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDLR_LO)); - - /* Update the Rx Descriptor Tail Pointer */ - desc_data = FXGMAC_GET_DESC_DATA( - ring, start_index + ring->dma_desc_count - 1); - writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), - FXGMAC_DMA_REG(channel, DMA_CH_RDTR_LO)); -} - static int fxgmac_is_context_desc(struct fxgmac_dma_desc *dma_desc) { /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */ - return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, - TX_NORMAL_DESC3_CTXT_LEN); + int regval; + + regval = (int)FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN); + return regval; } static int fxgmac_is_last_desc(struct fxgmac_dma_desc *dma_desc) { /* Rx and Tx share LD bit, so check TDES3.LD bit */ - return FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, TX_NORMAL_DESC3_LD_POS, - TX_NORMAL_DESC3_LD_LEN); + int regval; + + regval = (int)FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN); + return regval; } static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata) { unsigned int max_q_count, q_count; - unsigned int reg, regval; + unsigned int reg; unsigned int i; + u32 regval; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -1464,8 +948,9 @@ static int fxgmac_disable_tx_flow_control(struct fxgmac_pdata *pdata) static int fxgmac_enable_tx_flow_control(struct fxgmac_pdata *pdata) { unsigned int max_q_count, q_count; - unsigned int reg, 
regval; + unsigned int reg; unsigned int i; + u32 regval; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -1569,6 +1054,11 @@ static int fxgmac_config_rx_coalesce(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RIWT, RWT, pdata->rx_riwt); } #endif @@ -1641,6 +1131,11 @@ static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; @@ -1657,31 +1152,38 @@ static void fxgmac_config_rx_buffer_size(struct fxgmac_pdata *pdata) static void fxgmac_config_tso_mode(struct fxgmac_pdata *pdata) { + u32 tso; #ifndef DPDK struct fxgmac_channel *channel; unsigned int i; u32 regval; + tso = pdata->hw_feat.tso; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) break; - if (pdata->hw_feat.tso) { - regval = readreg(pdata->pAdapter, - FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, - DMA_CH_TCR_TSE_LEN, 1); - writereg(pdata->pAdapter, regval, + regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - } + regval = FXGMAC_SET_REG_BITS(regval, DMA_CH_TCR_TSE_POS, + DMA_CH_TCR_TSE_LEN, tso); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_TCR)); } #else struct fxgmac_tx_queue *txq; unsigned int i; + tso = pdata->hw_feat.tso; for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; - 
FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, pdata->tx_pbl); + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } + + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, TSE, tso); } #endif } @@ -1710,7 +1212,12 @@ static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; - FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, pdata->rx_pbl); + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } + FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_CR, SPH, 0); } #endif @@ -1720,11 +1227,11 @@ static void fxgmac_config_sph_mode(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_ECR); } -static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, - unsigned int usec) +static unsigned long fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, + unsigned int usec) { unsigned long rate; - unsigned int ret; + unsigned long ret; rate = pdata->sysclk_rate; @@ -1738,11 +1245,11 @@ static unsigned int fxgmac_usec_to_riwt(struct fxgmac_pdata *pdata, return ret; } -static unsigned int fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata, - unsigned int riwt) +static unsigned long fxgmac_riwt_to_usec(struct fxgmac_pdata *pdata, + unsigned int riwt) { unsigned long rate; - unsigned int ret; + unsigned long ret; rate = pdata->sysclk_rate; @@ -1805,16 +1312,14 @@ static void fxgmac_config_mtl_mode(struct fxgmac_pdata *pdata) static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) { unsigned int ppq, ppq_extra, prio, prio_queues; - unsigned int queue; - unsigned int reg, regval; + unsigned int reg; unsigned int mask; unsigned int i, j; + u32 regval; /* Map the MTL Tx Queues to Traffic Classes * Note: Tx Queues >= Traffic Classes */ - queue = 0; - DPRINTK("need to map TXq(%u) to TC\n", queue); /* Map the 8 VLAN priority 
values to available MTL Rx queues */ prio_queues = @@ -1827,15 +1332,17 @@ static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) for (i = 0, prio = 0; i < prio_queues;) { mask = 0; for (j = 0; j < ppq; j++) { + netif_dbg(pdata, drv, pdata->netdev, "PRIO%u,", prio); netif_dbg(pdata, drv, pdata->netdev, - "PRIO%u mapped to RXq%u\n", prio, i); + " mapped to RXq%u\n", i); mask |= (1 << prio); prio++; } if (i < ppq_extra) { - netif_dbg(pdata, drv, pdata->netdev, - "PRIO%u mapped to RXq%u\n", prio, i); + netif_dbg(pdata, drv, pdata->netdev, "PRIO%u.", i); + netif_dbg(pdata, drv, pdata->netdev, " mapped to Rxq%u", + i); mask |= (1 << prio); prio++; } @@ -1875,11 +1382,11 @@ static void fxgmac_config_queue_mapping(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, regval, pdata->mac_regs + reg); } -static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size, - unsigned int queue_count) +static u32 fxgmac_calculate_per_queue_fifo(unsigned long fifo_size, + unsigned int queue_count) { - unsigned int q_fifo_size; - unsigned int p_fifo; + unsigned long q_fifo_size; + unsigned long p_fifo; /* Calculate the configured fifo size */ q_fifo_size = 1 << (fifo_size + 7); @@ -1900,16 +1407,51 @@ static unsigned int fxgmac_calculate_per_queue_fifo(unsigned int fifo_size, return p_fifo; } +static u32 fxgmac_calculate_max_checksum_size(struct fxgmac_pdata *pdata) +{ + u32 fifo_size; + + fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, + pdata->tx_q_count); + + /* Each increment in the queue fifo size represents 256 bytes of + * fifo, with 0 representing 256 bytes. Distribute the fifo equally + * between the queues. + */ + fifo_size = (fifo_size + 1) * 256; + + /* Packet size < TxQSize - (PBL + N)*(DATAWIDTH/8), + * Datawidth = 128 + * If Datawidth = 32, N = 7, elseif Datawidth != 32, N = 5. 
+ * TxQSize is indicated by TQS field of MTL_TxQ#_Operation_Mode register + * PBL = TxPBL field in the DMA_CH#_TX_Control register in all DMA + * configurations. + */ + fifo_size -= (pdata->tx_pbl * (pdata->pblx8 ? 8 : 1) + 5) * + (FXGMAC_DATA_WIDTH / 8); + fifo_size -= 256; + + return fifo_size; +} + static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata) { - unsigned int fifo_size; + u32 fifo_size; unsigned int i; u32 regval; fifo_size = fxgmac_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size, +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + 1); //force to 1 queue +#else pdata->tx_q_count); +#endif for (i = 0; i < pdata->tx_q_count; i++) { +#if FXGMAC_FAKE_4_TX_QUEUE_ENABLED + if (i > 0) + break; +#endif regval = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_TQOMR_TQS_POS, @@ -1918,14 +1460,15 @@ static void fxgmac_config_tx_fifo_size(struct fxgmac_pdata *pdata) FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); } - netif_info(pdata, drv, pdata->netdev, - "%d Tx hardware queues, %d byte fifo per queue\n", - pdata->tx_q_count, ((fifo_size + 1) * 256)); + netif_info(pdata, drv, pdata->netdev, "%d Tx hardware queues,", + pdata->tx_q_count); + netif_info(pdata, drv, pdata->netdev, " %d byte fifo per queue\n", + ((fifo_size + 1) * 256)); } static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata) { - unsigned int fifo_size; + u32 fifo_size; unsigned int i; u32 regval; @@ -1941,9 +1484,10 @@ static void fxgmac_config_rx_fifo_size(struct fxgmac_pdata *pdata) FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); } - netif_info(pdata, drv, pdata->netdev, - "%d Rx hardware queues, %d byte fifo per queue\n", - pdata->rx_q_count, ((fifo_size + 1) * 256)); + netif_info(pdata, drv, pdata->netdev, "%d Rx hardware queues,", + pdata->rx_q_count); + netif_info(pdata, drv, pdata->netdev, " %d byte fifo per queue\n", + ((fifo_size + 1) * 256)); } static void fxgmac_config_flow_control_threshold(struct fxgmac_pdata *pdata) @@ -1954,10 
+1498,10 @@ static void fxgmac_config_flow_control_threshold(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->rx_q_count; i++) { regval = readreg(pdata->pAdapter, FXGMAC_MTL_REG(pdata, i, MTL_Q_RQOMR)); - /* Activate flow control when less than 6k left in fifo */ + /* Activate flow control when less than 4k left in fifo */ regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFA_POS, MTL_Q_RQOMR_RFA_LEN, 6); - /* De-activate flow control when more than 10k left in fifo */ + /* De-activate flow control when more than 6k left in fifo */ regval = FXGMAC_SET_REG_BITS(regval, MTL_Q_RQOMR_RFD_POS, MTL_Q_RQOMR_RFD_LEN, 10); writereg(pdata->pAdapter, regval, @@ -2046,6 +1590,12 @@ static int fxgmac_config_osp_mode(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP, pdata->tx_osp_mode); } @@ -2075,6 +1625,12 @@ static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } + FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8, pdata->pblx8); } #endif @@ -2082,7 +1638,7 @@ static int fxgmac_config_pblx8(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_tx_pbl_val(struct fxgmac_pdata *pdata) { u32 regval; @@ -2118,6 +1674,11 @@ static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + 
return -1; + } FXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL, pdata->tx_pbl); } #endif @@ -2125,7 +1686,7 @@ static int fxgmac_config_tx_pbl_val(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_rx_pbl_val(struct fxgmac_pdata *pdata) { u32 regval; @@ -2161,6 +1722,11 @@ static int fxgmac_config_rx_pbl_val(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_rx_queues; i++) { rxq = pdata->expansion.eth_dev->data->rx_queues[i]; + if (!rxq) { + DPRINTK("Rx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return -1; + } FXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, pdata->rx_pbl); } #endif @@ -2179,8 +1745,7 @@ static u64 fxgmac_mmc_read(struct fxgmac_pdata *pdata, unsigned int reg_lo) static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) { - unsigned int mmc_isr = - readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR); + u32 mmc_isr = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR); struct fxgmac_stats *stats = &pdata->stats; if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_TISR_TXOCTETCOUNT_GB_POS, @@ -2317,8 +1882,7 @@ static void fxgmac_tx_mmc_int(struct fxgmac_pdata *pdata) static void fxgmac_rx_mmc_int(struct fxgmac_pdata *pdata) { - unsigned int mmc_isr = - readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR); + u32 mmc_isr = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR); struct fxgmac_stats *stats = &pdata->stats; if (FXGMAC_GET_REG_BITS(mmc_isr, MMC_RISR_RXFRAMECOUNT_GB_POS, @@ -2451,12 +2015,6 @@ static void fxgmac_rx_mmc_int(struct fxgmac_pdata *pdata) static void fxgmac_read_mmc_stats(struct fxgmac_pdata *pdata) { struct fxgmac_stats *stats = &pdata->stats; - u32 regval; - - /* Freeze counters */ - regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); - regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 1); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); stats->txoctetcount_gb += 
fxgmac_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); @@ -2593,11 +2151,6 @@ static void fxgmac_read_mmc_stats(struct fxgmac_pdata *pdata) fxgmac_mmc_read(pdata, MMC_RXRECEIVEERRORFRAME); stats->rxcontrolframe_g += fxgmac_mmc_read(pdata, MMC_RXCONTROLFRAME_G); - - /* Un-freeze counters */ - regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_CR); - regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_MCF_POS, MMC_CR_MCF_LEN, 0); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); } static void fxgmac_config_mmc(struct fxgmac_pdata *pdata) @@ -2611,16 +2164,17 @@ static void fxgmac_config_mmc(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MMC_CR_CR_POS, MMC_CR_CR_LEN, 1); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_CR); -#if defined(FUXI_MISC_INT_HANDLE_FEATURE_EN) && FUXI_MISC_INT_HANDLE_FEATURE_EN +#if !(FXGMAC_MISC_INT_HANDLE_FEATURE_ENABLED) writereg(pdata->pAdapter, 0xffffffff, pdata->mac_regs + MMC_IPCRXINTMASK); #endif } static int fxgmac_write_rss_reg(struct fxgmac_pdata *pdata, unsigned int type, - unsigned int index, unsigned int val) + unsigned int index, u32 val) { int ret = 0; + (void)type; writereg(pdata->pAdapter, val, (pdata->base_mem + index)); @@ -2655,7 +2209,6 @@ static int fxgmac_write_rss_options(struct fxgmac_pdata *pdata) return 0; } -#if !defined(DPDK) static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) { unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); @@ -2678,7 +2231,6 @@ static int fxgmac_read_rss_hash_key(struct fxgmac_pdata *pdata, u8 *key_buf) return 0; } -#endif static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) { @@ -2688,9 +2240,10 @@ static int fxgmac_write_rss_hash_key(struct fxgmac_pdata *pdata) while (key_regs--) { ret = fxgmac_write_rss_reg( - pdata, FXGMAC_RSS_HASH_KEY_TYPE, - MGMT_RSS_KEY0 + key_regs * MGMT_RSS_KEY_REG_INC, - cpu_to_be32(*key)); + pdata, (unsigned int)FXGMAC_RSS_HASH_KEY_TYPE, + (unsigned int)(MGMT_RSS_KEY0 + + key_regs * 
MGMT_RSS_KEY_REG_INC), + (unsigned int)(cpu_to_be32(*key))); if (ret) return ret; key++; @@ -2738,7 +2291,7 @@ static int fxgmac_write_rss_lookup_table(struct fxgmac_pdata *pdata) static int fxgmac_set_rss_hash_key(struct fxgmac_pdata *pdata, const u8 *key) { - memcpy(pdata->rss_key, key, sizeof(pdata->rss_key)); + memcpy(pdata->rss_key, (void *)key, sizeof(pdata->rss_key)); return fxgmac_write_rss_hash_key(pdata); } @@ -2749,7 +2302,7 @@ static int fxgmac_set_rss_lookup_table(struct fxgmac_pdata *pdata, unsigned int i; u32 tval; -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED DPRINTK("Set_rss_table, rss ctrl eth=0x%08x\n", 0); return 0; @@ -2784,6 +2337,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) u32 regval; u32 size = 0; +#ifdef FXGMAC_USE_DEFAULT_RSS_KEY_TBALE int ret; if (!pdata->hw_feat.rss) { @@ -2801,6 +2355,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) if (ret) { return ret; } +#endif regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_RSS_CTRL); @@ -2809,7 +2364,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MGMT_RSS_CTRL_TBL_SIZE_POS, MGMT_RSS_CTRL_TBL_SIZE_LEN, size); -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED /* set default cpu id to 1 */ regval = FXGMAC_SET_REG_BITS(regval, 8, 2, 1); #endif @@ -2822,7 +2377,7 @@ static int fxgmac_enable_rss(struct fxgmac_pdata *pdata) MGMT_RSS_CTRL_OPT_LEN, pdata->rss_options); writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); - DPRINTK("enable_rss callout, rss ctrl reg=0x%08x\n", regval); + DPRINTK("enable_rss callout, set val = 0x%08x\n", regval); return 0; } @@ -2834,7 +2389,7 @@ static int fxgmac_disable_rss(struct fxgmac_pdata *pdata) if (!pdata->hw_feat.rss) return -EOPNOTSUPP; -#if FXGMAC_MSIX_CH0RXDIS_EN +#if FXGMAC_MSIX_CH0RXDIS_ENABLED DPRINTK("Disable_rss, rss ctrl eth=0x%08x\n", 0); return 0; @@ -2845,7 +2400,7 @@ static int fxgmac_disable_rss(struct fxgmac_pdata 
*pdata) MAC_RSSCR_RSSE_LEN, 0); writereg(pdata->pAdapter, regval, (pdata->base_mem + MGMT_RSS_CTRL)); - DPRINTK("disable_rss, rss ctrl reg=0x%08x\n", regval); + DPRINTK("disable_rss, set val = 0x%08x\n", regval); return 0; } @@ -2862,10 +2417,8 @@ static void fxgmac_config_rss(struct fxgmac_pdata *pdata) else ret = fxgmac_disable_rss(pdata); - if (ret) { - DBGPRINT(MP_ERROR, - ("fxgmac_config_rss: error configuring RSS\n")); - } + if (ret) + DPRINTK("%s: error configuring RSS\n", __func__); } static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr) @@ -2886,12 +2439,14 @@ static void fxgmac_update_aoe_ipv4addr(struct fxgmac_pdata *pdata, u8 *ip_addr) DPRINTK("%s, covert IP dotted-addr %s to binary 0x%08x ok.\n", __FUNCTION__, ip_addr, cpu_to_be32(ipval)); } else { +#ifdef FXGMAC_AOE_FEATURE_ENABLED /* get ipv4 addr from net device */ ipval = fxgmac_get_netdev_ip4addr(pdata); DPRINTK("%s, Get net device binary IP ok, 0x%08x\n", __FUNCTION__, cpu_to_be32(ipval)); ipval = cpu_to_be32(ipval); +#endif } regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ARP_PROTO_ADDR); @@ -3038,6 +2593,7 @@ static int fxgmac_set_ns_offload(struct fxgmac_pdata *pdata, unsigned int index, return 0; } +#ifdef FXGMAC_NS_OFFLOAD_ENABLED static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, unsigned int param) { @@ -3079,6 +2635,7 @@ static void fxgmac_update_ns_offload_ipv6addr(struct fxgmac_pdata *pdata, if (pdata->expansion.ns_offload_tab_idx >= 2) pdata->expansion.ns_offload_tab_idx = 0; } +#endif static int fxgmac_enable_ns_offload(struct fxgmac_pdata *pdata) { @@ -3234,6 +2791,8 @@ static int fxgmac_set_wake_pattern(struct fxgmac_pdata *pdata, u32 regval = 0; u32 total_cnt = 0, pattern_inherited_cnt = 0; u8 *ptdata, *ptmask; + (void)ptdata; + (void)ptmask; if (pattern_cnt > MAX_PATTERN_COUNT) { DbgPrintF( @@ -3524,7 +3083,7 @@ static int fxgmac_disable_wake_magic_pattern(struct fxgmac_pdata *pdata) return 0; } -#if 
defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED /* * enable Wake packet indication. called to enable before sleep/hibernation * and no needed to call disable for that, fxgmac_get_wake_packet_indication will clear to normal once done. @@ -3582,7 +3141,7 @@ static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata, /* try to check wake reason. GMAC reg 20c0 only tells Magic or remote-pattern * read from MGMT_WOL_CTRL, 1530 instead. - */ + */ regval = (u32)readreg(pdata->pAdapter, pdata->base_mem + MGMT_WOL_CTRL); DbgPrintF(MP_TRACE, "%s - 0x1530=%x.\n", __FUNCTION__, regval); if (!regval) { @@ -3603,7 +3162,8 @@ static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata, * wake_pattern_number, HW should tell, tbd */ for (i = 0; i < MAX_PATTERN_COUNT; i++) { - if (regval & (MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { + if (regval & + ((u32)MGMT_WOL_CTRL_WPI_RWK_PKT_NUMBER << i)) { *wake_pattern_number = i; break; } @@ -3707,7 +3267,7 @@ static void fxgmac_get_wake_packet_indication(struct fxgmac_pdata *pdata, return; } -#endif /* FUXI_PM_WPI_READ_FEATURE_EN */ +#endif /* FXGMAC_PM_WPI_READ_FEATURE_ENABLED */ static int fxgmac_enable_wake_link_change(struct fxgmac_pdata *pdata) { @@ -3730,50 +3290,7 @@ static int fxgmac_disable_wake_link_change(struct fxgmac_pdata *pdata) return 0; } -static void fxgmac_config_wol(struct fxgmac_pdata *pdata, int en) -{ - /* enable or disable WOL. this function only set wake-up type, and power related configure - * will be in other place, see power management. 
- */ - if (!pdata->hw_feat.rwk) { - netdev_err(pdata->netdev, - "error configuring WOL - not supported.\n"); - return; - } - - fxgmac_disable_wake_magic_pattern(pdata); - fxgmac_disable_wake_pattern(pdata); - fxgmac_disable_wake_link_change(pdata); - - if (en) { - /* config mac address for rx of magic or ucast */ - fxgmac_set_mac_address(pdata, (u8 *)(pdata->netdev->dev_addr)); - - /* Enable Magic packet */ - if (pdata->expansion.wol & WAKE_MAGIC) { - fxgmac_enable_wake_magic_pattern(pdata); - } - - /* Enable global unicast packet */ - if (pdata->expansion.wol & WAKE_UCAST || - pdata->expansion.wol & WAKE_MCAST || - pdata->expansion.wol & WAKE_BCAST || - pdata->expansion.wol & WAKE_ARP) { - fxgmac_enable_wake_pattern(pdata); - } - - /* Enable ephy link change */ - if ((FXGMAC_WOL_UPON_EPHY_LINK) && - (pdata->expansion.wol & WAKE_PHY)) { - fxgmac_enable_wake_link_change(pdata); - } - } - device_set_wakeup_enable(/*pci_dev_to_dev*/ (pdata->dev), en); - - DPRINTK("config_wol callout\n"); -} - -static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) +static u32 fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) { u32 value; value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); @@ -3783,20 +3300,18 @@ static int fxgmac_get_ephy_state(struct fxgmac_pdata *pdata) static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) { #ifndef DPDK - unsigned int dma_ch_isr, dma_ch_ier; + u32 dma_ch_isr, dma_ch_ier; + u32 regval; struct fxgmac_channel *channel; unsigned int i; -#ifdef NIC_NET_ADAPETERCX - u32 regval; /* config interrupt to level signal */ regval = (u32)readreg(pdata->pAdapter, pdata->mac_regs + DMA_MR); regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, - 1); + DMA_MA_INTM_LEVLE_ENHANCE); regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, - DMA_MR_QUREAD_LEN, 1); + DMA_MR_QUREAD_LEN, DMA_MR_QUREAD_EN); writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_MR); -#endif channel = pdata->channel_head; for (i = 0; i < 
pdata->channel_count; i++, channel++) { @@ -3865,6 +3380,11 @@ static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) for (i = 0; i < pdata->expansion.eth_dev->data->nb_tx_queues; i++) { txq = pdata->expansion.eth_dev->data->tx_queues[i]; + if (!txq) { + DPRINTK("Tx queue not setup for port %d\n", + pdata->expansion.eth_dev->data->port_id); + return; + } /* Clear all the interrupts which are set */ dma_ch_isr = FXGMAC_DMA_IOREAD(txq, DMA_CH_SR); @@ -3898,8 +3418,8 @@ static void fxgmac_enable_dma_interrupts(struct fxgmac_pdata *pdata) static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) { - unsigned int q_count, i; - unsigned int mtl_q_isr; + unsigned int i; + u32 mtl_q_isr, q_count; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { @@ -3917,7 +3437,7 @@ static void fxgmac_enable_mtl_interrupts(struct fxgmac_pdata *pdata) static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) { - unsigned int mac_ier = 0; + u32 mac_ier = 0; u32 regval; /* Enable Timestamp interrupt */ @@ -3929,11 +3449,13 @@ static void fxgmac_enable_mac_interrupts(struct fxgmac_pdata *pdata) /* Enable all counter interrupts */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RIER); regval = FXGMAC_SET_REG_BITS(regval, MMC_RIER_ALL_INTERRUPTS_POS, - MMC_RIER_ALL_INTERRUPTS_LEN, 0xffffffff); + MMC_RIER_ALL_INTERRUPTS_LEN, + FXGMAC_MMC_IER_ALL_DEFAULT); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_RIER); regval = readreg(pdata->pAdapter, pdata->mac_regs + MMC_TIER); regval = FXGMAC_SET_REG_BITS(regval, MMC_TIER_ALL_INTERRUPTS_POS, - MMC_TIER_ALL_INTERRUPTS_LEN, 0xffffffff); + MMC_TIER_ALL_INTERRUPTS_LEN, + FXGMAC_MMC_IER_ALL_DEFAULT); writereg(pdata->pAdapter, regval, pdata->mac_regs + MMC_TIER); } @@ -4006,11 +3528,12 @@ static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, bool *link_up, bool link_up_wait_to_complete) { u16 link_reg = 0; + (void)link_up_wait_to_complete; + 
if (pdata->base_mem) { + link_reg = (u16)readreg(pdata->pAdapter, + pdata->base_mem + MGMT_EPHY_CTRL); - struct net_device *netdev = pdata->netdev; - if (netdev->base_addr) { - link_reg = - (u16)(*((u32 *)(netdev->base_addr + MGMT_EPHY_CTRL))); + pdata->phy_duplex = !!(link_reg & 0x4); /* * check register address 0x1004 @@ -4033,11 +3556,13 @@ static int fxgmac_check_phy_link(struct fxgmac_pdata *pdata, u32 *speed, MGMT_EPHY_CTRL_STA_SPEED_MASK) >> MGMT_EPHY_CTRL_STA_SPEED_POS; } else { - DPRINTK("fxgmac_check_phy_link ethernet PHY not released.\n"); + DPRINTK("%s ethernet PHY not released link reg %d.\n", + __func__, link_reg); return -1; } } else { - DPRINTK("fxgmac_check_phy_link null base addr err\n"); + DPRINTK("%s null base addr err link reg %d\n", + __func__, link_reg); return -1; } @@ -4078,10 +3603,12 @@ static int fxgmac_write_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, busy--; } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); - DPRINTK("fxgmac_write_ephy_reg id %d %s, ctrl=0x%08x, data=0x%08x\n", - reg_id, (regval & 0x1) ? "err" : "ok", regval, data); + DPRINTK("%s id %d,", __func__, reg_id); + DPRINTK(" %s,", (regval & 0x1) ? "err" : "ok"); + DPRINTK(" ctrl=0x%08x,", regval); + DPRINTK(" data=0x%08x\n", data); - return (regval & MAC_MDIO_ADDRESS_BUSY) ? -1 : 0; /* -1 indicates err */ + return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-ETIMEDOUT : 0; } static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, @@ -4101,14 +3628,19 @@ static int fxgmac_read_ephy_reg(struct fxgmac_pdata *pdata, u32 reg_id, if (0 == (regval & MAC_MDIO_ADDRESS_BUSY)) { regret = readreg(pdata->pAdapter, pdata->mac_regs + MAC_MDIO_DATA); - if (data) + if (data) { *data = regret; - return regret; + return 0; + } else { + return -ENOBUFS; + } } - DPRINTK("fxgmac_read_ephy_reg id=0x%02x err, busy=%d, ctrl=0x%08x.\n", - reg_id, busy, regval); - return -1; + DPRINTK("%s id=0x%02x err,", __func__, reg_id); + DPRINTK(" busy=%d,", busy); + DPRINTK(" ctrl=0x%08x\n", regval); + + return -ETIMEDOUT; } static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, @@ -4128,19 +3660,25 @@ static int fxgmac_write_ephy_mmd_reg(struct fxgmac_pdata *pdata, u32 reg_id, busy--; } while ((regval & MAC_MDIO_ADDRESS_BUSY) && (busy)); - DPRINTK("fxgmac_write_ephy_mmd_reg id %d mmd %d %s, ctrl=0x%08x, data=0x%08x\n", - reg_id, mmd, (regval & 0x1) ? "err" : "ok", regval, data); + DPRINTK("%s id %d,", __func__, reg_id); + DPRINTK(" mmd %d,", mmd); + DPRINTK(" %s,", (regval & 0x1) ? "err" : "ok"); + DPRINTK(" ctrl=0x%08x,", regval); + DPRINTK(" data=0x%08x\n", data); return (regval & MAC_MDIO_ADDRESS_BUSY) ? 
-1 : 0; /* -1 indicates err */ } static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) { +#ifndef FXGMAC_NOT_REPORT_PHY_FC_CAPABILITY u32 regval = 0; +#endif fxgmac_config_tx_flow_control(pdata); fxgmac_config_rx_flow_control(pdata); +#ifndef FXGMAC_NOT_REPORT_PHY_FC_CAPABILITY fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); /* set auto negotiation advertisement pause ability */ if (pdata->tx_pause || pdata->rx_pause) { @@ -4159,31 +3697,35 @@ static void fxgmac_config_flow_control(struct fxgmac_pdata *pdata) PHY_MII_ADVERTISE_ASYPAUSE_LEN, 0); } fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); - /* after change the auto negotiation advertisement need to soft reset */ - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, - 1); - fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); +#endif } static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, struct fxphy_ag_adv phy_ag_adv) { - u32 regval = 0, ret = 0; + u32 regval = 0; + int ret = 0; + + ret = fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + if (ret < 0) + return ret; if (phy_ag_adv.auto_neg_en) { - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, 1); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); } else { - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, PHY_CR_AUTOENG_LEN, 0); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); } - fxgmac_read_ephy_reg(pdata, REG_MII_CTRL1000, ®val); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + if (ret < 0) + return ret; + + ret = fxgmac_read_ephy_reg(pdata, REG_MII_CTRL1000, ®val); + if (ret < 0) + return ret; + if (phy_ag_adv.full_1000m) { regval = FXGMAC_SET_REG_BITS(regval, PHY_MII_CTRL1000_1000FULL_POS, @@ -4202,9 +3744,14 @@ static int fxgmac_set_ephy_autoneg_advertise(struct 
fxgmac_pdata *pdata, PHY_MII_CTRL1000_1000HALF_POS, PHY_MII_CTRL1000_1000HALF_LEN, 0); } - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); - fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_CTRL1000, regval); + if (ret < 0) + return ret; + + ret = fxgmac_read_ephy_reg(pdata, REG_MII_ADVERTISE, ®val); + if (ret < 0) + return ret; if (phy_ag_adv.full_100m) { regval = FXGMAC_SET_REG_BITS(regval, @@ -4243,16 +3790,112 @@ static int fxgmac_set_ephy_autoneg_advertise(struct fxgmac_pdata *pdata, PHY_MII_ADVERTISE_10HALF_LEN, 0); } - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_ADVERTISE, regval); + if (ret < 0) + return ret; + /* after change the auto negotiation advertisement need to soft reset */ - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + ret = fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); + if (ret < 0) + return ret; + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, 1); - ret |= fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); + ret = fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); return ret; } +static void fxgmac_phy_green_ethernet(struct fxgmac_pdata *pdata) +{ + if (pdata->phy_green_ethernet) { + /* GREEN */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_PMA_DBG0_ADC); + fxgmac_write_ephy_reg( + pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE); + + /* CLD */ + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG0); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_NP_WP); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_REG_CLD_REG1); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ENABLE_CLD_GT_HT_BT); + } +} + +static void fxgmac_phy_eee_feature(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + if (pdata->phy_eee) { + regval = readreg(pdata->pAdapter, pdata->mac_regs 
+ DMA_SBMR); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EN_LPI_POS, + DMA_SBMR_EN_LPI_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_LPI_XIT_PKT_POS, + DMA_SBMR_LPI_XIT_PKT_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_AALE_POS, + DMA_SBMR_AALE_LEN, 1); + writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); + + regval = + readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIATE_POS, + MAC_LPIATE_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPITXA_POS, + MAC_LPITXA_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_PLS_POS, MAC_PLS_LEN, + 1); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIEN_POS, + MAC_LPIEN_LEN, 1); + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_LPI_STA); + + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_LPI_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIET_POS, + MAC_LPIET_LEN, + MAC_LPI_ENTRY_TIMER); + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_LPI_TIMER); + + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_LPI_CONTROL); + regval = FXGMAC_SET_REG_BITS(regval, MAC_TWT_POS, MAC_TWT_LEN, + MAC_TWT_TIMER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_LST_POS, MAC_LST_LEN, + MAC_LST_TIMER); + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_LPI_CONTROL); + + regval = readreg(pdata->pAdapter, + pdata->mac_regs + MAC_MS_TIC_COUNTER); + regval = FXGMAC_SET_REG_BITS(regval, MAC_MS_TIC_POS, + MAC_MS_TIC_LEN, MAC_MS_TIC); + writereg(pdata->pAdapter, regval, + pdata->mac_regs + MAC_MS_TIC_COUNTER); + + fxgmac_write_ephy_mmd_reg(pdata, REG_MMD_EEE_ABILITY_REG, 0x07, + REG_MMD_EEE_ABILITY_VALUE); + } +} + +static void fxgmac_phy_disable_smartspeed_feature(struct fxgmac_pdata *pdata) +{ + u32 regval = 0; + + if (pdata->phy_disablesmartspeed) { + fxgmac_read_ephy_reg(pdata, REG_MII_DOWNG_CTRL, ®val); + regval = FXGMAC_SET_REG_BITS(regval, REG_SMART_SPEED_POS, + REG_SMART_SPEED_LEN, 0); + 
fxgmac_write_ephy_reg(pdata, REG_MII_DOWNG_CTRL, regval); + } +} + static int fxgmac_phy_config(struct fxgmac_pdata *pdata) { struct fxphy_ag_adv phy_ag_adv; @@ -4260,7 +3903,7 @@ static int fxgmac_phy_config(struct fxgmac_pdata *pdata) if (pdata->phy_autoeng) { phy_ag_adv.auto_neg_en = 1; } else { - phy_ag_adv.auto_neg_en = 0; + phy_ag_adv.auto_neg_en = (pdata->phy_speed == SPEED_1000M) ? 1 : 0; } switch (pdata->phy_speed) { case SPEED_1000: @@ -4294,83 +3937,12 @@ static int fxgmac_phy_config(struct fxgmac_pdata *pdata) default: break; } + fxgmac_phy_green_ethernet(pdata); + fxgmac_phy_eee_feature(pdata); + fxgmac_phy_disable_smartspeed_feature(pdata); return fxgmac_set_ephy_autoneg_advertise(pdata, phy_ag_adv); } -static void fxgmac_phy_green_ethernet(struct fxgmac_pdata *pdata) -{ - u32 regval = 0; - /* GREEN */ - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, - REG_MII_EXT_REG_PMA_DBG0_ADC); - fxgmac_write_ephy_reg( - pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE); - - /* CLD */ - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, - REG_MII_EXT_REG_CLD_REG0); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ENABLE_CLD_NP_WP); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, - REG_MII_EXT_REG_CLD_REG1); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ENABLE_CLD_GT_HT_BT); - - /* after change green ethernet & CLD need to soft reset */ - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, - 1); - fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); -} - -static void fxgmac_phy_eee_feature(struct fxgmac_pdata *pdata) -{ - u32 regval = 0; - - regval = readreg(pdata->pAdapter, pdata->mac_regs + DMA_SBMR); - regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EN_LPI_POS, - DMA_SBMR_EN_LPI_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_LPI_XIT_PKT_POS, - DMA_SBMR_LPI_XIT_PKT_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, 
DMA_SBMR_AALE_POS, - DMA_SBMR_AALE_LEN, 1); - writereg(pdata->pAdapter, regval, pdata->mac_regs + DMA_SBMR); - - regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); - regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIATE_POS, MAC_LPIATE_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, MAC_LPITXA_POS, MAC_LPITXA_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, MAC_PLS_POS, MAC_PLS_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIEN_POS, MAC_LPIEN_LEN, 1); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_STA); - - regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_TIMER); - regval = FXGMAC_SET_REG_BITS(regval, MAC_LPIET_POS, MAC_LPIET_LEN, - MAC_LPI_ENTRY_TIMER); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_TIMER); - - regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_CONTROL); - regval = FXGMAC_SET_REG_BITS(regval, MAC_TWT_POS, MAC_TWT_LEN, - MAC_TWT_TIMER); - regval = FXGMAC_SET_REG_BITS(regval, MAC_LST_POS, MAC_LST_LEN, - MAC_LST_TIMER); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_LPI_CONTROL); - - regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_MS_TIC_COUNTER); - regval = FXGMAC_SET_REG_BITS(regval, MAC_MS_TIC_POS, MAC_MS_TIC_LEN, - MAC_MS_TIC); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_MS_TIC_COUNTER); - - fxgmac_write_ephy_mmd_reg(pdata, REG_MMD_EEE_ABILITY_REG, 0x07, - REG_MMD_EEE_ABILITY_VALUE); - - /* after change EEE need to soft reset */ - fxgmac_read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, - 1); - fxgmac_write_ephy_reg(pdata, REG_MII_BMCR, regval); -} - static void fxgmac_reset_phy(struct fxgmac_pdata *pdata) { u32 value = 0; @@ -4393,7 +3965,7 @@ void fxgmac_release_phy(struct fxgmac_pdata *pdata) usleep_range_ex(pdata->pAdapter, 100, 150); value = readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL); DBGPRINT(MP_LOUD, ("0x1004: 0x%x\n", value)); -#ifdef AISC_MODE +#ifdef 
ASIC_MODE fxgmac_read_ephy_reg(pdata, REG_MII_SPEC_CTRL, &value); /* read phy specific control */ value = FXGMAC_SET_REG_BITS(value, PHY_MII_SPEC_CTRL_CRS_ON_POS, @@ -4411,16 +3983,35 @@ void fxgmac_release_phy(struct fxgmac_pdata *pdata) MII_EXT_ANALOG_CFG3_ADC_START_CFG_DEFAULT); fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); - fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &value); - /* led index use bit0~bit5 */ - value = FXGMAC_GET_REG_BITS(value, EFUSE_LED_POS, EFUSE_LED_LEN); fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG2); fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ANALOG_CFG2_LED_VALUE); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_ANALOG_CFG8); - fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, - REG_MII_EXT_ANALOG_CFG8_LED_VALUE); + REG_MII_EXT_ANALOG_CFG2_VALUE); + + cfg_r32(pdata, REG_PCI_SUB_VENDOR_ID, &value); + if (value == AISTONEID_137D1D05_ADJUST_SI) { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG8_137D1D05_VALUE); + } else { + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_ANALOG_CFG8); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, + REG_MII_EXT_ANALOG_CFG8_VALUE); + } + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, + REG_MII_EXT_AFE_CONTROL_REGISTER3); + fxgmac_read_ephy_reg(pdata, REG_MII_EXT_DATA, &value); + value = FXGMAC_SET_REG_BITS(value, + REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_POS, + REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_LEN, + REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_ON); + fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, value); + + fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &value); + + value = FXGMAC_GET_REG_BITS(value, EFUSE_LED_POS, EFUSE_LED_LEN); if (EFUSE_LED_COMMON_SOLUTION != value) { fxgmac_write_ephy_reg(pdata, REG_MII_EXT_ADDR, REG_MII_EXT_COMMON_LED0_CFG); @@ -4615,7 +4206,7 @@ static void fxgmac_close_phy_led(struct fxgmac_pdata *pdata) 
fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); } -static void fxmgac_config_led_under_active(struct fxgmac_pdata *pdata) +static void fxgmac_config_led_under_active(struct fxgmac_pdata *pdata) { u32 regval = 0; fxgmac_efuse_read_data(pdata, EFUSE_LED_ADDR, &regval); @@ -4743,149 +4334,10 @@ static void fxgmac_config_led_under_disable(struct fxgmac_pdata *pdata) } } -extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel); - -static int fxgmac_dev_read(struct fxgmac_channel *channel) -{ - struct fxgmac_pdata *pdata = channel->pdata; - struct fxgmac_ring *ring = channel->rx_ring; - struct net_device *netdev = pdata->netdev; - struct fxgmac_desc_data *desc_data; - struct fxgmac_dma_desc *dma_desc; - struct fxgmac_pkt_info *pkt_info; - unsigned int err, etlt, l34t; - - static unsigned int cnt_incomplete; - - desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); - dma_desc = desc_data->dma_desc; - pkt_info = &ring->pkt_info; - - /* Check for data availability */ - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, - RX_NORMAL_DESC3_OWN_LEN)) { - return 1; - } - - /* Make sure descriptor fields are read after reading the OWN bit */ - dma_rmb(); - - if (netif_msg_rx_status(pdata)) - fxgmac_dump_rx_desc(pdata, ring, ring->cur); - - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, - RX_NORMAL_DESC3_CTXT_LEN)) { - /* Timestamp Context Descriptor */ - fxgmac_get_rx_tstamp(pkt_info, dma_desc); - - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); - if (netif_msg_rx_status(pdata)) - DPRINTK("dev_read context desc, ch=%s\n", channel->name); - return 0; - } - - /* Normal Descriptor, be sure Context Descriptor bit is off */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - 
pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, - RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); - - /* Get the header length */ - if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, - RX_NORMAL_DESC3_FD_LEN)) { - desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE( - dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, - RX_NORMAL_DESC2_HL_LEN); - if (desc_data->rx.hdr_len) - pdata->stats.rx_split_header_packets++; - } - l34t = 0; - - /* Get the pkt_info length */ - desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, - RX_NORMAL_DESC3_PL_POS, - RX_NORMAL_DESC3_PL_LEN); - - if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, - RX_NORMAL_DESC3_LD_LEN)) { - /* Not all the data has been transferred for this pkt_info */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, - RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); - cnt_incomplete++; - if ((cnt_incomplete < 2) && netif_msg_rx_status(pdata)) - DPRINTK("dev_read NOT last desc, pkt incomplete yet,%u\n", - cnt_incomplete); - - return 0; - } - if ((cnt_incomplete) && netif_msg_rx_status(pdata)) - DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n", - cnt_incomplete); - cnt_incomplete = 0; /* when back to normal, reset cnt */ - - /* This is the last of the data for this pkt_info */ - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, - RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); - - /* Set checksum done indicator as appropriate */ - if (netdev->features & NETIF_F_RXCSUM) - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); - - /* Check for errors (only valid in last descriptor) */ - err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, - RX_NORMAL_DESC3_ES_LEN); - etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, - RX_NORMAL_DESC3_ETLT_LEN); - if ((err) 
&& netif_msg_rx_status(pdata)) { - DPRINTK("dev_read:head_len=%u, pkt_len=%u, err=%u, etlt=%#x, desc2=0x%08x, desc3=0x%08x\n", - desc_data->rx.hdr_len, desc_data->rx.len, err, etlt, - dma_desc->desc2, dma_desc->desc3); - } - - if (!err || !etlt) { - /* No error if err is 0 or etlt is 0 */ - if ((etlt == 0x4 /*yzhang changed to 0x4, 0x09*/) && - (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, - RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); - pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE( - dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, - RX_NORMAL_DESC0_OVT_LEN); - netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", - pkt_info->vlan_ctag); - } - } else { - if (etlt == 0x05 || etlt == 0x06) - pkt_info->attributes = FXGMAC_SET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 0); - else - pkt_info->errors = FXGMAC_SET_REG_BITS( - pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, - RX_PACKET_ERRORS_FRAME_LEN, 1); - } - - return 0; -} - static int fxgmac_enable_int(struct fxgmac_channel *channel, enum fxgmac_int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; dma_ch_ier = readreg(channel->pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_IER)); @@ -4947,7 +4399,7 @@ static int fxgmac_enable_int(struct fxgmac_channel *channel, static int fxgmac_disable_int(struct fxgmac_channel *channel, enum fxgmac_int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; dma_ch_ier = readreg(channel->pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_IER)); @@ -5007,10 +4459,53 @@ static int fxgmac_disable_int(struct fxgmac_channel *channel, return 0; } +static void fxgmac_enable_rx_tx_ints(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + enum fxgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && 
channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_RI; + else + continue; + + fxgmac_enable_int(channel, int_id); + } +} + +static void fxgmac_disable_rx_tx_ints(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel; + enum fxgmac_int int_id; + unsigned int i; + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + if (channel->tx_ring && channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI_RI; + else if (channel->tx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_TI; + else if (channel->rx_ring) + int_id = FXGMAC_INT_DMA_CH_SR_RI; + else + continue; + + fxgmac_disable_int(channel, int_id); + } +} + static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id) { - unsigned int dma_ch_ier; + u32 dma_ch_ier; + (void)int_id; dma_ch_ier = readreg(channel->pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_SR /*1160*/)); writereg(channel->pdata->pAdapter, dma_ch_ier, @@ -5021,8 +4516,8 @@ static int fxgmac_dismiss_DMA_int(struct fxgmac_channel *channel, int int_id) static void fxgmac_dismiss_MTL_Q_int(struct fxgmac_pdata *pdata) { - unsigned int q_count, i; - unsigned int mtl_q_isr; + unsigned int i; + u32 mtl_q_isr, q_count; q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); for (i = 0; i < q_count; i++) { @@ -5036,12 +4531,15 @@ static void fxgmac_dismiss_MTL_Q_int(struct fxgmac_pdata *pdata) static int fxgmac_dismiss_MAC_int(struct fxgmac_pdata *pdata) { - u32 regval, regErrVal; + u32 regval, err_val; /* all MAC interrupts in 0xb0 */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_ISR); /* MAC tx/rx error interrupts in 0xb8 */ - regErrVal = readreg(pdata->pAdapter, pdata->mac_regs + MAC_TX_RX_STA); + err_val = readreg(pdata->pAdapter, pdata->mac_regs + MAC_TX_RX_STA); + (void)regval; + (void)err_val; + return 0; } @@ -5051,6 +4549,8 @@ static int 
fxgmac_dismiss_MAC_PMT_int(struct fxgmac_pdata *pdata) /* MAC PMT interrupts in 0xc0 */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_PMT_STA); + (void)regval; + return 0; } @@ -5060,6 +4560,7 @@ static int fxgmac_dismiss_MAC_LPI_int(struct fxgmac_pdata *pdata) /* MAC PMT interrupts in 0xc0 */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_LPI_STA); + (void)regval; return 0; } @@ -5070,21 +4571,15 @@ static int fxgmac_dismiss_MAC_DBG_int(struct fxgmac_pdata *pdata) /* MAC PMT interrupts in 0xc0 */ regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); - writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_DBG_STA); return 0; } -int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) +static int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) { struct fxgmac_channel *channel; - unsigned int i, regval; - struct net_device *netdev = pdata->netdev; - - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac_dismiss_all_int callin\n"); - } + unsigned int i; channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { @@ -5096,25 +4591,33 @@ int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata) fxgmac_dismiss_MAC_LPI_int(pdata); fxgmac_dismiss_MAC_DBG_int(pdata); - /* control module int to PCIe slot */ - if (netdev->base_addr) { - regval = (unsigned int)(*( - (u32 *)(netdev->base_addr + MGMT_INT_CTRL0))); - } + if (netif_msg_drv(pdata)) + DPRINTK("%s callin %d\n", __func__, i); + return 0; } static void fxgmac_set_interrupt_moderation(struct fxgmac_pdata *pdata) { u32 value = 0, time; - +#if defined(FXGMAC_INTERRUPT_MODERATION_EXTERN) + pdata->intr_mod_timer = pdata->intr_mod_timer; +#else pdata->intr_mod_timer = INT_MOD_IN_US; +#endif - time = (pdata->intr_mod) ? pdata->intr_mod_timer : 0; +#if defined(FXGMAC_INTERRUPT_TX_INTERVAL) time = (pdata->intr_mod) ? pdata->tx_usecs : 0; +#else + time = (pdata->intr_mod) ? 
pdata->intr_mod_timer : 0; +#endif value = FXGMAC_SET_REG_BITS(value, INT_MOD_TX_POS, INT_MOD_TX_LEN, time); + +#if defined(FXGMAC_INTERRUPT_RX_INTERVAL) time = (pdata->intr_mod) ? pdata->rx_usecs : 0; +#endif + value = FXGMAC_SET_REG_BITS(value, INT_MOD_RX_POS, INT_MOD_RX_LEN, time); writereg(pdata->pAdapter, value, pdata->base_mem + INT_MOD); @@ -5129,6 +4632,7 @@ static void fxgmac_enable_msix_rxtxinterrupt(struct fxgmac_pdata *pdata) MSIX_TBL_MASK_OFFSET + intid * 16); } } + static void fxgmac_disable_msix_interrupt(struct fxgmac_pdata *pdata) { u32 intid; @@ -5139,10 +4643,12 @@ static void fxgmac_disable_msix_interrupt(struct fxgmac_pdata *pdata) MSIX_TBL_MASK_OFFSET + intid * 16); } } -static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) + +static int fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) { u32 intid, regval = 0; -#if !(FUXI_EPHY_INTERRUPT_D0_OFF) + int ret = 0; +#if !(FXGMAC_EPHY_INTERRUPT_D0_OFF) struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; #endif @@ -5154,18 +4660,22 @@ static void fxgmac_enable_msix_rxtxphyinterrupt(struct fxgmac_pdata *pdata) writereg(pdata->pAdapter, 0, pdata->base_mem + MSIX_TBL_BASE_ADDR + MSIX_TBL_MASK_OFFSET + MSI_ID_PHY_OTHER * 16); -#if !(FUXI_EPHY_INTERRUPT_D0_OFF) +#if !(FXGMAC_EPHY_INTERRUPT_D0_OFF) hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, PHY_INT_MASK_LINK_UP_LEN, 1); regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, PHY_INT_MASK_LINK_DOWN_LEN, 1); - hw_ops->write_ephy_reg( + ret = hw_ops->write_ephy_reg( pdata, REG_MII_INT_MASK, regval); /* enable phy interrupt ASIC bit10 linkup bit11 linkdown */ + return ret; +#else + return 0; #endif } + static void fxgmac_enable_msix_one_interrupt(struct fxgmac_pdata *pdata, u32 intid) { @@ -5184,13 +4694,109 @@ static void fxgmac_disable_msix_one_interrupt(struct fxgmac_pdata *pdata, static bool 
fxgmac_enable_mgm_interrupt(struct fxgmac_pdata *pdata) { +#ifdef FXGMAC_MISC_ENABLED writereg(pdata->pAdapter, 0xf0000000, pdata->base_mem + MGMT_INT_CTRL0); +#else + writereg(pdata->pAdapter, 0x00200000, pdata->base_mem + MGMT_INT_CTRL0); +#endif + return true; +} + +static bool fxgmac_enable_source_interrupt(struct fxgmac_pdata *pdata) +{ + u32 regval; + +#ifdef FXGMAC_MISC_ENABLED + writereg(pdata->pAdapter, 0xf0000000, pdata->base_mem + MGMT_INT_CTRL0); +#else + writereg(pdata->pAdapter, 0x00200000, pdata->base_mem + MGMT_INT_CTRL0); +#endif + + regval = 0; + regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); //enable phy interrupt + fxgmac_enable_rx_tx_ints(pdata); return true; } static bool fxgmac_disable_mgm_interrupt(struct fxgmac_pdata *pdata) { writereg(pdata->pAdapter, 0xffff0000, pdata->base_mem + MGMT_INT_CTRL0); + + return true; +} + +static bool fxgmac_disable_source_interrupt(struct fxgmac_pdata *pdata) +{ + unsigned int i, ti, ri, dma_ch_isr; + unsigned int dma_channel_status = 0, regval = 0; + struct fxgmac_channel *channel; + + for (i = 0; i < pdata->channel_count; i++) { + channel = pdata->channel_head + i; + + dma_ch_isr = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + + ti = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, + DMA_CH_SR_TI_LEN); + ri = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, + DMA_CH_SR_RI_LEN); + + if (!pdata->per_channel_irq && (ti || ri)) + dma_channel_status |= (1 << i); + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, + DMA_CH_SR_TPS_LEN)) + pdata->stats.tx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RPS_POS, + DMA_CH_SR_RPS_LEN)) + pdata->stats.rx_process_stopped++; + + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TBU_POS, + DMA_CH_SR_TBU_LEN)) + 
pdata->stats.tx_buffer_unavailable++; + + /* for legacy interrupt, check rx buffer interrupt status, */ + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RBU_POS, + DMA_CH_SR_RBU_LEN)) + pdata->stats.rx_buffer_unavailable++; + + /* Restart the device on a Fatal Bus Error */ + if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_FBE_POS, + DMA_CH_SR_FBE_LEN)) { + pdata->stats.fatal_bus_error++; + } + } + + fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, &pdata->mgmt_phy_val); + + if (!(dma_channel_status & 0x0f) && + !(pdata->mgmt_phy_val & ((1 << PHY_INT_STAT_LINK_UP_POS) | + (1 << PHY_INT_MASK_LINK_DOWN_POS)))) { + return false; + } + + fxgmac_disable_rx_tx_ints(pdata); + regval = 0; + fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, + regval); /* disable phy interrupt */ + + channel = pdata->channel_head; + for (i = 0; i < pdata->channel_count; i++, channel++) { + regval = readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + writereg(pdata->pAdapter, regval, + FXGMAC_DMA_REG(channel, DMA_CH_SR)); + } + + readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); + return true; } @@ -5206,10 +4812,10 @@ static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) MTL_Q_TQOMR_FTQ_LEN, 1); writereg(pdata->pAdapter, regval, FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); - DPRINTK("fxgmac_flush_tx_queues, reg=0x%p, val=0x%08x\n", - FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); + DPRINTK("%s, reg=0x%p,", __func__, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK(" val=0x%08x\n", regval); } - for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; do { @@ -5221,11 +4827,11 @@ static int fxgmac_flush_tx_queues(struct fxgmac_pdata *pdata) MTL_Q_TQOMR_FTQ_LEN); } while (--count && regval); - DPRINTK("fxgmac_flush_tx_queues wait... reg=0x%p, val=0x%08x\n", - FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR), regval); - if (regval) { /*(!count)*/ + DPRINTK("%s wait... reg=0x%p,", __func__, + FXGMAC_MTL_REG(pdata, i, MTL_Q_TQOMR)); + DPRINTK(" ... 
val=0x%08x\n", regval); + if (regval) return -EBUSY; - } } return 0; @@ -5239,6 +4845,13 @@ static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) /* Set enhanced addressing mode */ regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_EAME_POS, DMA_SBMR_EAME_LEN, 1); + + /* Out standing read/write requests*/ + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_RD_OSR_LMT_POS, + DMA_SBMR_RD_OSR_LMT_LEN, 0x7); + regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_WR_OSR_LMT_POS, + DMA_SBMR_WR_OSR_LMT_LEN, 0x7); + /* Set the System Bus mode */ regval = FXGMAC_SET_REG_BITS(regval, DMA_SBMR_FB_POS, DMA_SBMR_FB_LEN, 0); @@ -5255,9 +4868,21 @@ static void fxgmac_config_dma_bus(struct fxgmac_pdata *pdata) static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) { - unsigned int i = 0, regval = 0; + unsigned int i = 0; + u32 regval = 0; +#ifdef FXGMAC_LINK_SPEED_CHECK_PHY_LINK + u8 link = 0; + regval = fxgmac_get_ephy_state(pdata); + link = FXGMAC_GET_REG_BITS(regval, MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, + MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); +#endif fxgmac_phy_config(pdata); +#ifdef FXGMAC_LINK_SPEED_CHECK_PHY_LINK + /* no need to wait for link up again if link is down before s*/ + if (!link) + return; +#endif for (i = 0, regval = fxgmac_get_ephy_state(pdata); (!(regval & MGMT_EPHY_CTRL_STA_EPHY_RELEASE) || !(regval & MGMT_EPHY_CTRL_STA_EPHY_LINKUP)) && @@ -5271,23 +4896,30 @@ static void fxgmac_legacy_link_speed_setting(struct fxgmac_pdata *pdata) static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) { - unsigned int regval = 0; + u32 regval = 0; + int speed = SPEED_10; + (void)speed; fxgmac_disable_rx(pdata); /* HERE, WE NEED TO CONSIDER PHY CONFIG...TBD */ - DPRINTK("fxgmac_config_powerdown, phy and mac status update\n"); - /* for phy cable loopback, it can't configure phy speed, it will cause os resume again by link change although it has finished speed setting, */ + DPRINTK("fxgmac_config_powerdown, phy and mac status update speed %d\n", + 
speed); + + /* for phy cable loopback, it can't configure phy speed, it will cause + * os resume again by link change although it has finished speed setting + */ if (!phyloopback) { - /* When the Linux platform enters the s4 state, it goes through - * the suspend->resume->suspend process. The process of - * suspending again after resume is fast, and PHY - * auto-negotiation is not yet complete, so the - * auto-negotiation of PHY must be carried out again. When the - * Linux platform enters the s4 state, force speed to 10M. - */ - pdata->phy_speed = SPEED_10; + if (!pdata->support_10m_link) + speed = SPEED_100; + +#if defined(FXGMAC_LINK_SPEED_CHECK_PHY_LINK) + pdata->phy_autoeng = AUTONEG_ENABLE; + pdata->phy_speed = speed; + fxgmac_legacy_link_speed_setting(pdata); +#else fxgmac_legacy_link_speed_setting(pdata); +#endif } fxgmac_config_mac_speed(pdata); @@ -5306,88 +4938,14 @@ static void fxgmac_pre_powerdown(struct fxgmac_pdata *pdata, bool phyloopback) fxgmac_set_mac_address(pdata, pdata->mac_addr); } -/* only supports four patterns, and patterns will be cleared on every call */ -static void fxgmac_set_pattern_data(struct fxgmac_pdata *pdata) -{ - u32 ip_addr, i = 0; - u8 type_offset, op_offset, tip_offset; - struct pattern_packet packet; - struct wol_bitmap_pattern - pattern[4]; /* for WAKE_UCAST, WAKE_BCAST, WAKE_MCAST, WAKE_ARP. 
*/ - - memset(pattern, 0, sizeof(struct wol_bitmap_pattern) * 4); - - /* config ucast */ - if (pdata->expansion.wol & WAKE_UCAST) { - pattern[i].mask_info[0] = 0x3F; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - memcpy(pattern[i].pattern_info, pdata->mac_addr, ETH_ALEN); - pattern[i].pattern_offset = 0; - i++; - } - - /* config bcast */ - if (pdata->expansion.wol & WAKE_BCAST) { - pattern[i].mask_info[0] = 0x3F; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - memset(pattern[i].pattern_info, 0xFF, ETH_ALEN); - pattern[i].pattern_offset = 0; - i++; - } - - /* config mcast */ - if (pdata->expansion.wol & WAKE_MCAST) { - pattern[i].mask_info[0] = 0x7; - pattern[i].mask_size = sizeof(pattern[0].mask_info); - pattern[i].pattern_info[0] = 0x1; - pattern[i].pattern_info[1] = 0x0; - pattern[i].pattern_info[2] = 0x5E; - pattern[i].pattern_offset = 0; - i++; - } - - /* config arp */ - if (pdata->expansion.wol & WAKE_ARP) { - memset(pattern[i].mask_info, 0, sizeof(pattern[0].mask_info)); - type_offset = offsetof(struct pattern_packet, ar_pro); - pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; - type_offset++; - pattern[i].mask_info[type_offset / 8] |= 1 << type_offset % 8; - op_offset = offsetof(struct pattern_packet, ar_op); - pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; - op_offset++; - pattern[i].mask_info[op_offset / 8] |= 1 << op_offset % 8; - tip_offset = offsetof(struct pattern_packet, ar_tip); - pattern[i].mask_info[tip_offset / 8] |= 1 << tip_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - tip_offset++; - pattern[i].mask_info[tip_offset / 8] |= 1 << type_offset % 8; - - packet.ar_pro = - 0x0 << 8 | - 0x08; /* arp type is 0x0800, notice that ar_pro and ar_op is big endian */ - packet.ar_op = - 0x1 - << 8; /* 1 is arp request,2 is arp replay, 3 is rarp request, 4 is rarp replay */ - ip_addr = 
fxgmac_get_netdev_ip4addr(pdata); - packet.ar_tip[0] = ip_addr & 0xFF; - packet.ar_tip[1] = (ip_addr >> 8) & 0xFF; - packet.ar_tip[2] = (ip_addr >> 16) & 0xFF; - packet.ar_tip[3] = (ip_addr >> 24) & 0xFF; - memcpy(pattern[i].pattern_info, &packet, MAX_PATTERN_SIZE); - pattern[i].mask_size = sizeof(pattern[0].mask_info); - pattern[i].pattern_offset = 0; - i++; - } - - fxgmac_set_wake_pattern(pdata, pattern, i); -} - +#ifdef FXGMAC_WOL_INTEGRATED_WOL_PARAMETER static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) +#else +static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, + unsigned int offloadcount, bool magic_en, + bool remote_pattern_en) +#endif { u32 regval = 0; @@ -5426,13 +4984,6 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, 0x00); } - if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { - netdev_err( - pdata->netdev, - "fxgmac powerstate is %lu when config power to down.\n", - pdata->expansion.powerstate); - } - #if FXGMAC_WOL_FEATURE_ENABLED fxgmac_config_wol(pdata, wol); #endif @@ -5449,12 +5000,23 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, fxgmac_enable_ns_offload(pdata); #endif +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED + fxgmac_enable_wake_packet_indication(pdata, 1); +#endif /* Enable MAC Rx TX */ +#ifdef FXGMAC_WOL_INTEGRATED_WOL_PARAMETER if (1) { +#else + if (magic_en || remote_pattern_en || offloadcount) { +#endif regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_CR); regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 1); +#if defined(FXGMAC_AOE_FEATURE_ENABLED) || defined(FXGMAC_NS_OFFLOAD_ENABLED) if (pdata->hw_feat.aoe) { +#else + if (offloadcount) { +#endif regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_TE_POS, MAC_CR_TE_LEN, 1); } @@ -5474,16 +5036,6 @@ static void fxgmac_config_powerdown(struct fxgmac_pdata *pdata, MAC_PMT_STA_PWRDWN_LEN, 1); writereg(pdata->pAdapter, regval, 
pdata->mac_regs + MAC_PMT_STA); - /* adjust sigdet threshold - * redmine.motor-comm.com/issues/5093 - * fix issue can not wake up os on some FT-D2000 platform, y - * this modification is only temporarif it is 55mv, wol maybe failed. - */ - - regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); - regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, - MGMT_SIGDET_40MV); - writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_SIGDET); DPRINTK("fxgmac_config_powerdown callout, reg=0x%08x\n", regval); } @@ -5491,13 +5043,6 @@ static void fxgmac_config_powerup(struct fxgmac_pdata *pdata) { u32 regval = 0; - if (test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { - netdev_err( - pdata->netdev, - "fxgmac powerstate is %lu when config power to up.\n", - pdata->expansion.powerstate); - } - /* After enable OOB_WOL from efuse, mac will loopcheck phy status, and lead to panic sometimes. * So we should disable it from powerup, enable it from power down. 
*/ @@ -5542,11 +5087,11 @@ static int fxgmac_diag_sanity_check(struct fxgmac_pdata *pdata) static int cnt; reg_q_val = readreg(pdata->pAdapter, - FXGMAC_MTL_REG(pdata, 0 /* tx channe 0 */, - 0x8 /* 0x2d08 */)); - if (!(reg_q_val & 0x10)) { /* tx q is empty */ + FXGMAC_MTL_REG(pdata, 0, 0x8)); + /* tx q is empty */ + if (!(reg_q_val & 0x10)) return 0; - } + reg_tail_val = readreg(pdata->pAdapter, FXGMAC_DMA_REG(pdata->channel_head, DMA_CH_TDTR_LO)); @@ -5571,6 +5116,7 @@ static int fxgmac_diag_sanity_check(struct fxgmac_pdata *pdata) return 0; } #endif + static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata) { u32 regval = 0; @@ -5585,6 +5131,7 @@ static void fxgmac_pwr_clock_gate(struct fxgmac_pdata *pdata) 0); fxgmac_write_ephy_reg(pdata, REG_MII_EXT_DATA, regval); } + static void fxgmac_pwr_clock_ungate(struct fxgmac_pdata *pdata) { u32 regval = 0; @@ -5605,7 +5152,7 @@ static unsigned char fxgmac_suspend_int(void *context) { /* ULONG_PTR addr; */ u32 intid; -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF u32 regval = 0; #endif u32 val_mgmt_intcrtl0; @@ -5638,7 +5185,7 @@ static unsigned char fxgmac_suspend_int(void *context) /* since Msix interrupt masked now, enable EPHY interrupt for case of link change wakeup */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, PHY_INT_MASK_LINK_UP_LEN, 1); regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, @@ -5649,20 +5196,13 @@ static unsigned char fxgmac_suspend_int(void *context) return true; } + static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) { struct fxgmac_channel *channel; unsigned int i; u32 regval; int busy = 15; - /* Prepare for Tx DMA channel stop */ - channel = pdata->channel_head; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->tx_ring) { - break; - } - fxgmac_prepare_tx_stop(pdata, 
channel); - } /* Disable each Tx DMA channel */ channel = pdata->channel_head; @@ -5677,7 +5217,7 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) DMA_CH_TCR_ST_LEN, 0); writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_TCR)); - DBGPRINT(MP_TRACE, (" %s disable tx dma", __FUNCTION__)); + DPRINTK(" disable channel %d tx dma", i); } do { @@ -5689,11 +5229,13 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) if (0 != (regval & MAC_DBG_STA_TX_BUSY)) { regval = readreg(pdata->pAdapter, pdata->mac_regs + MAC_DBG_STA); - DbgPrintF(MP_WARN, - "warning !!!timed out waiting for Tx MAC to stop\n"); + DPRINTK("warning !!!timed out waiting for Tx MAC to stop regval %x\n", + regval); return -1; } - /* wait empty Tx queue */ + + busy = 15; + /* wait empty Tx queue */ for (i = 0; i < pdata->tx_q_count; i++) { do { regval = readreg(pdata->pAdapter, @@ -5703,9 +5245,7 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) if (0 != (regval & MTL_TXQ_DEG_TX_BUSY)) { regval = readreg(pdata->pAdapter, pdata->mac_regs + MTL_TXQ_DEG); - DbgPrintF( - MP_WARN, - "warning !!!timed out waiting for tx queue %u to empty\n", + DPRINTK("warning !!!timed out waiting for tx queue %u to empty\n", i); return -1; } @@ -5717,16 +5257,11 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) regval = FXGMAC_SET_REG_BITS(regval, MAC_CR_RE_POS, MAC_CR_RE_LEN, 0); writereg(pdata->pAdapter, regval, pdata->mac_regs + MAC_CR); - /* Prepare for Rx DMA channel stop */ - for (i = 0; i < pdata->rx_q_count; i++) { - fxgmac_prepare_rx_stop(pdata, i); - } /* Disable each Rx DMA channel */ channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { - if (!channel->rx_ring) { + if (!channel->rx_ring) break; - } regval = readreg(pdata->pAdapter, FXGMAC_DMA_REG(channel, DMA_CH_RCR)); @@ -5734,10 +5269,11 @@ static int fxgmac_suspend_txrx(struct fxgmac_pdata *pdata) DMA_CH_RCR_SR_LEN, 0); writereg(pdata->pAdapter, regval, 
FXGMAC_DMA_REG(channel, DMA_CH_RCR)); - DBGPRINT(MP_TRACE, (" %s disable rx dma", __FUNCTION__)); + DPRINTK(" disable channel %d rx dma", i); } return 0; } + static void fxgmac_resume_int(struct fxgmac_pdata *pdata) { u32 intid, regval = 0; @@ -5765,7 +5301,7 @@ static void fxgmac_resume_int(struct fxgmac_pdata *pdata) MSIX_TBL_MASK_OFFSET + intid * 16); } -#if FUXI_EPHY_INTERRUPT_D0_OFF +#if FXGMAC_EPHY_INTERRUPT_D0_OFF fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, 0x0); /* disable phy interrupt */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, @@ -5780,23 +5316,32 @@ static void fxgmac_resume_int(struct fxgmac_pdata *pdata) #endif } +static void fxgmac_config_wol_wait_time(struct fxgmac_pdata *pdata) +{ + u32 regval; + + regval = readreg(pdata->pAdapter, pdata->base_mem + WOL_CTL); + regval = FXGMAC_SET_REG_BITS(regval, WOL_WAIT_TIME_POS, + WOL_WAIT_TIME_LEN, FXGMAC_WOL_WAIT_TIME); + writereg(pdata->pAdapter, regval, pdata->base_mem + WOL_CTL); +} + static int fxgmac_hw_init(struct fxgmac_pdata *pdata) { struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; int ret; - u32 regval = 0; - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac hw init call in\n"); - } + if (netif_msg_drv(pdata)) + DPRINTK("%s call in\n", __func__); /* Flush Tx queues */ ret = fxgmac_flush_tx_queues(pdata); if (ret) { - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac_hw_init call flush tx queue err.\n"); - } +#ifdef FXGMAC_FLUSH_TX_CHECK_ENABLED + dev_err(pdata->dev, + "%s call flush tx queue err.\n", __func__); return ret; +#endif } /* Initialize DMA related features */ @@ -5811,10 +5356,16 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) fxgmac_config_tso_mode(pdata); fxgmac_config_sph_mode(pdata); fxgmac_config_rss(pdata); - fxgmac_config_wol(pdata, pdata->expansion.wol); desc_ops->tx_desc_init(pdata); - desc_ops->rx_desc_init(pdata); + ret = desc_ops->rx_desc_init(pdata); + if (ret) { +#ifdef FXGMAC_RX_DESC_INIT_CHECK_ENABLED + dev_err(pdata->dev, "rx_desc_init err.\n"); + return 
ret; +#endif + } + fxgmac_enable_dma_interrupts(pdata); /* Initialize MTL related features */ @@ -5843,19 +5394,21 @@ static int fxgmac_hw_init(struct fxgmac_pdata *pdata) fxgmac_config_mmc(pdata); fxgmac_enable_mac_interrupts(pdata); + fxgmac_config_wol_wait_time(pdata); + /* enable EPhy link change interrupt */ fxgmac_read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); /* clear phy interrupt */ - regval = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, - PHY_INT_MASK_LINK_UP_LEN, 1); - regval = FXGMAC_SET_REG_BITS(regval, PHY_INT_MASK_LINK_DOWN_POS, - PHY_INT_MASK_LINK_DOWN_LEN, 1); + ret = FXGMAC_SET_REG_BITS(0, PHY_INT_MASK_LINK_UP_POS, + PHY_INT_MASK_LINK_UP_LEN, 1); + ret = FXGMAC_SET_REG_BITS(ret, PHY_INT_MASK_LINK_DOWN_POS, + PHY_INT_MASK_LINK_DOWN_LEN, 1); fxgmac_write_ephy_reg(pdata, REG_MII_INT_MASK, - regval); /* enable phy interrupt */ + ret); /* enable phy interrupt */ + + if (netif_msg_drv(pdata)) + DPRINTK("%s callout\n", __func__); - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac hw init callout\n"); - } return 0; } @@ -5866,6 +5419,31 @@ static void fxgmac_save_nonstick_reg(struct fxgmac_pdata *pdata) pdata->reg_nonstick[(i - REG_PCIE_TRIGGER) >> 2] = readreg(pdata->pAdapter, pdata->base_mem + i); } + + cfg_r32(pdata, REG_PCI_COMMAND, &pdata->expansion.cfg_pci_cmd); + cfg_r32(pdata, REG_CACHE_LINE_SIZE, + &pdata->expansion.cfg_cache_line_size); + cfg_r32(pdata, REG_MEM_BASE, &pdata->expansion.cfg_mem_base); + cfg_r32(pdata, REG_MEM_BASE_HI, &pdata->expansion.cfg_mem_base_hi); + cfg_r32(pdata, REG_IO_BASE, &pdata->expansion.cfg_io_base); + cfg_r32(pdata, REG_INT_LINE, &pdata->expansion.cfg_int_line); + cfg_r32(pdata, REG_DEVICE_CTRL1, &pdata->expansion.cfg_device_ctrl1); + cfg_r32(pdata, REG_PCI_LINK_CTRL, &pdata->expansion.cfg_pci_link_ctrl); + cfg_r32(pdata, REG_DEVICE_CTRL2, &pdata->expansion.cfg_device_ctrl2); + cfg_r32(pdata, REG_MSIX_CAPABILITY, + &pdata->expansion.cfg_msix_capability); + + 
DPRINTK("%s:\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\nCFG%02x-%02x\n", + __func__, REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd, + REG_CACHE_LINE_SIZE, pdata->expansion.cfg_cache_line_size, + REG_MEM_BASE, pdata->expansion.cfg_mem_base, REG_MEM_BASE_HI, + pdata->expansion.cfg_mem_base_hi, REG_IO_BASE, + pdata->expansion.cfg_io_base, REG_INT_LINE, + pdata->expansion.cfg_int_line, REG_DEVICE_CTRL1, + pdata->expansion.cfg_device_ctrl1, REG_PCI_LINK_CTRL, + pdata->expansion.cfg_pci_link_ctrl, REG_DEVICE_CTRL2, + pdata->expansion.cfg_device_ctrl2, REG_MSIX_CAPABILITY, + pdata->expansion.cfg_msix_capability); } static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata) @@ -5878,6 +5456,7 @@ static void fxgmac_restore_nonstick_reg(struct fxgmac_pdata *pdata) } } +#if defined(FXGMAC_ESD_RESTORE_PCIE_CFG) static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata) { cfg_w32(pdata, REG_PCI_COMMAND, pdata->expansion.cfg_pci_cmd); @@ -5893,22 +5472,30 @@ static void fxgmac_esd_restore_pcie_cfg(struct fxgmac_pdata *pdata) cfg_w32(pdata, REG_MSIX_CAPABILITY, pdata->expansion.cfg_msix_capability); } +#endif static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) { u32 regval; u32 value = 0; - cfg_r32(pdata, REG_PCI_LINK_CTRL, ®val); - pdata->pcie_link_status = - FXGMAC_GET_REG_BITS(regval, PCI_LINK_CTRL_ASPM_CONTROL_POS, - PCI_LINK_CTRL_ASPM_CONTROL_LEN); - if (PCI_LINK_CTRL_L1_STATUS == (pdata->pcie_link_status & 0x02)) { - regval = FXGMAC_SET_REG_BITS(regval, - PCI_LINK_CTRL_ASPM_CONTROL_POS, - PCI_LINK_CTRL_ASPM_CONTROL_LEN, 0); - cfg_w32(pdata, REG_PCI_LINK_CTRL, regval); +#ifdef FXGMAC_CHECK_DEV_STATE + if (pdata->expansion.dev_state == FXGMAC_DEV_OPEN) { +#endif + cfg_r32(pdata, REG_PCI_LINK_CTRL, ®val); + pdata->pcie_link_status = FXGMAC_GET_REG_BITS( + regval, PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN); + if (PCI_LINK_CTRL_L1_STATUS == + 
(pdata->pcie_link_status & 0x02)) { + regval = FXGMAC_SET_REG_BITS( + regval, PCI_LINK_CTRL_ASPM_CONTROL_POS, + PCI_LINK_CTRL_ASPM_CONTROL_LEN, 0); + cfg_w32(pdata, REG_PCI_LINK_CTRL, regval); + } +#ifdef FXGMAC_CHECK_DEV_STATE } +#endif /* Issue a CHIP reset */ regval = readreg(pdata->pAdapter, pdata->base_mem + SYS_RESET_REG); @@ -5919,7 +5506,16 @@ static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) usleep_range_ex(pdata->pAdapter, 9000, 10000); - /* reg152c reset will reset trigger circuit and reload efuse patch 0x1004=0x16, need to release ephy reset again */ + /* bypass pcie reset */ + regval = readreg(pdata->pAdapter, pdata->base_mem + SYS_RESET_REG); + regval = FXGMAC_SET_REG_BITS(regval, SYS_RESET_BYPASS_POS, + SYS_RESET_BYPASS_LEN, 0); + regval = FXGMAC_SET_REG_BITS(regval, SYS_RESET_POS, SYS_RESET_LEN, 0); + writereg(pdata->pAdapter, regval, pdata->base_mem + SYS_RESET_REG); + + /* reg152c reset will reset trigger circuit and reload efuse patch + * 0x1004=0x16, need to release ephy reset again + */ value = FXGMAC_SET_REG_BITS(value, MGMT_EPHY_CTRL_RESET_POS, MGMT_EPHY_CTRL_RESET_LEN, MGMT_EPHY_CTRL_STA_EPHY_RELEASE); @@ -5932,23 +5528,23 @@ static int fxgmac_hw_exit(struct fxgmac_pdata *pdata) return 0; } -static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, u8 *address, +static int fxgmac_set_gmac_register(struct fxgmac_pdata *pdata, IOMEM address, unsigned int data) { - if (address < (u8 *)(pdata->base_mem)) { + if (address < pdata->base_mem) return -1; - } + writereg(pdata->pAdapter, data, address); return 0; } -static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, u8 *address) +static u32 fxgmac_get_gmac_register(struct fxgmac_pdata *pdata, IOMEM address) { u32 regval = 0; - if (address > (u8 *)(pdata->base_mem)) { + if (address > pdata->base_mem) regval = readreg(pdata->pAdapter, address); - } + return regval; } @@ -5975,9 +5571,9 @@ static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en, LTR_IDLE_ENTER_REQUIRE); 
regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_SCALE_POS, LTR_IDLE_ENTER_SCALE_LEN, - LTR_IDLE_ENTER_SCALE); + LTR_IDLE_ENTER_SCALE_1MS); regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_ENTER_POS, - LTR_IDLE_ENTER_LEN, LTR_IDLE_ENTER_USVAL); + LTR_IDLE_ENTER_LEN, LTR_IDLE_ENTER_VAL); regval = (regval << 16) + regval; /* snoopy + non-snoopy */ writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_ENTER); @@ -5987,9 +5583,9 @@ static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en, LTR_IDLE_EXIT_REQUIRE); regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_SCALE_POS, LTR_IDLE_EXIT_SCALE_LEN, - LTR_IDLE_EXIT_SCALE); + LTR_IDLE_EXIT_SCALE_1US); regval = FXGMAC_SET_REG_BITS(regval, LTR_IDLE_EXIT_POS, - LTR_IDLE_EXIT_LEN, LTR_IDLE_EXIT_USVAL); + LTR_IDLE_EXIT_LEN, LTR_IDLE_EXIT_VAL); regval = (regval << 16) + regval; /* snoopy + non-snoopy */ writereg(pdata->pAdapter, regval, pdata->base_mem + LTR_IDLE_EXIT); @@ -6034,18 +5630,144 @@ static int fxgmac_pcie_init(struct fxgmac_pdata *pdata, bool ltr_en, /*fuxi nto adjust sigdet threshold*/ cfg_r8(pdata, REG_PCI_REVID, ®val); cfg_r16(pdata, REG_PCI_DEVICE_ID, &deviceid); - if (FUXI_REV_01 == regval && PCI_DEVICE_ID_FUXI == deviceid) { + if (YT6801_NTO_VER == regval && PCI_DEVICE_ID_FUXI == deviceid) { regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_SIGDET); regval = FXGMAC_SET_REG_BITS(regval, MGMT_SIGDET_POS, MGMT_SIGDET_LEN, MGMT_SIGDET_55MV); writereg(pdata->pAdapter, regval, pdata->base_mem + MGMT_SIGDET); - } + + regval = readreg(pdata->pAdapter, + pdata->base_mem + MGMT_SIGDET_DEGLITCH); + regval = FXGMAC_SET_REG_BITS(regval, + MGMT_SIGDET_DEGLITCH_DISABLE_POS, + MGMT_SIGDET_DEGLITCH_DISABLE_LEN, + 1); + writereg(pdata->pAdapter, regval, + pdata->base_mem + MGMT_SIGDET_DEGLITCH); + } + + cfg_r16(pdata, REG_DEVICE_CTRL1, ®val); + if (deviceid == PCI_DEVICE_ID_FUXI && + (FXGMAC_GET_REG_BITS(regval, DEVICE_CTRL1_MPS_POS, + DEVICE_CTRL1_MPS_LEN) > + DEVICE_CTRL1_MPS_128B)) { + regval = 
FXGMAC_SET_REG_BITS(regval, DEVICE_CTRL1_MPS_POS, + DEVICE_CTRL1_MPS_LEN, + DEVICE_CTRL1_MPS_128B); + cfg_w16(pdata, REG_DEVICE_CTRL1, regval); + } + + cfg_r32(pdata, REG_ACK_LATENCY_RELAY_TIMER, ®val); + regval = FXGMAC_SET_REG_BITS(regval, REG_ACK_LATENCY_TIMER_POS, + REG_ACK_LATENCY_TIMER_LEN, + REG_ACK_LATENCY_TIMER_VAL); + cfg_w32(pdata, REG_ACK_LATENCY_RELAY_TIMER, regval); + +#ifdef ASIC_MODE + /* close AER */ + cfg_r32(pdata, REG_CORRECTABLE_ERROR_MASK_REG, ®val); + regval = FXGMAC_SET_REG_BITS(regval, REG_CORRECTABLE_ERROR_MASK_POS, + REG_CORRECTABLE_ERROR_MASK_LEN, 0xFFFF); + cfg_w32(pdata, REG_CORRECTABLE_ERROR_MASK_REG, regval); +#endif + + /* close L1 sub timeout */ + cfg_r32(pdata, REG_L1SUB_TIMING, ®val); + regval = FXGMAC_SET_REG_BITS(regval, L1SUB_T_PCLKACK_LOW_POS, + L1SUB_T_PCLKACK_LOW_LEN, 0x0); + regval = FXGMAC_SET_REG_BITS(regval, L1SUB_T_PCLKACK_HIGH_POS, + L1SUB_T_PCLKACK_HIGH_LEN, 0x0); + cfg_w32(pdata, REG_L1SUB_TIMING, regval); return 0; } +static void fxgmac_clear_misc_int_status(struct fxgmac_pdata *pdata) +{ + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval, i, q_count; + + /* clear phy interrupt status */ + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); + /* clear other interrupt status of misc interrupt */ + regval = pdata->hw_ops.get_gmac_register(pdata, + pdata->mac_regs + MAC_ISR); + if (regval) { + if (regval & (1 << MGMT_MAC_PHYIF_STA_POS)) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MAC_PHYIF_STA); + + if ((regval & (1 << MGMT_MAC_AN_SR0_POS)) || + (regval & (1 << MGMT_MAC_AN_SR1_POS)) || + (regval & (1 << MGMT_MAC_AN_SR2_POS))) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MAC_AN_SR); + + if (regval & (1 << MGMT_MAC_PMT_STA_POS)) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MAC_PMT_STA); + + if (regval & (1 << MGMT_MAC_LPI_STA_POS)) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + 
MAC_LPI_STA); + + if (regval & (1 << MGMT_MAC_MMC_STA_POS)) { + if (regval & (1 << MGMT_MAC_RX_MMC_STA_POS)) + hw_ops->rx_mmc_int(pdata); + + if (regval & (1 << MGMT_MAC_TX_MMC_STA_POS)) + hw_ops->tx_mmc_int(pdata); + + if (regval & (1 << MGMT_MMC_IPCRXINT_POS)) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MMC_IPCRXINT); + } + + if ((regval & (1 << MGMT_MAC_TX_RX_STA0_POS)) || + (regval & (1 << MGMT_MAC_TX_RX_STA1_POS))) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MAC_TX_RX_STA); + + if (regval & (1 << MGMT_MAC_GPIO_SR_POS)) + pdata->hw_ops.get_gmac_register( + pdata, pdata->mac_regs + MAC_GPIO_SR); + } + + /* MTL_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, + pdata->mac_regs + MTL_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + MTL_INT_SR, + regval); + + /* MTL_Q(#i)_Interrupt_Control_Status, write 1 clear */ + q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); + for (i = 0; i < q_count; i++) { + /* Clear all the interrupts which are set */ + regval = pdata->hw_ops.get_gmac_register( + pdata, + pdata->mac_regs + MTL_Q_INT_CTL_SR + i * MTL_Q_INC); + pdata->hw_ops.set_gmac_register( + pdata, + pdata->mac_regs + MTL_Q_INT_CTL_SR + i * MTL_Q_INC, + regval); + } + + /* MTL_ECC_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + + MTL_ECC_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + MTL_ECC_INT_SR, + regval); + + /* DMA_ECC_Interrupt_Status, write 1 clear */ + regval = pdata->hw_ops.get_gmac_register(pdata, pdata->mac_regs + + DMA_ECC_INT_SR); + pdata->hw_ops.set_gmac_register(pdata, pdata->mac_regs + DMA_ECC_INT_SR, + regval); +} + static void fxgmac_trigger_pcie(struct fxgmac_pdata *pdata, u32 code) { writereg(pdata->pAdapter, code, pdata->base_mem + REG_PCIE_TRIGGER); @@ -6057,7 +5779,9 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->exit = fxgmac_hw_exit; 
hw_ops->save_nonstick_reg = fxgmac_save_nonstick_reg; hw_ops->restore_nonstick_reg = fxgmac_restore_nonstick_reg; +#if defined(FXGMAC_ESD_RESTORE_PCIE_CFG) hw_ops->esd_restore_pcie_cfg = fxgmac_esd_restore_pcie_cfg; +#endif hw_ops->set_gmac_register = fxgmac_set_gmac_register; hw_ops->get_gmac_register = fxgmac_get_gmac_register; @@ -6068,9 +5792,7 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_rx = fxgmac_enable_rx; hw_ops->disable_rx = fxgmac_disable_rx; hw_ops->enable_channel_rx = fxgmac_enable_channel_rx; - hw_ops->dev_xmit = fxgmac_dev_xmit; - hw_ops->dev_read = fxgmac_dev_read; - hw_ops->config_tso = fxgmac_config_tso_mode; + hw_ops->enable_int = fxgmac_enable_int; hw_ops->disable_int = fxgmac_disable_int; hw_ops->set_interrupt_moderation = fxgmac_set_interrupt_moderation; @@ -6082,9 +5804,15 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->disable_msix_one_interrupt = fxgmac_disable_msix_one_interrupt; hw_ops->enable_mgm_interrupt = fxgmac_enable_mgm_interrupt; hw_ops->disable_mgm_interrupt = fxgmac_disable_mgm_interrupt; + hw_ops->enable_source_interrupt = fxgmac_enable_source_interrupt; + hw_ops->disable_source_interrupt = fxgmac_disable_source_interrupt; + hw_ops->dismiss_all_int = fxgmac_dismiss_all_int; + hw_ops->clear_misc_int_status = fxgmac_clear_misc_int_status; + hw_ops->enable_rx_tx_ints = fxgmac_enable_rx_tx_ints; + hw_ops->disable_rx_tx_ints = fxgmac_disable_rx_tx_ints; hw_ops->set_mac_address = fxgmac_set_mac_address; - hw_ops->set_mac_hash = fxgmac_add_mac_addresses; + hw_ops->set_mac_hash = fxgmac_set_mc_addresses; hw_ops->config_rx_mode = fxgmac_config_rx_mode; hw_ops->enable_rx_csum = fxgmac_enable_rx_csum; hw_ops->disable_rx_csum = fxgmac_disable_rx_csum; @@ -6094,17 +5822,10 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->get_xlgmii_phy_status = fxgmac_check_phy_link; /* For descriptor related operation */ - hw_ops->tx_desc_init = fxgmac_tx_desc_init; - hw_ops->rx_desc_init = 
fxgmac_rx_desc_init; - hw_ops->tx_desc_reset = fxgmac_tx_desc_reset; - hw_ops->rx_desc_reset = fxgmac_rx_desc_reset; hw_ops->is_last_desc = fxgmac_is_last_desc; hw_ops->is_context_desc = fxgmac_is_context_desc; - hw_ops->tx_start_xmit = fxgmac_tx_start_xmit; - hw_ops->set_pattern_data = fxgmac_set_pattern_data; - hw_ops->config_wol = fxgmac_config_wol; - hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; - hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; + + hw_ops->config_tso = fxgmac_config_tso_mode; #if FXGMAC_SANITY_CHECK_ENABLED hw_ops->diag_sanity_check = fxgmac_diag_sanity_check; #endif @@ -6148,6 +5869,8 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->config_tx_pbl_val = fxgmac_config_tx_pbl_val; hw_ops->get_tx_pbl_val = fxgmac_get_tx_pbl_val; hw_ops->config_pblx8 = fxgmac_config_pblx8; + hw_ops->calculate_max_checksum_size = + fxgmac_calculate_max_checksum_size; /* For MMC statistics support */ hw_ops->tx_mmc_int = fxgmac_tx_mmc_int; @@ -6161,6 +5884,8 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->set_rss_options = fxgmac_write_rss_options; hw_ops->set_rss_hash_key = fxgmac_set_rss_hash_key; hw_ops->set_rss_lookup_table = fxgmac_set_rss_lookup_table; + hw_ops->get_rss_hash_key = fxgmac_read_rss_hash_key; + hw_ops->write_rss_lookup_table = fxgmac_write_rss_lookup_table; /*For Offload*/ hw_ops->set_arp_offload = fxgmac_update_aoe_ipv4addr; @@ -6183,7 +5908,7 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_wake_pattern = fxgmac_enable_wake_pattern; hw_ops->disable_wake_pattern = fxgmac_disable_wake_pattern; hw_ops->set_wake_pattern_mask = fxgmac_set_wake_pattern_mask; -#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED hw_ops->enable_wake_packet_indication = fxgmac_enable_wake_packet_indication; hw_ops->get_wake_packet_indication = fxgmac_get_wake_packet_indication; @@ -6198,7 +5923,7 @@ void fxgmac_init_hw_ops(struct 
fxgmac_hw_ops *hw_ops) hw_ops->set_ephy_autoneg_advertise = fxgmac_set_ephy_autoneg_advertise; hw_ops->phy_config = fxgmac_phy_config; hw_ops->close_phy_led = fxgmac_close_phy_led; - hw_ops->led_under_active = fxmgac_config_led_under_active; + hw_ops->led_under_active = fxgmac_config_led_under_active; hw_ops->led_under_sleep = fxgmac_config_led_under_sleep; hw_ops->led_under_shutdown = fxgmac_config_led_under_shutdown; hw_ops->led_under_disable = fxgmac_config_led_under_disable; @@ -6208,8 +5933,6 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->clean_cable_loopback = fxgmac_clean_cable_loopback; hw_ops->disable_phy_sleep = fxgmac_disable_phy_sleep; hw_ops->enable_phy_sleep = fxgmac_enable_phy_sleep; - hw_ops->phy_green_ethernet = fxgmac_phy_green_ethernet; - hw_ops->phy_eee_feature = fxgmac_phy_eee_feature; /* For power management */ hw_ops->pre_power_down = fxgmac_pre_powerdown; @@ -6228,17 +5951,16 @@ void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops) hw_ops->enable_rx_broadcast = fxgmac_enable_rx_broadcast; /* efuse relevant operation. */ - hw_ops->read_patch_from_efuse = - fxgmac_read_patch_from_efuse; /* read patch per register. */ + hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse; + hw_ops->read_efuse_data = fxgmac_efuse_read_data; + hw_ops->read_patch_from_efuse = fxgmac_read_patch_from_efuse; hw_ops->read_patch_from_efuse_per_index = fxgmac_read_patch_from_efuse_per_index; /* read patch per index. 
*/ hw_ops->write_patch_to_efuse = fxgmac_write_patch_to_efuse; hw_ops->write_patch_to_efuse_per_index = fxgmac_write_patch_to_efuse_per_index; - hw_ops->read_mac_subsys_from_efuse = fxgmac_read_mac_subsys_from_efuse; hw_ops->write_mac_subsys_to_efuse = fxgmac_write_mac_subsys_to_efuse; hw_ops->efuse_load = fxgmac_efuse_load; - hw_ops->read_efuse_data = fxgmac_efuse_read_data; hw_ops->write_oob = fxgmac_efuse_write_oob; hw_ops->write_led = fxgmac_efuse_write_led; hw_ops->write_led_config = fxgmac_write_led_setting_to_efuse; diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c similarity index 61% rename from drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c rename to drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c index 4596d91b6e282..c9619d0d40a9c 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-debugfs.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-ioctl.c @@ -3,145 +3,37 @@ #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -#ifdef HAVE_FXGMAC_DEBUG_FS -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define TEST_MAC_HEAD 14 -#define TEST_TCP_HEAD_LEN_OFFSET 12 -#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 -#define TEST_TCP_FIX_HEAD_LEN 24 -#define TEST_TCP_MSS_OFFSET 56 - -#define DF_MAX_NIC_NUM 16 - -#ifdef HAVE_FXGMAC_DEBUG_FS - -/** - * fxgmac_dbg_netdev_ops_read - read for netdev_ops datum - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t fxgmac_dbg_netdev_ops_read(struct file *filp, - char __user *buffer, size_t count, - loff_t *ppos) -{ - struct fxgmac_pdata *pdata = filp->private_data; - char *buf; - int len; - - /* don't allow partial reads */ - if (*ppos != 0) - 
return 0; - - buf = kasprintf(GFP_KERNEL, "%s: %s\n", pdata->netdev->name, - pdata->expansion.fxgmac_dbg_netdev_ops_buf); - if (!buf) - return -ENOMEM; - - if (count < strlen(buf)) { - kfree(buf); - return -ENOSPC; - } - - len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); - - kfree(buf); - return len; -} - -/** - * fxgmac_dbg_netdev_ops_write - write into netdev_ops datum - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - **/ -static ssize_t fxgmac_dbg_netdev_ops_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct fxgmac_pdata *pdata = filp->private_data; - int len; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - if (count >= sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf)) - return -ENOSPC; - - len = simple_write_to_buffer( - pdata->expansion.fxgmac_dbg_netdev_ops_buf, - sizeof(pdata->expansion.fxgmac_dbg_netdev_ops_buf) - 1, ppos, - buffer, count); - if (len < 0) - return len; - - pdata->expansion.fxgmac_dbg_netdev_ops_buf[len] = '\0'; - - if (strncmp(pdata->expansion.fxgmac_dbg_netdev_ops_buf, "tx_timeout", - 10) == 0) { - DPRINTK("tx_timeout called\n"); - } else { - FXGMAC_PR("Unknown command: %s\n", - pdata->expansion.fxgmac_dbg_netdev_ops_buf); - FXGMAC_PR("Available commands:\n"); - FXGMAC_PR(" tx_timeout\n"); - } - return count; -} -#endif static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) { - unsigned int pktLen = 0; + unsigned int pkt_len = 0; struct sk_buff *skb; - pfxgmac_test_packet pPkt; - u8 *pTx_data = NULL; - u8 *pSkb_data = NULL; + pfxgmac_test_packet pkt; + u8 *tx_data = NULL; + u8 *skb_data = NULL; u32 offload_len = 0; - u8 ipHeadLen, tcpHeadLen, headTotalLen; - static u32 lastGsoSize = 806; /* initial default value */ + u8 ip_head_len, tcp_head_len, head_total_len; + static u32 last_gso_size = 806; //initial default value + //int i = 0; /* get 
fxgmac_test_packet */ - pPkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); - pktLen = pPkt->length; + pkt = (pfxgmac_test_packet)(pcmd_data + sizeof(struct ext_ioctl_data)); + pkt_len = pkt->length; /* get pkt data */ - pTx_data = (u8 *)pPkt + sizeof(fxgmac_test_packet); + tx_data = (u8 *)pkt + sizeof(fxgmac_test_packet); /* alloc sk_buff */ - skb = alloc_skb(pktLen, GFP_ATOMIC); + skb = alloc_skb(pkt_len, GFP_ATOMIC); if (!skb) { DPRINTK("alloc skb fail\n"); return; } /* copy data to skb */ - pSkb_data = skb_put(skb, pktLen); - memset(pSkb_data, 0, pktLen); - memcpy(pSkb_data, pTx_data, pktLen); + skb_data = skb_put(skb, pkt_len); + memset(skb_data, 0, pkt_len); + memcpy(skb_data, tx_data, pkt_len); /* set skb parameters */ skb->dev = pdata->netdev; @@ -155,59 +47,62 @@ static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) pdata->expansion.fxgmac_test_tso_flag = true; /* get protocol head length */ - ipHeadLen = (pSkb_data[TEST_MAC_HEAD] & 0xF) * 4; - tcpHeadLen = (pSkb_data[TEST_MAC_HEAD + ipHeadLen + - TEST_TCP_HEAD_LEN_OFFSET] >> - 4 & - 0xF) * - 4; - headTotalLen = TEST_MAC_HEAD + ipHeadLen + tcpHeadLen; - offload_len = (pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | - pSkb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & + ip_head_len = (skb_data[TEST_MAC_HEAD] & 0xF) * 4; + tcp_head_len = (skb_data[TEST_MAC_HEAD + ip_head_len + + TEST_TCP_HEAD_LEN_OFFSET] >> + 4 & + 0xF) * + 4; + head_total_len = TEST_MAC_HEAD + ip_head_len + tcp_head_len; + offload_len = (skb_data[TEST_TCP_OFFLOAD_LEN_OFFSET] << 8 | + skb_data[TEST_TCP_OFFLOAD_LEN_OFFSET + 1]) & 0xFFFF; /* set tso skb parameters */ - skb->transport_header = ipHeadLen + TEST_MAC_HEAD; + //skb->ip_summed = CHECKSUM_PARTIAL; + skb->transport_header = ip_head_len + TEST_MAC_HEAD; skb->network_header = TEST_MAC_HEAD; skb->inner_network_header = TEST_MAC_HEAD; skb->mac_len = TEST_MAC_HEAD; /* set skb_shinfo parameters */ - if (tcpHeadLen > TEST_TCP_FIX_HEAD_LEN) { + if 
(tcp_head_len > TEST_TCP_FIX_HEAD_LEN) { skb_shinfo(skb)->gso_size = - (pSkb_data[TEST_TCP_MSS_OFFSET] << 8 | - pSkb_data[TEST_TCP_MSS_OFFSET + 1]) & + (skb_data[TEST_TCP_MSS_OFFSET] << 8 | + skb_data[TEST_TCP_MSS_OFFSET + 1]) & 0xFFFF; } else { skb_shinfo(skb)->gso_size = 0; } if (skb_shinfo(skb)->gso_size != 0) { - lastGsoSize = skb_shinfo(skb)->gso_size; + last_gso_size = skb_shinfo(skb)->gso_size; } else { - skb_shinfo(skb)->gso_size = lastGsoSize; + skb_shinfo(skb)->gso_size = last_gso_size; } + /* get segment size */ if (offload_len % skb_shinfo(skb)->gso_size == 0) { skb_shinfo(skb)->gso_segs = offload_len / skb_shinfo(skb)->gso_size; pdata->expansion.fxgmac_test_last_tso_len = - skb_shinfo(skb)->gso_size + headTotalLen; + skb_shinfo(skb)->gso_size + head_total_len; } else { skb_shinfo(skb)->gso_segs = offload_len / skb_shinfo(skb)->gso_size + 1; pdata->expansion.fxgmac_test_last_tso_len = offload_len % skb_shinfo(skb)->gso_size + - headTotalLen; + head_total_len; } pdata->expansion.fxgmac_test_tso_seg_num = skb_shinfo(skb)->gso_segs; skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; skb_shinfo(skb)->frag_list = NULL; - skb->csum_start = skb_headroom(skb) + TEST_MAC_HEAD + ipHeadLen; - skb->csum_offset = skb->len - TEST_MAC_HEAD - ipHeadLen; + skb->csum_start = + skb_headroom(skb) + TEST_MAC_HEAD + ip_head_len; + skb->csum_offset = skb->len - TEST_MAC_HEAD - ip_head_len; pdata->expansion.fxgmac_test_packet_len = - skb_shinfo(skb)->gso_size + headTotalLen; + skb_shinfo(skb)->gso_size + head_total_len; } else { /* set non-TSO packet parameters */ pdata->expansion.fxgmac_test_packet_len = skb->len; @@ -221,14 +116,15 @@ static void fxgmac_dbg_tx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) { - unsigned int totalLen = 0; + unsigned int total_len = 0; struct sk_buff *rx_skb; struct ext_ioctl_data *pcmd; fxgmac_test_packet pkt; void *addr = 0; - u8 *rx_data = (u8 *)kzalloc(FXGMAC_MAX_DBG_RX_DATA, 
GFP_KERNEL); + u8 *rx_data = kzalloc(FXGMAC_MAX_DBG_RX_DATA, GFP_KERNEL); if (!rx_data) return; + //int i; /* initial dest data region */ pcmd = (struct ext_ioctl_data *)pcmd_data; @@ -240,15 +136,16 @@ static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) pdata->expansion.fxgmac_test_skb_array [pdata->expansion.fxgmac_test_skb_arr_out_index]; - if (rx_skb->len + sizeof(fxgmac_test_packet) + totalLen < + if (rx_skb->len + sizeof(fxgmac_test_packet) + total_len < 64000) { pkt.length = rx_skb->len; pkt.type = 0x80; pkt.buf[0].offset = - totalLen + sizeof(fxgmac_test_packet); + total_len + sizeof(fxgmac_test_packet); pkt.buf[0].length = rx_skb->len; /* get data from skb */ + //DPRINTK("FXG:rx_skb->len=%d", rx_skb->len); memcpy(rx_data, rx_skb->data, rx_skb->len); /* update next pointer */ @@ -258,25 +155,26 @@ static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) pkt.next = NULL; } else { pkt.next = - (pfxgmac_test_packet)(addr + totalLen + + (pfxgmac_test_packet)(addr + total_len + sizeof(fxgmac_test_packet) + pkt.length); } /* copy data to user space */ - if (copy_to_user((void *)(addr + totalLen), - (void *)(&pkt), + if (copy_to_user((void *)(addr + total_len), + (void *)&pkt, sizeof(fxgmac_test_packet))) { DPRINTK("cppy pkt data to user fail..."); } - if (copy_to_user((void *)(addr + totalLen + + //FXGMAC_PR("FXG:rx_skb->len=%d", rx_skb->len); + if (copy_to_user((void *)(addr + total_len + sizeof(fxgmac_test_packet)), (void *)rx_data, rx_skb->len)) { DPRINTK("cppy data to user fail..."); } /* update total length */ - totalLen += (sizeof(fxgmac_test_packet) + rx_skb->len); + total_len += (sizeof(fxgmac_test_packet) + rx_skb->len); /* free skb */ kfree_skb(rx_skb); @@ -299,11 +197,10 @@ static void fxgmac_dbg_rx_pkt(struct fxgmac_pdata *pdata, u8 *pcmd_data) kfree(rx_data); } -/* Based on the current application scenario, we only use CMD_DATA for data. 
- * if you use other struct, you should recalculate in_total_size - */ -long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) +// Based on the current application scenario,we only use CMD_DATA for data. +// if you use other struct, you should recalculate in_total_size +long fxgmac_netdev_ops_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) { bool ret = true; int regval = 0; @@ -333,11 +230,12 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, /* check command number*/ if (_IOC_NR(cmd) > IOC_MAXNR) { - DPRINTK("[%s] command numer [%d] exceeded!\n", __func__, + DPRINTK("[%s] command number [%d] exceeded!\n", __func__, _IOC_NR(cmd)); goto err; } + //buf = (u8*)kzalloc(FXGMAC_MAX_DBG_BUF_LEN, GFP_KERNEL); if (copy_from_user(&pcmd, (void *)arg, ioctl_cmd_size)) { DPRINTK("copy data from user fail... \n"); goto err; @@ -360,8 +258,11 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, if (arg != 0) { switch (pcmd.cmd_type) { /* ioctl diag begin */ - case FUXI_DFS_IOCTL_DIAG_BEGIN: + case FXGMAC_DFS_IOCTL_DIAG_BEGIN: DPRINTK("Debugfs received diag begin command.\n"); +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + pdata->expansion.lb_test_flag = 1; +#endif if (netif_running(pdata->netdev)) { fxgmac_restart_dev(pdata); } @@ -395,25 +296,28 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, break; /* ioctl diag end */ - case FUXI_DFS_IOCTL_DIAG_END: + case FXGMAC_DFS_IOCTL_DIAG_END: DPRINTK("Debugfs received diag end command.\n"); if (netif_running(pdata->netdev)) { fxgmac_restart_dev(pdata); } +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + pdata->expansion.lb_test_flag = 0; +#endif break; /* ioctl diag tx pkt */ - case FUXI_DFS_IOCTL_DIAG_TX_PKT: + case FXGMAC_DFS_IOCTL_DIAG_TX_PKT: fxgmac_dbg_tx_pkt(pdata, buf); break; /* ioctl diag rx pkt */ - case FUXI_DFS_IOCTL_DIAG_RX_PKT: + case FXGMAC_DFS_IOCTL_DIAG_RX_PKT: fxgmac_dbg_rx_pkt(pdata, buf); break; /* ioctl 
device reset */ - case FUXI_DFS_IOCTL_DEVICE_RESET: + case FXGMAC_DFS_IOCTL_DEVICE_RESET: DPRINTK("Debugfs received device reset command.\n"); if (netif_running(pdata->netdev)) { fxgmac_restart_dev(pdata); @@ -451,8 +355,6 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, memcpy(&ex_data, data, sizeof(CMD_DATA)); ret = hw_ops->read_efuse_data(pdata, ex_data.val0, &ex_data.val1); - DPRINTK("FXGMAC_EFUSE_READ_REGIONABC, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); if (ret) { memcpy(data, &ex_data, sizeof(CMD_DATA)); out_total_size = @@ -465,8 +367,6 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, case FXGMAC_EFUSE_WRITE_PATCH_REG: memcpy(&ex_data, data, sizeof(CMD_DATA)); - DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_REG, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); ret = hw_ops->write_patch_to_efuse(pdata, ex_data.val0, ex_data.val1); break; @@ -475,8 +375,6 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, memcpy(&ex_data, data, sizeof(CMD_DATA)); ret = hw_ops->read_patch_from_efuse(pdata, ex_data.val0, &ex_data.val1); - DPRINTK("FXGMAC_EFUSE_READ_PATCH_REG, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1); if (ret) { memcpy(data, &ex_data, sizeof(CMD_DATA)); out_total_size = @@ -492,8 +390,6 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, ret = hw_ops->write_patch_to_efuse_per_index( pdata, ex_data.val0, ex_data.val1, ex_data.val2); - DPRINTK("FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, index = %d, address = 0x%x, val = 0x%x\n", - ex_data.val0, ex_data.val1, ex_data.val2); break; case FXGMAC_EFUSE_READ_PATCH_PER_INDEX: @@ -501,8 +397,6 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, ret = hw_ops->read_patch_from_efuse_per_index( pdata, ex_data.val0, &ex_data.val1, &ex_data.val2); - DPRINTK("FXGMAC_EFUSE_READ_PATCH_PER_INDEX, address = 0x%x, val = 0x%x\n", - ex_data.val1, ex_data.val2); if (ret) { memcpy(data, 
&ex_data, sizeof(CMD_DATA)); out_total_size = @@ -539,6 +433,7 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, if (ret) { eth_hw_addr_set(pdata->netdev, mac); memcpy(pdata->mac_addr, mac, ETH_ALEN); + hw_ops->set_mac_address(pdata, mac); hw_ops->set_mac_hash(pdata); } @@ -549,7 +444,7 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, ret = hw_ops->read_mac_subsys_from_efuse( pdata, NULL, &ex_data.val0, NULL); if (ret) { - ex_data.val1 = 0xFFFF; /* invalid value */ + ex_data.val1 = 0xFFFF; // invalid value memcpy(data, &ex_data, sizeof(CMD_DATA)); out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); @@ -565,10 +460,10 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, pdata, NULL, &ex_data.val0, NULL); break; - case FXGMAC_GET_GMAC_REG: + case FXGMAC_GET_REG: memcpy(&ex_data, data, sizeof(CMD_DATA)); ex_data.val1 = hw_ops->get_gmac_register( - pdata, (u8 *)(pdata->mac_regs + ex_data.val0)); + pdata, (u8 *)(pdata->base_mem + ex_data.val0)); memcpy(data, &ex_data, sizeof(CMD_DATA)); out_total_size = ioctl_cmd_size + sizeof(CMD_DATA); if (copy_to_user((void *)arg, (void *)buf, @@ -576,10 +471,10 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, goto err; break; - case FXGMAC_SET_GMAC_REG: + case FXGMAC_SET_REG: memcpy(&ex_data, data, sizeof(CMD_DATA)); regval = hw_ops->set_gmac_register( - pdata, (u8 *)(pdata->mac_regs + ex_data.val0), + pdata, (u8 *)(pdata->base_mem + ex_data.val0), ex_data.val1); ret = (regval == 0 ? 
true : false); break; @@ -684,104 +579,3 @@ long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, kfree(buf); return FXGMAC_FAIL; } - -#ifdef HAVE_FXGMAC_DEBUG_FS - -static struct file_operations fxgmac_dbg_netdev_ops_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = fxgmac_dbg_netdev_ops_read, - .write = fxgmac_dbg_netdev_ops_write, - .unlocked_ioctl = fxgmac_dbg_netdev_ops_ioctl, -}; - -/** - * fxgmac_dbg_adapter_init - setup the debugfs directory for the adapter - * @adapter: the adapter that is starting up - **/ -void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata) -{ - const char *name = pdata->drv_name; - struct dentry *pfile; - - pdata->expansion.dbg_adapter = - debugfs_create_dir(name, pdata->expansion.fxgmac_dbg_root); - if (pdata->expansion.dbg_adapter) { - pfile = debugfs_create_file("netdev_ops", 0600, - pdata->expansion.dbg_adapter, pdata, - &fxgmac_dbg_netdev_ops_fops); - if (!pfile) - DPRINTK("debugfs netdev_ops for %s failed\n", name); - } else { - DPRINTK("debugfs entry for %s failed\n", name); - } -} - -/** - * fxgmac_dbg_adapter_exit - clear out the adapter's debugfs entries - * @adapter: board private structure - **/ -void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata) -{ - if (pdata->expansion.dbg_adapter) - debugfs_remove_recursive(pdata->expansion.dbg_adapter); - pdata->expansion.dbg_adapter = NULL; -} - -/** - * fxgmac_dbg_init - start up debugfs for the driver - **/ -void fxgmac_dbg_init(struct fxgmac_pdata *pdata) -{ - unsigned int i; - char num[3]; - const char debug_path[] = "/sys/kernel/debug/"; - const char file_prefix[] = "fuxi_"; - char file_path[50]; - char file_name[8]; - - /* init file_path */ - memset(file_path, '\0', sizeof(file_path)); - memcpy(file_path, debug_path, sizeof(debug_path)); - - for (i = 0; i < DF_MAX_NIC_NUM; i++) { - /* init num and filename */ - memset(num, '\0', sizeof(num)); - memset(file_name, '\0', sizeof(file_name)); - - /* int to string */ - sprintf(num, "%d", i); - 
- /* file name */ - memcpy(file_name, file_prefix, sizeof(file_prefix)); - memcpy(file_name + strlen(file_prefix), num, sizeof(num)); - - /* file path */ - memcpy(file_path + sizeof(debug_path) - 1, file_name, - sizeof(file_name)); - - /* whether file exist */ - pdata->expansion.fxgmac_dbg_root = - debugfs_lookup(file_name, NULL); - if (!pdata->expansion.fxgmac_dbg_root) { - /* create file */ - pdata->expansion.fxgmac_dbg_root = - debugfs_create_dir(file_name, NULL); - if (IS_ERR(pdata->expansion.fxgmac_dbg_root)) - DPRINTK("fxgmac init of debugfs failed\n"); - - break; - } - } -} - -/** - * fxgmac_dbg_exit - clean out the driver's debugfs entries - **/ -void fxgmac_dbg_exit(struct fxgmac_pdata *pdata) -{ - if (pdata->expansion.fxgmac_dbg_root) - debugfs_remove_recursive(pdata->expansion.fxgmac_dbg_root); -} - -#endif /* HAVE_XLGMAC_DEBUG_FS */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c index 0ae527a068388..330f736de2a57 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-net.c @@ -1,20 +1,294 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include -#include -#include -#include -#include -#include - -#include "fuxi-os.h" #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" static int fxgmac_one_poll_rx(struct napi_struct *, int); static int fxgmac_one_poll_tx(struct napi_struct *, int); static int fxgmac_all_poll(struct napi_struct *, int); +static int fxgmac_dev_read(struct fxgmac_channel *channel); + +void fxgmac_lock(struct fxgmac_pdata *pdata) +{ + mutex_lock(&pdata->expansion.mutex); +} + +void fxgmac_unlock(struct fxgmac_pdata *pdata) +{ + mutex_unlock(&pdata->expansion.mutex); +} + +#ifdef FXGMAC_ESD_CHECK_ENABLED +static void fxgmac_schedule_esd_work(struct fxgmac_pdata *pdata) +{ + set_bit(FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, + pdata->expansion.task_flags); + schedule_delayed_work(&pdata->expansion.esd_work, FXGMAC_ESD_INTERVAL); +} + +static void fxgmac_update_esd_stats(struct fxgmac_pdata *pdata) +{ + u32 value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXEXCESSIVECOLLSIONFRAMES); + pdata->expansion.esd_stats.tx_abort_excess_collisions += value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXUNDERFLOWERROR_LO); + pdata->expansion.esd_stats.tx_dma_underrun += value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXCARRIERERRORFRAMES); + pdata->expansion.esd_stats.tx_lost_crs += value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXLATECOLLISIONFRAMES); + pdata->expansion.esd_stats.tx_late_collisions += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXCRCERROR_LO); + pdata->expansion.esd_stats.rx_crc_errors += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXALIGNERROR); + pdata->expansion.esd_stats.rx_align_errors += value; + + value = readreg(pdata->pAdapter, pdata->mac_regs + MMC_RXRUNTERROR); + pdata->expansion.esd_stats.rx_runt_errors += value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXSINGLECOLLISION_G); + pdata->expansion.esd_stats.single_collisions += value; + + 
value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXMULTIPLECOLLISION_G); + pdata->expansion.esd_stats.multi_collisions += value; + + value = readreg(pdata->pAdapter, + pdata->mac_regs + MMC_TXDEFERREDFRAMES); + pdata->expansion.esd_stats.tx_deferred_frames += value; +} + +static void fxgmac_check_esd_work(struct fxgmac_pdata *pdata) +{ + FXGMAC_ESD_STATS *stats = &pdata->expansion.esd_stats; + int i = 0; + u32 regval; + + /* ESD test will make recv crc errors more than 4,294,967,xxx + * in one second. + */ + if (stats->rx_crc_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->rx_align_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->rx_runt_errors > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_abort_excess_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_dma_underrun > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_lost_crs > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_late_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->single_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->multi_collisions > FXGMAC_ESD_ERROR_THRESHOLD || + stats->tx_deferred_frames > FXGMAC_ESD_ERROR_THRESHOLD) { + dev_info(pdata->dev, "%s - Error:\n", __func__); + dev_info(pdata->dev, "rx_crc_errors %ul.\n", + stats->rx_crc_errors); + dev_info(pdata->dev, "rx_align_errors %ul.\n", + stats->rx_align_errors); + dev_info(pdata->dev, "rx_runt_errors %ul.\n", + stats->rx_runt_errors); + dev_info(pdata->dev, "tx_abort_excess_collisions %ul.\n", + stats->tx_abort_excess_collisions); + dev_info(pdata->dev, "tx_dma_underrun %ul.\n", + stats->tx_dma_underrun); + dev_info(pdata->dev, "tx_lost_crs %ul.\n", stats->tx_lost_crs); + dev_info(pdata->dev, "tx_late_collisions %ul.\n", + stats->tx_late_collisions); + dev_info(pdata->dev, "single_collisions %ul.\n", + stats->single_collisions); + dev_info(pdata->dev, "multi_collisions %ul.\n", + stats->multi_collisions); + dev_info(pdata->dev, "tx_deferred_frames %ul.\n", + stats->tx_deferred_frames); + + dev_info(pdata->dev, "esd error triggered, restart 
NIC...\n"); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + while ((regval == FXGMAC_PCIE_LINK_DOWN) && + (i++ < FXGMAC_PCIE_RECOVER_TIMES)) { + usleep_range_ex(pdata->pAdapter, 200, 200); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + dev_info(pdata->dev, + "pcie recovery link cost %d(200us)\n", i); + } + + if (regval == FXGMAC_PCIE_LINK_DOWN) { + dev_info(pdata->dev, + "pcie link down, recovery failed.\n"); + return; + } + + if (regval & FXGMAC_PCIE_IO_MEM_MASTER_ENABLE) { + pdata->hw_ops.esd_restore_pcie_cfg(pdata); + cfg_r32(pdata, REG_PCI_COMMAND, ®val); + dev_info(pdata->dev, + "pci command reg is %x after restoration.\n", + regval); + fxgmac_restart_dev(pdata); + } + } + + memset(stats, 0, sizeof(FXGMAC_ESD_STATS)); +} + +static void fxgmac_esd_work(struct work_struct *work) +{ + struct fxgmac_pdata *pdata = container_of(work, struct fxgmac_pdata, + expansion.esd_work.work); + + rtnl_lock(); + if (!netif_running(pdata->netdev) || + !test_and_clear_bit(FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, + pdata->expansion.task_flags)) + goto out_unlock; + + fxgmac_update_esd_stats(pdata); + fxgmac_check_esd_work(pdata); + fxgmac_schedule_esd_work(pdata); + +out_unlock: + rtnl_unlock(); +} + +static void fxgmac_cancel_esd_work(struct fxgmac_pdata *pdata) +{ + struct work_struct *work = &pdata->expansion.esd_work.work; + + if (!work->func) { + dev_info(pdata->dev, "work func is NULL.\n"); + return; + } + + cancel_delayed_work_sync(&pdata->expansion.esd_work); +} +#endif + +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED +static void fxgmac_schedule_loopback_work(struct fxgmac_pdata *pdata) +{ + schedule_delayed_work(&pdata->expansion.loopback_work, + FXGMAC_LOOPBACK_CHECK_INTERVAL); +} + +static void fxgmac_loopback_work(struct work_struct *work) +{ + int ret; + u32 regval; + struct fxgmac_pdata *pdata = container_of(work, struct fxgmac_pdata, + expansion.loopback_work.work); + + if (pdata->expansion.lb_test_flag || pdata->expansion.phy_link) + goto reschedule; + + if 
(!pdata->expansion.lb_cable_flag) { + ret = pdata->hw_ops.read_ephy_reg(pdata, REG_MII_STAT1000, + ®val); + if (ret < 0) { + DPRINTK("%s:read ephy failed\n", __func__); + goto reschedule; + } + + // DPRINTK("%s: regval = 0x%x\n", __func__, regval); + regval = FXGMAC_GET_REG_BITS(regval, + PHY_MII_STAT1000_CFG_ERROR_POS, + PHY_MII_STAT1000_CFG_ERROR_LEN); + if (regval == 1) { + pdata->expansion.lb_cable_detect_count++; + if (pdata->expansion.lb_cable_detect_count == + FXGMAC_PHY_LOOPBACK_DETECT_THRESOLD) { + pdata->expansion.lb_cable_flag = 1; + pdata->hw_ops.setup_cable_loopback(pdata); + pdata->expansion.lb_cable_detect_count = 0; + } + } + } + +reschedule: + fxgmac_schedule_loopback_work(pdata); +} + +static void fxgmac_cancel_loopback_work(struct fxgmac_pdata *pdata) +{ + struct work_struct *work = &pdata->expansion.loopback_work.work; + + if (!work->func) { + dev_info(pdata->dev, "work func is NULL.\n"); + return; + } + + cancel_delayed_work_sync(&pdata->expansion.loopback_work); +} +#endif + +#ifdef FXGMAC_ASPM_ENABLED +void fxgmac_schedule_aspm_config_work(struct fxgmac_pdata *pdata) +{ + if (!pdata->expansion.aspm_work_active && !pdata->expansion.aspm_en && + pdata->expansion.dev_state != FXGMAC_DEV_CLOSE) { + schedule_delayed_work(&pdata->expansion.aspm_config_work, + FXGMAC_ASPM_INTERVAL); + pdata->expansion.aspm_work_active = true; + } +} + +static void fxgmac_aspm_config_work(struct work_struct *work) +{ + u32 pcie_low_power = PCIE_LP_ASPM_LTR | PCIE_LP_ASPM_L1SS | + PCIE_LP_ASPM_L1; + struct fxgmac_pdata *pdata = container_of( + work, struct fxgmac_pdata, expansion.aspm_config_work.work); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + if (pdata->expansion.aspm_work_active) { + hw_ops->pcie_init(pdata, pcie_low_power & PCIE_LP_ASPM_LTR, + pcie_low_power & PCIE_LP_ASPM_L1SS, + pcie_low_power & PCIE_LP_ASPM_L1, + pcie_low_power & PCIE_LP_ASPM_L0S); + pdata->expansion.aspm_en = true; + DPRINTK("NIC set aspm at link down\n"); + } + 
pdata->expansion.aspm_work_active = false; +} + +void fxgmac_cancel_aspm_config_work(struct fxgmac_pdata *pdata) +{ + struct work_struct *work = &pdata->expansion.aspm_config_work.work; + + if (!work->func) { + dev_info(pdata->dev, "work func is NULL.\n"); + return; + } + + cancel_delayed_work_sync(&pdata->expansion.aspm_config_work); +} + +bool fxgmac_aspm_action_linkup(struct fxgmac_pdata *pdata) +{ + if ((pdata->expansion.aspm_work_active || pdata->expansion.aspm_en)) { + DPRINTK("cancel aspm work.\n"); + pdata->expansion.aspm_work_active = false; + fxgmac_cancel_aspm_config_work(pdata); + if (pdata->expansion.aspm_en) { + DPRINTK("reset from aspm.\n"); + pdata->expansion.aspm_en = false; + pdata->expansion.recover_from_aspm = true; + schedule_work(&pdata->expansion.restart_work); + return true; + } + pdata->expansion.aspm_en = false; + } + + return false; +} + +#endif unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata) { @@ -53,7 +327,6 @@ unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, struct in6_addr *addr_ip6_solicited = (struct in6_addr *)solicited_ipval; int err = -EADDRNOTAVAIL; - unsigned char *ret; if (ipval) { addr_ip6 = (struct in6_addr *)ipval; @@ -98,9 +371,7 @@ unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, DPRINTK("%s get ipv6 addr failed, use default.\n", __FUNCTION__); - ret = (err ? NULL : ipval); - - return ret; + return err ? 
NULL : ipval; } inline unsigned int fxgmac_tx_avail_desc(struct fxgmac_ring *ring) @@ -127,31 +398,52 @@ inline unsigned int fxgmac_rx_dirty_desc(struct fxgmac_ring *ring) return dirty; } -static int fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, - struct fxgmac_ring *ring, - unsigned int count) +static netdev_tx_t fxgmac_maybe_stop_tx_queue(struct fxgmac_channel *channel, + struct fxgmac_ring *ring, + unsigned int count) { struct fxgmac_pdata *pdata = channel->pdata; if (count > fxgmac_tx_avail_desc(ring)) { - netif_info( - pdata, drv, pdata->netdev, - "Tx queue stopped, not enough descriptors available\n"); + if (netif_msg_tx_done(pdata)) { + netif_info( + pdata, drv, pdata->netdev, + "Tx queue stopped, not enough descriptors available\n"); + } + + /* Avoid wrongly optimistic queue wake-up: tx poll thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); netif_stop_subqueue(pdata->netdev, channel->queue_index); ring->tx.queue_stopped = 1; - /* If we haven't notified the hardware because of xmit_more - * support, tell it now + /* Sync with tx poll: + * - publish queue status and cur ring index (write barrier) + * - refresh dirty ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing tx poll thread + * can't. 
*/ - if (ring->tx.xmit_more) - pdata->hw_ops.tx_start_xmit(channel, ring); - if (netif_msg_tx_done(pdata)) - DPRINTK("about stop tx q, ret BUSY\n"); - - return NETDEV_TX_BUSY; + smp_mb(); /* memory barrier */ + if (count <= fxgmac_tx_avail_desc(ring)) { + ring->tx.queue_stopped = 0; + netif_start_subqueue(pdata->netdev, + channel->queue_index); + fxgmac_tx_start_xmit(channel, ring); + } else { + /* If we haven't notified the hardware because of + * xmit_more support, tell it now + */ + if (ring->tx.xmit_more) + fxgmac_tx_start_xmit(channel, ring); + if (netif_msg_tx_done(pdata)) + DPRINTK("about stop tx q, ret BUSY\n"); + return NETDEV_TX_BUSY; + } } - return 0; + return NETDEV_TX_OK; } static void fxgmac_prep_vlan(struct sk_buff *skb, @@ -210,11 +502,7 @@ static void fxgmac_prep_tx_pkt(struct fxgmac_pdata *pdata, struct fxgmac_ring *ring, struct sk_buff *skb, struct fxgmac_pkt_info *pkt_info) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)) skb_frag_t *frag; -#else - struct skb_frag_struct *frag; -#endif unsigned int context_desc; unsigned int len; unsigned int i; @@ -303,8 +591,10 @@ static void fxgmac_prep_tx_pkt(struct fxgmac_pdata *pdata, static int fxgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) { unsigned int rx_buf_size; + unsigned int max_mtu; - if (mtu > FXGMAC_JUMBO_PACKET_MTU) { + max_mtu = FXGMAC_JUMBO_PACKET_MTU - ETH_HLEN; + if (mtu > max_mtu) { netdev_alert(netdev, "MTU exceeds maximum supported value\n"); return -EINVAL; } @@ -320,154 +610,56 @@ static int fxgmac_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu) return rx_buf_size; } -static void fxgmac_enable_rx_tx_ints(struct fxgmac_pdata *pdata) -{ - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - struct fxgmac_channel *channel; - enum fxgmac_int int_id; - unsigned int i; - - channel = pdata->channel_head; - for (i = 0; i < pdata->channel_count; i++, channel++) { - if (channel->tx_ring && channel->rx_ring) - int_id = FXGMAC_INT_DMA_CH_SR_TI_RI; - else if 
(channel->tx_ring) - int_id = FXGMAC_INT_DMA_CH_SR_TI; - else if (channel->rx_ring) - int_id = FXGMAC_INT_DMA_CH_SR_RI; - else - continue; - - hw_ops->enable_int(channel, int_id); - } -} - -static void fxgmac_phy_process(struct fxgmac_pdata *pdata) -{ - int cur_link = 0; - int regval = 0; - int cur_speed = 0; - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - - regval = hw_ops->get_ephy_state(pdata); - - /* We should make sure that PHY is done with the reset */ - if (regval & MGMT_EPHY_CTRL_STA_EPHY_RESET) { - pdata->expansion.phy_link = false; - return; - } - - cur_link = FXGMAC_GET_REG_BITS(regval, - MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS, - MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN); - if (pdata->expansion.phy_link != cur_link) { - pdata->expansion.phy_link = cur_link; - if (pdata->expansion.phy_link) { - cur_speed = FXGMAC_GET_REG_BITS( - regval, MGMT_EPHY_CTRL_STA_SPEED_POS, - MGMT_EPHY_CTRL_STA_SPEED_LEN); - pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 : - (cur_speed == 1) ? SPEED_100 : - SPEED_10; - pdata->phy_duplex = FXGMAC_GET_REG_BITS( - regval, MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS, - MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN); - hw_ops->config_mac_speed(pdata); - - hw_ops->enable_rx(pdata); - hw_ops->enable_tx(pdata); - netif_carrier_on(pdata->netdev); - if (netif_running(pdata->netdev)) { - netif_tx_wake_all_queues(pdata->netdev); - DPRINTK("%s now is link up, mac_speed=%d.\n", - FXGMAC_DRV_NAME, pdata->phy_speed); - } - } else { - netif_carrier_off(pdata->netdev); - netif_tx_stop_all_queues(pdata->netdev); - pdata->phy_speed = SPEED_UNKNOWN; - pdata->phy_duplex = DUPLEX_UNKNOWN; - hw_ops->disable_rx(pdata); - hw_ops->disable_tx(pdata); - DPRINTK("%s now is link down\n", FXGMAC_DRV_NAME); - } - } -} - -static int fxgmac_phy_poll(struct napi_struct *napi, int budget) +#ifdef FXGMAC_MISC_ENABLED +static int fxgmac_misc_poll(struct napi_struct *napi, int budget) { struct fxgmac_pdata *pdata = - container_of(napi, struct fxgmac_pdata, expansion.napi_phy); + 
container_of(napi, struct fxgmac_pdata, expansion.napi_misc); struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - fxgmac_phy_process(pdata); if (napi_complete_done(napi, 0)) hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); return 0; } -static irqreturn_t fxgmac_phy_isr(int irq, void *data) +static irqreturn_t fxgmac_misc_isr(int irq, void *data) { struct fxgmac_pdata *pdata = data; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval; regval = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); - if (!(regval & MGMT_INT_CTRL0_INT_STATUS_PHY)) + if (!(regval & MGMT_INT_CTRL0_INT_STATUS_MISC)) return IRQ_HANDLED; hw_ops->disable_msix_one_interrupt(pdata, MSI_ID_PHY_OTHER); - hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL); - if (napi_schedule_prep(&pdata->expansion.napi_phy)) { - __napi_schedule_irqoff(&pdata->expansion.napi_phy); - } + hw_ops->clear_misc_int_status(pdata); + + napi_schedule_irqoff(&pdata->expansion.napi_misc); return IRQ_HANDLED; } +#endif static irqreturn_t fxgmac_isr(int irq, void *data) { - unsigned int dma_isr, dma_ch_isr, mac_isr; + unsigned int dma_ch_isr, dma_isr, mac_isr; struct fxgmac_pdata *pdata = data; struct fxgmac_channel *channel; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - unsigned int i, ti, ri; + unsigned int i; u32 val; - dma_isr = readreg(pdata->pAdapter, pdata->mac_regs + DMA_ISR); - val = readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0); - if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK)) - return IRQ_HANDLED; + if (!(val & MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK)) + return IRQ_NONE; - hw_ops->disable_mgm_interrupt(pdata); - pdata->expansion.mgm_intctrl_val = val; - - pdata->stats.mgmt_int_isr++; + dma_isr = readreg(pdata->pAdapter, pdata->mac_regs + DMA_ISR); for (i = 0; i < pdata->channel_count; i++) { channel = pdata->channel_head + i; - dma_ch_isr = readl(FXGMAC_DMA_REG(channel, DMA_CH_SR)); - netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n", - i, dma_ch_isr); - - /* 
The TI or RI interrupt bits may still be set even if using - * per channel DMA interrupts. Check to be sure those are not - * enabled before using the private data napi structure. - */ - ti = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TI_POS, - DMA_CH_SR_TI_LEN); - ri = FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_RI_POS, - DMA_CH_SR_RI_LEN); - if (!pdata->per_channel_irq && (ti || ri)) { - if (napi_schedule_prep(&pdata->expansion.napi)) { - pdata->stats.napi_poll_isr++; - /* Turn on polling */ - __napi_schedule_irqoff(&pdata->expansion.napi); - } - } if (FXGMAC_GET_REG_BITS(dma_ch_isr, DMA_CH_SR_TPS_POS, DMA_CH_SR_TPS_LEN)) @@ -499,7 +691,7 @@ static irqreturn_t fxgmac_isr(int irq, void *data) if (FXGMAC_GET_REG_BITS(dma_isr, DMA_ISR_MACIS_POS, DMA_ISR_MACIS_LEN)) { mac_isr = readl(pdata->mac_regs + MAC_ISR); - +#ifdef FXGMAC_MISC_ENABLED if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCTXIS_POS, MAC_ISR_MMCTXIS_LEN)) hw_ops->tx_mmc_int(pdata); @@ -507,18 +699,18 @@ static irqreturn_t fxgmac_isr(int irq, void *data) if (FXGMAC_GET_REG_BITS(mac_isr, MAC_ISR_MMCRXIS_POS, MAC_ISR_MMCRXIS_LEN)) hw_ops->rx_mmc_int(pdata); - +#endif /* Clear all interrupt signals */ writel(mac_isr, (pdata->mac_regs + MAC_ISR)); } - if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { - hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, &val); - if (napi_schedule_prep(&pdata->expansion.napi)) { - pdata->stats.napi_poll_isr++; - /* Turn on polling */ - __napi_schedule_irqoff(&pdata->expansion.napi); - } + hw_ops->disable_mgm_interrupt(pdata); + pdata->stats.mgmt_int_isr++; + + if (napi_schedule_prep(&pdata->expansion.napi)) { + pdata->stats.napi_poll_isr++; + /* Turn on polling */ + __napi_schedule_irqoff(&pdata->expansion.napi); } return IRQ_HANDLED; @@ -540,9 +732,7 @@ static irqreturn_t fxgmac_dma_isr(int irq, void *data) DMA_CH_SR_TI_LEN, 1); writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_SR)); - if (napi_schedule_prep(&channel->expansion.napi_tx)) { - 
__napi_schedule_irqoff(&channel->expansion.napi_tx); - } + napi_schedule_irqoff(&channel->expansion.napi_tx); } else { message_id = channel->queue_index; hw_ops->disable_msix_one_interrupt(pdata, message_id); @@ -553,27 +743,17 @@ static irqreturn_t fxgmac_dma_isr(int irq, void *data) DMA_CH_SR_RI_LEN, 1); writereg(pdata->pAdapter, regval, FXGMAC_DMA_REG(channel, DMA_CH_SR)); - if (napi_schedule_prep(&channel->expansion.napi_rx)) { - __napi_schedule_irqoff(&channel->expansion.napi_rx); - } + napi_schedule_irqoff(&channel->expansion.napi_rx); } return IRQ_HANDLED; } -#if FXGMAC_TX_HANG_TIMER_EN -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) +#if FXGMAC_TX_HANG_TIMER_ENABLED static void fxgmac_tx_hang_timer_handler(struct timer_list *t) -#else -static void fxgmac_tx_hang_timer_handler(unsigned long data) -#endif { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) struct fxgmac_channel *channel = from_timer(channel, t, expansion.tx_hang_timer); -#else - struct fxgmac_channel *channel = (struct fxgmac_channel *)data; -#endif #if FXGMAC_TX_HANG_CHECH_DIRTY struct fxgmac_ring *ring = channel->tx_ring; @@ -636,77 +816,94 @@ static void fxgmac_napi_enable(struct fxgmac_pdata *pdata, unsigned int add) { struct fxgmac_channel *channel; unsigned int i; + u32 tx_napi = 0, rx_napi = 0; + +#ifdef FXGMAC_MISC_ENABLED + u32 misc_napi = 0; + + misc_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN); +#endif + tx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN); + rx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS, + FXGMAC_FLAG_RX_NAPI_FREE_LEN); if (pdata->per_channel_irq) { channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { - if (add) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) - netif_napi_add_weight( - pdata->netdev, - 
&channel->expansion.napi_rx, - fxgmac_one_poll_rx, NAPI_POLL_WEIGHT); -#else - netif_napi_add(pdata->netdev, - &channel->expansion.napi_rx, - fxgmac_one_poll_rx, - NAPI_POLL_WEIGHT); -#endif + if (!FXGMAC_GET_REG_BITS( + rx_napi, i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN)) { + if (add) { + netif_napi_add_weight( + pdata->netdev, + &channel->expansion.napi_rx, + fxgmac_one_poll_rx, + NAPI_POLL_WEIGHT); + } + napi_enable(&channel->expansion.napi_rx); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); } - napi_enable(&channel->expansion.napi_rx); - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && !tx_napi) { netif_napi_add_weight( pdata->netdev, &channel->expansion.napi_tx, fxgmac_one_poll_tx, NAPI_POLL_WEIGHT); -#else - netif_napi_add(pdata->netdev, - &channel->expansion.napi_tx, - fxgmac_one_poll_tx, - NAPI_POLL_WEIGHT); -#endif napi_enable(&channel->expansion.napi_tx); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); } if (netif_msg_drv(pdata)) - DPRINTK("napi_enable, msix ch%d napi enabled done, add=%d\n", + DPRINTK("napi_enable, msix ch%d napi enabled done,add=%d\n", i, add); } - /* for phy */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) - netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi_phy, - fxgmac_phy_poll, NAPI_POLL_WEIGHT); -#else - netif_napi_add(pdata->netdev, &pdata->expansion.napi_phy, - fxgmac_phy_poll, NAPI_POLL_WEIGHT); +#ifdef FXGMAC_MISC_ENABLED + /* for misc */ + if (!misc_napi) { + netif_napi_add_weight(pdata->netdev, + &pdata->expansion.napi_misc, + fxgmac_misc_poll, + NAPI_POLL_WEIGHT); + napi_enable(&pdata->expansion.napi_misc); + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + 
pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); + } #endif - napi_enable(&pdata->expansion.napi_phy); } else { i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN); if (!i) { if (add) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)) netif_napi_add_weight(pdata->netdev, &pdata->expansion.napi, fxgmac_all_poll, NAPI_POLL_WEIGHT); -#else - netif_napi_add(pdata->netdev, - &pdata->expansion.napi, - fxgmac_all_poll, - NAPI_POLL_WEIGHT); -#endif } napi_enable(&pdata->expansion.napi); pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, - FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 1); + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, + FXGMAC_NAPI_ENABLE); } } } @@ -715,31 +912,76 @@ static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) { struct fxgmac_channel *channel; unsigned int i; + u32 tx_napi = 0, rx_napi = 0; +#ifdef FXGMAC_MISC_ENABLED + u32 misc_napi = 0; +#endif if (pdata->per_channel_irq) { +#ifdef FXGMAC_MISC_ENABLED + misc_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN); +#endif + tx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN); + rx_napi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS, + FXGMAC_FLAG_RX_NAPI_FREE_LEN); channel = pdata->channel_head; if (channel != NULL) { for (i = 0; i < pdata->channel_count; i++, channel++) { - napi_disable(&channel->expansion.napi_rx); - - if (del) { - netif_napi_del( + if (FXGMAC_GET_REG_BITS( + rx_napi, i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN)) { + napi_disable( &channel->expansion.napi_rx); + + if (del) { + netif_napi_del( + &channel->expansion + .napi_rx); + } + pdata->expansion + .int_flags = FXGMAC_SET_REG_BITS( + 
pdata->expansion.int_flags, + FXGMAC_FLAG_RX_NAPI_FREE_POS + + i, + FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); } - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && + tx_napi) { napi_disable( &channel->expansion.napi_tx); netif_napi_del( &channel->expansion.napi_tx); + pdata->expansion + .int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_TX_NAPI_FREE_POS, + FXGMAC_FLAG_TX_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); } if (netif_msg_drv(pdata)) - DPRINTK("napi_disable, msix ch%d napi disabled done, del=%d\n", + DPRINTK("napi_disable, msix ch%d napi disabled done,del=%d\n", i, del); } - napi_disable(&pdata->expansion.napi_phy); - netif_napi_del(&pdata->expansion.napi_phy); +#ifdef FXGMAC_MISC_ENABLED + if (misc_napi) { + napi_disable(&pdata->expansion.napi_misc); + netif_napi_del(&pdata->expansion.napi_misc); + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_NAPI_FREE_POS, + FXGMAC_FLAG_MISC_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); + } +#endif } } else { i = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, @@ -753,7 +995,8 @@ static void fxgmac_napi_disable(struct fxgmac_pdata *pdata, unsigned int del) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_NAPI_FREE_POS, - FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, 0); + FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN, + FXGMAC_NAPI_DISABLE); } } } @@ -765,6 +1008,10 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) unsigned int i; int ret; u32 msi, msix, need_free; + u32 tx = 0, rx = 0; +#ifdef FXGMAC_MISC_ENABLED + u32 misc = 0; +#endif msi = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_MSI_POS, FXGMAC_FLAG_MSI_LEN); @@ -793,28 +1040,27 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, - FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 1); + 
FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, + FXGMAC_IRQ_ENABLE); } } if (!pdata->per_channel_irq) return 0; - ret = devm_request_irq(pdata->dev, pdata->expansion.phy_irq, - fxgmac_phy_isr, 0, netdev->name, pdata); - if (ret) { - netdev_alert(netdev, "error requesting phy irq %d, ret = %d\n", - pdata->expansion.phy_irq, ret); - return ret; - } - + tx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN); + rx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS, + FXGMAC_FLAG_RX_IRQ_FREE_LEN); channel = pdata->channel_head; for (i = 0; i < pdata->channel_count; i++, channel++) { snprintf(channel->expansion.dma_irq_name, sizeof(channel->expansion.dma_irq_name) - 1, "%s-ch%d-Rx-%u", netdev_name(netdev), i, channel->queue_index); - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && !tx) { snprintf(channel->expansion.dma_irq_name_tx, sizeof(channel->expansion.dma_irq_name_tx) - 1, "%s-ch%d-Tx-%u", netdev_name(netdev), i, @@ -826,31 +1072,67 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) channel->expansion.dma_irq_name_tx, channel); if (ret) { - DPRINTK("fxgmac_req_irqs, err with MSIx irq request for ch %d tx, ret=%d\n", + netdev_alert( + netdev, + "fxgmac_req_irqs, err with MSIx irq request for ch %d tx, ret=%d\n", i, ret); - /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ - devm_free_irq(pdata->dev, - channel->expansion.dma_irq_tx, - channel); - return ret; + goto err_irq; } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, FXGMAC_IRQ_ENABLE); + if (netif_msg_drv(pdata)) DPRINTK("fxgmac_req_irqs, MSIx irq_tx request ok, ch=%d, irq=%d,%s\n", i, channel->expansion.dma_irq_tx, channel->expansion.dma_irq_name_tx); } - ret = devm_request_irq(pdata->dev, channel->dma_irq, - fxgmac_dma_isr, 0, - channel->expansion.dma_irq_name, - channel); + + if 
(!FXGMAC_GET_REG_BITS( + rx, i, FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + ret = devm_request_irq(pdata->dev, channel->dma_irq, + fxgmac_dma_isr, 0, + channel->expansion.dma_irq_name, + channel); + if (ret) { + netdev_alert(netdev, + "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_ENABLE); + } + } + +#ifdef FXGMAC_MISC_ENABLED + misc = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN); + if (!misc) { + snprintf(pdata->expansion.misc_irq_name, + sizeof(pdata->expansion.misc_irq_name) - 1, "%s-misc", + netdev_name(netdev)); + ret = devm_request_irq(pdata->dev, pdata->expansion.misc_irq, + fxgmac_misc_isr, 0, + pdata->expansion.misc_irq_name, pdata); if (ret) { - netdev_alert(netdev, "error requesting irq %d\n", - channel->dma_irq); + netdev_alert(netdev, + "error requesting misc irq %d, ret = %d\n", + pdata->expansion.misc_irq, ret); goto err_irq; } + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, FXGMAC_IRQ_ENABLE); } - +#endif if (netif_msg_drv(pdata)) DPRINTK("fxgmac_req_irqs, MSIx irq request ok, total=%d,%d~%d\n", i, (pdata->channel_head)[0].dma_irq, @@ -858,20 +1140,48 @@ static int fxgmac_request_irqs(struct fxgmac_pdata *pdata) return 0; err_irq: - DPRINTK("fxgmac_req_irqs, err with MSIx irq request at %d, ret=%d\n", i, - ret); + netdev_alert(netdev, + "fxgmac_req_irqs, err with MSIx irq request at %d, ret=%d\n", + i, ret); if (pdata->per_channel_irq) { for (i--, channel--; i < pdata->channel_count; i--, channel--) { - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && tx) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + 
FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); devm_free_irq(pdata->dev, channel->expansion.dma_irq_tx, channel); } - devm_free_irq(pdata->dev, channel->dma_irq, channel); + + if (FXGMAC_GET_REG_BITS( + rx, i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->dma_irq, + channel); + } } - devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); +#ifdef FXGMAC_MISC_ENABLED + if (misc) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, pdata->expansion.misc_irq, + pdata); + } +#endif } return ret; } @@ -881,6 +1191,10 @@ static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) struct fxgmac_channel *channel; unsigned int i = 0; u32 need_free, msix; + u32 tx = 0, rx = 0; +#ifdef FXGMAC_MISC_ENABLED + u32 misc = 0; +#endif msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN); @@ -895,17 +1209,36 @@ static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( pdata->expansion.int_flags, FXGMAC_FLAG_LEGACY_IRQ_FREE_POS, - FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, 0); + FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); } } if (!pdata->per_channel_irq) return; +#ifdef FXGMAC_MISC_ENABLED + misc = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN); +#endif + tx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN); + rx = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS, + FXGMAC_FLAG_RX_IRQ_FREE_LEN); + channel = pdata->channel_head; if (channel != 
NULL) { for (i = 0; i < pdata->channel_count; i++, channel++) { - if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i)) { + if (FXGMAC_IS_CHANNEL_WITH_TX_IRQ(i) && tx) { + pdata->expansion.int_flags = + FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_TX_IRQ_FREE_POS, + FXGMAC_FLAG_TX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); devm_free_irq(pdata->dev, channel->expansion.dma_irq_tx, channel); @@ -913,10 +1246,31 @@ static void fxgmac_free_irqs(struct fxgmac_pdata *pdata) DPRINTK("fxgmac_free_irqs, MSIx irq_tx clear done, ch=%d\n", i); } - devm_free_irq(pdata->dev, channel->dma_irq, channel); + + if (FXGMAC_GET_REG_BITS( + rx, i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN)) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_RX_IRQ_FREE_POS + i, + FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, channel->dma_irq, + channel); + } } - devm_free_irq(pdata->dev, pdata->expansion.phy_irq, pdata); +#ifdef FXGMAC_MISC_ENABLED + if (misc) { + pdata->expansion.int_flags = FXGMAC_SET_REG_BITS( + pdata->expansion.int_flags, + FXGMAC_FLAG_MISC_IRQ_FREE_POS, + FXGMAC_FLAG_MISC_IRQ_FREE_LEN, + FXGMAC_IRQ_DISABLE); + devm_free_irq(pdata->dev, pdata->expansion.misc_irq, + pdata); + } +#endif } if (netif_msg_drv(pdata)) DPRINTK("fxgmac_free_irqs, MSIx rx irq clear done, total=%d\n", @@ -975,7 +1329,7 @@ void fxgmac_free_rx_data(struct fxgmac_pdata *pdata) */ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) { - u16 pcie_cap_offset; + u16 pcie_cap_offset = 0; u32 pcie_msi_mask_bits = 0; int ret = 0; @@ -984,9 +1338,9 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) ret = pci_read_config_dword(pdev, pcie_cap_offset, &pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR - "read pci config space MSI cap. failed, %d\n", - ret); + DPRINTK(KERN_ERR + "read pci config space MSI cap. 
failed, %d\n", + ret); ret = -EFAULT; } } @@ -996,8 +1350,8 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) PCI_CAP_ID_MSI_ENABLE_LEN, 0); ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR "write pci config space MSI mask failed, %d\n", - ret); + DPRINTK(KERN_ERR "write pci config space MSI mask failed, %d\n", + ret); ret = -EFAULT; } @@ -1006,7 +1360,7 @@ static int fxgmac_disable_pci_msi_config(struct pci_dev *pdev) static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) { - u16 pcie_cap_offset; + u16 pcie_cap_offset = 0; u32 pcie_msi_mask_bits = 0; int ret = 0; @@ -1015,9 +1369,9 @@ static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) ret = pci_read_config_dword(pdev, pcie_cap_offset, &pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR - "read pci config space MSIX cap. failed, %d\n", - ret); + DPRINTK(KERN_ERR + "read pci config space MSIX cap. failed, %d\n", + ret); ret = -EFAULT; } } @@ -1027,8 +1381,9 @@ static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) PCI_CAP_ID_MSIX_ENABLE_LEN, 0); ret = pci_write_config_dword(pdev, pcie_cap_offset, pcie_msi_mask_bits); if (ret) { - printk(KERN_ERR "write pci config space MSIX mask failed, %d\n", - ret); + DPRINTK(KERN_ERR + "write pci config space MSIX mask failed, %d\n", + ret); ret = -EFAULT; } @@ -1038,14 +1393,18 @@ static int fxgmac_disable_pci_msix_config(struct pci_dev *pdev) int fxgmac_start(struct fxgmac_pdata *pdata) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - struct net_device *netdev = pdata->netdev; + unsigned int pcie_low_power = PCIE_LP_ASPM_LTR; + u8 deviceid; int ret; - unsigned int pcie_low_power = 0; - u32 regval; if (netif_msg_drv(pdata)) DPRINTK("fxgmac start callin here.\n"); + if (pdata->expansion.dev_state != FXGMAC_DEV_OPEN && + pdata->expansion.dev_state != FXGMAC_DEV_STOP && + pdata->expansion.dev_state != FXGMAC_DEV_RESUME) + return 0; + /* must reset software again here, to avoid 
flushing tx queue error caused by the system only run probe * when installing driver on the arm platform. */ @@ -1071,54 +1430,78 @@ int fxgmac_start(struct fxgmac_pdata *pdata) hw_ops->reset_phy(pdata); hw_ops->release_phy(pdata); + cfg_r8(pdata, REG_PCI_REVID, &deviceid); hw_ops->pcie_init(pdata, pcie_low_power & PCIE_LP_ASPM_LTR, pcie_low_power & PCIE_LP_ASPM_L1SS, - pcie_low_power & PCIE_LP_ASPM_L1, + (deviceid != YT6801_NTO_VER) ? + true : + (pcie_low_power & PCIE_LP_ASPM_L1), pcie_low_power & PCIE_LP_ASPM_L0S); +#ifdef FXGMAC_ASPM_ENABLED + if (pdata->expansion.aspm_work_active || pdata->expansion.aspm_en) { + DPRINTK("cancel aspm work.\n"); + fxgmac_cancel_aspm_config_work(pdata); + pdata->expansion.aspm_en = false; + pdata->expansion.aspm_work_active = false; + } +#endif hw_ops->config_power_up(pdata); - - fxgmac_dismiss_all_int(pdata); + hw_ops->dismiss_all_int(pdata); ret = hw_ops->init(pdata); if (ret) { - printk("fxgmac hw init error.\n"); + DPRINTK("fxgmac hw init error.\n"); return ret; } +#ifdef FXGMAC_PHY_SLEEP_ENABLE + hw_ops->enable_phy_sleep(pdata); +#endif fxgmac_napi_enable(pdata, 1); ret = fxgmac_request_irqs(pdata); if (ret) goto err_napi; - hw_ops->enable_tx(pdata); - hw_ops->enable_rx(pdata); - - /* config interrupt to level signal */ - regval = (u32)readl((const volatile void *)(pdata->mac_regs + DMA_MR)); - regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_INTM_POS, DMA_MR_INTM_LEN, - 1); - regval = FXGMAC_SET_REG_BITS(regval, DMA_MR_QUREAD_POS, - DMA_MR_QUREAD_LEN, 1); - writel(regval, pdata->mac_regs + DMA_MR); - - writel(0xF0000000, - (volatile void *)(netdev->base_addr + MGMT_INT_CTRL0)); - + hw_ops->enable_mgm_interrupt(pdata); hw_ops->set_interrupt_moderation(pdata); if (pdata->per_channel_irq) - hw_ops->enable_msix_rxtxphyinterrupt(pdata); + hw_ops->enable_msix_rxtxinterrupt(pdata); + +#ifdef FXGMAC_ESD_CHECK_ENABLED + fxgmac_schedule_esd_work(pdata); +#endif + +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + pdata->expansion.lb_cable_flag 
= 0; + fxgmac_schedule_loopback_work(pdata); +#endif - fxgmac_enable_rx_tx_ints(pdata); + if (pdata->expansion.recover_phy_state) + fxgmac_set_phy_link_ksettings(pdata); hw_ops->led_under_active(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_START; + + if (!pdata->expansion.recover_from_aspm) { +#ifdef FXGMAC_ASPM_ENABLED + DPRINTK("start aspm work and phy timer.\n"); + fxgmac_schedule_aspm_config_work(pdata); + pdata->expansion.aspm_work_active = true; +#endif + fxgmac_phy_timer_init(pdata); + } + pdata->expansion.recover_from_aspm = false; return 0; err_napi: + if (!pdata->expansion.recover_from_aspm) + fxgmac_phy_timer_destroy(pdata); + fxgmac_napi_disable(pdata, 1); hw_ops->exit(pdata); - DPRINTK("fxgmac start callout with irq err.\n"); + dev_err(pdata->dev, "fxgmac start callout with irq err.\n"); return ret; } @@ -1130,19 +1513,32 @@ void fxgmac_stop(struct fxgmac_pdata *pdata) struct netdev_queue *txq; unsigned int i; - if (pdata->per_channel_irq) { - hw_ops->disable_msix_interrupt(pdata); - } else { - hw_ops->disable_mgm_interrupt(pdata); - } - - pdata->expansion.phy_link = false; + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + return; + pdata->expansion.dev_state = FXGMAC_DEV_STOP; netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); + if (!pdata->expansion.recover_from_aspm) + fxgmac_phy_timer_destroy(pdata); + +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + fxgmac_cancel_loopback_work(pdata); +#endif + +#ifdef FXGMAC_ESD_CHECK_ENABLED + fxgmac_cancel_esd_work(pdata); +#endif + hw_ops->disable_tx(pdata); hw_ops->disable_rx(pdata); + + if (pdata->per_channel_irq) + hw_ops->disable_msix_interrupt(pdata); + else + hw_ops->disable_mgm_interrupt(pdata); + fxgmac_free_irqs(pdata); fxgmac_napi_disable(pdata, 1); @@ -1152,44 +1548,35 @@ void fxgmac_stop(struct fxgmac_pdata *pdata) if (!channel->tx_ring) continue; - txq = netdev_get_tx_queue(netdev, channel->queue_index); - netdev_tx_reset_queue(txq); - } - } - - switch 
(pdata->expansion.current_state) { - case CURRENT_STATE_SUSPEND: - hw_ops->led_under_sleep(pdata); - break; - case CURRENT_STATE_SHUTDOWN: - case CURRENT_STATE_RESTART: - hw_ops->led_under_shutdown(pdata); - break; - case CURRENT_STATE_CLOSE: - break; - default: - break; + txq = netdev_get_tx_queue(netdev, channel->queue_index); + netdev_tx_reset_queue(txq); + } } } void fxgmac_restart_dev(struct fxgmac_pdata *pdata) { + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; int ret; /* If not running, "restart" will happen on open */ - if (!netif_running(pdata->netdev)) + if (!netif_running(pdata->netdev) && + pdata->expansion.dev_state != FXGMAC_DEV_START) return; - pdata->expansion.current_state = CURRENT_STATE_RESTART; + fxgmac_lock(pdata); fxgmac_stop(pdata); + hw_ops->led_under_shutdown(pdata); fxgmac_free_tx_data(pdata); fxgmac_free_rx_data(pdata); ret = fxgmac_start(pdata); if (ret) { - printk("fxgmac_restart_dev: fxgmac_start failed.\n"); + DPRINTK("fxgmac_restart_dev: fxgmac_start failed.\n"); } + + fxgmac_unlock(pdata); } static void fxgmac_restart(struct work_struct *work) @@ -1221,23 +1608,23 @@ void fxgmac_net_powerup(struct fxgmac_pdata *pdata) ret = fxgmac_start(pdata); if (ret) { - printk("fxgmac_net_powerup: fxgmac_start error\n"); + DPRINTK("%s: fxgmac_start error\n", __func__); return; } /* must call it after fxgmac_start, because it will be enable in fxgmac_start */ hw_ops->disable_arp_offload(pdata); - if (netif_msg_drv(pdata)) { - DPRINTK("fxgmac_net_powerup callout, powerstate=%ld.\n", + if (netif_msg_drv(pdata)) + DPRINTK("%s callout, powerstate=%ld.\n", __func__, pdata->expansion.powerstate); - } } void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) { struct net_device *netdev = pdata->netdev; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 val; if (netif_msg_drv(pdata)) DPRINTK("fxgmac_net_powerdown callin here.\n"); @@ -1268,8 +1655,20 @@ void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) 
fxgmac_stop(pdata); /* some works are redundent in this call */ - /* must call it after software reset */ - hw_ops->pre_power_down(pdata, false); +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + val = pdata->expansion.lb_cable_flag; +#else + val = 0; +#endif + // must call it after software reset + hw_ops->pre_power_down(pdata, val); + + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) { + netdev_err( + pdata->netdev, + "fxgmac powerstate is %lu when config power to down.\n", + pdata->expansion.powerstate); + } /* set mac to lowpower mode and enable wol accordingly */ hw_ops->config_power_down(pdata, wol); @@ -1280,6 +1679,9 @@ void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol) fxgmac_free_tx_data(pdata); fxgmac_free_rx_data(pdata); + /* Close the phy when not enable any wakeup */ + if (!wol) + hw_ops->reset_phy(pdata); if (netif_msg_drv(pdata)) DPRINTK("fxgmac_net_powerdown callout, powerstate=%ld.\n", pdata->expansion.powerstate); @@ -1294,23 +1696,37 @@ static int fxgmac_open(struct net_device *netdev) if (netif_msg_drv(pdata)) DPRINTK("fxgmac_open callin\n"); + fxgmac_lock(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_OPEN; desc_ops = &pdata->desc_ops; - /* TODO: Initialize the phy */ - /* Calculate the Rx buffer size before allocating rings */ ret = fxgmac_calc_rx_buf_size(netdev, netdev->mtu); if (ret < 0) - return ret; + goto unlock; pdata->rx_buf_size = ret; /* Allocate the channels and rings */ - ret = desc_ops->alloc_channles_and_rings(pdata); + ret = desc_ops->alloc_channels_and_rings(pdata); if (ret) - return ret; + goto unlock; INIT_WORK(&pdata->expansion.restart_work, fxgmac_restart); +#ifdef FXGMAC_ESD_CHECK_ENABLED + INIT_DELAYED_WORK(&pdata->expansion.esd_work, fxgmac_esd_work); +#endif + +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + INIT_DELAYED_WORK(&pdata->expansion.loopback_work, + fxgmac_loopback_work); +#endif + +#ifdef FXGMAC_ASPM_ENABLED + INIT_DELAYED_WORK(&pdata->expansion.aspm_config_work, + 
fxgmac_aspm_config_work); +#endif + ret = fxgmac_start(pdata); if (ret) goto err_channels_and_rings; @@ -1318,54 +1734,174 @@ static int fxgmac_open(struct net_device *netdev) if (netif_msg_drv(pdata)) DPRINTK("fxgmac_open callout\n"); + fxgmac_unlock(pdata); + return 0; err_channels_and_rings: desc_ops->free_channels_and_rings(pdata); DPRINTK("fxgmac_open callout with channel alloc err\n"); +unlock: + fxgmac_unlock(pdata); return ret; } static int fxgmac_close(struct net_device *netdev) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - struct fxgmac_desc_ops *desc_ops; + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; if (netif_msg_drv(pdata)) DPRINTK("fxgmac_close callin\n"); - desc_ops = &pdata->desc_ops; - - pdata->expansion.current_state = - (pdata->expansion.current_state == CURRENT_STATE_SHUTDOWN) ? - pdata->expansion.current_state : - CURRENT_STATE_CLOSE; - + fxgmac_lock(pdata); /* Stop the device */ fxgmac_stop(pdata); + pdata->expansion.dev_state = FXGMAC_DEV_CLOSE; /* Free the channels and rings */ desc_ops->free_channels_and_rings(pdata); - pdata->hw_ops.reset_phy(pdata); + fxgmac_phy_update_link(netdev); + +#ifdef FXGMAC_ASPM_ENABLED + fxgmac_cancel_aspm_config_work(pdata); + pdata->expansion.aspm_work_active = false; + pdata->expansion.recover_from_aspm = false; + pdata->expansion.aspm_en = false; + pdata->hw_ops.pcie_init(pdata, true, true, true, false); +#endif if (netif_msg_drv(pdata)) DPRINTK("fxgmac_close callout\n"); + fxgmac_unlock(pdata); return 0; } -#if ((LINUX_VERSION_CODE > KERNEL_VERSION(4, 0, 0)) && \ - (LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0))) -static void fxgmac_tx_timeout(struct net_device *netdev) -#else +static void fxgmac_dump_state(struct fxgmac_pdata *pdata) +{ + struct fxgmac_channel *channel = pdata->channel_head; + struct fxgmac_stats *pstats = &pdata->stats; + struct fxgmac_ring *ring; + u32 i; + (void)pstats; + + ring = channel->tx_ring; + DPRINTK("Tx descriptor info:\n"); + DPRINTK("Tx cur = 0x%x\n", 
ring->cur); + DPRINTK("Tx dirty = 0x%x\n", ring->dirty); + DPRINTK("Tx dma_desc_head = %pad\n", &ring->dma_desc_head); + DPRINTK("Tx desc_data_head = %pad\n", &ring->desc_data_head); + + for (i = 0; i < pdata->channel_count; i++, channel++) { + ring = channel->rx_ring; + DPRINTK("Rx[%d] descriptor info:\n", i); + DPRINTK("Rx cur = 0x%x\n", ring->cur); + DPRINTK("Rx dirty = 0x%x\n", ring->dirty); + DPRINTK("Rx dma_desc_head = %pad\n", &ring->dma_desc_head); + DPRINTK("Rx desc_data_head = %pad\n", &ring->desc_data_head); + } + + DPRINTK("Device Registers:\n"); + DPRINTK("MAC_ISR = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MAC_ISR)); + DPRINTK("MAC_IER = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MAC_IER)); + DPRINTK("MMC_RISR = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MMC_RISR)); + DPRINTK("MMC_RIER = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MMC_RIER)); + DPRINTK("MMC_TISR = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MMC_TISR)); + DPRINTK("MMC_TIER = %08x\n", + readreg(pdata->pAdapter, pdata->mac_regs + MMC_TIER)); + + DPRINTK("EPHY_CTRL = %04x\n", + readreg(pdata->pAdapter, pdata->base_mem + MGMT_EPHY_CTRL)); + DPRINTK("MGMT_INT_CTRL0 = %04x\n", + readreg(pdata->pAdapter, pdata->base_mem + MGMT_INT_CTRL0)); + DPRINTK("LPW_CTRL = %04x\n", + readreg(pdata->pAdapter, pdata->base_mem + LPW_CTRL)); + DPRINTK("MSIX_TBL_MASK = %04x\n", + readreg(pdata->pAdapter, pdata->base_mem + MSIX_TBL_BASE_ADDR + + MSIX_TBL_MASK_OFFSET)); + + DPRINTK("Dump nonstick regs:\n"); + for (i = REG_PCIE_TRIGGER; i < MSI_PBA_REG; i += 4) + DPRINTK("[%d] = %04x\n", i / 4, + readreg(pdata->pAdapter, pdata->base_mem + i)); + + pdata->hw_ops.read_mmc_stats(pdata); + + DPRINTK("Dump TX counters:\n"); + DPRINTK("tx_packets %lld\n", pstats->txframecount_gb); + DPRINTK("tx_errors %lld\n", + pstats->txframecount_gb - pstats->txframecount_g); + DPRINTK("tx_multicastframes_errors %lld\n", + pstats->txmulticastframes_gb - 
pstats->txmulticastframes_g); + DPRINTK("tx_broadcastframes_errors %lld\n", + pstats->txbroadcastframes_gb - pstats->txbroadcastframes_g); + + DPRINTK("txunderflowerror %lld\n", pstats->txunderflowerror); + DPRINTK("txdeferredframes %lld\n", pstats->txdeferredframes); + DPRINTK("txlatecollisionframes %lld\n", pstats->txlatecollisionframes); + DPRINTK("txexcessivecollisionframes %lld\n", + pstats->txexcessivecollisionframes); + DPRINTK("txcarriererrorframes %lld\n", pstats->txcarriererrorframes); + DPRINTK("txexcessivedeferralerror %lld\n", + pstats->txexcessivedeferralerror); + + DPRINTK("txsinglecollision_g %lld\n", pstats->txsinglecollision_g); + DPRINTK("txmultiplecollision_g %lld\n", pstats->txmultiplecollision_g); + DPRINTK("txoversize_g %lld\n", pstats->txoversize_g); + + DPRINTK("Dump RX counters:\n"); + DPRINTK("rx_packets %lld\n", pstats->rxframecount_gb); + DPRINTK("rx_errors %lld\n", pstats->rxframecount_gb - + pstats->rxbroadcastframes_g - + pstats->rxmulticastframes_g - + pstats->rxunicastframes_g); + + DPRINTK("rx_crc_errors %lld\n", pstats->rxcrcerror); + DPRINTK("rxalignerror %lld\n", pstats->rxalignerror); + DPRINTK("rxrunterror %lld\n", pstats->rxrunterror); + DPRINTK("rxjabbererror %lld\n", pstats->rxjabbererror); + DPRINTK("rx_length_errors %lld\n", pstats->rxlengtherror); + DPRINTK("rxoutofrangetype %lld\n", pstats->rxoutofrangetype); + DPRINTK("rx_fifo_errors %lld\n", pstats->rxfifooverflow); + DPRINTK("rxwatchdogerror %lld\n", pstats->rxwatchdogerror); + DPRINTK("rxreceiveerrorframe %lld\n", pstats->rxreceiveerrorframe); + + DPRINTK("rxbroadcastframes_g %lld\n", pstats->rxbroadcastframes_g); + DPRINTK("rxmulticastframes_g %lld\n", pstats->rxmulticastframes_g); + DPRINTK("rxundersize_g %lld\n", pstats->rxundersize_g); + DPRINTK("rxoversize_g %lld\n", pstats->rxoversize_g); + DPRINTK("rxunicastframes_g %lld\n", pstats->rxunicastframes_g); + DPRINTK("rxcontrolframe_g %lld\n", pstats->rxcontrolframe_g); + + DPRINTK("Dump Extra counters:\n"); + 
DPRINTK("tx_tso_packets %lld\n", pstats->tx_tso_packets); + DPRINTK("rx_split_header_packets %lld\n", + pstats->rx_split_header_packets); + DPRINTK("tx_process_stopped %lld\n", pstats->tx_process_stopped); + DPRINTK("rx_process_stopped %lld\n", pstats->rx_process_stopped); + DPRINTK("tx_buffer_unavailable %lld\n", pstats->tx_buffer_unavailable); + DPRINTK("rx_buffer_unavailable %lld\n", pstats->rx_buffer_unavailable); + DPRINTK("fatal_bus_error %lld\n", pstats->fatal_bus_error); + DPRINTK("napi_poll_isr %lld\n", pstats->napi_poll_isr); + DPRINTK("napi_poll_txtimer %lld\n", pstats->napi_poll_txtimer); + DPRINTK("ephy_poll_timer_cnt %lld\n", pstats->ephy_poll_timer_cnt); + DPRINTK("mgmt_int_isr %lld\n", pstats->mgmt_int_isr); +} + static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) -#endif { struct fxgmac_pdata *pdata = netdev_priv(netdev); netdev_warn(netdev, "tx timeout, device restarting\n"); -#if FXGMAC_TX_HANG_TIMER_EN + fxgmac_dump_state(pdata); +#if FXGMAC_TX_HANG_TIMER_ENABLED if (!pdata->tx_hang_restart_queuing) schedule_work(&pdata->expansion.restart_work); #else @@ -1373,19 +1909,17 @@ static void fxgmac_tx_timeout(struct net_device *netdev, unsigned int unused) #endif } -static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) +static netdev_tx_t fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_pkt_info *tx_pkt_info; struct fxgmac_desc_ops *desc_ops; struct fxgmac_channel *channel; - struct fxgmac_hw_ops *hw_ops; struct netdev_queue *txq; struct fxgmac_ring *ring; int ret; desc_ops = &pdata->desc_ops; - hw_ops = &pdata->hw_ops; if (netif_msg_tx_done(pdata)) DPRINTK("xmit callin, skb->len=%d, q=%d\n", skb->len, @@ -1418,29 +1952,26 @@ static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) if (ret) { netif_err(pdata, tx_err, netdev, "error processing TSO packet\n"); - DPRINTK("dev_xmit, tx err for TSO\n"); 
dev_kfree_skb_any(skb); - return ret; + return NETDEV_TX_OK; } fxgmac_prep_vlan(skb, tx_pkt_info); if (!desc_ops->map_tx_skb(channel, skb)) { dev_kfree_skb_any(skb); - DPRINTK("xmit, map tx skb err\n"); + netif_err(pdata, tx_err, netdev, "xmit, map tx skb err\n"); return NETDEV_TX_OK; } /* Report on the actual number of bytes (to be) sent */ netdev_tx_sent_queue(txq, tx_pkt_info->tx_bytes); if (netif_msg_tx_done(pdata)) - DPRINTK("xmit, before hw_xmit, byte len=%d\n", + DPRINTK("xmit,before hw_xmit, byte len=%d\n", tx_pkt_info->tx_bytes); /* Configure required descriptor fields for transmission */ - hw_ops->dev_xmit(channel); -#if FXGMAC_DUMMY_TX_DEBUG - DPRINTK("tx hw_ops->dev_xmit ok\n"); -#endif + fxgmac_dev_xmit(channel); + if (netif_msg_pktdata(pdata)) fxgmac_dbg_pkt(netdev, skb, true); @@ -1450,13 +1981,8 @@ static int fxgmac_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)) static void fxgmac_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *s) -#else -static struct rtnl_link_stats64 *fxgmac_get_stats64(struct net_device *netdev, - struct rtnl_link_stats64 *s) -#endif { struct fxgmac_pdata *pdata = netdev_priv(netdev); struct fxgmac_stats *pstats = &pdata->stats; @@ -1467,24 +1993,29 @@ static struct rtnl_link_stats64 *fxgmac_get_stats64(struct net_device *netdev, #endif { pdata->hw_ops.read_mmc_stats(pdata); - } - s->rx_packets = pstats->rxframecount_gb; - s->rx_bytes = pstats->rxoctetcount_gb; - s->rx_errors = pstats->rxframecount_gb - pstats->rxbroadcastframes_g - - pstats->rxmulticastframes_g - pstats->rxunicastframes_g; - s->multicast = pstats->rxmulticastframes_g; - s->rx_length_errors = pstats->rxlengtherror; - s->rx_crc_errors = pstats->rxcrcerror; - s->rx_fifo_errors = pstats->rxfifooverflow; - - s->tx_packets = pstats->txframecount_gb; - s->tx_bytes = pstats->txoctetcount_gb; - s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g; - 
s->tx_dropped = netdev->stats.tx_dropped; - -#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) - return s; -#endif + + s->rx_packets = pstats->rxframecount_gb; + s->rx_bytes = pstats->rxoctetcount_gb; + s->rx_errors = pstats->rxcrcerror + pstats->rxalignerror + + pstats->rxrunterror + pstats->rxjabbererror + + pstats->rxlengtherror + pstats->rxwatchdogerror + + pstats->rxreceiveerrorframe; + s->multicast = pstats->rxmulticastframes_g; + s->rx_length_errors = pstats->rxlengtherror; + s->rx_crc_errors = pstats->rxcrcerror; + s->rx_fifo_errors = pstats->rxfifooverflow; + + s->tx_packets = pstats->txframecount_gb; + s->tx_bytes = pstats->txoctetcount_gb; + s->tx_errors = pstats->txunderflowerror + + pstats->txlatecollisionframes + + pstats->txexcessivecollisionframes + + pstats->txcarriererrorframes + + pstats->txexcessivedeferralerror; + s->tx_dropped = netdev->stats.tx_dropped; + } + + return; } static int fxgmac_set_mac_address(struct net_device *netdev, void *addr) @@ -1496,11 +2027,8 @@ static int fxgmac_set_mac_address(struct net_device *netdev, void *addr) if (!is_valid_ether_addr(saddr->sa_data)) return -EADDRNOTAVAIL; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)) eth_hw_addr_set(netdev, saddr->sa_data); -#else - memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len); -#endif + memcpy(pdata->mac_addr, saddr->sa_data, netdev->addr_len); hw_ops->set_mac_address(pdata, saddr->sa_data); @@ -1512,23 +2040,27 @@ static int fxgmac_set_mac_address(struct net_device *netdev, void *addr) return 0; } -/* cmd = [0x89F0, 0x89FF] */ +/* cmd = [0x89F0, 0x89FF] + * When using it, we must pay attention to the thread synchronization + * of this interface. Because it's an external call that isn't + * initiated by the OS. 
+ */ static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct file f; int ret = FXGMAC_SUCCESS; struct fxgmac_pdata *pdata = netdev_priv(netdev); - if (!netif_running(netdev)) + if (!netif_running(netdev) || + pdata->expansion.dev_state != FXGMAC_DEV_START) return -ENODEV; f.private_data = pdata; switch (cmd) { case FXGMAC_DEV_CMD: - ret = fxgmac_dbg_netdev_ops_ioctl( - &f, FXGMAC_IOCTL_DFS_COMMAND, - (unsigned long)(ifr->ifr_data)); + ret = fxgmac_netdev_ops_ioctl(&f, FXGMAC_IOCTL_DFS_COMMAND, + (unsigned long)(ifr->ifr_data)); break; default: ret = -EINVAL; @@ -1538,47 +2070,45 @@ static int fxgmac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return ret; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) static int fxgmac_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { return fxgmac_ioctl(dev, ifr, cmd); } -#endif static int fxgmac_change_mtu(struct net_device *netdev, int mtu) { struct fxgmac_pdata *pdata = netdev_priv(netdev); - int ret; + int ret, max_mtu; #ifdef FXGMAC_DEBUG int old_mtu = netdev->mtu; #endif - fxgmac_stop(pdata); - fxgmac_free_tx_data(pdata); + max_mtu = FXGMAC_JUMBO_PACKET_MTU - ETH_HLEN; + if (mtu > max_mtu) { + netdev_alert(netdev, "MTU exceeds maximum supported value\n"); + return -EINVAL; + } /* We must unmap rx desc's dma before we change rx_buf_size. */ /* Becaues the size of the unmapped DMA is set according to rx_buf_size */ - fxgmac_free_rx_data(pdata); - - pdata->jumbo = mtu > ETH_DATA_LEN ? 1 : 0; ret = fxgmac_calc_rx_buf_size(netdev, mtu); if (ret < 0) return ret; pdata->rx_buf_size = ret; + + pdata->jumbo = mtu > ETH_DATA_LEN ? 1 : 0; netdev->mtu = mtu; - if (netif_running(netdev)) - fxgmac_start(pdata); + if (netif_running(netdev)) { + fxgmac_restart_dev(pdata); + netdev_update_features(netdev); + } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) - DPRINTK("fxgmac, set MTU from %d to %d. 
min, max=(%d,%d)\n", old_mtu, + DPRINTK("fxgmac,set MTU from %d to %d. min, max=(%d,%d)\n", old_mtu, netdev->mtu, netdev->min_mtu, netdev->max_mtu); -#else - DPRINTK("fxgmac, set MTU from %d to %d.\n", old_mtu, netdev->mtu); -#endif return 0; } @@ -1637,6 +2167,23 @@ static void fxgmac_poll_controller(struct net_device *netdev) } #endif /* CONFIG_NET_POLL_CONTROLLER */ +static netdev_features_t fxgmac_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + u32 fifo_size; + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + + fifo_size = hw_ops->calculate_max_checksum_size(pdata); + + if (netdev->mtu > fifo_size) { + features &= ~NETIF_F_IP_CSUM; + features &= ~NETIF_F_IPV6_CSUM; + } + + return features; +} + static int fxgmac_set_features(struct net_device *netdev, netdev_features_t features) { @@ -1653,11 +2200,11 @@ static int fxgmac_set_features(struct net_device *netdev, tso = pdata->expansion.netdev_features & (NETIF_F_TSO | NETIF_F_TSO6); if ((features & (NETIF_F_TSO | NETIF_F_TSO6)) && !tso) { - printk("enable tso.\n"); + DPRINTK("enable tso.\n"); pdata->hw_feat.tso = 1; hw_ops->config_tso(pdata); } else if (!(features & (NETIF_F_TSO | NETIF_F_TSO6)) && tso) { - printk("disable tso.\n"); + DPRINTK("disable tso.\n"); pdata->hw_feat.tso = 0; hw_ops->config_tso(pdata); } @@ -1708,15 +2255,14 @@ static const struct net_device_ops fxgmac_netdev_ops = { .ndo_set_mac_address = fxgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = fxgmac_ioctl, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)) .ndo_siocdevprivate = fxgmac_siocdevprivate, -#endif .ndo_vlan_rx_add_vid = fxgmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = fxgmac_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fxgmac_poll_controller, #endif .ndo_set_features = fxgmac_set_features, + .ndo_fix_features = fxgmac_fix_features, .ndo_set_rx_mode = fxgmac_set_rx_mode, }; @@ -1730,11 
+2276,18 @@ static void fxgmac_rx_refresh(struct fxgmac_channel *channel) struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_ring *ring = channel->rx_ring; struct fxgmac_desc_data *desc_data; - struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + struct fxgmac_desc_ops *desc_ops = &pdata->desc_ops; while (ring->dirty != ring->cur) { desc_data = FXGMAC_GET_DESC_DATA(ring, ring->dirty); - hw_ops->rx_desc_reset(pdata, desc_data, ring->dirty); +#ifdef FXGMAC_ZERO_COPY + /* Reset desc_data values */ + desc_ops->unmap_desc_data(pdata, desc_data); + + if (desc_ops->map_rx_buffer(pdata, ring, desc_data)) + break; +#endif + desc_ops->rx_desc_reset(pdata, desc_data, ring->dirty); ring->dirty = FXGMAC_GET_ENTRY(ring->dirty, ring->dma_desc_count); } @@ -1757,6 +2310,21 @@ static struct sk_buff *fxgmac_create_skb(struct fxgmac_pdata *pdata, unsigned int len) { struct sk_buff *skb; +#ifndef FXGMAC_NOT_USE_PAGE_MAPPING + unsigned int copy_len; + u8 *packet; +#endif + +#ifdef FXGMAC_NOT_USE_PAGE_MAPPING +#ifdef FXGMAC_ZERO_COPY + dma_sync_single_for_cpu(pdata->dev, desc_data->rx.buf.dma_base, len, + DMA_FROM_DEVICE); + skb = desc_data->skb; + desc_data->skb = NULL; + skb_put(skb, len); + dma_sync_single_for_device(pdata->dev, desc_data->rx.buf.dma_base, len, + DMA_FROM_DEVICE); +#else skb = __netdev_alloc_skb_ip_align(pdata->netdev, len, GFP_ATOMIC); if (!skb) { netdev_err(pdata->netdev, "%s: Rx init fails; skb is NULL\n", @@ -1770,8 +2338,29 @@ static struct sk_buff *fxgmac_create_skb(struct fxgmac_pdata *pdata, skb_put(skb, len); dma_sync_single_for_device(pdata->dev, desc_data->rx.buf.dma_base, len, DMA_FROM_DEVICE); +#endif + return skb; +#else + skb = napi_alloc_skb(napi, desc_data->rx.hdr.dma_len); + if (!skb) + return NULL; + /* Start with the header buffer which may contain just the header + * or the header plus data + */ + dma_sync_single_range_for_cpu(pdata->dev, desc_data->rx.hdr.dma_base, + desc_data->rx.hdr.dma_off, + desc_data->rx.hdr.dma_len, + DMA_FROM_DEVICE); 
+ + packet = page_address(desc_data->rx.hdr.pa.pages) + + desc_data->rx.hdr.pa.pages_offset; + copy_len = len; + copy_len = min(desc_data->rx.hdr.dma_len, copy_len); + skb_copy_to_linear_data(skb, packet, copy_len); + skb_put(skb, copy_len); return skb; +#endif } static int fxgmac_tx_poll(struct fxgmac_channel *channel) @@ -1791,8 +2380,11 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) static int fxgmac_restart_need; static u32 change_cnt; static u32 reg_cur_pre = 0xffffffff; + (void)reg_cur_pre; + (void)change_cnt; + (void)fxgmac_restart_need; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED static u32 reg_cur; #endif @@ -1878,7 +2470,7 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) } } #endif -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED if ((!pdata->tx_hang_restart_queuing) && (!channel->expansion.tx_hang_timer_active)) { reg_cur = ring->dirty; @@ -1938,7 +2530,7 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) /* Free the SKB and reset the descriptor for re-use */ desc_ops->unmap_desc_data(pdata, desc_data); - hw_ops->tx_desc_reset(desc_data); + desc_ops->tx_desc_reset(desc_data); processed++; ring->dirty = @@ -1950,6 +2542,8 @@ static int fxgmac_tx_poll(struct fxgmac_channel *channel) netdev_tx_completed_queue(txq, tx_packets, tx_bytes); + /* memory barrier */ + smp_wmb(); if ((ring->tx.queue_stopped == 1) && (fxgmac_tx_avail_desc(ring) > FXGMAC_TX_DESC_MIN_FREE)) { ring->tx.queue_stopped = 0; @@ -1968,18 +2562,14 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_ring *ring = channel->rx_ring; struct net_device *netdev = pdata->netdev; - unsigned int len; + unsigned int len, max_len; unsigned int context_next, context; struct fxgmac_desc_data *desc_data; struct fxgmac_pkt_info *pkt_info; unsigned int incomplete; - struct fxgmac_hw_ops *hw_ops; struct napi_struct *napi; struct sk_buff *skb; int packet_count = 
0; - u32 ipce, iphe; - - hw_ops = &pdata->hw_ops; /* Nothing to do if there isn't a Rx ring for this channel */ if (!ring) @@ -1989,7 +2579,7 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) context_next = 0; napi = (pdata->per_channel_irq) ? &channel->expansion.napi_rx : - &pdata->expansion.napi; + &pdata->expansion.napi; desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); pkt_info = &ring->pkt_info; @@ -2005,7 +2595,7 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) if (fxgmac_rx_dirty_desc(ring) > FXGMAC_RX_DESC_MAX_DIRTY) fxgmac_rx_refresh(channel); - if (hw_ops->dev_read(channel)) + if (fxgmac_dev_read(channel)) break; ring->cur = FXGMAC_GET_ENTRY(ring->cur, ring->dma_desc_count); @@ -2029,27 +2619,20 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) netif_err(pdata, rx_err, netdev, "error in received packet\n"); dev_kfree_skb(skb); + pdata->netdev->stats.rx_dropped++; goto next_packet; } if (!context) { len = desc_data->rx.len; - if (len > pdata->rx_buf_size) { - if (net_ratelimit()) - netdev_err( - pdata->netdev, - "len %d larger than size (%d)\n", - len, pdata->rx_buf_size); - pdata->netdev->stats.rx_dropped++; - goto next_packet; - } if (len == 0) { if (net_ratelimit()) - netdev_err( - pdata->netdev, + netif_err( + pdata, rx_err, netdev, "A packet of length 0 was received\n"); pdata->netdev->stats.rx_length_errors++; + pdata->netdev->stats.rx_dropped++; goto next_packet; } @@ -2058,38 +2641,42 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) len); if (unlikely(!skb)) { if (net_ratelimit()) - netdev_warn( - pdata->netdev, + netif_err( + pdata, rx_err, netdev, "create skb failed\n"); + pdata->netdev->stats.rx_dropped++; goto next_packet; } } + + max_len = netdev->mtu + ETH_HLEN; + if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + skb->protocol == htons(ETH_P_8021Q)) + max_len += VLAN_HLEN; + if (len > max_len) { + if (net_ratelimit()) + netif_err( + pdata, 
rx_err, netdev, + "len %d larger than max size %d\n", + len, max_len); + pdata->netdev->stats.rx_length_errors++; + pdata->netdev->stats.rx_dropped++; + dev_kfree_skb(skb); + goto next_packet; + } } - if (!skb) + if (!skb) { + pdata->netdev->stats.rx_dropped++; goto next_packet; + } if (netif_msg_pktdata(pdata)) fxgmac_print_pkt(netdev, skb, false); skb_checksum_none_assert(skb); - if (netdev->features & NETIF_F_RXCSUM) { - ipce = FXGMAC_GET_REG_BITS_LE( - desc_data->dma_desc->desc1, - RX_NORMAL_DESC1_WB_IPCE_POS, - RX_NORMAL_DESC1_WB_IPCE_LEN); - iphe = FXGMAC_GET_REG_BITS_LE( - desc_data->dma_desc->desc1, - RX_NORMAL_DESC1_WB_IPHE_POS, - RX_NORMAL_DESC1_WB_IPHE_LEN); - /* if csum error, let the stack verify checksum errors.otherwise don't verify */ - if (!ipce && !iphe && - FXGMAC_GET_REG_BITS( - pkt_info->attributes, - RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, - RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN)) - skb->ip_summed = CHECKSUM_UNNECESSARY; - } + if (netdev->features & NETIF_F_RXCSUM) + skb->ip_summed = CHECKSUM_UNNECESSARY; if (FXGMAC_GET_REG_BITS(pkt_info->attributes, RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, @@ -2219,6 +2806,7 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) } } } + napi_gro_receive(napi, skb); next_packet: @@ -2228,8 +2816,6 @@ static int fxgmac_rx_poll(struct fxgmac_channel *channel, int budget) pdata->netdev->stats.rx_bytes += len; } - fxgmac_rx_refresh(channel); - return packet_count; } @@ -2237,19 +2823,13 @@ static int fxgmac_one_poll_tx(struct napi_struct *napi, int budget) { struct fxgmac_channel *channel = container_of(napi, struct fxgmac_channel, expansion.napi_tx); - int ret = 0; struct fxgmac_pdata *pdata = channel->pdata; struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - ret = fxgmac_tx_poll(channel); -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + fxgmac_tx_poll(channel); if (napi_complete_done(napi, 0)) { hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0); } -#else - napi_complete(napi); - 
hw_ops->enable_msix_one_interrupt(pdata, MSI_ID_TXQ0); -#endif return 0; } @@ -2263,7 +2843,6 @@ static int fxgmac_one_poll_rx(struct napi_struct *napi, int budget) processed = fxgmac_rx_poll(channel, budget); if (processed < budget) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) /* if there no interrupt occured when this interrupt running, struct napi's state is NAPIF_STATE_SCHED, * napi_complete_done return true and we can enable irq, it will not cause unbalanced iqr issure. * if there more interrupt occured when this interrupt running, struct napi's state is NAPIF_STATE_SCHED | NAPIF_STATE_MISSED @@ -2274,10 +2853,6 @@ static int fxgmac_one_poll_rx(struct napi_struct *napi, int budget) hw_ops->enable_msix_one_interrupt(pdata, channel->queue_index); } -#else - napi_complete(napi); - hw_ops->enable_msix_one_interrupt(pdata, channel->queue_index); -#endif } return processed; @@ -2307,13 +2882,6 @@ static int fxgmac_all_poll(struct napi_struct *napi, int budget) } } while (false); - /* for phy, we needn't to process any packet, so processed will be 0 */ - if (pdata->expansion.mgm_intctrl_val & MGMT_INT_CTRL0_INT_STATUS_PHY) { - fxgmac_phy_process(pdata); - pdata->expansion.mgm_intctrl_val &= - ~MGMT_INT_CTRL0_INT_STATUS_PHY; - } - /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ @@ -2327,3 +2895,468 @@ static int fxgmac_all_poll(struct napi_struct *napi, int budget) return processed; } + +void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, + struct fxgmac_ring *ring) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_desc_data *desc_data; + + /* Make sure everything is written before the register write */ + wmb(); + + /* Issue a poll command to Tx DMA by writing address + * of next immediate free descriptor + */ + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + + writereg(pdata->pAdapter, lower_32_bits(desc_data->dma_desc_addr), + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)); + + if 
(netif_msg_tx_done(pdata)) { + DPRINTK("tx_start_xmit: dump before wr reg, dma base=0x%016llx,reg=0x%08x, tx timer usecs=%u,tx_timer_active=%u\n", + desc_data->dma_desc_addr, + readreg(pdata->pAdapter, + FXGMAC_DMA_REG(channel, DMA_CH_TDTR_LO)), + pdata->tx_usecs, channel->tx_timer_active); + } + + ring->tx.xmit_more = 0; +} + +void fxgmac_dev_xmit(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->tx_ring; + unsigned int tso_context, vlan_context; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + unsigned int csum, tso, vlan; + int start_index = ring->cur; + int cur_index = ring->cur; + int i; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit callin, desc cur=%d\n", cur_index); + + pkt_info = &ring->pkt_info; + csum = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_POS, + TX_PACKET_ATTRIBUTES_CSUM_ENABLE_LEN); + tso = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_POS, + TX_PACKET_ATTRIBUTES_TSO_ENABLE_LEN); + vlan = FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + TX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN); + + if (tso && pkt_info->mss != ring->tx.cur_mss) + tso_context = 1; + else + tso_context = 0; + + if ((tso_context) && (netif_msg_tx_done(pdata))) + DPRINTK("%s,tso_%s tso=0x%x,pkt_mss=%d,cur_mss=%d\n", __func__, + (pkt_info->mss) ? 
"start" : "stop", tso, pkt_info->mss, + ring->tx.cur_mss); + + if (vlan && pkt_info->vlan_ctag != ring->tx.cur_vlan_ctag) + vlan_context = 1; + else + vlan_context = 0; + + if (vlan && (netif_msg_tx_done(pdata))) + DPRINTK("%s:pkt vlan=%d, ring vlan=%d, vlan_context=%d\n", + __func__, pkt_info->vlan_ctag, ring->tx.cur_vlan_ctag, + vlan_context); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Create a context descriptor if this is a TSO pkt_info */ + if (tso_context || vlan_context) { + if (tso_context) { + if (netif_msg_tx_done(pdata)) + DPRINTK("%s construct tso context descriptor, mss=%u\n", + __func__, pkt_info->mss); + + /* Set the MSS size */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_CONTEXT_DESC2_MSS_POS, + TX_CONTEXT_DESC2_MSS_LEN, pkt_info->mss); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Indicate this descriptor contains the MSS */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_TCMSSV_POS, + TX_CONTEXT_DESC3_TCMSSV_LEN, 1); + + ring->tx.cur_mss = pkt_info->mss; + } + + if (vlan_context) { + netif_dbg(pdata, tx_queued, pdata->netdev, + "VLAN context descriptor, ctag=%u\n", + pkt_info->vlan_ctag); + + /* Mark it as a CONTEXT descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_CTXT_POS, + TX_CONTEXT_DESC3_CTXT_LEN, 1); + + /* Set the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VT_POS, + TX_CONTEXT_DESC3_VT_LEN, pkt_info->vlan_ctag); + + /* Indicate this descriptor contains the VLAN tag */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_CONTEXT_DESC3_VLTV_POS, + TX_CONTEXT_DESC3_VLTV_LEN, 1); + + ring->tx.cur_vlan_ctag = pkt_info->vlan_ctag; + } + + //cur_index++; + cur_index = FXGMAC_GET_ENTRY(cur_index, 
ring->dma_desc_count); + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + } + + /* Update buffer address (for TSO this is the header) */ + dma_desc->desc0 = cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, + desc_data->skb_dma_len); + + /* VLAN tag insertion check */ + if (vlan) { + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_VTIR_POS, + TX_NORMAL_DESC2_VTIR_LEN, TX_NORMAL_DESC2_VLAN_INSERT); + pdata->stats.tx_vlan_packets++; + } + + /* Timestamp enablement check */ + if (FXGMAC_GET_REG_BITS(pkt_info->attributes, + TX_PACKET_ATTRIBUTES_PTP_POS, + TX_PACKET_ATTRIBUTES_PTP_LEN)) + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_TTSE_POS, + TX_NORMAL_DESC2_TTSE_LEN, 1); + + /* Mark it as First Descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FD_POS, + TX_NORMAL_DESC3_FD_LEN, 1); + + /* Mark it as a NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Set OWN bit if not the first descriptor */ + if (cur_index != start_index) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (tso) { + /* Enable TSO */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TSE_POS, + TX_NORMAL_DESC3_TSE_LEN, 1); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPPL_POS, + TX_NORMAL_DESC3_TCPPL_LEN, pkt_info->tcp_payload_len); + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_TCPHDRLEN_POS, + TX_NORMAL_DESC3_TCPHDRLEN_LEN, + pkt_info->tcp_header_len / 4); + + 
pdata->stats.tx_tso_packets++; + } else { + /* Enable CRC and Pad Insertion */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CPC_POS, + TX_NORMAL_DESC3_CPC_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + + /* Set the total length to be transmitted */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_FL_POS, + TX_NORMAL_DESC3_FL_LEN, + pkt_info->length); + } + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit before more descs, desc cur=%d, start=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, start_index, dma_desc->desc0, + dma_desc->desc1, dma_desc->desc2, dma_desc->desc3); + + if (start_index <= cur_index) { + i = cur_index - start_index + 1; + } else { + i = ring->dma_desc_count - start_index + cur_index; + if (tso_context || vlan_context) + i += 1; + } + + for (; i < pkt_info->desc_count; i++) { + cur_index = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + desc_data = FXGMAC_GET_DESC_DATA(ring, cur_index); + dma_desc = desc_data->dma_desc; + + /* Update buffer address */ + dma_desc->desc0 = + cpu_to_le32(lower_32_bits(desc_data->skb_dma)); + dma_desc->desc1 = + cpu_to_le32(upper_32_bits(desc_data->skb_dma)); + + /* Update the buffer length */ + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc2, TX_NORMAL_DESC2_HL_B1L_POS, + TX_NORMAL_DESC2_HL_B1L_LEN, desc_data->skb_dma_len); + + /* Set OWN bit */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + /* Mark it as NORMAL descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CTXT_POS, + TX_NORMAL_DESC3_CTXT_LEN, 0); + + /* Enable HW CSUM */ + if (csum) + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE( + dma_desc->desc3, TX_NORMAL_DESC3_CIC_POS, + TX_NORMAL_DESC3_CIC_LEN, 0x3); + } + + /* Set LAST bit for the last 
descriptor */ + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_LD_POS, + TX_NORMAL_DESC3_LD_LEN, 1); + + dma_desc->desc2 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc2, + TX_NORMAL_DESC2_IC_POS, + TX_NORMAL_DESC2_IC_LEN, 1); + + /* Save the Tx info to report back during cleanup */ + desc_data->tx.packets = pkt_info->tx_packets; + desc_data->tx.bytes = pkt_info->tx_bytes; + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit last descs, desc cur=%d, desc=%#x,%#x,%#x,%#x\n", + cur_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + /* In case the Tx DMA engine is running, make sure everything + * is written to the descriptor(s) before setting the OWN bit + * for the first descriptor + */ + dma_wmb(); + + /* Set OWN bit for the first descriptor */ + desc_data = FXGMAC_GET_DESC_DATA(ring, start_index); + dma_desc = desc_data->dma_desc; + dma_desc->desc3 = FXGMAC_SET_REG_BITS_LE(dma_desc->desc3, + TX_NORMAL_DESC3_OWN_POS, + TX_NORMAL_DESC3_OWN_LEN, 1); + + if (netif_msg_tx_done(pdata)) + DPRINTK("dev_xmit first descs, start=%d, desc=%#x,%#x,%#x,%#x\n", + start_index, dma_desc->desc0, dma_desc->desc1, + dma_desc->desc2, dma_desc->desc3); + + if (netif_msg_tx_queued(pdata)) + fxgmac_dump_tx_desc(pdata, ring, start_index, + pkt_info->desc_count, 1); + + /* Make sure ownership is written to the descriptor */ + smp_wmb(); + + //ring->cur = cur_index + 1; + ring->cur = FXGMAC_GET_ENTRY(cur_index, ring->dma_desc_count); + + fxgmac_tx_start_xmit(channel, ring); + + /* yzhang for reduce debug output */ + if (netif_msg_tx_done(pdata)) { + DPRINTK("dev_xmit callout %s: descriptors %u to %u written\n", + channel->name, start_index & (ring->dma_desc_count - 1), + (ring->cur - 1) & (ring->dma_desc_count - 1)); + } +} + +extern void fxgmac_diag_get_rx_info(struct fxgmac_channel *channel); + +static void fxgmac_get_rx_tstamp(struct fxgmac_pkt_info *pkt_info, + struct fxgmac_dma_desc *dma_desc) +{ + u64 nsec; + + nsec = 
le32_to_cpu(dma_desc->desc1); + nsec <<= 32; + nsec |= le32_to_cpu(dma_desc->desc0); + if (nsec != 0xffffffffffffffffULL) { + pkt_info->rx_tstamp = nsec; + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_POS, + RX_PACKET_ATTRIBUTES_RX_TSTAMP_LEN, 1); + } +} + +static int fxgmac_dev_read(struct fxgmac_channel *channel) +{ + struct fxgmac_pdata *pdata = channel->pdata; + struct fxgmac_ring *ring = channel->rx_ring; + struct net_device *netdev = pdata->netdev; + struct fxgmac_desc_data *desc_data; + struct fxgmac_dma_desc *dma_desc; + struct fxgmac_pkt_info *pkt_info; + u32 ipce, iphe, rxparser; + unsigned int err, etlt; + + static unsigned int cnt_incomplete; + + desc_data = FXGMAC_GET_DESC_DATA(ring, ring->cur); + dma_desc = desc_data->dma_desc; + pkt_info = &ring->pkt_info; + + /* Check for data availability */ + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_OWN_POS, + RX_NORMAL_DESC3_OWN_LEN)) + return 1; + + /* Make sure descriptor fields are read after reading the OWN bit */ + dma_rmb(); + + if (netif_msg_rx_status(pdata)) + fxgmac_dump_rx_desc(pdata, ring, ring->cur); + + if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_CTXT_POS, + RX_NORMAL_DESC3_CTXT_LEN)) { + /* Timestamp Context Descriptor */ + fxgmac_get_rx_tstamp(pkt_info, dma_desc); + + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 1); + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_LEN, 0); + if (netif_msg_rx_status(pdata)) + DPRINTK("dev_read context desc,ch=%s\n", channel->name); + return 0; + } + + /* Normal Descriptor, be sure Context Descriptor bit is off */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_CONTEXT_POS, + RX_PACKET_ATTRIBUTES_CONTEXT_LEN, 0); + + /* Get the header length */ + 
if (FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_FD_POS, + RX_NORMAL_DESC3_FD_LEN)) { + desc_data->rx.hdr_len = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc2, RX_NORMAL_DESC2_HL_POS, + RX_NORMAL_DESC2_HL_LEN); + if (desc_data->rx.hdr_len) + pdata->stats.rx_split_header_packets++; + } + + /* Get the pkt_info length */ + desc_data->rx.len = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, + RX_NORMAL_DESC3_PL_POS, + RX_NORMAL_DESC3_PL_LEN); + + if (!FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_LD_POS, + RX_NORMAL_DESC3_LD_LEN)) { + /* Not all the data has been transferred for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 1); + cnt_incomplete++; + if (cnt_incomplete < 2 && netif_msg_rx_status(pdata)) + DPRINTK("dev_read NOT last desc,pkt incomplete yet,%u\n", + cnt_incomplete); + + return 0; + } + if ((cnt_incomplete) && netif_msg_rx_status(pdata)) + DPRINTK("dev_read rx back to normal and incomplete cnt=%u\n", + cnt_incomplete); + cnt_incomplete = 0; //when back to normal, reset cnt + + /* This is the last of the data for this pkt_info */ + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, RX_PACKET_ATTRIBUTES_INCOMPLETE_POS, + RX_PACKET_ATTRIBUTES_INCOMPLETE_LEN, 0); + + /* Set checksum done indicator as appropriate */ + if (netdev->features & NETIF_F_RXCSUM) { + ipce = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPCE_POS, + RX_NORMAL_DESC1_WB_IPCE_LEN); + iphe = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc1, + RX_NORMAL_DESC1_WB_IPHE_POS, + RX_NORMAL_DESC1_WB_IPHE_LEN); + if (!ipce && !iphe) + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_CSUM_DONE_POS, + RX_PACKET_ATTRIBUTES_CSUM_DONE_LEN, 1); + else + return 0; + } + + /* Check for errors (only valid in last descriptor) */ + err = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ES_POS, 
+ RX_NORMAL_DESC3_ES_LEN); + /* b111: Incomplete parsing due to ECC error */ + rxparser = FXGMAC_GET_REG_BITS_LE(desc_data->dma_desc->desc2, + RX_NORMAL_DESC2_WB_RAPARSER_POS, + RX_NORMAL_DESC2_WB_RAPARSER_LEN); + if (err || rxparser == 0x7) { + pkt_info->errors = FXGMAC_SET_REG_BITS( + pkt_info->errors, RX_PACKET_ERRORS_FRAME_POS, + RX_PACKET_ERRORS_FRAME_LEN, 1); + return 0; + } + + etlt = FXGMAC_GET_REG_BITS_LE(dma_desc->desc3, RX_NORMAL_DESC3_ETLT_POS, + RX_NORMAL_DESC3_ETLT_LEN); + if (etlt == 0x4 && (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) { + pkt_info->attributes = FXGMAC_SET_REG_BITS( + pkt_info->attributes, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_POS, + RX_PACKET_ATTRIBUTES_VLAN_CTAG_LEN, 1); + pkt_info->vlan_ctag = FXGMAC_GET_REG_BITS_LE( + dma_desc->desc0, RX_NORMAL_DESC0_OVT_POS, + RX_NORMAL_DESC0_OVT_LEN); + netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n", + pkt_info->vlan_ctag); + } + + return 0; +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c index f6f8f4f6a5e9b..598fb85a48a83 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-pci.c @@ -1,18 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. 
*/ -#include -#include -#include - -/* for file operation */ -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -#define FXGMAC_DBG 0 - /* declarations */ static void fxgmac_shutdown(struct pci_dev *pdev); @@ -56,27 +47,25 @@ static int fxgmac_probe(struct pci_dev *pcidev, const struct pci_device_id *id) static void fxgmac_remove(struct pci_dev *pcidev) { - struct net_device *netdev = dev_get_drvdata(&pcidev->dev); - struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct net_device *netdev; + struct fxgmac_pdata *pdata; + (void)pdata; -#ifdef CONFIG_PCI_MSI - u32 msix = FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, - FXGMAC_FLAG_MSIX_POS, - FXGMAC_FLAG_MSIX_LEN); -#endif + netdev = dev_get_drvdata(&pcidev->dev); + pdata = netdev_priv(netdev); fxgmac_drv_remove(&pcidev->dev); + #ifdef CONFIG_PCI_MSI - if (msix) { + if (FXGMAC_GET_REG_BITS(pdata->expansion.int_flags, + FXGMAC_FLAG_MSIX_POS, FXGMAC_FLAG_MSIX_LEN)) { pci_disable_msix(pcidev); kfree(pdata->expansion.msix_entries); pdata->expansion.msix_entries = NULL; } #endif -#ifdef HAVE_FXGMAC_DEBUG_FS - fxgmac_dbg_exit(pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ + DPRINTK("%s has been removed\n", netdev->name); } /* for Power management, 20210628 */ @@ -123,14 +112,16 @@ static int __fxgmac_shutdown(struct pci_dev *pdev, bool *enable_wake) static void fxgmac_shutdown(struct pci_dev *pdev) { - bool wake; struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + bool wake; DPRINTK("fxpm, fxgmac_shutdown callin\n"); - pdata->expansion.current_state = CURRENT_STATE_SHUTDOWN; + fxgmac_lock(pdata); __fxgmac_shutdown(pdev, &wake); + hw_ops->led_under_shutdown(pdata); if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, wake); @@ -138,6 +129,7 @@ static void fxgmac_shutdown(struct pci_dev *pdev) } DPRINTK("fxpm, fxgmac_shutdown callout, system power off=%d\n", (system_state == SYSTEM_POWER_OFF) ? 
1 : 0); + fxgmac_unlock(pdata); } #ifdef CONFIG_PM @@ -145,22 +137,32 @@ static void fxgmac_shutdown(struct pci_dev *pdev) static int fxgmac_suspend(struct pci_dev *pdev, pm_message_t __always_unused state) { - int retval; - bool wake; struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + int retval = 0; + bool wake; DPRINTK("fxpm, fxgmac_suspend callin\n"); - pdata->expansion.current_state = CURRENT_STATE_SUSPEND; + fxgmac_lock(pdata); + if (pdata->expansion.dev_state != FXGMAC_DEV_START) + goto unlock; if (netif_running(netdev)) { +#ifdef FXGMAC_ASPM_ENABLED + fxgmac_cancel_aspm_config_work(pdata); + pdata->expansion.aspm_en = false; + pdata->expansion.aspm_work_active = false; + pdata->expansion.recover_from_aspm = false; +#endif retval = __fxgmac_shutdown(pdev, &wake); if (retval) - return retval; + goto unlock; } else { wake = !!(pdata->expansion.wol); } + hw_ops->led_under_sleep(pdata); if (wake) { pci_prepare_to_sleep(pdev); @@ -169,24 +171,29 @@ static int fxgmac_suspend(struct pci_dev *pdev, pci_set_power_state(pdev, PCI_D3hot); } + pdata->expansion.recover_phy_state = 1; + pdata->expansion.dev_state = FXGMAC_DEV_SUSPEND; DPRINTK("fxpm, fxgmac_suspend callout to %s\n", wake ? 
"sleep" : "D3hot"); - return 0; +unlock: + fxgmac_unlock(pdata); + return retval; } static int fxgmac_resume(struct pci_dev *pdev) { - struct fxgmac_pdata *pdata; - struct net_device *netdev; - u32 err; + struct net_device *netdev = dev_get_drvdata(&pdev->dev); + struct fxgmac_pdata *pdata = netdev_priv(netdev); + u32 err = 0; DPRINTK("fxpm, fxgmac_resume callin\n"); - netdev = dev_get_drvdata(&pdev->dev); - pdata = netdev_priv(netdev); + fxgmac_lock(pdata); + if (pdata->expansion.dev_state != FXGMAC_DEV_SUSPEND) + goto unlock; - pdata->expansion.current_state = CURRENT_STATE_RESUME; + pdata->expansion.dev_state = FXGMAC_DEV_RESUME; pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); @@ -200,7 +207,7 @@ static int fxgmac_resume(struct pci_dev *pdev) if (err) { dev_err(pdata->dev, "fxgmac_resume, failed to enable PCI device from suspend\n"); - return err; + goto unlock; } smp_mb__before_atomic(); __clear_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate); @@ -219,14 +226,17 @@ static int fxgmac_resume(struct pci_dev *pdev) rtnl_unlock(); DPRINTK("fxpm, fxgmac_resume callout\n"); - +unlock: + fxgmac_unlock(pdata); return err; } #endif -static const struct pci_device_id fxgmac_pci_tbl[] = { { PCI_DEVICE(0x1f0a, - 0x6801) }, - { 0 } }; +static const struct pci_device_id fxgmac_pci_tbl[] = { + { PCI_DEVICE(0x1f0a, 0x6801) }, + { 0 } +}; + MODULE_DEVICE_TABLE(pci, fxgmac_pci_tbl); static struct pci_driver fxgmac_pci_driver = { @@ -246,5 +256,5 @@ module_pci_driver(fxgmac_pci_driver); MODULE_DESCRIPTION(FXGMAC_DRV_DESC); MODULE_VERSION(FXGMAC_DRV_VERSION); -MODULE_AUTHOR("Frank "); -MODULE_LICENSE("Dual BSD/GPL"); +MODULE_AUTHOR("Motorcomm Electronic Tech. 
Co., Ltd."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c index 88066a110f410..b4ff3abc0dca2 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-phy.c @@ -1,19 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2021 Motorcomm Corporation. */ -#include -#include - #include "fuxi-gmac.h" #include "fuxi-gmac-reg.h" -void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) +/* When in forced mode, set the speed, duplex, and auto-negotiation of the PHY + * all at once to avoid the problems caused by individual settings + * on some machines + */ +int fxgmac_phy_force_mode(struct fxgmac_pdata *pdata) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval = 0; unsigned int high_bit = 0, low_bit = 0; + int ret = 0; - switch (speed) { + switch (pdata->phy_speed) { case SPEED_1000: high_bit = 1, low_bit = 0; break; @@ -27,149 +29,90 @@ void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) break; } - /* disable autoneg */ hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, - PHY_CR_AUTOENG_LEN, 0); + PHY_CR_AUTOENG_LEN, pdata->phy_autoeng); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS, PHY_CR_SPEED_SEL_H_LEN, high_bit); regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS, PHY_CR_SPEED_SEL_L_LEN, low_bit); - hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS, + PHY_CR_DUPLEX_LEN, pdata->phy_duplex); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_RESET_POS, PHY_CR_RESET_LEN, + 1); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; } -void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex) +int fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed) { struct fxgmac_hw_ops *hw_ops = 
&pdata->hw_ops; u32 regval = 0; + unsigned int high_bit = 0, low_bit = 0; + int ret = 0; + + switch (speed) { + case SPEED_1000: + high_bit = 1, low_bit = 0; + break; + case SPEED_100: + high_bit = 0, low_bit = 1; + break; + case SPEED_10: + high_bit = 0, low_bit = 0; + break; + default: + break; + } + + /* disable autoneg */ hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS, - PHY_CR_DUPLEX_LEN, (duplex ? 1 : 0)); - hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_H_POS, + PHY_CR_SPEED_SEL_H_LEN, high_bit); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_SPEED_SEL_L_POS, + PHY_CR_SPEED_SEL_L_LEN, low_bit); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; } -void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg) +int fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; u32 regval = 0; + int ret = 0; + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); - regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, - PHY_CR_AUTOENG_LEN, (autoneg ? 1 : 0)); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_DUPLEX_POS, + PHY_CR_DUPLEX_LEN, (duplex ? 1 : 0)); hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; } -/* - * input: lport - * output: - * cap_mask, bit definitions: - * pause capbility and 100/10 capbilitys follow the definition of mii reg4. - * for 1000M capability, bit0=1000M half; bit1=1000M full, refer to mii reg9.[9:8]. 
- */ -int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata, - unsigned int *cap_mask) +int fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - unsigned int val; - unsigned int reg; - - if ((!hw_ops->read_ephy_reg) || (!hw_ops->write_ephy_reg)) - return -1; - - reg = REG_MII_ADVERTISE; - if (hw_ops->read_ephy_reg(pdata, reg, &val) < 0) - goto busy_exit; - - if (FXGMAC_ADVERTISE_10HALF & val) { - *cap_mask |= FXGMAC_ADVERTISE_10HALF; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_10HALF; - } - - if (FXGMAC_ADVERTISE_10FULL & val) { - *cap_mask |= FXGMAC_ADVERTISE_10FULL; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_10FULL; - } - - if (FXGMAC_ADVERTISE_100HALF & val) { - *cap_mask |= FXGMAC_ADVERTISE_100HALF; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_100HALF; - } - - if (FXGMAC_ADVERTISE_100FULL & val) { - *cap_mask |= FXGMAC_ADVERTISE_100FULL; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_100FULL; - } - - if (FXGMAC_ADVERTISE_PAUSE_CAP & val) { - *cap_mask |= FXGMAC_ADVERTISE_PAUSE_CAP; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_PAUSE_CAP; - } - - if (FXGMAC_ADVERTISE_PAUSE_ASYM & val) { - *cap_mask |= FXGMAC_ADVERTISE_PAUSE_ASYM; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_PAUSE_ASYM; - } - - reg = REG_MII_CTRL1000; - if (hw_ops->read_ephy_reg(pdata, reg, &val) < 0) - goto busy_exit; - - if (REG_BIT_ADVERTISE_1000HALF & val) { - *cap_mask |= FXGMAC_ADVERTISE_1000HALF; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_1000HALF; - } - - if (REG_BIT_ADVERTISE_1000FULL & val) { - *cap_mask |= FXGMAC_ADVERTISE_1000FULL; - } else { - *cap_mask &= ~FXGMAC_ADVERTISE_1000FULL; - } - - return 0; - -busy_exit: - DPRINTK("fxgmac_ephy_autoneg_ability_get exit due to ephy reg access fail.\n"); + u32 regval = 0; + int ret = 0; - return -1; + hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, ®val); + regval = FXGMAC_SET_REG_BITS(regval, PHY_CR_AUTOENG_POS, + PHY_CR_AUTOENG_LEN, (autoneg ? 
1 : 0)); + ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, regval); + return ret; } -int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata) +void fxgmac_set_phy_link_ksettings(struct fxgmac_pdata *pdata) { struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; - int ret; - volatile unsigned int val; - int busy = 15; - - ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, (unsigned int *)&val); - if (0 > ret) - goto busy_exit; - - ret = hw_ops->write_ephy_reg(pdata, REG_MII_BMCR, (val | 0x8000)); - if (0 > ret) - goto busy_exit; - - do { - ret = hw_ops->read_ephy_reg(pdata, REG_MII_BMCR, - (unsigned int *)&val); - busy--; - } while ((ret >= 0) && (0 != (val & 0x8000)) && (busy)); - - if (0 == (val & 0x8000)) - return 0; - - DPRINTK("fxgmac_ephy_soft_reset, timeout, busy=%d.\n", busy); - return -EBUSY; - -busy_exit: - DPRINTK("fxgmac_ephy_soft_reset exit due to ephy reg access fail.\n"); - - return ret; + pdata->phy_speed = pdata->expansion.pre_phy_speed; + pdata->phy_duplex = pdata->expansion.pre_phy_duplex; + pdata->phy_autoeng = pdata->expansion.pre_phy_autoneg; + + if (pdata->phy_autoeng || + (!pdata->phy_autoeng && pdata->phy_speed == SPEED_1000)) + hw_ops->phy_config(pdata); + else + fxgmac_phy_force_mode(pdata); } /* this function used to double check the speed. for fiber, to correct there is no 10M */ @@ -179,8 +122,9 @@ static int fxgmac_ephy_adjust_status(u32 lport, int val, int is_utp, int *speed, int speed_mode; *speed = -1; - *duplex = (val & BIT(FUXI_EPHY_DUPLEX_BIT)) >> FUXI_EPHY_DUPLEX_BIT; - speed_mode = (val & FUXI_EPHY_SPEED_MODE) >> FUXI_EPHY_SPEED_MODE_BIT; + *duplex = (val & BIT(FXGMAC_EPHY_DUPLEX_BIT)) >> FXGMAC_EPHY_DUPLEX_BIT; + speed_mode = (val & FXGMAC_EPHY_SPEED_MODE) >> + FXGMAC_EPHY_SPEED_MODE_BIT; switch (speed_mode) { case 0: if (is_utp) @@ -201,13 +145,13 @@ static int fxgmac_ephy_adjust_status(u32 lport, int val, int is_utp, int *speed, return 0; } -/* - * this function for polling to get status of ephy link. 
+/* this function for polling to get status of ephy link. * output: - * speed: SPEED_10M, SPEED_100M, SPEED_1000M or -1; - * duplex: 0 or 1, see reg 0x11, bit YT8614_DUPLEX_BIT. - * ret_link: 0 or 1, link down or up. - * media: only valid when ret_link=1, (YT8614_SMI_SEL_SDS_SGMII + 1) for fiber; (YT8614_SMI_SEL_PHY + 1) for utp. -1 for link down. + * speed: SPEED_10M, SPEED_100M, SPEED_1000M or -1; + * duplex: 0 or 1, see reg 0x11, bit YT8614_DUPLEX_BIT. + * ret_link: 0 or 1, link down or up. + * media: only valid when ret_link=1, (YT8614_SMI_SEL_SDS_SGMII + 1) + * for fiber; (YT8614_SMI_SEL_PHY + 1) for utp. -1 for link down. */ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, int *ret_link, int *media) @@ -224,7 +168,7 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, if (0 > ret) goto busy_exit; - link = val & (BIT(FUXI_EPHY_LINK_STATUS_BIT)); + link = val & (BIT(FXGMAC_EPHY_LINK_STATUS_BIT)); if (link) { link_utp = 1; fxgmac_ephy_adjust_status(0, val, 1, speed, duplex); @@ -235,9 +179,9 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, if (link_utp || link_fiber) { /* case of fiber of priority */ if (link_utp) - *media = (FUXI_EPHY_SMI_SEL_PHY + 1); + *media = (FXGMAC_EPHY_SMI_SEL_PHY + 1); if (link_fiber) - *media = (FUXI_EPHY_SMI_SEL_SDS_SGMII + 1); + *media = (FXGMAC_EPHY_SMI_SEL_SDS_SGMII + 1); *ret_link = 1; } else { @@ -254,3 +198,136 @@ int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, return ret; } + +/* fxgmac_phy_update_link - update the phy link status + * @adapter: pointer to the device adapter structure + */ +void fxgmac_phy_update_link(struct net_device *netdev) +{ + struct fxgmac_pdata *pdata = netdev_priv(netdev); + struct fxgmac_hw_ops *hw_ops = &pdata->hw_ops; + u32 regval, cur_link, cur_speed; + + regval = hw_ops->get_ephy_state(pdata); + + if (!(regval & BIT(MGMT_EPHY_CTRL_RESET_POS)) && + pdata->expansion.dev_state 
!= FXGMAC_DEV_CLOSE) {
+ pdata->expansion.phy_link = false;
+ return;
+ }
+
+ cur_speed = FXGMAC_GET_REG_BITS(regval, MGMT_EPHY_CTRL_STA_SPEED_POS,
+ MGMT_EPHY_CTRL_STA_SPEED_LEN);
+ pdata->phy_speed = (cur_speed == 2) ? SPEED_1000 :
+ (cur_speed == 1) ? SPEED_100 :
+ SPEED_10;
+ pdata->phy_duplex =
+ FXGMAC_GET_REG_BITS(regval, MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_POS,
+ MGMT_EPHY_CTRL_STA_EPHY_DUPLEX_LEN);
+ cur_link = FXGMAC_GET_REG_BITS(regval,
+ MGMT_EPHY_CTRL_STA_EPHY_LINKUP_POS,
+ MGMT_EPHY_CTRL_STA_EPHY_LINKUP_LEN);
+ if (pdata->expansion.phy_link != cur_link) {
+ hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL);
+ hw_ops->read_ephy_reg(pdata, REG_MII_INT_STATUS, NULL);
+
+ if (cur_link) {
+#ifdef FXGMAC_ASPM_ENABLED
+ if (fxgmac_aspm_action_linkup(pdata))
+ return;
+#endif
+ hw_ops->config_mac_speed(pdata);
+ hw_ops->enable_rx(pdata);
+ hw_ops->enable_tx(pdata);
+ hw_ops->read_ephy_reg(pdata, REG_MII_LPA, &regval);
+ if (FXGMAC_GET_REG_BITS(
+ regval, PHY_MII_LINK_PARNTNER_10FULL_POS,
+ PHY_MII_LINK_PARNTNER_10FULL_LEN) ||
+ FXGMAC_GET_REG_BITS(
+ regval, PHY_MII_LINK_PARNTNER_10HALF_POS,
+ PHY_MII_LINK_PARNTNER_10HALF_LEN)) {
+ pdata->support_10m_link = true;
+ } else {
+ pdata->support_10m_link = false;
+ }
+ pdata->expansion.pre_phy_speed = pdata->phy_speed;
+ pdata->expansion.pre_phy_duplex = pdata->phy_duplex;
+ pdata->expansion.pre_phy_autoneg = pdata->phy_autoeng;
+ netif_carrier_on(pdata->netdev);
+ if (netif_running(pdata->netdev)) {
+ netif_tx_wake_all_queues(pdata->netdev);
+ dev_info(pdata->dev,
+ "%s now is link up, mac_speed=%d.\n",
+ netdev_name(pdata->netdev),
+ pdata->phy_speed);
+ }
+ } else {
+ netif_carrier_off(pdata->netdev);
+ netif_tx_stop_all_queues(pdata->netdev);
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_duplex = DUPLEX_UNKNOWN;
+ hw_ops->disable_rx(pdata);
+ hw_ops->disable_tx(pdata);
+#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED
+ if (pdata->expansion.lb_cable_flag) {
+ hw_ops->clean_cable_loopback(pdata);
+ 
pdata->expansion.lb_cable_flag = 0; + } +#endif + +#ifdef FXGMAC_ASPM_ENABLED + fxgmac_schedule_aspm_config_work(pdata); +#endif + dev_info(pdata->dev, "%s now is link down\n", + netdev_name(pdata->netdev)); + } + pdata->expansion.phy_link = cur_link; + } +} + +static void fxgmac_phy_link_poll(struct timer_list *t) +{ + struct fxgmac_pdata *pdata = + from_timer(pdata, t, expansion.phy_poll_tm); + + if (!pdata->netdev) { + DPRINTK("fxgmac_phy_timer polling with NULL netdev %lx\n", + (unsigned long)(pdata->netdev)); + return; + } + + pdata->stats.ephy_poll_timer_cnt++; + +#if FXGMAC_PM_FEATURE_ENABLED + if (!test_bit(FXGMAC_POWER_STATE_DOWN, &pdata->expansion.powerstate)) +#endif + { + mod_timer(&pdata->expansion.phy_poll_tm, jiffies + HZ / 2); + fxgmac_phy_update_link(pdata->netdev); + } else { + DPRINTK("%s powerstate changed, %ld, netdev=%lx, tm=%lx\n", + __func__, pdata->expansion.powerstate, + (unsigned long)(pdata->netdev), + (unsigned long)&pdata->expansion.phy_poll_tm); + } +} + +int fxgmac_phy_timer_init(struct fxgmac_pdata *pdata) +{ + init_timer_key(&pdata->expansion.phy_poll_tm, NULL, 0, + "fuxi_phy_link_update_timer", NULL); + + pdata->expansion.phy_poll_tm.expires = jiffies + HZ / 2; + pdata->expansion.phy_poll_tm.function = (void *)(fxgmac_phy_link_poll); + + add_timer(&pdata->expansion.phy_poll_tm); + + DPRINTK("fxgmac_phy_timer started, %lx\n", jiffies); + return 0; +} + +void fxgmac_phy_timer_destroy(struct fxgmac_pdata *pdata) +{ + del_timer_sync(&pdata->expansion.phy_poll_tm); + DPRINTK("fxgmac_phy_timer removed\n"); +} diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h index 65d6288e6869a..81b92815dc9b7 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac-reg.h @@ -4,20 +4,23 @@ #ifndef __FUXI_GMAC_REG_H__ #define __FUXI_GMAC_REG_H__ -#define AISC_MODE +#define ASIC_MODE -#define FUXI_REV_01 0x01 /* The first 
NTO version. */ -#define FUXI_REV_03 0x03 /* ECO back on 07/2023. */ +#define YT6801_NTO_VER 0x01 /* The first NTO version. */ +#define YT6801_ECO1_VER 0x03 /* ECO back on 07/2023. */ +#define YT6801_ECO2_VER 0x04 /* ECO2 back on 01/2026. */ /* MAC register offsets */ #define MAC_OFFSET 0x2000 #define MAC_CR 0x0000 /* The MAC Configuration Register */ #define MAC_ECR 0x0004 #define MAC_PFR 0x0008 +#define MAC_WTR 0x000c #define MAC_HTR0 0x0010 #define MAC_VLANTR 0x0050 #define MAC_VLANHTR 0x0058 #define MAC_VLANIR 0x0060 +#define MAC_IVLANIR 0x0064 #define MAC_Q0TFCR 0x0070 #define MAC_RFCR 0x0090 #define MAC_RQC0R 0x00a0 @@ -36,8 +39,12 @@ #define MAC_LPI_CONTROL 0x00d4 #define MAC_LPI_TIMER 0x00d8 #define MAC_MS_TIC_COUNTER 0x00dc -#define MAC_AN_SR 0x00E4 -#define MAC_PHYIF_STA 0x00F8 +#define MAC_AN_CR 0x00e0 +#define MAC_AN_SR 0x00e4 +#define MAC_AN_ADV 0x00e8 +#define MAC_AN_LPA 0x00ec +#define MAC_AN_EXP 0x00f0 +#define MAC_PHYIF_STA 0x00f8 #define MAC_VR 0x0110 #define MAC_DBG_STA 0x0114 #define MAC_HWF0R 0x011c @@ -46,6 +53,7 @@ #define MAC_HWF3R 0x0128 #define MAC_MDIO_ADDRESS 0x0200 #define MAC_MDIO_DATA 0x0204 +#define MAC_GPIOCR 0x0208 #define MAC_GPIO_SR 0x020c #define MAC_ARP_PROTO_ADDR 0x0210 #define MAC_CSR_SW_CTRL 0x0230 @@ -406,7 +414,7 @@ #define MMC_CR_MCF_POS 3 #define MMC_CR_MCF_LEN 1 #define MMC_RIER_ALL_INTERRUPTS_POS 0 -#define MMC_RIER_ALL_INTERRUPTS_LEN 26 +#define MMC_RIER_ALL_INTERRUPTS_LEN 28 #define MMC_RISR_RXFRAMECOUNT_GB_POS 0 #define MMC_RISR_RXFRAMECOUNT_GB_LEN 1 #define MMC_RISR_RXOCTETCOUNT_GB_POS 1 @@ -465,7 +473,7 @@ #define MMC_RISR_RXLPITRANSITION_LEN 1 #define MMC_TIER_ALL_INTERRUPTS_POS 0 -#define MMC_TIER_ALL_INTERRUPTS_LEN 26 +#define MMC_TIER_ALL_INTERRUPTS_LEN 28 #define MMC_TISR_TXOCTETCOUNT_GB_POS 0 #define MMC_TISR_TXOCTETCOUNT_GB_LEN 1 #define MMC_TISR_TXFRAMECOUNT_GB_POS 1 @@ -525,10 +533,12 @@ /* MTL register offsets */ #define MTL_OMR 0x0c00 -#define MTL_FDDR 0x0c10 +#define MTL_FDCR 0x0c08 +#define MTL_FDSR 
0x0c0c +#define MTL_FDDR 0x0c10 #define MTL_INT_SR 0x0c20 #define MTL_RQDCM0R 0x0c30 -#define MTL_ECC_INT_SR 0x0ccc +#define MTL_ECC_INT_SR 0x0ccc #define MTL_RQDCM_INC 4 #define MTL_RQDCM_Q_PER_REG 4 @@ -549,10 +559,13 @@ #define MTL_Q_INT_CTL_SR 0x0d2c #define MTL_Q_TQOMR 0x00 +#define MTL_Q_TQUR 0x04 #define MTL_Q_RQOMR 0x30 +#define MTL_Q_RQMPOCR 0x34 #define MTL_Q_RQDR 0x38 -#define MTL_Q_IER 0x2c -#define MTL_Q_ISR 0x2c /* no isr register */ +#define MTL_Q_RQCR 0x3c +#define MTL_Q_IER 0x2c +#define MTL_Q_ISR 0x2c /* no isr register */ #define MTL_TXQ_DEG 0x08 /* transmit debug */ /* MTL queue register entry bit positions and sizes */ @@ -652,10 +665,19 @@ #define MTL_TC_BASE MTL_Q_BASE #define MTL_TC_INC MTL_Q_INC +#define MTL_TC_TQDR 0x08 #define MTL_TC_ETSCR 0x10 #define MTL_TC_ETSSR 0x14 #define MTL_TC_QWR 0x18 +/* The Queue 0 Transmit Debug register gives the debug status of various blocks + * related to the Transmit queue + */ +#define MTL_TC_TQDR_TRCSTS_POS 1 +#define MTL_TC_TQDR_TRCSTS_LEN 2 +#define MTL_TC_TQDR_TXQSTS_POS 4 +#define MTL_TC_TQDR_TXQSTS_LEN 1 + /* MTL traffic class register entry bit positions and sizes */ #define MTL_TC_ETSCR_TSA_POS 0 #define MTL_TC_ETSCR_TSA_LEN 2 @@ -673,28 +695,45 @@ #define DMA_DSR0 0x100c #define DMA_DSR1 0x1010 #define DMA_DSR2 0x1014 +#define DMA_AXIARCR 0x1020 +#define DMA_AXIAWCR 0x1024 +#define DMA_AXIAWRCR 0x1028 +#define DMA_SAFE_ISR 0x1080 +#define DMA_ECC_IE 0x1084 #define DMA_ECC_INT_SR 0x1088 /* DMA register entry bit positions and sizes */ #define DMA_ISR_MACIS_POS 17 #define DMA_ISR_MACIS_LEN 1 #define DMA_ISR_MTLIS_POS 16 -#define DMA_ISR_MTLIS_LEN 1 +#define DMA_ISR_MTLIS_LEN 1 #define DMA_MR_SWR_POS 0 #define DMA_MR_SWR_LEN 1 +#define DMA_MR_TXPR_POS 11 +#define DMA_MR_TXPR_LEN 1 #define DMA_MR_INTM_POS 16 #define DMA_MR_INTM_LEN 2 +#define DMA_MA_INTM_EDGE 0 +#define DMA_MA_INTM_LEVEL 1 +#define DMA_MA_INTM_LEVLE_ENHANCE 2 #define DMA_MR_QUREAD_POS 19 #define DMA_MR_QUREAD_LEN 1 - -#define 
DMA_SBMR_EN_LPI_POS 31 -#define DMA_SBMR_EN_LPI_LEN 1 -#define DMA_SBMR_LPI_XIT_PKT_POS 30 -#define DMA_SBMR_LPI_XIT_PKT_LEN 1 +#define DMA_MR_QUREAD_EN 1 +#define DMA_MR_TNDF_POS 20 +#define DMA_MR_TNDF_LEN 2 +#define DMA_MR_RNDF_POS 22 +#define DMA_MR_RNDF_LEN 2 + +#define DMA_SBMR_EN_LPI_POS 31 +#define DMA_SBMR_EN_LPI_LEN 1 +#define DMA_SBMR_LPI_XIT_PKT_POS 30 +#define DMA_SBMR_LPI_XIT_PKT_LEN 1 #define DMA_SBMR_WR_OSR_LMT_POS 24 #define DMA_SBMR_WR_OSR_LMT_LEN 6 #define DMA_SBMR_RD_OSR_LMT_POS 16 #define DMA_SBMR_RD_OSR_LMT_LEN 8 +#define DMA_SBMR_AAL_POS 12 +#define DMA_SBMR_AAL_LEN 1 #define DMA_SBMR_EAME_POS 11 #define DMA_SBMR_EAME_LEN 1 #define DMA_SBMR_AALE_POS 10 @@ -749,6 +788,12 @@ #define DMA_CH_RDRLR 0x30 #define DMA_CH_IER 0x34 #define DMA_CH_RIWT 0x38 +#define DMA_CH_CATDR_LO 0x44 +#define DMA_CH_CARDR_LO 0x4c +#define DMA_CH_CATBR_HI 0x50 +#define DMA_CH_CATBR_LO 0x54 +#define DMA_CH_CARBR_HI 0x58 +#define DMA_CH_CARBR_LO 0x5c #define DMA_CH_SR 0x60 /* DMA channel register entry bit positions and sizes */ @@ -861,6 +906,8 @@ #define RX_NORMAL_DESC3_INTE_POS 30 #define RX_NORMAL_DESC3_INTE_LEN 1 #define RX_NORMAL_DESC3_L34T_LEN 4 +#define RX_NORMAL_DESC3_RSV_POS 26 +#define RX_NORMAL_DESC3_RSV_LEN 1 #define RX_NORMAL_DESC3_LD_POS 28 #define RX_NORMAL_DESC3_LD_LEN 1 #define RX_NORMAL_DESC3_OWN_POS 31 @@ -909,6 +956,8 @@ */ #define RX_NORMAL_DESC2_WB_DAF_POS 17 #define RX_NORMAL_DESC2_WB_DAF_LEN 1 +#define RX_NORMAL_DESC2_WB_RAPARSER_POS 11 +#define RX_NORMAL_DESC2_WB_RAPARSER_LEN 3 #define RX_NORMAL_DESC3_WB_LD_POS 28 #define RX_NORMAL_DESC3_WB_LD_LEN 1 @@ -924,6 +973,59 @@ #define RX_NORMAL_DESC3_WB_CE_POS 24 #define RX_NORMAL_DESC3_WB_CE_LEN 1 +/* When this bit is set, it indicates that the packet length exceeds + * the specified maximum Ethernet size of 1518, 1522, or 2000 bytes + * (9018 or 9022 bytes if jumbo packet enable is set). Note: Giant packet + * indicates only the packet length. It does not cause any packet truncation. 
+ */ +#define RX_NORMAL_DESC3_WB_GP_POS 23 +#define RX_NORMAL_DESC3_WB_GP_LEN 1 + +/* When this bit is set, it indicates that the Receive Watchdog Timer has expired + * while receiving the current packet. The current packet is truncated after + * watchdog timeout. + */ +#define RX_NORMAL_DESC3_WB_RWT_POS 22 +#define RX_NORMAL_DESC3_WB_RWT_LEN 1 + +/* When this bit is set, it indicates that the received packet is damaged because + * of buffer overflow in Rx FIFO. + * Note: This bit is set only when the DMA transfers a partial packet to the + * application. This happens only when the Rx FIFO is operating in the threshold + * mode. In the store-and-forward mode, all partial packets are dropped completely + * in Rx FIFO. + */ +#define RX_NORMAL_DESC3_WB_OE_POS 21 +#define RX_NORMAL_DESC3_WB_OE_LEN 1 + +/* When this bit is set, it indicates that the gmii_rxer_i signal is asserted while + * the gmii_rxdv_i signal is asserted during packet reception. This error also + * includes carrier extension error in the GMII and half-duplex mode. Error can be + * of less or no extension, or error (rxd!= 0f) during extension + */ +#define RX_NORMAL_DESC3_WB_RE_POS 20 +#define RX_NORMAL_DESC3_WB_RE_LEN 1 + +/* When this bit is set, it indicates that the received packet has a non-integer + * multiple of bytes(odd nibbles). 
This bit is valid only in the MII Mode + */ +#define RX_NORMAL_DESC3_WB_DE_POS 19 +#define RX_NORMAL_DESC3_WB_DE_LEN 1 + +/* When this bit is set, it indicates the logical OR of the following bits: + * RDES3[24]: CRC Error + * RDES3[19]: Dribble Error + * RDES3[20]: Receive Error + * RDES3[22]: Watchdog Timeout + * RDES3[21]: Overflow Error + * RDES3[23]: Giant Packet + * RDES2[17]: Destination Address Filter Fail, when Flexible RX Parser is enabled + * RDES2[16]: SA Address Filter Fail, when Flexible RX Parser is enabled + * This field is valid only when the LD bit of RDES3 is set + */ +#define RX_NORMAL_DESC3_WB_ES_POS 15 +#define RX_NORMAL_DESC3_WB_ES_LEN 1 + #define RX_DESC3_L34T_IPV4_TCP 1 #define RX_DESC3_L34T_IPV4_UDP 2 #define RX_DESC3_L34T_IPV4_ICMP 3 @@ -1037,6 +1139,8 @@ #define PHY_CR_SPEED_SEL_L_LEN 1 #define PHY_CR_AUTOENG_POS 12 #define PHY_CR_AUTOENG_LEN 1 +#define PHY_CR_POWER_POS 11 +#define PHY_CR_POWER_LEN 1 #define PHY_CR_RE_AUTOENG_POS 9 #define PHY_CR_RE_AUTOENG_LEN 1 #define PHY_CR_DUPLEX_POS 8 @@ -1060,6 +1164,10 @@ #define PHY_MII_ADVERTISE_10HALF_POS 5 #define PHY_MII_ADVERTISE_10HALF_LEN 1 #define REG_MII_LPA 0x05 /* Link partner ability reg */ +#define PHY_MII_LINK_PARNTNER_10FULL_POS 6 +#define PHY_MII_LINK_PARNTNER_10FULL_LEN 1 +#define PHY_MII_LINK_PARNTNER_10HALF_POS 5 +#define PHY_MII_LINK_PARNTNER_10HALF_LEN 1 #define REG_MII_EXPANSION 0x06 /* Expansion register */ #define REG_MII_NEXT_PAGE 0x07 /* Next page register */ #define REG_MII_LPR_NEXT_PAGE 0x08 /* LPR next page register */ @@ -1085,7 +1193,7 @@ #define PHY_MII_SPEC_DUPLEX_LEN 1 #define REG_MII_INT_MASK 0x12 /* Interrupt mask register */ -#ifdef AISC_MODE +#ifdef ASIC_MODE #define PHY_INT_MASK_LINK_UP_POS 10 #define PHY_INT_MASK_LINK_UP_LEN 1 #define PHY_INT_MASK_LINK_DOWN_POS 11 @@ -1096,12 +1204,21 @@ #define PHY_INT_MASK_LINK_DOWN_POS 0 #define PHY_INT_MASK_LINK_DOWN_LEN 1 #endif -#define REG_MII_INT_STATUS 0x13 /* Interrupt status register */ +#define 
REG_MII_INT_STATUS 0x13 /* Interrupt status register */ +#ifdef ASIC_MODE +#define PHY_INT_STAT_LINK_UP_POS 10 +#define PHY_INT_STAT_LINK_UP_LEN 1 +#define PHY_INT_STAT_LINK_DOWN_POS 11 +#define PHY_INT_STAT_LINK_DOWN_LEN 1 +#else #define PHY_INT_STAT_LINK_UP_POS 1 #define PHY_INT_STAT_LINK_UP_LEN 1 #define PHY_INT_STAT_LINK_DOWN_POS 0 #define PHY_INT_STAT_LINK_DOWN_LEN 1 +#endif #define REG_MII_DOWNG_CTRL 0x14 /* Speed auto downgrade control*/ +#define REG_SMART_SPEED_POS 5 +#define REG_SMART_SPEED_LEN 1 #define REG_MII_RERRCOUNTER 0x15 /* Receive error counter */ #define REG_MII_EXT_ADDR 0x1E /* Extended reg's address */ @@ -1112,11 +1229,10 @@ /* for ephy link capability * Advertisement control register(0x04) */ - /* Advertisement control register(0x04) */ +/* Advertisement control register(0x04) */ #define FXGMAC_ADVERTISE_SLCT 0x001f /* Selector bits */ #define FXGMAC_ADVERTISE_CSMA 0x0001 /* Only selector supported */ -#define FXGMAC_ADVERTISE_1000FULL 0x0004 /* trt fir 1000BASE-T full duplex */ -#define FXGMAC_ADVERTISE_1000HALF 0x0008 /* try for 1000BASE-T half duplex */ + #define FXGMAC_ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ #define FXGMAC_ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ #define FXGMAC_ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ @@ -1130,11 +1246,14 @@ #define FXGMAC_ADVERTISE_NPAGE 0x8000 /* Next page bit */ /* 1000BASE-T Control register(0x09) */ -#define REG_BIT_ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ -#define REG_BIT_ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +#define FXGMAC_ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define FXGMAC_ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ -#define REG_BIT_ADVERTISE_1000_CAP (REG_BIT_ADVERTISE_1000FULL | REG_BIT_ADVERTISE_1000HALF) -#define REG_BIT_ADVERTISE_100_10_CAP (FXGMAC_ADVERTISE_100FULL | FXGMAC_ADVERTISE_100HALF | FXGMAC_ADVERTISE_10FULL | 
FXGMAC_ADVERTISE_10HALF) +#define REG_BIT_ADVERTISE_1000_CAP \ + (FXGMAC_ADVERTISE_1000FULL | FXGMAC_ADVERTISE_1000HALF) +#define REG_BIT_ADVERTISE_100_10_CAP \ + (FXGMAC_ADVERTISE_100FULL | FXGMAC_ADVERTISE_100HALF | \ + FXGMAC_ADVERTISE_10FULL | FXGMAC_ADVERTISE_10HALF) #ifndef SPEED_1000M #define SPEED_1000M 1000 @@ -1161,19 +1280,23 @@ #define BIT(n) (0x1<<(n)) #endif -#ifndef FUXI_EPHY_SPEED_MODE_BIT -#define FUXI_EPHY_SPEED_MODE 0xc000 -#define FUXI_EPHY_DUPLEX 0x2000 -#define FUXI_EPHY_SPEED_MODE_BIT 14 -#define FUXI_EPHY_DUPLEX_BIT 13 -#define FUXI_EPHY_LINK_STATUS_BIT 10 +#ifndef FXGMAC_EPHY_SPEED_MODE_BIT +#define FXGMAC_EPHY_SPEED_MODE 0xc000 +#define FXGMAC_EPHY_DUPLEX 0x2000 +#define FXGMAC_EPHY_SPEED_MODE_BIT 14 +#define FXGMAC_EPHY_DUPLEX_BIT 13 +#define FXGMAC_EPHY_LINK_STATUS_BIT 10 #endif -#define FUXI_EPHY_SMI_SEL_PHY 0x0 -#define FUXI_EPHY_SMI_SEL_SDS_QSGMII 0x02 -#define FUXI_EPHY_SMI_SEL_SDS_SGMII 0x03 +#define FXGMAC_EPHY_SMI_SEL_PHY 0x0 +#define FXGMAC_EPHY_SMI_SEL_SDS_QSGMII 0x02 +#define FXGMAC_EPHY_SMI_SEL_SDS_SGMII 0x03 +#define REG_MII_EXT_AFE_CONTROL_REGISTER3 0x12 +#define REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_POS 13 +#define REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_LEN 1 +#define REG_MII_EXT_AFE_CONTROL_CLKDAC_AON_ON 1 #define REG_MII_EXT_ANALOG_CFG3 0x52 #define MII_EXT_ANALOG_CFG3_ADC_START_CFG_POS 14 #define MII_EXT_ANALOG_CFG3_ADC_START_CFG_LEN 2 @@ -1209,69 +1332,70 @@ #define REG_MII_EXT_SLEEP_REG_ENABLE_LOOPBACK 0x6812 #define REG_MII_EXT_SLEEP_REG_CLEAN_LOOPBACK 0xe812 -#define REG_MII_EXT_ANALOG_CFG2 0x51 -#define REG_MII_EXT_ANALOG_CFG2_LED_VALUE 0x4a9 -#define REG_MII_EXT_ANALOG_CFG8 0x57 -#define REG_MII_EXT_ANALOG_CFG8_LED_VALUE 0x274c - -#define REG_MII_EXT_COMMON_LED_CFG 0xA00B -#define REG_MII_EXT_COMMON_LED0_CFG 0xA00C -#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0 0x2600 -#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1 0x00 -#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2 0x20 -#define 
REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3 0x2600 -#define REG_MII_EXT_COMMON_LED1_CFG 0xA00D -#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0 0x1800 -#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1 0x00 -#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2 0x40 -#define REG_MII_EXT_COMMON_LED2_CFG 0xA00E -#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0 0x00 -#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2 0x07 -#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3 0x20 -#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4 0x1800 -#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xA00F -#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0F - -#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3 0x2600 - -#define REG_MII_EXT_PKG_CFG0 0xA0 -#define REG_MII_EXT_PKG_CHECK_POS 14 -#define REG_MII_EXT_PKG_CHECK_LEN 2 -#define REG_MII_EXT_PKG_ENABLE_CHECK 0x2 -#define REG_MII_EXT_PKG_DISABLE_CHECK 0x1 -#define REG_MII_EXT_SLEEP_CONTROL1 0x27 -#define MII_EXT_SLEEP_CONTROL1_EN_POS 15 -#define MII_EXT_SLEEP_CONTROL1_EN_LEN 1 -#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS 14 -#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN 1 -#define REG_MII_EXT_PKG_RX_VALID0 0xA3 -#define REG_MII_EXT_REG_RX_VALID1 0xA4 -#define REG_MII_EXT_REG_RX_OS0 0xA5 -#define REG_MII_EXT_REG_RX_OS1 0xA6 -#define REG_MII_EXT_REG_RX_US0 0xA7 -#define REG_MII_EXT_REG_RX_US1 0xA8 -#define REG_MII_EXT_REG_RX_ERR 0xA9 -#define REG_MII_EXT_REG_RX_0S_BAD 0xAA -#define REG_MII_EXT_REG_RX_FRAGMENT 0xAB -#define REG_MII_EXT_REG_RX_NOSFD 0xAC -#define REG_MII_EXT_REG_TX_VALID0 0xAD -#define REG_MII_EXT_REG_TX_VALID1 0xAE -#define REG_MII_EXT_REG_TX_OS0 0xAF -#define REG_MII_EXT_REG_TX_OS1 0xB0 -#define REG_MII_EXT_REG_TX_US0 0xB1 -#define REG_MII_EXT_REG_TX_US1 0xB2 -#define REG_MII_EXT_REG_TX_ERR 0xB3 -#define REG_MII_EXT_REG_TX_OS_BAD 0xB4 -#define REG_MII_EXT_REG_TX_FRAGMENT 0xB5 -#define REG_MII_EXT_REG_TX_NOSFD 0xB6 -#define REG_MII_EXT_REG_PMA_DBG0_ADC 0x13 -#define 
REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE 0x3538 -#define REG_MII_EXT_REG_CLD_REG0 0x3A0 -#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xEB24 -#define REG_MII_EXT_REG_CLD_REG1 0x3CC -#define REG_MII_EXT_ENABLE_CLD_GT_HT_BT 0x7001 -#define REG_MMD_EEE_ABILITY_REG 0x3C -#define REG_MMD_EEE_ABILITY_VALUE 0x06 +#define REG_MII_EXT_ANALOG_CFG2 0x51 +#define REG_MII_EXT_ANALOG_CFG2_VALUE 0x4a9 +#define REG_MII_EXT_ANALOG_CFG8 0x57 +#define REG_MII_EXT_ANALOG_CFG8_VALUE 0x274c +#define REG_MII_EXT_ANALOG_CFG8_137D1D05_VALUE 0x264c + +#define REG_MII_EXT_COMMON_LED_CFG 0xa00b +#define REG_MII_EXT_COMMON_LED0_CFG 0xa00c +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION0 0x2600 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION2 0x20 +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SOLUTION3 0x2600 +#define REG_MII_EXT_COMMON_LED1_CFG 0xa00d +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION0 0x1800 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION1 0x00 +#define REG_MII_EXT_COMMON_LED1_CFG_VALUE_SOLUTION2 0x40 +#define REG_MII_EXT_COMMON_LED2_CFG 0xa00e +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION0 0x00 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION2 0x07 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION3 0x20 +#define REG_MII_EXT_COMMON_LED2_CFG_VALUE_SOLUTION4 0x1800 +#define REG_MII_EXT_COMMON_LED_BLINK_CFG 0xa00f +#define REG_MII_EXT_COMMON_LED_BLINK_CFG_SOLUTION2 0x0f + +#define REG_MII_EXT_COMMON_LED0_CFG_VALUE_SLEEP_SOLUTION3 0x2600 + +#define REG_MII_EXT_PKG_CFG0 0xa0 +#define REG_MII_EXT_PKG_CHECK_POS 14 +#define REG_MII_EXT_PKG_CHECK_LEN 2 +#define REG_MII_EXT_PKG_ENABLE_CHECK 0x2 +#define REG_MII_EXT_PKG_DISABLE_CHECK 0x1 +#define REG_MII_EXT_SLEEP_CONTROL1 0x27 +#define MII_EXT_SLEEP_CONTROL1_EN_POS 15 +#define MII_EXT_SLEEP_CONTROL1_EN_LEN 1 +#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_POS 14 +#define MII_EXT_SLEEP_CONTROL1_PLLON_IN_SLP_LEN 1 +#define REG_MII_EXT_PKG_RX_VALID0 0xa3 
+#define REG_MII_EXT_REG_RX_VALID1 0xa4 +#define REG_MII_EXT_REG_RX_OS0 0xa5 +#define REG_MII_EXT_REG_RX_OS1 0xa6 +#define REG_MII_EXT_REG_RX_US0 0xa7 +#define REG_MII_EXT_REG_RX_US1 0xa8 +#define REG_MII_EXT_REG_RX_ERR 0xa9 +#define REG_MII_EXT_REG_RX_0S_BAD 0xaa +#define REG_MII_EXT_REG_RX_FRAGMENT 0xab +#define REG_MII_EXT_REG_RX_NOSFD 0xac +#define REG_MII_EXT_REG_TX_VALID0 0xad +#define REG_MII_EXT_REG_TX_VALID1 0xae +#define REG_MII_EXT_REG_TX_OS0 0xaf +#define REG_MII_EXT_REG_TX_OS1 0xb0 +#define REG_MII_EXT_REG_TX_US0 0xb1 +#define REG_MII_EXT_REG_TX_US1 0xb2 +#define REG_MII_EXT_REG_TX_ERR 0xb3 +#define REG_MII_EXT_REG_TX_OS_BAD 0xb4 +#define REG_MII_EXT_REG_TX_FRAGMENT 0xb5 +#define REG_MII_EXT_REG_TX_NOSFD 0xb6 +#define REG_MII_EXT_REG_PMA_DBG0_ADC 0x13 +#define REG_MII_EXT_ENABLE_GIGA_POWER_SAVING_FOR_SHORT_CABLE 0x3538 +#define REG_MII_EXT_REG_CLD_REG0 0x3a0 +#define REG_MII_EXT_ENABLE_CLD_NP_WP 0xeb24 +#define REG_MII_EXT_REG_CLD_REG1 0x3cc +#define REG_MII_EXT_ENABLE_CLD_GT_HT_BT 0x7001 +#define REG_MMD_EEE_ABILITY_REG 0x3c +#define REG_MMD_EEE_ABILITY_VALUE 0x06 /* Below registers don't belong to GMAC, it has zero offset, not 0x2000 offset. mem_base + REG_XXX. */ /***When issue happens, driver write this register to trigger pcie sniffer. 
***/ @@ -1302,7 +1426,7 @@ #define MGMT_EPHY_CTRL_STA_SPEED_LEN 2 #define MGMT_EPHY_CTRL_STA_SPEED_MASK 0x18 -#define MGMT_EPHY_CTRL_ERROR_VAULE 0xFFFFFFFF +#define MGMT_EPHY_CTRL_ERROR_VALUE 0xFFFFFFFF #define MGMT_PCIE_EP_CTRL 0x1008 @@ -1323,11 +1447,14 @@ /* set means link change wakeup enable */ #define WOL_LINKCHG_EN_POS 0 #define WOL_LINKCHG_EN_LEN 1 +#define WOL_WAIT_TIME_POS 2 +#define WOL_WAIT_TIME_LEN 13 #define OOB_WOL_CTRL 0x1010 #define OOB_WOL_CTRL_DIS_POS 0 #define OOB_WOL_CTRL_DIS_LEN 1 +#define MGMT_INT_CTRL0 0x1100 /* b3:0 per rx ch interrupt * b7:4 per tx ch interrupt * b8 Safety interrupt signal for un-correctable error @@ -1352,9 +1479,9 @@ #define MGMT_INT_CTRL0_INT_STATUS_MASK 0xFFFF #define MGMT_INT_CTRL0_INT_STATUS_RX 0x0001 #define MGMT_INT_CTRL0_INT_STATUS_TX 0x0010 -#define MGMT_INI_CTRL0_INT_STATUS_TX_INVERSE 0xFFEF -#define MGMG_INT_CTRL0_INT_STATUS_PHY_INVERSE 0xFFDF -#define MGMT_INT_CTRL0_INT_STATUS_PHY 0x0020 +#define MGMT_INT_CTRL0_INT_STATUS_TX_INVERSE 0xffef +#define MGMT_INT_CTRL0_INT_STATUS_MISC_INVERSE 0xffdf +#define MGMT_INT_CTRL0_INT_STATUS_MISC 0x0020 #define MGMT_INT_CTRL0_INT_MASK_RXCH_POS 16 #define MGMT_INT_CTRL0_INT_STATUS_RXCH_POS 0 @@ -1362,13 +1489,26 @@ #define MGMT_INT_CTRL0_INT_STATUS_RXCH_MASK 0xF #define MGMT_INT_CTRL0_INT_STATUS_RXTX_LEN 5 #define MGMT_INT_CTRL0_INT_STATUS_RXTX_MASK 0x1F -#define MGMT_INT_CTRL0_INT_STATUS_RXTXPHY_MASK 0x3F +#define MGMT_INT_CTRL0_INT_STATUS_RXTXMISC_MASK 0x3F #define MGMT_INT_CTRL0_INT_MASK_TXCH_POS 20 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_POS 4 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_LEN 1 #define MGMT_INT_CTRL0_INT_STATUS_TXCH_MASK 0x1 +#define MGMT_MAC_PHYIF_STA_POS 0 +#define MGMT_MAC_AN_SR0_POS 1 +#define MGMT_MAC_AN_SR1_POS 2 +#define MGMT_MAC_AN_SR2_POS 3 +#define MGMT_MAC_PMT_STA_POS 4 +#define MGMT_MAC_LPI_STA_POS 5 +#define MGMT_MAC_MMC_STA_POS 8 +#define MGMT_MAC_RX_MMC_STA_POS 9 +#define MGMT_MAC_TX_MMC_STA_POS 10 +#define MGMT_MMC_IPCRXINT_POS 11 +#define 
MGMT_MAC_TX_RX_STA0_POS 13 +#define MGMT_MAC_TX_RX_STA1_POS 14 +#define MGMT_MAC_GPIO_SR_POS 15 /* Interrupt Ctrl1 */ #define INT_CTRL1 0x1104 @@ -1414,10 +1554,13 @@ system exit idle state, send out one LTR exit message. #define LTR_IDLE_ENTER 0x113C /* LTR_CTRL3, LTR latency message, only for System IDLE Start. */ #define LTR_IDLE_ENTER_POS 0 #define LTR_IDLE_ENTER_LEN 10 -#define LTR_IDLE_ENTER_USVAL 900 +#define LTR_IDLE_ENTER_VAL 1 #define LTR_IDLE_ENTER_SCALE_POS 10 #define LTR_IDLE_ENTER_SCALE_LEN 5 #define LTR_IDLE_ENTER_SCALE 2 /* 0-1ns, 1-32ns, 2-1024ns, 3-32,768ns, 4-1,048,576ns, 5-33,554,432ns, 110-111-Not Permitted.*/ + +#define LTR_IDLE_ENTER_SCALE_32US 3 +#define LTR_IDLE_ENTER_SCALE_1MS 4 #define LTR_IDLE_ENTER_REQUIRE_POS 15 #define LTR_IDLE_ENTER_REQUIRE_LEN 1 #define LTR_IDLE_ENTER_REQUIRE 1 @@ -1425,10 +1568,11 @@ system exit idle state, send out one LTR exit message. #define LTR_IDLE_EXIT 0x1140 /* LTR_CTRL4, LTR latency message, only for System IDLE End. */ #define LTR_IDLE_EXIT_POS 0 #define LTR_IDLE_EXIT_LEN 10 -#define LTR_IDLE_EXIT_USVAL 2 -#define LTR_IDLE_EXIT_SCALE_POS 10 -#define LTR_IDLE_EXIT_SCALE_LEN 5 -#define LTR_IDLE_EXIT_SCALE 2 +#define LTR_IDLE_EXIT_VAL 288 +#define LTR_IDLE_EXIT_SCALE_POS 10 +#define LTR_IDLE_EXIT_SCALE_LEN 5 +#define LTR_IDLE_EXIT_SCALE_1US 2 +#define LTR_IDLE_EXIT_SCALE_32US 3 #define LTR_IDLE_EXIT_REQUIRE_POS 15 #define LTR_IDLE_EXIT_REQUIRE_LEN 1 #define LTR_IDLE_EXIT_REQUIRE 1 @@ -1466,6 +1610,8 @@ system exit idle state, send out one LTR exit message. #define SYS_RESET_REG 0x152C #define SYS_RESET_POS 31 #define SYS_RESET_LEN 1 +#define SYS_RESET_BYPASS_POS 0 +#define SYS_RESET_BYPASS_LEN 1 #define REG_PCIE_PSM_STATE 0x1994 /* PCIe PHY power state. */ #define PCIE_PSM_STATE_POS 0 @@ -1595,6 +1741,8 @@ system exit idle state, send out one LTR exit message. 
#define MGMT_RSS_CTRL_TBL_SIZE_LEN 3 #define MGMT_RSS_CTRL_TBL_SIZE_MASK 0x7 +#define MAC_RSSCR_IP2TE_POS 1 +#define MAC_RSSCR_IP2TE_LEN 1 #define MAC_RSSCR_RSSE_POS 31 #define MAC_RSSCR_RSSE_LEN 1 @@ -1653,6 +1801,16 @@ system exit idle state, send out one LTR exit message. #define MGMT_RMK_CTRL 0x1400 +#define MGMT_SIGDET_DEGLITCH 0x17f0 +#define MGMT_SIGDET_DEGLITCH_DISABLE_POS 2 /* sigdet deglitch disable ,active low */ +#define MGMT_SIGDET_DEGLITCH_DISABLE_LEN 1 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_POS 3 /* sigdet deglitch time windows filter seltion */ +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_LEN 2 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_10ns 0 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_20ns 1 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_30ns 2 +#define MGMT_SIGDET_DEGLITCH_TIME_WIN_40ns 3 + #define MGMT_SIGDET 0x17F8 #define MGMT_SIGDET_POS 13 #define MGMT_SIGDET_LEN 3 @@ -1676,25 +1834,12 @@ system exit idle state, send out one LTR exit message. #define MSI_ID_RXQ3 3 #define MSI_ID_TXQ0 4 -#if 1/* msi table modify to 6 0~3 rx 4 tx 5 phy/other */ +/* msi table modify to 6 0~3 rx 4 tx 5 phy/other */ #define MSI_ID_PHY_OTHER 5 #define MSIX_TBL_MAX_NUM 6 #define MSIX_TBL_RXTX_NUM 5 -#else -#define MSI_ID_TXQ1 5 -#define MSI_ID_TXQ2 6 -#define MSI_ID_TXQ3 7 -#define MSI_ID_SFTUE 8 -#define MSI_ID_SFTCE 9 -#define MSI_ID_SBD 10 -#define MSI_ID_PMT 11 -#define MSI_ID_PHY 12 - -#define MSIX_TBL_MAX_NUM 16 -#define MSIX_TBL_RXTX_NUM 8 -#endif #define MSIX_TBL_BASE_ADDR 0x1200 #define MSIX_TBL_MASK_OFFSET 0xC #define MSIX_TBL_DATA_OFFSET 0x8 @@ -1738,8 +1883,8 @@ system exit idle state, send out one LTR exit message. 
/* efuse layout refer to http://redmine.motor-comm.com/issues/3856 */ #define EFUSE_FISRT_UPDATE_ADDR 255 #define EFUSE_SECOND_UPDATE_ADDR 209 -#define FUXI_EFUSE_MAX_ENTRY 39 -#define FUXI_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 +#define FXGMAC_EFUSE_MAX_ENTRY 39 +#define FXGMAC_EFUSE_MAX_ENTRY_UNDER_LED_COMMON 24 #define EFUSE_PATCH_ADDR_START_BYTE 0 #define EFUSE_PATCH_DATA_START_BYTE 2 #define EFUSE_REGION_A_B_LENGTH 18 @@ -1815,7 +1960,15 @@ system exit idle state, send out one LTR exit message. #define PM_CTRLSTAT_PME_STAT_POS 15 #define PM_CTRLSTAT_PME_STAT_LEN 1 -#define REG_DEVICE_CTRL1 0x78 +#define REG_DEVICE_CTRL1 0x78 +#define DEVICE_CTRL1_MPS_POS 5 /* MPS: max payload size */ +#define DEVICE_CTRL1_MPS_LEN 3 +#define DEVICE_CTRL1_MPS_128B 0 +#define DEVICE_CTRL1_MPS_256B 1 +#define DEVICE_CTRL1_MPS_512B 2 +#define DEVICE_CTRL1_MPS_1024B 3 +#define DEVICE_CTRL1_MPS_2048B 4 +#define DEVICE_CTRL1_MPS_4096B 5 #define DEVICE_CTRL1_CONTROL_POS 0 #define DEVICE_CTRL1_CONTROL_LEN 16 #define DEVICE_CTRL1_STATUS_POS 16 @@ -1891,4 +2044,22 @@ system exit idle state, send out one LTR exit message. 
#define POWER_EIOS_POS 7 #define POWER_EIOS_LEN 1 +#define REG_ACK_LATENCY_RELAY_TIMER 0x700 +#define REG_ACK_LATENCY_TIMER_POS 0 +#define REG_ACK_LATENCY_TIMER_LEN 16 +#define REG_ACK_LATENCY_TIMER_VAL 0x20 + +#define REG_L1SUB_TIMING 0xb44 +#define L1SUB_T_PCLKACK_LOW_POS 6 +#define L1SUB_T_PCLKACK_LOW_LEN 2 +#define L1SUB_T_PCLKACK_HIGH_POS 9 +#define L1SUB_T_PCLKACK_HIGH_LEN 5 + +#define REG_CORRECTABLE_ERROR_MASK_REG 0x114 +#define REG_CORRECTABLE_ERROR_MASK_POS 0 +#define REG_CORRECTABLE_ERROR_MASK_LEN 16 + +#define AISTONEID_137D1D05_ADJUST_SI 0x137d1d05 +#define KX_SERIAL_CPU_TX_PATCH 0x00011F0A + #endif /* __FUXI_GMAC_REG_H__ */ diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h index ea01ebdadc4e3..b729d70a09921 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-gmac.h @@ -12,17 +12,17 @@ #define FXGMAC_DRV_NAME "yt6801" -#define FXGMAC_DRV_DESC "Motorcomm FUXI GMAC Driver" +#define FXGMAC_DRV_DESC "Motorcomm YT6801 Gigabit Ethernet Driver" -#define FUXI_MAC_REGS_OFFSET 0x2000 +#define FXGMAC_MAC_REGS_OFFSET 0x2000 /* 1: in normal D0 state, turn off ephy link change interrupt. 
*/ -#define FUXI_EPHY_INTERRUPT_D0_OFF 0 +#define FXGMAC_EPHY_INTERRUPT_D0_OFF 0 /* 1:when rec buffer is not enough, to create rbd and rec buffer, * but the rdb need to be continus with the intialized rdb, so * close the feature */ -#define FUXI_ALLOC_NEW_RECBUFFER 0 +#define FXGMAC_ALLOC_NEW_RECBUFFER 0 #define RESUME_MAX_TIME 3000000 #define PHY_LINK_TIMEOUT 3000 @@ -41,7 +41,7 @@ #define FXGMAX_ASPM_WAR_EN /* Descriptor related parameters */ -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED #define FXGMAC_TX_DESC_CNT 1024 #else /* 256 to make sure the tx ring is in the 4k range when @@ -126,6 +126,10 @@ #define FXGMAC_POWER_STATE_DOWN 0 #define FXGMAC_POWER_STATE_UP 1 +#define FXGMAC_DATA_WIDTH 128 + +#define FXGMAC_WOL_WAIT_TIME 0 + struct wol_bitmap_pattern { u32 flags; u32 pattern_size; @@ -352,18 +356,6 @@ struct fxgmac_desc_data { struct fxgmac_rx_desc_data rx; unsigned int mapped_as_page; - - /* Incomplete receive save location. If the budget is exhausted - * or the last descriptor (last normal descriptor or a following - * context descriptor) has not been DMA'd yet the current state - * of the receive processing needs to be saved. 
- */ - unsigned int state_saved; - struct { - struct sk_buff *skb; - unsigned int len; - unsigned int error; - } state; }; struct fxgmac_ring { @@ -378,7 +370,11 @@ struct fxgmac_ring { /* Array of descriptor data corresponding the DMA descriptor * (always use the FXGMAC_GET_DESC_DATA macro to access this data) */ +#ifdef FXGMAC_USE_STATIC_ALLOC + struct fxgmac_desc_data desc_data_head[FXGMAC_RX_DESC_CNT]; +#else struct fxgmac_desc_data *desc_data_head; +#endif /* Page allocation for RX buffers */ struct fxgmac_page_alloc rx_hdr_pa; @@ -419,7 +415,7 @@ struct fxgmac_channel { u32 dma_irq; FXGMAC_CHANNEL_OF_PLATFORM expansion; - unsigned int saved_ier; + u32 saved_ier; unsigned int tx_timer_active; @@ -438,7 +434,7 @@ struct fxphy_ag_adv { }; struct fxgmac_desc_ops { - int (*alloc_channles_and_rings)(struct fxgmac_pdata *pdata); + int (*alloc_channels_and_rings)(struct fxgmac_pdata *pdata); void (*free_channels_and_rings)(struct fxgmac_pdata *pdata); int (*map_tx_skb)(struct fxgmac_channel *channel, struct sk_buff *skb); int (*map_rx_buffer)(struct fxgmac_pdata *pdata, @@ -447,7 +443,14 @@ struct fxgmac_desc_ops { void (*unmap_desc_data)(struct fxgmac_pdata *pdata, struct fxgmac_desc_data *desc_data); void (*tx_desc_init)(struct fxgmac_pdata *pdata); - void (*rx_desc_init)(struct fxgmac_pdata *pdata); + int (*rx_desc_init)(struct fxgmac_pdata *pdata); + /* For descriptor related operation */ + void (*tx_desc_init_channel)(struct fxgmac_channel *channel); + void (*rx_desc_init_channel)(struct fxgmac_channel *channel); + void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); + void (*rx_desc_reset)(struct fxgmac_pdata *pdata, + struct fxgmac_desc_data *desc_data, + unsigned int index); }; struct fxgmac_hw_ops { @@ -455,9 +458,9 @@ struct fxgmac_hw_ops { int (*exit)(struct fxgmac_pdata *pdata); void (*save_nonstick_reg)(struct fxgmac_pdata *pdata); void (*restore_nonstick_reg)(struct fxgmac_pdata *pdata); - int (*set_gmac_register)(struct fxgmac_pdata *pdata, u8 
*address, + int (*set_gmac_register)(struct fxgmac_pdata *pdata, IOMEM address, unsigned int data); - u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, u8 *address); + u32 (*get_gmac_register)(struct fxgmac_pdata *pdata, IOMEM address); void (*esd_restore_pcie_cfg)(struct fxgmac_pdata *pdata); int (*tx_complete)(struct fxgmac_dma_desc *dma_desc); @@ -468,6 +471,8 @@ struct fxgmac_hw_ops { void (*disable_rx)(struct fxgmac_pdata *pdata); void (*enable_channel_rx)(struct fxgmac_pdata *pdata, unsigned int queue); + void (*enable_rx_tx_ints)(struct fxgmac_pdata *pdata); + void (*disable_rx_tx_ints)(struct fxgmac_pdata *pdata); int (*enable_int)(struct fxgmac_channel *channel, enum fxgmac_int int_id); @@ -476,13 +481,17 @@ struct fxgmac_hw_ops { void (*set_interrupt_moderation)(struct fxgmac_pdata *pdata); void (*enable_msix_rxtxinterrupt)(struct fxgmac_pdata *pdata); void (*disable_msix_interrupt)(struct fxgmac_pdata *pdata); - void (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); + int (*enable_msix_rxtxphyinterrupt)(struct fxgmac_pdata *pdata); void (*enable_msix_one_interrupt)(struct fxgmac_pdata *pdata, u32 intid); void (*disable_msix_one_interrupt)(struct fxgmac_pdata *pdata, u32 intid); bool (*enable_mgm_interrupt)(struct fxgmac_pdata *pdata); bool (*disable_mgm_interrupt)(struct fxgmac_pdata *pdata); + bool (*enable_source_interrupt)(struct fxgmac_pdata *pdata); + bool (*disable_source_interrupt)(struct fxgmac_pdata *pdata); + int (*dismiss_all_int)(struct fxgmac_pdata *pdata); + void (*clear_misc_int_status)(struct fxgmac_pdata *pdata); void (*dev_xmit)(struct fxgmac_channel *channel); int (*dev_read)(struct fxgmac_channel *channel); @@ -496,26 +505,13 @@ struct fxgmac_hw_ops { /* For MII speed configuration */ int (*config_mac_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_2500_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_1000_speed)(struct fxgmac_pdata *pdata); - int (*set_xlgmii_100_speed)(struct fxgmac_pdata *pdata); int 
(*get_xlgmii_phy_status)(struct fxgmac_pdata *pdata, u32 *speed, bool *link_up, bool link_up_wait_to_complete); /* For descriptor related operation */ - void (*tx_desc_init)(struct fxgmac_channel *channel); - void (*rx_desc_init)(struct fxgmac_channel *channel); - void (*tx_desc_reset)(struct fxgmac_desc_data *desc_data); - void (*rx_desc_reset)(struct fxgmac_pdata *pdata, - struct fxgmac_desc_data *desc_data, - unsigned int index); int (*is_last_desc)(struct fxgmac_dma_desc *dma_desc); int (*is_context_desc)(struct fxgmac_dma_desc *dma_desc); - void (*tx_start_xmit)(struct fxgmac_channel *channel, - struct fxgmac_ring *ring); - void (*set_pattern_data)(struct fxgmac_pdata *pdata); - void (*config_wol)(struct fxgmac_pdata *pdata, int en); /* For Flow Control */ int (*config_tx_flow_control)(struct fxgmac_pdata *pdata); @@ -537,10 +533,10 @@ struct fxgmac_hw_ops { /* For RX coalescing */ int (*config_rx_coalesce)(struct fxgmac_pdata *pdata); int (*config_tx_coalesce)(struct fxgmac_pdata *pdata); - unsigned int (*usec_to_riwt)(struct fxgmac_pdata *pdata, - unsigned int usec); - unsigned int (*riwt_to_usec)(struct fxgmac_pdata *pdata, - unsigned int riwt); + unsigned long (*usec_to_riwt)(struct fxgmac_pdata *pdata, + unsigned int usec); + unsigned long (*riwt_to_usec)(struct fxgmac_pdata *pdata, + unsigned int riwt); /* For RX and TX threshold config */ int (*config_rx_threshold)(struct fxgmac_pdata *pdata, @@ -556,10 +552,11 @@ struct fxgmac_hw_ops { int (*config_osp_mode)(struct fxgmac_pdata *pdata); /* For RX and TX PBL config */ + u32 (*calculate_max_checksum_size)(struct fxgmac_pdata *pdata); int (*config_rx_pbl_val)(struct fxgmac_pdata *pdata); - int (*get_rx_pbl_val)(struct fxgmac_pdata *pdata); + u32 (*get_rx_pbl_val)(struct fxgmac_pdata *pdata); int (*config_tx_pbl_val)(struct fxgmac_pdata *pdata); - int (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); + u32 (*get_tx_pbl_val)(struct fxgmac_pdata *pdata); int (*config_pblx8)(struct fxgmac_pdata *pdata); /* For 
MMC statistics */ @@ -609,7 +606,7 @@ struct fxgmac_hw_ops { int (*set_wake_pattern_mask)(struct fxgmac_pdata *pdata, u32 filter_index, u8 register_index, u32 Data); -#if defined(FUXI_PM_WPI_READ_FEATURE_EN) && FUXI_PM_WPI_READ_FEATURE_EN +#if FXGMAC_PM_WPI_READ_FEATURE_ENABLED void (*get_wake_packet_indication)(struct fxgmac_pdata *pdata, int *wake_reason, u32 *wake_pattern_number, @@ -630,7 +627,7 @@ struct fxgmac_hw_ops { void (*enable_phy_sleep)(struct fxgmac_pdata *pdata); void (*phy_green_ethernet)(struct fxgmac_pdata *pdata); void (*phy_eee_feature)(struct fxgmac_pdata *pdata); - int (*get_ephy_state)(struct fxgmac_pdata *pdata); + u32 (*get_ephy_state)(struct fxgmac_pdata *pdata); int (*write_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 data); int (*read_ephy_reg)(struct fxgmac_pdata *pdata, u32 val, u32 *data); int (*set_ephy_autoneg_advertise)(struct fxgmac_pdata *pdata, @@ -647,7 +644,13 @@ struct fxgmac_hw_ops { int (*diag_sanity_check)(struct fxgmac_pdata *pdata); int (*write_rss_lookup_table)(struct fxgmac_pdata *pdata); int (*get_rss_hash_key)(struct fxgmac_pdata *pdata, u8 *key_buf); +#ifdef FXGMAC_WOL_INTEGRATED_WOL_PARAMETER void (*config_power_down)(struct fxgmac_pdata *pdata, unsigned int wol); +#else + void (*config_power_down)(struct fxgmac_pdata *pdata, + unsigned int offloadcount, bool magic_en, + bool remote_pattern_en); +#endif void (*config_power_up)(struct fxgmac_pdata *pdata); unsigned char (*set_suspend_int)(void *pdata); void (*set_resume_int)(struct fxgmac_pdata *pdata); @@ -669,18 +672,21 @@ struct fxgmac_hw_ops { unsigned int enable); /* efuse relevant operation. */ - bool (*read_patch_from_efuse)(struct fxgmac_pdata *pdata, u32 offset, - u32 *value); /* read patch per index. */ bool (*read_patch_from_efuse_per_index)( struct fxgmac_pdata *pdata, u8 index, u32 *offset, u32 *value); /* read patch per index. 
*/ + bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, + u8 *mac_addr, u32 *subsys, + u32 *revid); + bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); + /* read patch per index. */ + bool (*read_patch_from_efuse)(struct fxgmac_pdata *pdata, u32 offset, + u32 *value); bool (*write_patch_to_efuse)(struct fxgmac_pdata *pdata, u32 offset, u32 value); bool (*write_patch_to_efuse_per_index)(struct fxgmac_pdata *pdata, u8 index, u32 offset, u32 value); - bool (*read_mac_subsys_from_efuse)(struct fxgmac_pdata *pdata, - u8 *mac_addr, u32 *subsys, - u32 *revid); bool (*write_mac_subsys_to_efuse)(struct fxgmac_pdata *pdata, u8 *mac_addr, u32 *subsys, u32 *revid); @@ -689,8 +695,6 @@ struct fxgmac_hw_ops { bool (*write_mac_addr_to_efuse)(struct fxgmac_pdata *pdata, u8 *mac_addr); bool (*efuse_load)(struct fxgmac_pdata *pdata); - bool (*read_efuse_data)(struct fxgmac_pdata *pdata, u32 offset, - u32 *value); bool (*write_oob)(struct fxgmac_pdata *pdata); bool (*write_led)(struct fxgmac_pdata *pdata, u32 value); bool (*read_led_config)(struct fxgmac_pdata *pdata); @@ -698,18 +702,7 @@ struct fxgmac_hw_ops { int (*pcie_init)(struct fxgmac_pdata *pdata, bool ltr_en, bool aspm_l1ss_en, bool aspm_l1_en, bool aspm_l0s_en); - void (*trigger_pcie)( - struct fxgmac_pdata *pdata, - u32 code); /* To trigger pcie sniffer for analysis. 
*/ -#ifdef DPDK - int (*phy_init)(struct fxgmac_pdata *); - int (*phy_start)(struct fxgmac_pdata *); - void (*phy_stop)(struct fxgmac_pdata *); - void (*phy_status)(struct fxgmac_pdata *); - void (*an_isr)( - struct fxgmac_pdata - *); /* phy_if->an_isr For single interrupt support */ -#endif + void (*trigger_pcie)(struct fxgmac_pdata *pdata, u32 code); }; /* This structure contains flags that indicate what hardware features @@ -717,47 +710,47 @@ struct fxgmac_hw_ops { */ struct fxgmac_hw_features { /* HW Version */ - unsigned int version; + u32 version; /* HW Feature Register0 */ - unsigned int phyifsel; /* PHY interface support */ - unsigned int vlhash; /* VLAN Hash Filter */ - unsigned int sma; /* SMA(MDIO) Interface */ - unsigned int rwk; /* PMT remote wake-up packet */ - unsigned int mgk; /* PMT magic packet */ - unsigned int mmc; /* RMON module */ - unsigned int aoe; /* ARP Offload */ - unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */ - unsigned int eee; /* Energy Efficient Ethernet */ - unsigned int tx_coe; /* Tx Checksum Offload */ - unsigned int rx_coe; /* Rx Checksum Offload */ - unsigned int addn_mac; /* Additional MAC Addresses */ - unsigned int ts_src; /* Timestamp Source */ - unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */ + u32 phyifsel; /* PHY interface support */ + u32 vlhash; /* VLAN Hash Filter */ + u32 sma; /* SMA(MDIO) Interface */ + u32 rwk; /* PMT remote wake-up packet */ + u32 mgk; /* PMT magic packet */ + u32 mmc; /* RMON module */ + u32 aoe; /* ARP Offload */ + u32 ts; /* IEEE 1588-2008 Advanced Timestamp */ + u32 eee; /* Energy Efficient Ethernet */ + u32 tx_coe; /* Tx Checksum Offload */ + u32 rx_coe; /* Rx Checksum Offload */ + u32 addn_mac; /* Additional MAC Addresses */ + u32 ts_src; /* Timestamp Source */ + u32 sa_vlan_ins; /* Source Address or VLAN Insertion */ /* HW Feature Register1 */ - unsigned int rx_fifo_size; /* MTL Receive FIFO Size */ - unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */ - 
unsigned int adv_ts_hi; /* Advance Timestamping High Word */ - unsigned int dma_width; /* DMA width */ - unsigned int dcb; /* DCB Feature */ - unsigned int sph; /* Split Header Feature */ - unsigned int tso; /* TCP Segmentation Offload */ - unsigned int dma_debug; /* DMA Debug Registers */ - unsigned int rss; /* Receive Side Scaling */ - unsigned int tc_cnt; /* Number of Traffic Classes */ - unsigned int avsel; /* AV Feature Enable */ - unsigned int ravsel; /* Rx Side Only AV Feature Enable */ - unsigned int hash_table_size; /* Hash Table Size */ - unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */ + u32 rx_fifo_size; /* MTL Receive FIFO Size */ + u32 tx_fifo_size; /* MTL Transmit FIFO Size */ + u32 adv_ts_hi; /* Advance Timestamping High Word */ + u32 dma_width; /* DMA width */ + u32 dcb; /* DCB Feature */ + u32 sph; /* Split Header Feature */ + u32 tso; /* TCP Segmentation Offload */ + u32 dma_debug; /* DMA Debug Registers */ + u32 rss; /* Receive Side Scaling */ + u32 tc_cnt; /* Number of Traffic Classes */ + u32 avsel; /* AV Feature Enable */ + u32 ravsel; /* Rx Side Only AV Feature Enable */ + u32 hash_table_size; /* Hash Table Size */ + u32 l3l4_filter_num; /* Number of L3-L4 Filters */ /* HW Feature Register2 */ - unsigned int rx_q_cnt; /* Number of MTL Receive Queues */ - unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */ - unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */ - unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */ - unsigned int pps_out_num; /* Number of PPS outputs */ - unsigned int aux_snap_num; /* Number of Aux snapshot inputs */ + u32 rx_q_cnt; /* Number of MTL Receive Queues */ + u32 tx_q_cnt; /* Number of MTL Transmit Queues */ + u32 rx_ch_cnt; /* Number of DMA Receive Channels */ + u32 tx_ch_cnt; /* Number of DMA Transmit Channels */ + u32 pps_out_num; /* Number of PPS outputs */ + u32 aux_snap_num; /* Number of Aux snapshot inputs */ /* HW Feature Register3 */ u32 hwfr3; @@ -799,6 +792,10 @@ struct 
fxgmac_pdata { unsigned int rx_desc_count; unsigned int tx_q_count; unsigned int rx_q_count; +#ifdef FXGMAC_USE_STATIC_ALLOC + struct fxgmac_channel channel[FXGMAC_MAX_DMA_CHANNELS]; + struct fxgmac_ring ring[FXGMAC_MAX_DMA_CHANNELS_PLUS_1TX]; +#endif /* Tx/Rx common settings */ unsigned int pblx8; @@ -808,7 +805,7 @@ struct fxgmac_pdata { unsigned int tx_threshold; unsigned int tx_pbl; unsigned int tx_osp_mode; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED /* for tx hang checking. 20211227 */ unsigned int tx_hang_restart_queuing; #endif @@ -823,7 +820,7 @@ struct fxgmac_pdata { unsigned int tx_frames; /* Rx coalescing settings */ - unsigned int rx_riwt; + unsigned long rx_riwt; unsigned int rx_usecs; unsigned int rx_frames; @@ -882,7 +879,9 @@ struct fxgmac_pdata { int phy_speed; int phy_duplex; int phy_autoeng; - + int phy_green_ethernet; + int phy_eee; + int phy_disablesmartspeed; char drv_name[32]; char drv_ver[32]; @@ -894,6 +893,9 @@ struct fxgmac_pdata { FXGMAC_PDATA_OF_PLATFORM expansion; u32 pcie_link_status; + u32 mgmt_phy_val; + + u32 support_10m_link; }; #define FXGMAC_FLAG_MSI_CAPABLE ((u32)(1 << 0)) /* bit0 */ @@ -915,6 +917,24 @@ struct fxgmac_pdata { #define FXGMAC_FLAG_LEGACY_IRQ_FREE_LEN 1 #define FXGMAC_FLAG_LEGACY_NAPI_FREE_POS 30 /* bit30 */ #define FXGMAC_FLAG_LEGACY_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_MISC_IRQ_FREE_POS 29 +#define FXGMAC_FLAG_MISC_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_MISC_NAPI_FREE_POS 28 +#define FXGMAC_FLAG_MISC_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_TX_IRQ_FREE_POS 27 +#define FXGMAC_FLAG_TX_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_TX_NAPI_FREE_POS 26 +#define FXGMAC_FLAG_TX_NAPI_FREE_LEN 1 +#define FXGMAC_FLAG_RX_IRQ_FREE_POS 22 +#define FXGMAC_FLAG_RX_IRQ_FREE_LEN 4 +#define FXGMAC_FLAG_PER_CHAN_RX_IRQ_FREE_LEN 1 +#define FXGMAC_FLAG_RX_NAPI_FREE_POS 18 +#define FXGMAC_FLAG_RX_NAPI_FREE_LEN 4 +#define FXGMAC_FLAG_PER_CHAN_RX_NAPI_FREE_LEN 1 + +#ifndef FXGMAC_FAKE_4_TX_QUEUE_ENABLED +#define 
FXGMAC_FAKE_4_TX_QUEUE_ENABLED 0 +#endif void fxgmac_init_desc_ops(struct fxgmac_desc_ops *desc_ops); void fxgmac_init_hw_ops(struct fxgmac_hw_ops *hw_ops); diff --git a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h index 1a40267e1fa2e..bc68189e54949 100644 --- a/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h +++ b/drivers/net/ethernet/motorcomm/yt6801/fuxi-os.h @@ -6,32 +6,54 @@ #define __FUXI_OS_H__ #include +#include +#include +#include #include #include -#include +#include +#include #include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #ifdef CONFIG_PCI_MSI #include #endif -#include -#include -#include -#include -#include -#include -#include -#include +#include + #include + #include "fuxi-dbg.h" struct fxgmac_ring; struct fxgmac_pdata; +struct fxgmac_channel; -#define FXGMAC_DRV_VERSION "1.0.27" +#define FXGMAC_DRV_VERSION "1.0.31" #define PCIE_LP_ASPM_L0S 1 #define PCIE_LP_ASPM_L1 2 @@ -50,54 +72,42 @@ struct fxgmac_pdata; #define FXGMAC_TEST_MAC_HEAD_LEN 14 -#define FUXI_PM_WPI_READ_FEATURE_EN 1 +#define FXGMAC_PM_WPI_READ_FEATURE_ENABLED 1 #define RSS_Q_COUNT 4 -#define FXGMAC_TX_HANG_TIMER_EN 0 -/* only for debug. 
for normal run, pls keep them both 0 - * 0: use default tx q; other: specify txq-1: 1 txq; - */ -#define FXGMAC_NUM_OF_TX_Q_USED 0 -/* 1 to enable a dummy tx, ie, no tail for gmac; */ -#define FXGMAC_DUMMY_TX_DEBUG 0 +#define FXGMAC_TX_HANG_TIMER_ENABLED 0 /* 1 to trigger(write reg 0x1000) for sniffer stop */ -#define FXGMAC_TRIGGER_TX_HANG 0 +#define FXGMAC_TRIGGER_TX_HANG 0 /* driver feature configuration */ -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED /* 0: check hw current desc; 1: check software dirty */ -#define FXGMAC_TX_HANG_CHECH_DIRTY 0 +#define FXGMAC_TX_HANG_CHECH_DIRTY 0 #endif -/* 1:poll tx of 4 channels; 0: since only 1 tx channel supported in this - * version, poll ch 0 always. - */ - -#define FXGMAC_FULL_TX_CHANNEL 0 - -#ifdef CONFIG_ARM64 -/* when you want to run this driver on 64bit arm, you should open this, - * otherwise dma's mask cannot be set successfully. - */ -#define FUXI_DMA_BIT_MASK 64 -#endif +#define FXGMAC_DMA_BIT_MASK64 64 +#define FXGMAC_DMA_BIT_MASK32 32 #ifdef CONFIG_PCI_MSI /* should be same as FXGMAC_MAX_DMA_CHANNELS + 1 tx_irq */ -#define FXGMAC_MAX_MSIX_Q_VECTORS (FXGMAC_MSIX_Q_VECTORS + 1) -#define FXGMAC_MSIX_CH0RXDIS_EN 0 /* set to 1 for ch0 unbalance fix; */ -#define FXGMAC_MSIX_INTCTRL_EN 1 +#define FXGMAC_MAX_MSIX_Q_VECTORS (FXGMAC_MSIX_Q_VECTORS + 1) +#define FXGMAC_MSIX_CH0RXDIS_ENABLED 0 //set to 1 for ch0 unbalance fix; +#define FXGMAC_MSIX_INTCTRL_EN 1 -#define FXGMAC_PHY_INT_NUM 1 -#define FXGMAC_MSIX_INT_NUMS (FXGMAC_MAX_MSIX_Q_VECTORS + FXGMAC_PHY_INT_NUM) -#else /* for case of no CONFIG_PCI_MSI */ +#ifdef FXGMAC_MISC_ENABLED +#define FXGMAC_MISC_INT_NUM 1 +#else +#define FXGMAC_MISC_INT_NUM 0 +#endif +#define FXGMAC_MSIX_INT_NUMS (FXGMAC_MAX_MSIX_Q_VECTORS + FXGMAC_MISC_INT_NUM) +#else /* NO modification needed! 
for non-MSI, set to 0 always */ -#define FXGMAC_MSIX_CH0RXDIS_EN 0 -#define FXGMAC_MSIX_INTCTRL_EN 0 +#define FXGMAC_MSIX_CH0RXDIS_ENABLED 0 +#define FXGMAC_MSIX_INTCTRL_EN 0 #endif -/*RSS features*/ +/* RSS features */ #ifdef FXGMAC_ONE_CHANNEL #define FXGMAC_RSS_FEATURE_ENABLED 0 /* 1:enable rss ; 0: rss not included. */ #else @@ -128,14 +138,60 @@ struct fxgmac_pdata; /*sanity check*/ #define FXGMAC_SANITY_CHECK_ENABLED 0 /* 1:enable health checking; */ -/*vlan id filter*/ -#define FXGMAC_FILTER_SINGLE_VLAN_ENABLED 1 /* 1:enable health checking; */ +/* vlan id filter */ +#define FXGMAC_FILTER_SINGLE_VLAN_ENABLED 0 + +/* Linux driver implement VLAN HASH Table feature to support mutliple VLAN feautre */ #define FXGMAC_FILTER_MULTIPLE_VLAN_ENABLED 1 -#define FUXI_MAC_HASH_TABLE 1 + +/* Linux driver implement MAC HASH Table feature */ +#define FXGMAC_MAC_HASH_TABLE 1 + +/* Linux driver implement write multiple mac addr */ #define FXGMAC_FILTER_MULTIPLE_MAC_ADDR_ENABLED 1 -#define FUXI_MISC_INT_HANDLE_FEATURE_EN 1 -#define HAVE_FXGMAC_DEBUG_FS +/* Linux driver disable MISC Interrupt */ +#define FXGMAC_MISC_INT_HANDLE_FEATURE_ENABLED 0 + +#define FXGMAC_ESD_RESTORE_PCIE_CFG + +#define FXGMAC_WOL_INTEGRATED_WOL_PARAMETER + +#define FXGMAC_LINK_SPEED_CHECK_PHY_LINK + +#define FXGMAC_FLUSH_TX_CHECK_ENABLED + +#define FXGMAC_INTERRUPT_TX_INTERVAL + +#define FXGMAC_INTERRUPT_RX_INTERVAL + +#define FXGMAC_WAIT_TX_STOP + +#define FXGMAC_WAIT_RX_STOP_BY_PRXQ_RXQSTS + +#define FXGMAC_USE_DEFAULT_RSS_KEY_TBALE + +#define FXGMAC_RX_VLAN_FILTERING_ENABLED \ + (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + +#define FXGMAC_NETDEV_PR_MODE_ENABLED \ + ((pdata->netdev->flags & IFF_PROMISC) != 0) +#define FXGMAC_NETDEV_AM_MODE_ENABLED \ + ((pdata->netdev->flags & IFF_ALLMULTI) != 0) +#define FXGMAC_NETDEV_MU_MODE_ENABLED \ + ((pdata->netdev->flags & IFF_MULTICAST) != 0) +#define FXGMAC_NETDEV_BD_MODE_ENABLED \ + ((pdata->netdev->flags & IFF_BROADCAST) != 0) + +#define 
FXGMAC_RX_CHECKSUM_ENABLED (pdata->netdev->features & NETIF_F_RXCSUM) + +#define TEST_MAC_HEAD 14 +#define TEST_TCP_HEAD_LEN_OFFSET 12 +#define TEST_TCP_OFFLOAD_LEN_OFFSET 48 +#define TEST_TCP_FIX_HEAD_LEN 24 +#define TEST_TCP_MSS_OFFSET 56 + +#define DF_MAX_NIC_NUM 16 #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &(((TYPE *)0)->MEMBER)) @@ -219,7 +275,6 @@ struct fxgmac_pdata; #define DbgPrintOidName(_Oid) #define DbgPrintAddress(_pAddress) -#define fxgmac_dump_buffer(_skb, _len, _tx_rx) #define DumpLine(_p, _cbLine, _fAddress, _ulGroup) #ifndef FXGMAC_DEBUG @@ -231,21 +286,31 @@ struct fxgmac_pdata; #define FXGMAC_PR(fmt, args...) \ pr_alert("[%s,%d]:" fmt, __func__, __LINE__, ## args) -#define DPRINTK printk +/* If you want to continue a line, you NEED to use KERN_CONT. + * That has always been true. It hasn't always been enforced, though. + * If you do two printk's and the second one doesn't say "I'm a continuation", + * the printk logic assumes you're just confused and wanted two lines. + */ +#define DPRINTK(fmt, args...) printk(KERN_CONT fmt, ##args) + #else -#define FXGMAC_PR(x...) do { } while (0) -#define DPRINTK(x...) +#define FXGMAC_PR(x...) \ + do { \ + } while (0) +#define DPRINTK(x...) 
\ + do { \ + } while (0) #endif #define IOC_MAGIC 'M' #define IOC_MAXNR (0x80 + 5) -#define FUXI_DFS_IOCTL_DEVICE_INACTIVE 0x10001 -#define FUXI_DFS_IOCTL_DEVICE_RESET 0x10002 -#define FUXI_DFS_IOCTL_DIAG_BEGIN 0x10003 -#define FUXI_DFS_IOCTL_DIAG_END 0x10004 -#define FUXI_DFS_IOCTL_DIAG_TX_PKT 0x10005 -#define FUXI_DFS_IOCTL_DIAG_RX_PKT 0x10006 +#define FXGMAC_DFS_IOCTL_DEVICE_INACTIVE 0x10001 +#define FXGMAC_DFS_IOCTL_DEVICE_RESET 0x10002 +#define FXGMAC_DFS_IOCTL_DIAG_BEGIN 0x10003 +#define FXGMAC_DFS_IOCTL_DIAG_END 0x10004 +#define FXGMAC_DFS_IOCTL_DIAG_TX_PKT 0x10005 +#define FXGMAC_DFS_IOCTL_DIAG_RX_PKT 0x10006 #define FXGMAC_EFUSE_UPDATE_LED_CFG 0x10007 #define FXGMAC_EFUSE_WRITE_LED 0x10008 @@ -262,11 +327,11 @@ struct fxgmac_pdata; #define FXGMAC_SET_MAC_DATA 0x10012 #define FXGMAC_GET_SUBSYS_ID 0x10013 #define FXGMAC_SET_SUBSYS_ID 0x10014 -#define FXGMAC_GET_GMAC_REG 0x10015 -#define FXGMAC_SET_GMAC_REG 0x10016 +#define FXGMAC_GET_REG 0x10015 +#define FXGMAC_SET_REG 0x10016 #define FXGMAC_GET_PHY_REG 0x10017 #define FXGMAC_SET_PHY_REG 0x10018 -#define FXGMAC_EPHYSTATISTICS 0x10019 +#define FXGMAC_EPHY_STATISTICS 0x10019 #define FXGMAC_GET_STATISTICS 0x1001A #define FXGMAC_GET_PCIE_LOCATION 0x1001B @@ -292,47 +357,65 @@ struct fxgmac_pdata; #define PCI_CAP_ID_MSIX_ENABLE_POS 0x1F #define PCI_CAP_ID_MSIX_ENABLE_LEN 0x1 -#ifndef fallthrough -#if __has_attribute(__fallthrough__) -# define fallthrough __attribute__((__fallthrough__)) -#else -# define fallthrough do {} while (0) /* fallthrough */ +#define FXGMAC_IRQ_ENABLE 0x1 +#define FXGMAC_IRQ_DISABLE 0x0 +#define FXGMAC_NAPI_ENABLE 0x1 +#define FXGMAC_NAPI_DISABLE 0x0 + +#define PHY_POWER_DOWN 1 +#define PHY_POWER_UP 0 + +#define FXGMAC_MMC_IER_ALL_DEFAULT 0 + +#define FXGMAC_RX_DESC_INIT_CHECK_ENABLED + +/* #define FXGMAC_ESD_CHECK_ENABLED */ +#ifdef FXGMAC_ESD_CHECK_ENABLED +#define FXGMAC_ESD_INTERVAL (5 * HZ) +#define FXGMAC_ESD_ERROR_THRESHOLD ((u64)4000000000) +#define FXGMAC_PCIE_LINK_DOWN 0xFFFFFFFF 
+#define FXGMAC_PCIE_RECOVER_TIMES 5000 +#define FXGMAC_PCIE_IO_MEM_MASTER_ENABLE 0x7 #endif + +//#define FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED +#define FXGMAC_LOOPBACK_CHECK_INTERVAL (5 * HZ) +#define FXGMAC_PHY_LOOPBACK_DETECT_THRESOLD 2 +#endif + +//#define FXGMAC_ASPM_ENABLED +#ifdef FXGMAC_ASPM_ENABLED +#define FXGMAC_CHECK_DEV_STATE +#define FXGMAC_ASPM_INTERVAL (20 * HZ) #endif +#define UDP_RSS_FLAGS (BIT(MAC_RSSCR_UDP4TE_POS) | BIT(MAC_RSSCR_UDP6TE_POS)) + +#define MF90_SUB_VENTOR_ID 0x17aa +#define MF90_SUB_DEVICE_ID 0x3509 #pragma pack(1) /* it's better to make this struct's size to 128byte. */ -struct pattern_packet{ - u8 ether_daddr[ETH_ALEN]; - u8 ether_saddr[ETH_ALEN]; - u16 ether_type; - - __be16 ar_hrd; /* format of hardware address */ - __be16 ar_pro; /* format of protocol */ - unsigned char ar_hln; /* length of hardware address */ - unsigned char ar_pln; /* length of protocol address */ - __be16 ar_op; /* ARP opcode (command) */ - unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ - unsigned char ar_sip[4]; /* sender IP address */ - unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ - unsigned char ar_tip[4]; /* target IP address */ - - u8 reverse[86]; +struct pattern_packet { + u8 ether_daddr[ETH_ALEN]; + u8 ether_saddr[ETH_ALEN]; + u16 ether_type; + + __be16 ar_hrd; /* format of hardware address */ + __be16 ar_pro; /* format of protocol */ + unsigned char ar_hln; /* length of hardware address */ + unsigned char ar_pln; /* length of protocol address */ + __be16 ar_op; /* ARP opcode (command) */ + unsigned char ar_sha[ETH_ALEN]; /* sender hardware address */ + unsigned char ar_sip[4]; /* sender IP address */ + unsigned char ar_tha[ETH_ALEN]; /* target hardware address */ + unsigned char ar_tip[4]; /* target IP address s*/ + + u8 reverse[86]; }; #pragma pack() -typedef enum { - CURRENT_STATE_SHUTDOWN = 0, - CURRENT_STATE_RESUME = 1, - CURRENT_STATE_INIT = 2, - CURRENT_STATE_SUSPEND = 
3, - CURRENT_STATE_CLOSE = 4, - CURRENT_STATE_OPEN = 5, - CURRENT_STATE_RESTART = 6, - CURRENT_STATE_REMOVE = 7, -} CURRENT_STATE; - typedef dma_addr_t DMA_ADDR_T; typedef enum pkt_hash_types RSS_HASH_TYPE; typedef void __iomem *IOMEM; @@ -345,22 +428,22 @@ struct ext_command_buf { }; struct ext_command_mac { - u32 num; + u32 num; union { - u32 val32; - u16 val16; - u8 val8; + u32 val32; + u16 val16; + u8 val8; }; }; struct ext_command_mii { - u16 dev; - u16 num; - u16 val; + u16 dev; + u16 num; + u16 val; }; struct ext_ioctl_data { - u32 cmd_type; + u32 cmd_type; struct ext_command_buf cmd_buf; }; @@ -371,21 +454,21 @@ typedef struct _fxgmac_test_buf { } fxgmac_test_buf, *pfxgmac_test_buf; typedef struct _fxgmac_test_packet { - struct _fxgmac_test_packet *next; - u32 length; /* total length of the packet(buffers) */ - u32 type; /* packet type, vlan, ip checksum, TSO, etc. */ - - fxgmac_test_buf buf[MAX_PKT_BUF]; - fxgmac_test_buf sGList[MAX_PKT_BUF]; - u16 vlanID; - u16 mss; - u32 hash; - u16 cpuNum; - u16 xsum; /* rx, ip-payload checksum */ - u16 csumStart; /* custom checksum offset to the mac-header */ - u16 csumPos; /* custom checksom position (to the mac_header) */ - void *upLevelReserved[4]; - void *lowLevelReserved[4]; + struct _fxgmac_test_packet *next; + u32 length; /* total length of the packet(buffers) */ + u32 type; /* packet type, vlan, ip checksum, TSO, etc. 
*/ + + fxgmac_test_buf buf[MAX_PKT_BUF]; + fxgmac_test_buf sGList[MAX_PKT_BUF]; + u16 vlanID; + u16 mss; + u32 hash; + u16 cpuNum; + u16 xsum; /* rx, ip-payload checksum */ + u16 csumStart; /* custom checksum offset to the mac-header */ + u16 csumPos; /* custom checksom position (to the mac_header) */ + void *upLevelReserved[4]; + void *lowLevelReserved[4]; } fxgmac_test_packet, *pfxgmac_test_packet; typedef struct fxgmac_channel_of_platform { @@ -402,7 +485,7 @@ typedef struct fxgmac_channel_of_platform { struct napi_struct napi_rx; struct timer_list tx_timer; -#if FXGMAC_TX_HANG_TIMER_EN +#if FXGMAC_TX_HANG_TIMER_ENABLED unsigned int tx_hang_timer_active; struct timer_list tx_hang_timer; unsigned int tx_hang_hw_cur; @@ -410,10 +493,10 @@ typedef struct fxgmac_channel_of_platform { } FXGMAC_CHANNEL_OF_PLATFORM; typedef struct per_regisiter_info { - unsigned int size; - unsigned int address; - unsigned int value; - unsigned char data[FXGAMC_MAX_DATA_SIZE]; + unsigned int size; + unsigned int address; + unsigned int value; + unsigned char data[FXGAMC_MAX_DATA_SIZE]; } PER_REG_INFO; /* for FXGMAC_EFUSE_WRITE_PATCH_PER_INDEX, val0 is index, val1 is offset, @@ -425,80 +508,128 @@ typedef struct ext_command_data { u32 val2; } CMD_DATA; +enum fxgmac_task_flag { + FXGMAC_FLAG_TASK_DOWN = 0, + FXGMAC_FLAG_TASK_RESET_PENDING, + FXGMAC_FLAG_TASK_ESD_CHECK_PENDING, + FXGMAC_FLAG_TASK_LINKCHG_CHECK_PENDING, + FXGMAC_FLAG_TASK_MAX +}; + +typedef struct fxgmac_esd_stats { + u32 tx_abort_excess_collisions; + u32 tx_dma_underrun; + u32 tx_lost_crs; + u32 tx_late_collisions; + u32 rx_crc_errors; + u32 rx_align_errors; + u32 rx_runt_errors; + u32 single_collisions; + u32 multi_collisions; + u32 tx_deferred_frames; +} FXGMAC_ESD_STATS; + +typedef enum fxgmac_dev_state { + FXGMAC_DEV_OPEN = 0x0, + FXGMAC_DEV_CLOSE = 0x1, + FXGMAC_DEV_STOP = 0x2, + FXGMAC_DEV_START = 0x3, + FXGMAC_DEV_SUSPEND = 0x4, + FXGMAC_DEV_RESUME = 0x5, + FXGMAC_DEV_PROBE = 0xFF, +} DEV_STATE; + typedef struct 
fxgmac_pdata_of_platform { - u32 cfg_pci_cmd; - u32 cfg_cache_line_size; - u32 cfg_mem_base; - u32 cfg_mem_base_hi; - u32 cfg_io_base; - u32 cfg_int_line; - u32 cfg_device_ctrl1; - u32 cfg_pci_link_ctrl; - u32 cfg_device_ctrl2; - u32 cfg_msix_capability; - - struct work_struct restart_work; - u32 int_flags; /* legacy, msi or msix */ - int phy_irq; + u32 cfg_pci_cmd; + u32 cfg_cache_line_size; + u32 cfg_mem_base; + u32 cfg_mem_base_hi; + u32 cfg_io_base; + u32 cfg_int_line; + u32 cfg_device_ctrl1; + u32 cfg_pci_link_ctrl; + u32 cfg_device_ctrl2; + u32 cfg_msix_capability; + + int pre_phy_speed; + int pre_phy_duplex; + int pre_phy_autoneg; + + struct work_struct restart_work; +#ifdef FXGMAC_ESD_CHECK_ENABLED + struct delayed_work esd_work; + FXGMAC_ESD_STATS esd_stats; + DECLARE_BITMAP(task_flags, FXGMAC_FLAG_TASK_MAX); +#endif + +#ifdef FXGMAC_EPHY_LOOPBACK_DETECT_ENABLED + struct delayed_work loopback_work; + u32 lb_test_flag; // for tool + u32 lb_cable_flag; // for driver + u32 lb_cable_detect_count; // for driver +#endif + +#ifdef FXGMAC_ASPM_ENABLED + struct delayed_work aspm_config_work; + bool aspm_en; + bool aspm_work_active; +#endif + bool recover_from_aspm; + + u32 int_flags; /* legacy, msi or msix */ #ifdef CONFIG_PCI_MSI - struct msix_entry *msix_entries; + struct msix_entry *msix_entries; #endif /* power management and wol*/ - u32 wol; /* wol options */ - unsigned long powerstate; /* power state */ - unsigned int ns_offload_tab_idx; /* for ns-offload table. 2 entries supported. 
*/ - CURRENT_STATE current_state; - netdev_features_t netdev_features; - struct napi_struct napi; - struct napi_struct napi_phy; - u32 mgm_intctrl_val; - bool phy_link; - bool fxgmac_test_tso_flag; - u32 fxgmac_test_tso_seg_num; - u32 fxgmac_test_last_tso_len; - u32 fxgmac_test_packet_len; - volatile u32 fxgmac_test_skb_arr_in_index; - volatile u32 fxgmac_test_skb_arr_out_index; - struct sk_buff *fxgmac_test_skb_array[FXGMAC_MAX_DBG_TEST_PKT]; -#ifdef HAVE_FXGMAC_DEBUG_FS - struct dentry *dbg_adapter; - struct dentry *fxgmac_dbg_root; - char fxgmac_dbg_netdev_ops_buf[FXGMAC_NETDEV_OPS_BUF_LEN]; + u32 wol; + unsigned long powerstate; + unsigned int ns_offload_tab_idx; /*for ns-offload table. 2 entries supported. */ + netdev_features_t netdev_features; + struct napi_struct napi; +#ifdef FXGMAC_MISC_ENABLED + struct napi_struct napi_misc; + int misc_irq; #endif + u8 recover_phy_state; + char misc_irq_name[IFNAMSIZ + 32]; + bool phy_link; + bool fxgmac_test_tso_flag; + u32 fxgmac_test_tso_seg_num; + u32 fxgmac_test_last_tso_len; + u32 fxgmac_test_packet_len; + u32 fxgmac_test_skb_arr_in_index; + u32 fxgmac_test_skb_arr_out_index; + + struct sk_buff *fxgmac_test_skb_array[FXGMAC_MAX_DBG_TEST_PKT]; + DEV_STATE dev_state; + struct mutex mutex; /* mutex */ + struct timer_list phy_poll_tm; } FXGMAC_PDATA_OF_PLATFORM; -void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, - bool tx_rx); -int fxgmac_dismiss_all_int(struct fxgmac_pdata *pdata); - -#ifdef HAVE_FXGMAC_DEBUG_FS -void fxgmac_dbg_adapter_init(struct fxgmac_pdata *pdata); -void fxgmac_dbg_adapter_exit(struct fxgmac_pdata *pdata); -void fxgmac_dbg_init(struct fxgmac_pdata *pdata); -void fxgmac_dbg_exit(struct fxgmac_pdata *pdata); -#endif /* HAVE_FXGMAC_DEBUG_FS */ - void fxgmac_restart_dev(struct fxgmac_pdata *pdata); -long fxgmac_dbg_netdev_ops_ioctl(struct file *file, unsigned int cmd, - unsigned long arg); +long fxgmac_netdev_ops_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); 
-int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg); +int fxgmac_init(struct fxgmac_pdata *pdata, bool save_private_reg); /* for phy interface */ -int fxgmac_ephy_autoneg_ability_get(struct fxgmac_pdata *pdata, - unsigned int *cap_mask); -int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, - int *duplex, int *ret_link, int *media); -int fxgmac_ephy_soft_reset(struct fxgmac_pdata *pdata); -void fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); -void fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); -void fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); - -unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata); + +int fxgmac_ephy_status_get(struct fxgmac_pdata *pdata, int *speed, int *duplex, + int *ret_link, int *media); +int fxgmac_phy_force_mode(struct fxgmac_pdata *pdata); +int fxgmac_phy_force_speed(struct fxgmac_pdata *pdata, int speed); +int fxgmac_phy_force_duplex(struct fxgmac_pdata *pdata, int duplex); +int fxgmac_phy_force_autoneg(struct fxgmac_pdata *pdata, int autoneg); + +int fxgmac_phy_timer_init(struct fxgmac_pdata *pdata); +void fxgmac_phy_timer_destroy(struct fxgmac_pdata *pdata); +void fxgmac_phy_update_link(struct net_device *netdev); + +unsigned int fxgmac_get_netdev_ip4addr(struct fxgmac_pdata *pdata); unsigned char *fxgmac_get_netdev_ip6addr(struct fxgmac_pdata *pdata, - unsigned char *ipval, - unsigned char *ip6addr_solicited, - unsigned int ifa_flag); + unsigned char *ipval, + unsigned char *ip6addr_solicited, + unsigned int ifa_flag); #if FXGMAC_PM_FEATURE_ENABLED void fxgmac_net_powerdown(struct fxgmac_pdata *pdata, unsigned int wol); @@ -512,4 +643,22 @@ void fxgmac_stop(struct fxgmac_pdata *pdata); void fxgmac_free_rx_data(struct fxgmac_pdata *pdata); void fxgmac_free_tx_data(struct fxgmac_pdata *pdata); +void fxgmac_tx_start_xmit(struct fxgmac_channel *channel, + struct fxgmac_ring *ring); +void fxgmac_dev_xmit(struct fxgmac_channel *channel); +void 
fxgmac_config_wol(struct fxgmac_pdata *pdata, int en); +void fxgmac_print_pkt(struct net_device *netdev, struct sk_buff *skb, + bool tx_rx); + +void fxgmac_lock(struct fxgmac_pdata *pdata); +void fxgmac_unlock(struct fxgmac_pdata *pdata); + +void fxgmac_set_phy_link_ksettings(struct fxgmac_pdata *pdata); + +#ifdef FXGMAC_ASPM_ENABLED +void fxgmac_schedule_aspm_config_work(struct fxgmac_pdata *pdata); +void fxgmac_cancel_aspm_config_work(struct fxgmac_pdata *pdata); +bool fxgmac_aspm_action_linkup(struct fxgmac_pdata *pdata); +#endif + #endif /* __FUXI_OS_H__ */