// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#define HCLGE_NAME "hclge"
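/* helpers for generic access to struct hclge_mac_stats: read a u64
 * counter at a byte offset, and derive that offset from a field name
 */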
#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT 100
#define BUF_RESERVE_PERCENT 90
#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME 100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT 1500
/* Get DFX BD number offset */
#define HCLGE_DFX_BIOS_BD_OFFSET 1
#define HCLGE_DFX_SSU_0_BD_OFFSET 2
#define HCLGE_DFX_SSU_1_BD_OFFSET 3
#define HCLGE_DFX_IGU_BD_OFFSET 4
#define HCLGE_DFX_RPU_0_BD_OFFSET 5
#define HCLGE_DFX_RPU_1_BD_OFFSET 6
#define HCLGE_DFX_NCSI_BD_OFFSET 7
#define HCLGE_DFX_RTC_BD_OFFSET 8
#define HCLGE_DFX_PPP_BD_OFFSET 9
#define HCLGE_DFX_RCB_BD_OFFSET 10
#define HCLGE_DFX_TQP_BD_OFFSET 11
#define HCLGE_DFX_SSU_2_BD_OFFSET 12
#define HCLGE_LINK_STATUS_MS 10
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);
static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
static struct workqueue_struct *hclge_wq;
static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
static const u32 cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG,
HCLGE_NIC_CSQ_BASEADDR_H_REG,
HCLGE_NIC_CSQ_DEPTH_REG,
HCLGE_NIC_CSQ_TAIL_REG,
HCLGE_NIC_CSQ_HEAD_REG,
HCLGE_NIC_CRQ_BASEADDR_L_REG,
HCLGE_NIC_CRQ_BASEADDR_H_REG,
HCLGE_NIC_CRQ_DEPTH_REG,
HCLGE_NIC_CRQ_TAIL_REG,
HCLGE_NIC_CRQ_HEAD_REG,
HCLGE_VECTOR0_CMDQ_SRC_REG,
HCLGE_CMDQ_INTR_STS_REG,
HCLGE_CMDQ_INTR_EN_REG,
HCLGE_CMDQ_INTR_GEN_REG};
static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
HCLGE_PF_OTHER_INT_REG,
HCLGE_MISC_RESET_STS_REG,
HCLGE_MISC_VECTOR_INT_STS,
HCLGE_GLOBAL_RESET_REG,
HCLGE_FUN_RST_ING,
HCLGE_GRO_EN_REG};
static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
HCLGE_RING_RX_ADDR_H_REG,
HCLGE_RING_RX_BD_NUM_REG,
HCLGE_RING_RX_BD_LENGTH_REG,
HCLGE_RING_RX_MERGE_EN_REG,
HCLGE_RING_RX_TAIL_REG,
HCLGE_RING_RX_HEAD_REG,
HCLGE_RING_RX_FBD_NUM_REG,
HCLGE_RING_RX_OFFSET_REG,
HCLGE_RING_RX_FBD_OFFSET_REG,
HCLGE_RING_RX_STASH_REG,
HCLGE_RING_RX_BD_ERR_REG,
HCLGE_RING_TX_ADDR_L_REG,
HCLGE_RING_TX_ADDR_H_REG,
HCLGE_RING_TX_BD_NUM_REG,
HCLGE_RING_TX_PRIORITY_REG,
HCLGE_RING_TX_TC_REG,
HCLGE_RING_TX_MERGE_EN_REG,
HCLGE_RING_TX_TAIL_REG,
HCLGE_RING_TX_HEAD_REG,
HCLGE_RING_TX_FBD_NUM_REG,
HCLGE_RING_TX_OFFSET_REG,
HCLGE_RING_TX_EBD_NUM_REG,
HCLGE_RING_TX_EBD_OFFSET_REG,
HCLGE_RING_TX_BD_ERR_REG,
HCLGE_RING_EN_REG};
static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_GL0_REG,
HCLGE_TQP_INTR_GL1_REG,
HCLGE_TQP_INTR_GL2_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
"App Loopback test",
"Serdes serial Loopback test",
"Serdes parallel Loopback test",
"Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
{"mac_tx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
{"mac_rx_mac_pause_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
{"mac_tx_control_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
{"mac_rx_control_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
{"mac_tx_pfc_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
{"mac_tx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
{"mac_tx_pfc_pri1_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
{"mac_tx_pfc_pri2_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
{"mac_tx_pfc_pri3_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
{"mac_tx_pfc_pri4_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
{"mac_tx_pfc_pri5_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
{"mac_tx_pfc_pri6_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
{"mac_tx_pfc_pri7_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
{"mac_rx_pfc_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
{"mac_rx_pfc_pri0_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
{"mac_rx_pfc_pri1_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
{"mac_rx_pfc_pri2_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
{"mac_rx_pfc_pri3_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
{"mac_rx_pfc_pri4_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
{"mac_rx_pfc_pri5_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
{"mac_rx_pfc_pri6_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
{"mac_rx_pfc_pri7_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
{"mac_tx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
{"mac_tx_total_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
{"mac_tx_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
{"mac_tx_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
{"mac_tx_good_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
{"mac_tx_bad_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
{"mac_tx_uni_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
{"mac_tx_multi_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
{"mac_tx_broad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
{"mac_tx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
{"mac_tx_oversize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
{"mac_tx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
{"mac_tx_65_127_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
{"mac_tx_128_255_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
{"mac_tx_256_511_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
{"mac_tx_512_1023_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
{"mac_tx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
{"mac_tx_1519_2047_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
{"mac_tx_2048_4095_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
{"mac_tx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
{"mac_tx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
{"mac_tx_9217_12287_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
{"mac_tx_12288_16383_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
{"mac_tx_1519_max_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
{"mac_tx_1519_max_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
{"mac_rx_total_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
{"mac_rx_total_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
{"mac_rx_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
{"mac_rx_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
{"mac_rx_good_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
{"mac_rx_bad_oct_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
{"mac_rx_uni_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
{"mac_rx_multi_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
{"mac_rx_broad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
{"mac_rx_undersize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
{"mac_rx_oversize_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
{"mac_rx_64_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
{"mac_rx_65_127_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
{"mac_rx_128_255_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
{"mac_rx_256_511_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
{"mac_rx_512_1023_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
{"mac_rx_1024_1518_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
{"mac_rx_1519_2047_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
{"mac_rx_2048_4095_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
{"mac_rx_4096_8191_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
{"mac_rx_8192_9216_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
{"mac_rx_9217_12287_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
{"mac_rx_12288_16383_oct_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
{"mac_rx_1519_max_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
{"mac_rx_1519_max_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
{"mac_tx_fragment_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
{"mac_tx_undermin_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
{"mac_tx_jabber_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
{"mac_tx_err_all_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
{"mac_tx_from_app_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
{"mac_tx_from_app_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
{"mac_rx_fragment_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
{"mac_rx_undermin_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
{"mac_rx_jabber_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
{"mac_rx_fcs_err_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
{"mac_rx_send_app_good_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
{"mac_rx_send_app_bad_pkt_num",
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
{
.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
.ethter_type = cpu_to_le16(ETH_P_LLDP),
.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
.i_port_bitmap = 0x1,
},
};
static const u8 hclge_hash_key[] = {
0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};
static const u32 hclge_dfx_bd_offset_list[] = {
HCLGE_DFX_BIOS_BD_OFFSET,
HCLGE_DFX_SSU_0_BD_OFFSET,
HCLGE_DFX_SSU_1_BD_OFFSET,
HCLGE_DFX_IGU_BD_OFFSET,
HCLGE_DFX_RPU_0_BD_OFFSET,
HCLGE_DFX_RPU_1_BD_OFFSET,
HCLGE_DFX_NCSI_BD_OFFSET,
HCLGE_DFX_RTC_BD_OFFSET,
HCLGE_DFX_PPP_BD_OFFSET,
HCLGE_DFX_RCB_BD_OFFSET,
HCLGE_DFX_TQP_BD_OFFSET,
HCLGE_DFX_SSU_2_BD_OFFSET
};
static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
HCLGE_OPC_DFX_BIOS_COMMON_REG,
HCLGE_OPC_DFX_SSU_REG_0,
HCLGE_OPC_DFX_SSU_REG_1,
HCLGE_OPC_DFX_IGU_EGU_REG,
HCLGE_OPC_DFX_RPU_REG_0,
HCLGE_OPC_DFX_RPU_REG_1,
HCLGE_OPC_DFX_NCSI_REG,
HCLGE_OPC_DFX_RTC_REG,
HCLGE_OPC_DFX_PPP_REG,
HCLGE_OPC_DFX_RCB_REG,
HCLGE_OPC_DFX_TQP_REG,
HCLGE_OPC_DFX_SSU_REG_2
};
static const struct key_info meta_data_key_info[] = {
{ PACKET_TYPE_ID, 6 },
{ IP_FRAGEMENT, 1 },
{ ROCE_TYPE, 1 },
{ NEXT_KEY, 5 },
{ VLAN_NUMBER, 2 },
{ SRC_VPORT, 12 },
{ DST_VPORT, 12 },
{ TUNNEL_PACKET, 1 },
};
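/* For each tuple: bit width, key encoding, and the byte offsets of its
 * value and mask within struct hclge_fd_rule; -1 appears to mean the
 * tuple has no corresponding software field.
 */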
static const struct key_info tuple_key_info[] = {
{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
{ INNER_DST_MAC, 48, KEY_OPT_MAC,
offsetof(struct hclge_fd_rule, tuples.dst_mac),
offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
offsetof(struct hclge_fd_rule, tuples.src_mac),
offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.ether_proto),
offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
{ INNER_L2_RSV, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.l2_user_def),
offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
{ INNER_IP_TOS, 8, KEY_OPT_U8,
offsetof(struct hclge_fd_rule, tuples.ip_tos),
offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
{ INNER_IP_PROTO, 8, KEY_OPT_U8,
offsetof(struct hclge_fd_rule, tuples.ip_proto),
offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
{ INNER_SRC_IP, 32, KEY_OPT_IP,
offsetof(struct hclge_fd_rule, tuples.src_ip),
offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
{ INNER_DST_IP, 32, KEY_OPT_IP,
offsetof(struct hclge_fd_rule, tuples.dst_ip),
offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
{ INNER_L3_RSV, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.l3_user_def),
offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.src_port),
offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
{ INNER_DST_PORT, 16, KEY_OPT_LE16,
offsetof(struct hclge_fd_rule, tuples.dst_port),
offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
{ INNER_L4_RSV, 32, KEY_OPT_LE32,
offsetof(struct hclge_fd_rule, tuples.l4_user_def),
offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
int i, k, n;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
if (ret) {
dev_err(&hdev->pdev->dev,
"Get MAC pkt stats fail, status = %d.\n", ret);
return ret;
}
for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
/* for special opcode 0032, only the first desc has the head */
if (unlikely(i == 0)) {
desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_RD_FIRST_STATS_NUM;
} else {
desc_data = (__le64 *)(&desc[i]);
n = HCLGE_RD_OTHER_STATS_NUM;
}
for (k = 0; k < n; k++) {
*data += le64_to_cpu(*desc_data);
data++;
desc_data++;
}
}
return 0;
}
static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
{
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
u16 i, k, n;
int ret;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitable here
*/
desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
if (!desc)
return -ENOMEM;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
if (ret) {
kfree(desc);
return ret;
}
for (i = 0; i < desc_num; i++) {
/* for special opcode 0034, only the first desc has the head */
if (i == 0) {
desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_RD_FIRST_STATS_NUM;
} else {
desc_data = (__le64 *)(&desc[i]);
n = HCLGE_RD_OTHER_STATS_NUM;
}
for (k = 0; k < n; k++) {
*data += le64_to_cpu(*desc_data);
data++;
desc_data++;
}
}
kfree(desc);
return 0;
}
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
{
struct hclge_desc desc;
__le32 *desc_data;
u32 reg_num;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
return ret;
desc_data = (__le32 *)(&desc.data[0]);
reg_num = le32_to_cpu(*desc_data);
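/* convert the register count into a descriptor count: the first
 * descriptor covers 3 registers and each following one covers 4,
 * so round the remainder up
 */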
*desc_num = 1 + ((reg_num - 3) >> 2) +
(u32)(((reg_num - 3) & 0x3) ? 1 : 0);
return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
u32 desc_num;
int ret;
ret = hclge_mac_query_reg_num(hdev, &desc_num);
/* The firmware supports the new statistics acquisition method */
if (!ret)
ret = hclge_mac_update_stats_complete(hdev, desc_num);
else if (ret == -EOPNOTSUPP)
ret = hclge_mac_update_stats_defective(hdev);
else
dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
return ret;
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hnae3_queue *queue;
struct hclge_desc desc[1];
struct hclge_tqp *tqp;
int ret, i;
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_RX_STATS */
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
true);
desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
le32_to_cpu(desc[0].data[1]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
queue = handle->kinfo.tqp[i];
tqp = container_of(queue, struct hclge_tqp, q);
/* command : HCLGE_OPC_QUERY_TX_STATS */
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_QUERY_TX_STATS,
true);
desc[0].data[0] = cpu_to_le32(tqp->index);
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"Query tqp stat fail, status = %d,queue = %d\n",
ret, i);
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
le32_to_cpu(desc[0].data[1]);
}
return 0;
}
static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclge_tqp *tqp;
u64 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
*buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
*buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
}
static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
/* each TQP provides one TX and one RX queue */
return kinfo->num_tqps * 2;
}
static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
return buff;
}
static u64 *hclge_comm_get_stats(const void *comm_stats,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
for (i = 0; i < size; i++)
buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
return buf + size;
}
static u8 *hclge_comm_get_strings(u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
char *buff = (char *)data;
u32 i;
if (stringset != ETH_SS_STATS)
return buff;
for (i = 0; i < size; i++) {
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
return (u8 *)buff;
}
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
struct hnae3_handle *handle;
int status;
handle = &hdev->vport[0].nic;
if (handle->client) {
status = hclge_tqps_update_stats(handle);
if (status) {
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
status);
}
}
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n", status);
}
static void hclge_update_stats(struct hnae3_handle *handle,
struct net_device_stats *net_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int status;
if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
return;
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
"Update MAC stats fail, status = %d.\n",
status);
status = hclge_tqps_update_stats(handle);
if (status)
dev_err(&hdev->pdev->dev,
"Update TQPS stats fail, status = %d.\n",
status);
clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int count = 0;
/* Loopback test support rules:
* mac: only GE mode supports it
* serdes: all mac modes support it, including GE/XGE/LGE/CGE
* phy: only supported when a phy device exists on the board
*/
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
count += 1;
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
count += 2;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
hnae3_dev_phy_imp_supported(hdev)) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
} else if (stringset == ETH_SS_STATS) {
count = ARRAY_SIZE(g_mac_stats_string) +
hclge_tqps_get_sset_count(handle, stringset);
}
return count;
}
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
u8 *p = data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
p = hclge_comm_get_strings(stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
memcpy(p,
hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
}
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u64 *p;
p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
static void hclge_get_mac_stat(struct hnae3_handle *handle,
struct hns3_mac_stats *mac_stats)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
hclge_update_stats(handle, NULL);
mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK 0xF
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
/* Record whether this PF is the main PF */
if (status->pf_state & HCLGE_PF_STATE_MAIN)
hdev->flag |= HCLGE_FLAG_MAIN;
else
hdev->flag &= ~HCLGE_FLAG_MAIN;
hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
return 0;
}
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5
struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
req = (struct hclge_func_status_cmd *)desc.data;
do {
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"query function status failed %d.\n", ret);
return ret;
}
/* Check if PF reset is done */
if (req->pf_state)
break;
usleep_range(1000, 2000);
} while (timeout++ < HCLGE_QUERY_MAX_CNT);
return hclge_parse_func_status(hdev, req);
}
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
struct hclge_pf_res_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"query pf resource failed %d.\n", ret);
return ret;
}
req = (struct hclge_pf_res_cmd *)desc.data;
hdev->num_tqps = le16_to_cpu(req->tqp_num) +
le16_to_cpu(req->ext_tqp_num);
hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
if (req->tx_buf_size)
hdev->tx_buf_size =
le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
if (req->dv_buf_size)
hdev->dv_buf_size =
le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
else
hdev->dv_buf_size = HCLGE_DEFAULT_DV;
hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
dev_err(&hdev->pdev->dev,
"only %u msi resources available, not enough for pf(min:2).\n",
hdev->num_nic_msi);
return -EINVAL;
}
if (hnae3_dev_roce_supported(hdev)) {
hdev->num_roce_msi =
le16_to_cpu(req->pf_intr_vector_number_roce);
/* PF should have NIC vectors and RoCE vectors,
* NIC vectors are queued before RoCE vectors.
*/
hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
} else {
hdev->num_msi = hdev->num_nic_msi;
}
return 0;
}
static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
case HCLGE_FW_MAC_SPEED_10M:
*speed = HCLGE_MAC_SPEED_10M;
break;
case HCLGE_FW_MAC_SPEED_100M:
*speed = HCLGE_MAC_SPEED_100M;
break;
case HCLGE_FW_MAC_SPEED_1G:
*speed = HCLGE_MAC_SPEED_1G;
break;
case HCLGE_FW_MAC_SPEED_10G:
*speed = HCLGE_MAC_SPEED_10G;
break;
case HCLGE_FW_MAC_SPEED_25G:
*speed = HCLGE_MAC_SPEED_25G;
break;
case HCLGE_FW_MAC_SPEED_40G:
*speed = HCLGE_MAC_SPEED_40G;
break;
case HCLGE_FW_MAC_SPEED_50G:
*speed = HCLGE_MAC_SPEED_50G;
break;
case HCLGE_FW_MAC_SPEED_100G:
*speed = HCLGE_MAC_SPEED_100G;
break;
case HCLGE_FW_MAC_SPEED_200G:
*speed = HCLGE_MAC_SPEED_200G;
break;
default:
return -EINVAL;
}
return 0;
}
static const struct hclge_speed_bit_map speed_bit_map[] = {
{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
};
static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
u16 i;
for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
if (speed == speed_bit_map[i].speed) {
*speed_bit = speed_bit_map[i].speed_bit;
return 0;
}
}
return -EINVAL;
}
static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u32 speed_ability = hdev->hw.mac.speed_ability;
u32 speed_bit = 0;
int ret;
ret = hclge_get_speed_bit(speed, &speed_bit);
if (ret)
return ret;
if (speed_bit & speed_ability)
return 0;
return -EINVAL;
}
static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
mac->supported);
}
static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
mac->supported);
}
static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
mac->supported);
}
static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
mac->supported);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->supported);
mac->fec_ability =
BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->supported);
mac->fec_ability =
BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
BIT(HNAE3_FEC_AUTO);
break;
case HCLGE_MAC_SPEED_100G:
case HCLGE_MAC_SPEED_200G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
break;
default:
mac->fec_ability = 0;
break;
}
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
hclge_convert_setting_sr(mac, speed_ability);
hclge_convert_setting_lr(mac, speed_ability);
hclge_convert_setting_cr(mac, speed_ability);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
if (hnae3_dev_pause_supported(hdev))
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
hclge_convert_setting_kr(mac, speed_ability);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
if (hnae3_dev_pause_supported(hdev))
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
u16 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
/* default to supporting all speeds for a GE port */
if (!speed_ability)
speed_ability = HCLGE_SUPPORT_GE;
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
supported);
if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
supported);
}
if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
}
if (hnae3_dev_pause_supported(hdev)) {
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}
static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
if (media_type == HNAE3_MEDIA_TYPE_FIBER)
hclge_parse_fiber_link_mode(hdev, speed_ability);
else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
hclge_parse_copper_link_mode(hdev, speed_ability);
else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
static u32 hclge_get_max_speed(u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
return HCLGE_MAC_SPEED_200G;
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
return HCLGE_MAC_SPEED_100G;
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
return HCLGE_MAC_SPEED_50G;
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
return HCLGE_MAC_SPEED_40G;
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
return HCLGE_MAC_SPEED_25G;
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
return HCLGE_MAC_SPEED_10G;
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
return HCLGE_MAC_SPEED_1G;
if (speed_ability & HCLGE_SUPPORT_100M_BIT)
return HCLGE_MAC_SPEED_100M;
if (speed_ability & HCLGE_SUPPORT_10M_BIT)
return HCLGE_MAC_SPEED_10M;
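/* no recognized speed ability bit: fall back to 1G */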
return HCLGE_MAC_SPEED_1G;
}
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT 4096
#define SPEED_ABILITY_EXT_SHIFT 8
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u16 speed_ability_ext;
u64 mac_addr_tmp;
unsigned int i;
req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
HCLGE_CFG_TQP_DESC_N_M,
HCLGE_CFG_TQP_DESC_N_S);
cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_PHY_ADDR_M,
HCLGE_CFG_PHY_ADDR_S);
cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_MEDIA_TP_M,
HCLGE_CFG_MEDIA_TP_S);
cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_RX_BUF_LEN_M,
HCLGE_CFG_RX_BUF_LEN_S);
/* get mac_address */
mac_addr_tmp = __le32_to_cpu(req->param[2]);
mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_MAC_ADDR_H_M,
HCLGE_CFG_MAC_ADDR_H_S);
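/* splice in the upper MAC bits above bit 31; the two shifts together
 * are equivalent to a single shift left by 32 on the u64 operand
 */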
mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_DEFAULT_SPEED_M,
HCLGE_CFG_DEFAULT_SPEED_S);
cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
HCLGE_CFG_RSS_SIZE_M,
HCLGE_CFG_RSS_SIZE_S);
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_EXT_M,
HCLGE_CFG_SPEED_ABILITY_EXT_S);
cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_VLAN_FLTR_CAP_M,
HCLGE_CFG_VLAN_FLTR_CAP_S);
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
if (!cfg->umv_space)
cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
HCLGE_CFG_PF_RSS_SIZE_S);
/* HCLGE_CFG_PF_RSS_SIZE_M holds the PF max rss size as a power-of-2
* exponent rather than the size itself, which is more flexible for
* future changes and expansions; e.g. a field value of 7 means a max
* rss size of 2^7 = 128. A PF field of 0 does not make sense, so in
* that case the PF falls back to the VF max rss size field
* (HCLGE_CFG_RSS_SIZE_S), i.e. PF and VF share the same max rss size.
*/
cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1U << cfg->pf_rss_size_max :
cfg->vf_rss_size_max;
/* The tx spare buffer size queried from the configuration file is in
* units of HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
* needed here.
*/
cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
/* hclge_get_cfg: query the static parameters from flash
* @hdev: pointer to struct hclge_dev
* @hcfg: the config structure to be filled
*/
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
struct hclge_cfg_param_cmd *req;
unsigned int i;
int ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
u32 offset = 0;
req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* The length must be expressed in units of 4 bytes when sent to hardware */
hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
if (ret) {
dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
return ret;
}
hclge_parse_cfg(hcfg, desc);
return 0;
}
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM 8U
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
struct hclge_desc *desc)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_dev_specs_0_cmd *req0;
struct hclge_dev_specs_1_cmd *req1;
req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
ae_dev->dev_specs.rss_ind_tbl_size =
le16_to_cpu(req0->rss_ind_tbl_size);
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
if (!dev_specs->max_non_tso_bd_num)
dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
if (!dev_specs->rss_ind_tbl_size)
dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
if (!dev_specs->rss_key_size)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
if (!dev_specs->max_qset_num)
dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
int ret;
int i;
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
hclge_set_default_dev_specs(hdev);
return 0;
}
for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
true);
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
}
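/* the last descriptor in the chain must not carry the NEXT flag */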
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
if (ret)
return ret;
hclge_parse_dev_specs(hdev, desc);
hclge_check_dev_specs(hdev);
return 0;
}
static int hclge_get_cap(struct hclge_dev *hdev)
{
int ret;
ret = hclge_query_function_status(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"query function status error %d.\n", ret);
return ret;
}
/* get pf resource */
return hclge_query_pf_resource(hdev);
}
static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC 64
#define HCLGE_MIN_RX_DESC 64
if (!is_kdump_kernel())
return;
dev_info(&hdev->pdev->dev,
"Running kdump kernel. Using minimal resources\n");
/* the minimal number of queue pairs equals the number of vports */
hdev->num_tqps = hdev->num_req_vfs + 1;
hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}
static int hclge_configure(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
const struct cpumask *cpumask = cpu_online_mask;
struct hclge_cfg cfg;
unsigned int i;
int node, ret;
ret = hclge_get_cfg(hdev, &cfg);
if (ret)
return ret;
hdev->base_tqp_pid = 0;
hdev->vf_rss_size_max = cfg.vf_rss_size_max;
hdev->pf_rss_size_max = cfg.pf_rss_size_max;
hdev->rx_buf_len = cfg.rx_buf_len;
ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
hdev->hw.mac.media_type = cfg.media_type;
hdev->hw.mac.phy_addr = cfg.phy_addr;
hdev->num_tx_desc = cfg.tqp_desc_num;
hdev->num_rx_desc = cfg.tqp_desc_num;
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
if (ret) {
dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
cfg.default_speed, ret);
return ret;
}
hclge_parse_link_mode(hdev, cfg.speed_ability);
hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
if (hdev->tc_max > HNAE3_MAX_TC ||
hdev->tc_max < 1) {
dev_warn(&hdev->pdev->dev, "invalid TC num = %u, set to 1.\n",
hdev->tc_max);
hdev->tc_max = 1;
}
/* Dev does not support DCB */
if (!hnae3_dev_dcb_supported(hdev)) {
hdev->tc_max = 1;
hdev->pfc_max = 0;
} else {
hdev->pfc_max = hdev->tc_max;
}
hdev->tm_info.num_tc = 1;
/* Non-contiguous TCs are not currently supported */
for (i = 0; i < hdev->tm_info.num_tc; i++)
hnae3_set_bit(hdev->hw_tc_map, i, 1);
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
hclge_init_kdump_kernel_config(hdev);
/* Set the affinity based on numa node */
node = dev_to_node(&hdev->pdev->dev);
if (node != NUMA_NO_NODE)
cpumask = cpumask_of_node(node);
cpumask_copy(&hdev->affinity_mask, cpumask);
return ret;
}
static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
u16 tso_mss_max)
{
struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_tso_status_cmd *)desc.data;
req->tso_mss_min = cpu_to_le16(tso_mss_min);
req->tso_mss_max = cpu_to_le16(tso_mss_max);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev)
{
struct hclge_cfg_gro_status_cmd *req;
struct hclge_desc desc;
int ret;
if (!hnae3_dev_gro_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_gro_status_cmd *)desc.data;
req->gro_en = hdev->gro_en ? 1 : 0;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"GRO hardware config cmd failed, ret = %d\n", ret);
return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
int i;
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclge_tqp), GFP_KERNEL);
if (!hdev->htqp)
return -ENOMEM;
tqp = hdev->htqp;
for (i = 0; i < hdev->num_tqps; i++) {
tqp->dev = &hdev->pdev->dev;
tqp->index = i;
tqp->q.ae_algo = &ae_algo;
tqp->q.buf_size = hdev->rx_buf_len;
tqp->q.tx_desc_num = hdev->num_tx_desc;
tqp->q.rx_desc_num = hdev->num_rx_desc;
/* need an extended offset to configure queues >=
* HCLGE_TQP_MAX_SIZE_DEV_V2
*/
if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
tqp->q.io_base = hdev->hw.io_base +
HCLGE_TQP_REG_OFFSET +
i * HCLGE_TQP_REG_SIZE;
else
tqp->q.io_base = hdev->hw.io_base +
HCLGE_TQP_REG_OFFSET +
HCLGE_TQP_EXT_REG_OFFSET +
(i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
HCLGE_TQP_REG_SIZE;
tqp++;
}
return 0;
}
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
struct hclge_tqp_map_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
req = (struct hclge_tqp_map_cmd *)desc.data;
req->tqp_id = cpu_to_le16(tqp_pid);
req->tqp_vf = func_id;
req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
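/* the TYPE bit distinguishes a VF mapping from a PF mapping */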
if (!is_pf)
req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
req->tqp_vid = cpu_to_le16(tqp_vid);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
return ret;
}
static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
int i, alloced;
for (i = 0, alloced = 0; i < hdev->num_tqps &&
alloced < num_tqps; i++) {
if (!hdev->htqp[i].alloced) {
hdev->htqp[i].q.handle = &vport->nic;
hdev->htqp[i].q.tqp_index = alloced;
hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
kinfo->tqp[alloced] = &hdev->htqp[i].q;
hdev->htqp[i].alloced = true;
alloced++;
}
}
vport->alloc_tqps = alloced;
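/* rss_size is bounded by the PF max RSS size and by an even split of
 * the vport's TQPs across TCs
 */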
kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
vport->alloc_tqps / hdev->tm_info.num_tc);
/* ensure one-to-one mapping between irq and queue by default */
kinfo->rss_size = min_t(u16, kinfo->rss_size,
(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
return 0;
}
static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
u16 num_tx_desc, u16 num_rx_desc)
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo = &nic->kinfo;
struct hclge_dev *hdev = vport->back;
int ret;
kinfo->num_tx_desc = num_tx_desc;
kinfo->num_rx_desc = num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
return -ENOMEM;
ret = hclge_assign_tqp(vport, num_tqps);
if (ret)
dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
return ret;
}
static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
struct hclge_vport *vport)
{
struct hnae3_handle *nic = &vport->nic;
struct hnae3_knic_private_info *kinfo;
u16 i;
kinfo = &nic->kinfo;
for (i = 0; i < vport->alloc_tqps; i++) {
struct hclge_tqp *q =
container_of(kinfo->tqp[i], struct hclge_tqp, q);
bool is_pf;
int ret;
is_pf = !(vport->vport_id);
ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
i, is_pf);
if (ret)
return ret;
}
return 0;
}
static int hclge_map_tqp(struct hclge_dev *hdev)
{
struct hclge_vport *vport = hdev->vport;
u16 i, num_vport;
num_vport = hdev->num_req_vfs + 1;
for (i = 0; i < num_vport; i++) {
int ret;
ret = hclge_map_tqp_to_vport(hdev, vport);
if (ret)
return ret;
vport++;
}
return 0;
}
static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
struct hnae3_handle *nic = &vport->nic;
struct hclge_dev *hdev = vport->back;
int ret;
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
nic->numa_node_mask = hdev->numa_node_mask;
nic->kinfo.io_base = hdev->hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
hdev->num_tx_desc, hdev->num_rx_desc);
if (ret)
dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
return ret;
}
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
struct hclge_vport *vport;
u32 tqp_main_vport;
u32 tqp_per_vport;
int num_vport, i;
int ret;
/* We need to alloc a vport for the main NIC of the PF */
num_vport = hdev->num_req_vfs + 1;
if (hdev->num_tqps < num_vport) {
dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
hdev->num_tqps, num_vport);
return -EINVAL;
}
/* Alloc the same number of TQPs for every vport */
tqp_per_vport = hdev->num_tqps / num_vport;
tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
GFP_KERNEL);
if (!vport)
return -ENOMEM;
hdev->vport = vport;
hdev->num_alloc_vport = num_vport;
if (IS_ENABLED(CONFIG_PCI_IOV))
hdev->num_alloc_vfs = hdev->num_req_vfs;
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
spin_lock_init(&vport->mac_list_lock);
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
else
ret = hclge_vport_setup(vport, tqp_per_vport);
if (ret) {
dev_err(&pdev->dev,
"vport setup failed for vport %d, %d\n",
i, ret);
return ret;
}
vport++;
}
return 0;
}
static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
struct hclge_tx_buff_alloc_cmd *req;
struct hclge_desc desc;
int ret;
u8 i;
req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
req->tx_pkt_buff[i] =
cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
HCLGE_BUF_SIZE_UPDATE_EN_MSK);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
ret);
return ret;
}
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
if (ret)
dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
return ret;
}
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
unsigned int i;
u32 cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
if (hdev->hw_tc_map & BIT(i))
cnt++;
return cnt;
}
/* Get the number of PFC-enabled TCs that have a private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
unsigned int i;
int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
priv->enable)
cnt++;
}
return cnt;
}
/* Get the number of PFC-disabled TCs that have a private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
unsigned int i;
int cnt = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i) &&
!(hdev->tm_info.hw_pfc_map & BIT(i)) &&
priv->enable)
cnt++;
}
return cnt;
}
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_priv_buf *priv;
u32 rx_priv = 0;
int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
priv = &buf_alloc->priv_buf[i];
if (priv->enable)
rx_priv += priv->buf_size;
}
return rx_priv;
}
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 i, total_tx_size = 0;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
return total_tx_size;
}
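/* Check whether the RX buffer left over after the private per-TC
 * allocation can hold the required shared buffer; if so, derive the
 * shared buffer size and its per-TC thresholds.
 */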
static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc,
u32 rx_all)
{
u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
u32 tc_num = hclge_get_tc_num(hdev);
u32 shared_buf, aligned_mps;
u32 rx_priv;
int i;
aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
if (hnae3_dev_dcb_supported(hdev))
shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
hdev->dv_buf_size;
else
shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ hdev->dv_buf_size;
shared_buf_tc = tc_num * aligned_mps + aligned_mps;
shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
HCLGE_BUF_SIZE_UNIT);
rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
if (rx_all < rx_priv + shared_std)
return false;
shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
buf_alloc->s_buf.buf_size = shared_buf;
if (hnae3_dev_dcb_supported(hdev)) {
buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
HCLGE_BUF_SIZE_UNIT);
} else {
buf_alloc->s_buf.self.high = aligned_mps +
HCLGE_NON_DCB_ADDITIONAL_BUF;
buf_alloc->s_buf.self.low = aligned_mps;
}
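/* derive the per-TC high/low thresholds of the shared buffer; with
 * NEED_RESERVE_TC_NUM or fewer TCs, only BUF_RESERVE_PERCENT of the
 * headroom is distributed so some space is kept in reserve
 */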
if (hnae3_dev_dcb_supported(hdev)) {
hi_thrd = shared_buf - hdev->dv_buf_size;
if (tc_num <= NEED_RESERVE_TC_NUM)
hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
/ BUF_MAX_PERCENT;
if (tc_num)
hi_thrd = hi_thrd / tc_num;
hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
} else {
hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
lo_thrd = aligned_mps;
}
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
}
return true;
}
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 i, total_size;
total_size = hdev->pkt_buf_size;
/* allocate a tx buffer for each enabled TC */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
if (hdev->hw_tc_map & BIT(i)) {
if (total_size < hdev->tx_buf_size)
return -ENOMEM;
priv->tx_buf_size = hdev->tx_buf_size;
} else {
priv->tx_buf_size = 0;
}
total_size -= priv->tx_buf_size;
}
return 0;
}
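/* configure a private RX buffer for each enabled TC, with either
 * generous (max) or minimal waterlines, then verify that a valid shared
 * buffer still fits in the remaining space
 */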
static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
unsigned int i;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
priv->enable = 0;
priv->wl.low = 0;
priv->wl.high = 0;
priv->buf_size = 0;
if (!(hdev->hw_tc_map & BIT(i)))
continue;
priv->enable = 1;
if (hdev->tm_info.hw_pfc_map & BIT(i)) {
priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
priv->wl.high = roundup(priv->wl.low + aligned_mps,
HCLGE_BUF_SIZE_UNIT);
} else {
priv->wl.low = 0;
priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
aligned_mps;
}
priv->buf_size = priv->wl.high + hdev->dv_buf_size;
}
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
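/* release the private buffers of non-PFC TCs, starting from the last TC,
 * until the RX buffer layout fits
 */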
static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
int i;
/* clear from the last TC backwards */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
unsigned int mask = BIT((unsigned int)i);
if (hdev->hw_tc_map & mask &&
!(hdev->tm_info.hw_pfc_map & mask)) {
/* Clear the private buffer of this non-PFC TC */
priv->wl.low = 0;
priv->wl.high = 0;
priv->buf_size = 0;
priv->enable = 0;
no_pfc_priv_num--;
}
if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
no_pfc_priv_num == 0)
break;
}
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
int i;
/* clear from the last TC backwards */
for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
unsigned int mask = BIT((unsigned int)i);
if (hdev->hw_tc_map & mask &&
hdev->tm_info.hw_pfc_map & mask) {
/* Reduce the number of PFC-enabled TCs with a private buffer */
priv->wl.low = 0;
priv->enable = 0;
priv->wl.high = 0;
priv->buf_size = 0;
pfc_priv_num--;
}
if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
pfc_priv_num == 0)
break;
}
return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
}
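/* try to split the whole RX buffer evenly into per-TC private buffers
 * with no shared buffer at all; this only works if each TC's share stays
 * above the minimum of dv_buf_size plus the compensation space
 */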
static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
#define COMPENSATE_BUFFER 0x3C00
#define COMPENSATE_HALF_MPS_NUM 5
#define PRIV_WL_GAP 0x1800
u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
u32 tc_num = hclge_get_tc_num(hdev);
u32 half_mps = hdev->mps >> 1;
u32 min_rx_priv;
unsigned int i;
if (tc_num)
rx_priv = rx_priv / tc_num;
if (tc_num <= NEED_RESERVE_TC_NUM)
rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
COMPENSATE_HALF_MPS_NUM * half_mps;
min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
if (rx_priv < min_rx_priv)
return false;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
priv->enable = 0;
priv->wl.low = 0;
priv->wl.high = 0;
priv->buf_size = 0;
if (!(hdev->hw_tc_map & BIT(i)))
continue;
priv->enable = 1;
priv->buf_size = rx_priv;
priv->wl.high = rx_priv - hdev->dv_buf_size;
priv->wl.low = priv->wl.high - PRIV_WL_GAP;
}
buf_alloc->s_buf.buf_size = 0;
return true;
}
/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hdev: pointer to struct hclge_dev
* @buf_alloc: pointer to buffer calculation data
* @return: 0 if the calculation succeeded, negative errno otherwise
*/
static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
/* When DCB is not supported, rx private buffer is not allocated. */
if (!hnae3_dev_dcb_supported(hdev)) {
u32 rx_all = hdev->pkt_buf_size;
rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
return -ENOMEM;
return 0;
}
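/* try the allocation strategies from the most to the least generous:
 * private buffers only, private plus shared with max waterlines, the same
 * with reduced waterlines, then dropping non-PFC and finally PFC private
 * buffers
 */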
if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
return 0;
if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
return 0;
/* try to decrease the buffer size */
if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
return 0;
if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
return 0;
if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
return 0;
return -ENOMEM;
}
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_rx_priv_buff_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
req = (struct hclge_rx_priv_buff_cmd *)desc.data;
/* Allocate the private buffer for each TC */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
req->buf_num[i] =
cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
req->buf_num[i] |=
cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
}
req->shared_buf =
cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
(1 << HCLGE_TC0_PRI_BUF_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"rx private buffer alloc cmd failed %d\n", ret);
return ret;
}
static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_rx_priv_wl_buf *req;
struct hclge_priv_buf *priv;
struct hclge_desc desc[2];
int i, j;
int ret;
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
false);
req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
priv = &buf_alloc->priv_buf[idx];
req->tc_wl[j].high =
cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].high |=
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->tc_wl[j].low =
cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
req->tc_wl[j].low |=
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
dev_err(&hdev->pdev->dev,
"rx private waterline config cmd failed %d\n",
ret);
return ret;
}
static int hclge_common_thrd_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
struct hclge_rx_com_thrd *req;
struct hclge_desc desc[2];
struct hclge_tc_thrd *tc;
int i, j;
int ret;
for (i = 0; i < 2; i++) {
hclge_cmd_setup_basic_desc(&desc[i],
HCLGE_OPC_RX_COM_THRD_ALLOC, false);
req = (struct hclge_rx_com_thrd *)&desc[i].data;
/* The first descriptor sets the NEXT bit to 1 */
if (i == 0)
desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
else
desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
req->com_thrd[j].high =
cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].high |=
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_thrd[j].low =
cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
req->com_thrd[j].low |=
cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
}
}
/* Send 2 descriptors at one time */
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
dev_err(&hdev->pdev->dev,
"common threshold config cmd failed %d\n", ret);
return ret;
}
static int hclge_common_wl_config(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
struct hclge_shared_buf *buf = &buf_alloc->s_buf;
struct hclge_rx_com_wl *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
req = (struct hclge_rx_com_wl *)desc.data;
req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"common waterline config cmd failed %d\n", ret);
return ret;
}
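/* calculate and program the whole TX/RX packet buffer layout; private
 * waterlines and common thresholds are only programmed on DCB-capable
 * devices, while the common waterline is always configured
 */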
int hclge_buffer_alloc(struct hclge_dev *hdev)
{
struct hclge_pkt_buf_alloc *pkt_buf;
int ret;
pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
if (!pkt_buf)
return -ENOMEM;
ret = hclge_tx_buffer_calc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not calc tx buffer size for all TCs %d\n", ret);
goto out;
}
ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not alloc tx buffers %d\n", ret);
goto out;
}
ret = hclge_rx_buffer_calc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not calc rx priv buffer size for all TCs %d\n",
ret);
goto out;
}
ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
ret);
goto out;
}
if (hnae3_dev_dcb_supported(hdev)) {
ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not configure rx private waterline %d\n",
ret);
goto out;
}
ret = hclge_common_thrd_config(hdev, pkt_buf);
if (ret) {
dev_err(&hdev->pdev->dev,
"could not configure common threshold %d\n",
ret);
goto out;
}
}
ret = hclge_common_wl_config(hdev, pkt_buf);
if (ret)
dev_err(&hdev->pdev->dev,
"could not configure common waterline %d\n", ret);
out:
kfree(pkt_buf);
return ret;
}
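/* mirror the NIC handle's device information into the RoCE handle and
 * point it at the interrupt vectors reserved for RoCE
 */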
static int hclge_init_roce_base_info(struct hclge_vport *vport)
{
struct hnae3_handle *roce = &vport->roce;
struct hnae3_handle *nic = &vport->nic;
struct hclge_dev *hdev = vport->back;
roce->rinfo.num_vectors = vport->back->num_roce_msi;
if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
return -EINVAL;
roce->rinfo.base_vector = hdev->roce_base_vector;
roce->rinfo.netdev = nic->kinfo.netdev;
roce->rinfo.roce_io_base = hdev->hw.io_base;
roce->rinfo.roce_mem_base = hdev->hw.mem_base;
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
roce->numa_node_mask = nic->numa_node_mask;
return 0;
}
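/* allocate MSI/MSI-X vectors; NIC vectors come first and RoCE vectors
 * start right after them (see roce_base_vector)
 */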
static int hclge_init_msi(struct hclge_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int vectors;
int i;
vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
hdev->num_msi,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (vectors < 0) {
dev_err(&pdev->dev,
"failed(%d) to allocate MSI/MSI-X vectors\n",
vectors);
return vectors;
}
if (vectors < hdev->num_msi)
dev_warn(&hdev->pdev->dev,
"requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
hdev->num_msi, vectors);
hdev->num_msi = vectors;
hdev->num_msi_left = vectors;
hdev->base_msi_vector = pdev->irq;
hdev->roce_base_vector = hdev->base_msi_vector +
hdev->num_nic_msi;
hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(u16), GFP_KERNEL);
if (!hdev->vector_status) {
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
for (i = 0; i < hdev->num_msi; i++)
hdev->vector_status[i] = HCLGE_INVALID_VPORT;
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
return 0;
}
static u8 hclge_check_speed_dup(u8 duplex, int speed)
{
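/* only 10M and 100M links support half duplex; force full duplex for
 * all other speeds
 */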
if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
duplex = HCLGE_MAC_FULL;
return duplex;
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
u8 duplex)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
if (duplex)
hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
switch (speed) {
case HCLGE_MAC_SPEED_10M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M);
break;
case HCLGE_MAC_SPEED_100M:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M);
break;
case HCLGE_MAC_SPEED_1G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G);
break;
case HCLGE_MAC_SPEED_10G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G);
break;
case HCLGE_MAC_SPEED_25G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G);
break;
case HCLGE_MAC_SPEED_40G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G);
break;
case HCLGE_MAC_SPEED_50G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G);
break;
case HCLGE_MAC_SPEED_100G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G);
break;
case HCLGE_MAC_SPEED_200G:
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G);
break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
}
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
"mac speed/duplex config cmd failed %d.\n", ret);
return ret;
}
return 0;
}
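/* update MAC speed/duplex, skipping the firmware command when autoneg is
 * not supported and the cached values already match
 */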
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
mac->duplex == duplex)
return 0;
ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
u8 duplex)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
struct hclge_config_auto_neg_cmd *req;
struct hclge_desc desc;
u32 flag = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
req = (struct hclge_config_auto_neg_cmd *)desc.data;
if (enable)
hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
ret);
return ret;
}
static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (!hdev->hw.mac.support_autoneg) {
if (!enable)
return 0;
dev_err(&hdev->pdev->dev,
"autoneg is not supported by current port\n");
return -EOPNOTSUPP;
}
return hclge_set_autoneg_en(hdev, enable);
}
static int hclge_get_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct phy_device *phydev = hdev->hw.mac.phydev;
if (phydev)
return phydev->autoneg;
return hdev->hw.mac.autoneg;
}
static int hclge_restart_autoneg(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
int ret;
dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret)
return ret;
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
return hclge_set_autoneg_en(hdev, !halt);
return 0;
}
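/* program the FEC mode: HNAE3_FEC_AUTO sets the auto-enable bit, while
 * RS and BaseR select the corresponding mode field
 */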
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
req = (struct hclge_config_fec_cmd *)desc.data;
if (fec_mode & BIT(HNAE3_FEC_AUTO))
hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
return ret;
}
static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
if (fec_mode && !(mac->fec_ability & fec_mode)) {
dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
return -EINVAL;
}
ret = hclge_set_fec_hw(hdev, fec_mode);
if (ret)
return ret;
mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
return 0;
}
static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
struct hclge_mac *mac = &hdev->hw.mac;
if (fec_ability)
*fec_ability = mac->fec_ability;
if (fec_mode)
*fec_mode = mac->fec_mode;
}
static int hclge_mac_init(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
hdev->hw.mac.duplex);
if (ret)
return ret;
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
if (ret)
return ret;
}
ret = hclge_set_mac_mtu(hdev, hdev->mps);
if (ret) {
dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
return ret;
}
ret = hclge_set_default_loopback(hdev);
if (ret)
return ret;
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"allocate buffer fail, ret=%d\n", ret);
return ret;
}
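/* the mailbox, reset and error handlers share a single delayed service
 * task; each scheduler sets its own SCHED state bit and queues the task
 * on the first CPU of the configured affinity mask
 */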
static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
hclge_wq, &hdev->service_task,
delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
struct hclge_link_status_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
ret);
return ret;
}
req = (struct hclge_link_status_cmd *)desc.data;
*link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
return 0;
}
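/* report link down without querying the MAC when the device is stopped
 * or an attached PHY is not up and running
 */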
static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
*link_status = HCLGE_LINK_STATUS_DOWN;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
return 0;
if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
return 0;
return hclge_get_mac_link_status(hdev, link_status);
}
static void hclge_push_link_status(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
int ret;
u16 i;
for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
continue;
ret = hclge_push_vf_link_status(vport);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed to push link status to vf%u, ret = %d\n",
i, ret);
}
}
}
static void hclge_update_link_status(struct hclge_dev *hdev)
{
struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
if (!client)
return;
if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
return;
ret = hclge_get_mac_phy_link(hdev, &state);
if (ret) {
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
return;
}
if (state != hdev->hw.mac.link) {
hdev->hw.mac.link = state;
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
if (rclient && rclient->ops->link_status_change)
rclient->ops->link_status_change(rhandle, state);
hclge_push_link_status(hdev);
}