| /* |
| * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. |
| * |
| * This software is available to you under a choice of one of two |
| * licenses. You may choose to be licensed under the terms of the GNU |
| * General Public License (GPL) Version 2, available from the file |
| * COPYING in the main directory of this source tree, or the |
| * OpenIB.org BSD license below: |
| * |
| * Redistribution and use in source and binary forms, with or |
| * without modification, are permitted provided that the following |
| * conditions are met: |
| * |
| * - Redistributions of source code must retain the above |
| * copyright notice, this list of conditions and the following |
| * disclaimer. |
| * |
| * - Redistributions in binary form must reproduce the above |
| * copyright notice, this list of conditions and the following |
| * disclaimer in the documentation and/or other materials |
| * provided with the distribution. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
| * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
| * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
| * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
| * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
| * SOFTWARE. |
| */ |
| |
| #include <net/tc_act/tc_gact.h> |
| #include <net/pkt_cls.h> |
| #include <linux/mlx5/fs.h> |
| #include <net/vxlan.h> |
| #include <net/geneve.h> |
| #include <linux/bpf.h> |
| #include <linux/if_bridge.h> |
| #include <net/page_pool.h> |
| #include <net/xdp_sock_drv.h> |
| #include "eswitch.h" |
| #include "en.h" |
| #include "en/txrx.h" |
| #include "en_tc.h" |
| #include "en_rep.h" |
| #include "en_accel/ipsec.h" |
| #include "en_accel/en_accel.h" |
| #include "en_accel/tls.h" |
| #include "accel/ipsec.h" |
| #include "accel/tls.h" |
| #include "lib/vxlan.h" |
| #include "lib/clock.h" |
| #include "en/port.h" |
| #include "en/xdp.h" |
| #include "lib/eq.h" |
| #include "en/monitor_stats.h" |
| #include "en/health.h" |
| #include "en/params.h" |
| #include "en/xsk/pool.h" |
| #include "en/xsk/setup.h" |
| #include "en/xsk/rx.h" |
| #include "en/xsk/tx.h" |
| #include "en/hv_vhca_stats.h" |
| #include "en/devlink.h" |
| #include "lib/mlx5.h" |
| #include "en/ptp.h" |
| #include "qos.h" |
| #include "en/trap.h" |
| #include "fpga/ipsec.h" |
| |
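/* Striding RQ (MPWQE) relies on UMR: the device must advertise the
 * striding_rq, umr_ptr_rlky and reg_umr_sq capabilities, and the inline UMR
 * WQE must fit within the maximum supported SQ WQE size.
 */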
| bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev) |
| { |
| bool striding_rq_umr = MLX5_CAP_GEN(mdev, striding_rq) && |
| MLX5_CAP_GEN(mdev, umr_ptr_rlky) && |
| MLX5_CAP_ETH(mdev, reg_umr_sq); |
| u16 max_wqe_sz_cap = MLX5_CAP_GEN(mdev, max_wqe_sz_sq); |
| bool inline_umr = MLX5E_UMR_WQE_INLINE_SZ <= max_wqe_sz_cap; |
| |
| if (!striding_rq_umr) |
| return false; |
| if (!inline_umr) { |
| mlx5_core_warn(mdev, "Cannot support Striding RQ: UMR WQE size (%d) exceeds maximum supported (%d).\n", |
| (int)MLX5E_UMR_WQE_INLINE_SZ, max_wqe_sz_cap); |
| return false; |
| } |
| return true; |
| } |
| |
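/* Query the vport state and reflect it in the netdev carrier. If the queried
 * state equals the current carrier state, netif_carrier_event() still records
 * the event so that link flaps are not lost.
 */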
| void mlx5e_update_carrier(struct mlx5e_priv *priv) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| u8 port_state; |
| bool up; |
| |
| port_state = mlx5_query_vport_state(mdev, |
| MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, |
| 0); |
| |
| up = port_state == VPORT_STATE_UP; |
| if (up == netif_carrier_ok(priv->netdev)) |
| netif_carrier_event(priv->netdev); |
| if (up) { |
| netdev_info(priv->netdev, "Link up\n"); |
| netif_carrier_on(priv->netdev); |
| } else { |
| netdev_info(priv->netdev, "Link down\n"); |
| netif_carrier_off(priv->netdev); |
| } |
| } |
| |
| static void mlx5e_update_carrier_work(struct work_struct *work) |
| { |
| struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, |
| update_carrier_work); |
| |
| mutex_lock(&priv->state_lock); |
| if (test_bit(MLX5E_STATE_OPENED, &priv->state)) |
| if (priv->profile->update_carrier) |
| priv->profile->update_carrier(priv); |
| mutex_unlock(&priv->state_lock); |
| } |
| |
| static void mlx5e_update_stats_work(struct work_struct *work) |
| { |
| struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, |
| update_stats_work); |
| |
| mutex_lock(&priv->state_lock); |
| priv->profile->update_stats(priv); |
| mutex_unlock(&priv->state_lock); |
| } |
| |
| void mlx5e_queue_update_stats(struct mlx5e_priv *priv) |
| { |
| if (!priv->profile->update_stats) |
| return; |
| |
| if (unlikely(test_bit(MLX5E_STATE_DESTROYING, &priv->state))) |
| return; |
| |
| queue_work(priv->wq, &priv->update_stats_work); |
| } |
| |
| static int async_event(struct notifier_block *nb, unsigned long event, void *data) |
| { |
| struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb); |
| struct mlx5_eqe *eqe = data; |
| |
| if (event != MLX5_EVENT_TYPE_PORT_CHANGE) |
| return NOTIFY_DONE; |
| |
| switch (eqe->sub_type) { |
| case MLX5_PORT_CHANGE_SUBTYPE_DOWN: |
| case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: |
| queue_work(priv->wq, &priv->update_carrier_work); |
| break; |
| default: |
| return NOTIFY_DONE; |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| static void mlx5e_enable_async_events(struct mlx5e_priv *priv) |
| { |
| priv->events_nb.notifier_call = async_event; |
| mlx5_notifier_register(priv->mdev, &priv->events_nb); |
| } |
| |
| static void mlx5e_disable_async_events(struct mlx5e_priv *priv) |
| { |
| mlx5_notifier_unregister(priv->mdev, &priv->events_nb); |
| } |
| |
| static int blocking_event(struct notifier_block *nb, unsigned long event, void *data) |
| { |
| struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb); |
| int err; |
| |
| switch (event) { |
| case MLX5_DRIVER_EVENT_TYPE_TRAP: |
| err = mlx5e_handle_trap_event(priv, data); |
| break; |
| default: |
| netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event); |
| err = -EINVAL; |
| } |
| return err; |
| } |
| |
| static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv) |
| { |
| priv->blocking_events_nb.notifier_call = blocking_event; |
| mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb); |
| } |
| |
| static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv) |
| { |
| mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb); |
| } |
| |
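/* Pre-build the constant fields of the UMR WQE used to map the pages of a
 * striding RQ WQE; the per-WQE fields (e.g. the inline MTT entries) are
 * filled when the UMR is posted.
 */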
| static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, |
| struct mlx5e_icosq *sq, |
| struct mlx5e_umr_wqe *wqe) |
| { |
| struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; |
| struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl; |
| u8 ds_cnt = DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_DS); |
| |
| cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | |
| ds_cnt); |
| cseg->umr_mkey = rq->mkey_be; |
| |
| ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; |
| ucseg->xlt_octowords = |
| cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE)); |
| ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); |
| } |
| |
| static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node) |
| { |
| int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); |
| |
| rq->mpwqe.info = kvzalloc_node(array_size(wq_sz, |
| sizeof(*rq->mpwqe.info)), |
| GFP_KERNEL, node); |
| if (!rq->mpwqe.info) |
| return -ENOMEM; |
| |
| mlx5e_build_umr_wqe(rq, rq->icosq, &rq->mpwqe.umr_wqe); |
| |
| return 0; |
| } |
| |
| static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, |
| u64 npages, u8 page_shift, |
| struct mlx5_core_mkey *umr_mkey, |
| dma_addr_t filler_addr) |
| { |
| struct mlx5_mtt *mtt; |
| int inlen; |
| void *mkc; |
| u32 *in; |
| int err; |
| int i; |
| |
| inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + sizeof(*mtt) * npages; |
| |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); |
| |
| MLX5_SET(mkc, mkc, free, 1); |
| MLX5_SET(mkc, mkc, umr_en, 1); |
| MLX5_SET(mkc, mkc, lw, 1); |
| MLX5_SET(mkc, mkc, lr, 1); |
| MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); |
| mlx5e_mkey_set_relaxed_ordering(mdev, mkc); |
| MLX5_SET(mkc, mkc, qpn, 0xffffff); |
| MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); |
| MLX5_SET64(mkc, mkc, len, npages << page_shift); |
| MLX5_SET(mkc, mkc, translations_octword_size, |
| MLX5_MTT_OCTW(npages)); |
| MLX5_SET(mkc, mkc, log_page_size, page_shift); |
| MLX5_SET(create_mkey_in, in, translations_octword_actual_size, |
| MLX5_MTT_OCTW(npages)); |
| |
| /* Initialize the mkey with all MTTs pointing to a default |
| * page (filler_addr). When the channels are activated, UMR |
| * WQEs will redirect the RX WQEs to the actual memory from |
| * the RQ's pool, while the gaps (wqe_overflow) remain mapped |
| * to the default page. |
| */ |
| mtt = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); |
| for (i = 0 ; i < npages ; i++) |
| mtt[i].ptag = cpu_to_be64(filler_addr); |
| |
| err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); |
| |
| kvfree(in); |
| return err; |
| } |
| |
| static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) |
| { |
| u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); |
| |
| return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey, |
| rq->wqe_overflow.addr); |
| } |
| |
| static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix) |
| { |
| return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT; |
| } |
| |
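/* Partition the WQE fragments of a legacy (cyclic) RQ over pages: fragments
 * are packed into a page until the next one would cross the page boundary,
 * in which case it starts on a new page and the previous fragment is marked
 * as the last one in its page.
 */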
| static void mlx5e_init_frags_partition(struct mlx5e_rq *rq) |
| { |
| struct mlx5e_wqe_frag_info next_frag = {}; |
| struct mlx5e_wqe_frag_info *prev = NULL; |
| int i; |
| |
| next_frag.di = &rq->wqe.di[0]; |
| |
| for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { |
| struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0]; |
| struct mlx5e_wqe_frag_info *frag = |
| &rq->wqe.frags[i << rq->wqe.info.log_num_frags]; |
| int f; |
| |
| for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) { |
| if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) { |
| next_frag.di++; |
| next_frag.offset = 0; |
| if (prev) |
| prev->last_in_page = true; |
| } |
| *frag = next_frag; |
| |
| /* prepare next */ |
| next_frag.offset += frag_info[f].frag_stride; |
| prev = frag; |
| } |
| } |
| |
| if (prev) |
| prev->last_in_page = true; |
| } |
| |
| int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node) |
| { |
| int len = wq_sz << rq->wqe.info.log_num_frags; |
| |
| rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node); |
| if (!rq->wqe.di) |
| return -ENOMEM; |
| |
| mlx5e_init_frags_partition(rq); |
| |
| return 0; |
| } |
| |
| void mlx5e_free_di_list(struct mlx5e_rq *rq) |
| { |
| kvfree(rq->wqe.di); |
| } |
| |
| static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work) |
| { |
| struct mlx5e_rq *rq = container_of(recover_work, struct mlx5e_rq, recover_work); |
| |
| mlx5e_reporter_rq_cqe_err(rq); |
| } |
| |
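/* The wqe_overflow page is a DMA-mapped filler page; unused MTT entries of
 * the striding RQ UMR mkey point at it (see mlx5e_create_umr_mkey()).
 */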
| static int mlx5e_alloc_mpwqe_rq_drop_page(struct mlx5e_rq *rq) |
| { |
| rq->wqe_overflow.page = alloc_page(GFP_KERNEL); |
| if (!rq->wqe_overflow.page) |
| return -ENOMEM; |
| |
| rq->wqe_overflow.addr = dma_map_page(rq->pdev, rq->wqe_overflow.page, 0, |
| PAGE_SIZE, rq->buff.map_dir); |
| if (dma_mapping_error(rq->pdev, rq->wqe_overflow.addr)) { |
| __free_page(rq->wqe_overflow.page); |
| return -ENOMEM; |
| } |
| return 0; |
| } |
| |
| static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq) |
| { |
| dma_unmap_page(rq->pdev, rq->wqe_overflow.addr, PAGE_SIZE, |
| rq->buff.map_dir); |
| __free_page(rq->wqe_overflow.page); |
| } |
| |
| static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, |
| struct mlx5e_rq *rq) |
| { |
| struct mlx5_core_dev *mdev = c->mdev; |
| int err; |
| |
| rq->wq_type = params->rq_wq_type; |
| rq->pdev = c->pdev; |
| rq->netdev = c->netdev; |
| rq->priv = c->priv; |
| rq->tstamp = c->tstamp; |
| rq->clock = &mdev->clock; |
| rq->icosq = &c->icosq; |
| rq->ix = c->ix; |
| rq->mdev = mdev; |
| rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); |
| rq->xdpsq = &c->rq_xdpsq; |
| rq->stats = &c->priv->channel_stats[c->ix].rq; |
| rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev); |
| err = mlx5e_rq_set_handlers(rq, params, NULL); |
| if (err) |
| return err; |
| |
| return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0); |
| } |
| |
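/* Allocate the software context and work queue of an RQ (striding or cyclic),
 * take a reference on the XDP program if present, set up the page_pool or
 * XSK memory model, and pre-fill the RX WQEs.
 */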
| static int mlx5e_alloc_rq(struct mlx5e_params *params, |
| struct mlx5e_xsk_param *xsk, |
| struct mlx5e_rq_param *rqp, |
| int node, struct mlx5e_rq *rq) |
| { |
| struct page_pool_params pp_params = { 0 }; |
| struct mlx5_core_dev *mdev = rq->mdev; |
| void *rqc = rqp->rqc; |
| void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); |
| u32 pool_size; |
| int wq_sz; |
| int err; |
| int i; |
| |
| rqp->wq.db_numa_node = node; |
| INIT_WORK(&rq->recover_work, mlx5e_rq_err_cqe_work); |
| |
| if (params->xdp_prog) |
| bpf_prog_inc(params->xdp_prog); |
| RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog); |
| |
| rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; |
| rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk); |
| pool_size = 1 << params->log_rq_mtu_frames; |
| |
| switch (rq->wq_type) { |
| case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
| err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, |
| &rq->wq_ctrl); |
| if (err) |
| goto err_rq_xdp_prog; |
| |
| err = mlx5e_alloc_mpwqe_rq_drop_page(rq); |
| if (err) |
| goto err_rq_wq_destroy; |
| |
| rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; |
| |
| wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); |
| |
| pool_size = MLX5_MPWRQ_PAGES_PER_WQE << |
| mlx5e_mpwqe_get_log_rq_size(params, xsk); |
| |
| rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk); |
| rq->mpwqe.num_strides = |
| BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk)); |
| |
| rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz); |
| |
| err = mlx5e_create_rq_umr_mkey(mdev, rq); |
| if (err) |
| goto err_rq_drop_page; |
| rq->mkey_be = cpu_to_be32(rq->umr_mkey.key); |
| |
| err = mlx5e_rq_alloc_mpwqe_info(rq, node); |
| if (err) |
| goto err_rq_mkey; |
| break; |
| default: /* MLX5_WQ_TYPE_CYCLIC */ |
| err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, |
| &rq->wq_ctrl); |
| if (err) |
| goto err_rq_xdp_prog; |
| |
| rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; |
| |
| wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); |
| |
| rq->wqe.info = rqp->frags_info; |
| rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride; |
| |
| rq->wqe.frags = |
| kvzalloc_node(array_size(sizeof(*rq->wqe.frags), |
| (wq_sz << rq->wqe.info.log_num_frags)), |
| GFP_KERNEL, node); |
| if (!rq->wqe.frags) { |
| err = -ENOMEM; |
| goto err_rq_wq_destroy; |
| } |
| |
| err = mlx5e_init_di_list(rq, wq_sz, node); |
| if (err) |
| goto err_rq_frags; |
| |
| rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key); |
| } |
| |
| if (xsk) { |
| err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, |
| MEM_TYPE_XSK_BUFF_POOL, NULL); |
| xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq); |
| } else { |
| /* Create a page_pool and register it with rxq */ |
| pp_params.order = 0; |
		pp_params.flags = 0; /* no internal DMA mapping in page_pool */
| pp_params.pool_size = pool_size; |
| pp_params.nid = node; |
| pp_params.dev = rq->pdev; |
| pp_params.dma_dir = rq->buff.map_dir; |
| |
		/* The page_pool can be used even when there is no rq->xdp_prog:
		 * since the page_pool does not handle DMA mapping, there is no
		 * required state to clear. The page_pool also gracefully
		 * handles elevated page refcounts.
		 */
| rq->page_pool = page_pool_create(&pp_params); |
| if (IS_ERR(rq->page_pool)) { |
| err = PTR_ERR(rq->page_pool); |
| rq->page_pool = NULL; |
| goto err_free_by_rq_type; |
| } |
| if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) |
| err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq, |
| MEM_TYPE_PAGE_POOL, rq->page_pool); |
| } |
| if (err) |
| goto err_free_by_rq_type; |
| |
| for (i = 0; i < wq_sz; i++) { |
| if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { |
| struct mlx5e_rx_wqe_ll *wqe = |
| mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); |
| u32 byte_count = |
| rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz; |
| u64 dma_offset = mlx5e_get_mpwqe_offset(i); |
| |
| wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom); |
| wqe->data[0].byte_count = cpu_to_be32(byte_count); |
| wqe->data[0].lkey = rq->mkey_be; |
| } else { |
| struct mlx5e_rx_wqe_cyc *wqe = |
| mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); |
| int f; |
| |
| for (f = 0; f < rq->wqe.info.num_frags; f++) { |
| u32 frag_size = rq->wqe.info.arr[f].frag_size | |
| MLX5_HW_START_PADDING; |
| |
| wqe->data[f].byte_count = cpu_to_be32(frag_size); |
| wqe->data[f].lkey = rq->mkey_be; |
| } |
			/* If num_frags is not a power of two, terminate the
			 * WQE with an empty data segment.
			 */
| if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) { |
| wqe->data[f].byte_count = 0; |
| wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY); |
| wqe->data[f].addr = 0; |
| } |
| } |
| } |
| |
| INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); |
| |
| switch (params->rx_cq_moderation.cq_period_mode) { |
| case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: |
| rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; |
| break; |
| case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: |
| default: |
| rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; |
| } |
| |
| rq->page_cache.head = 0; |
| rq->page_cache.tail = 0; |
| |
| return 0; |
| |
| err_free_by_rq_type: |
| switch (rq->wq_type) { |
| case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
| kvfree(rq->mpwqe.info); |
| err_rq_mkey: |
| mlx5_core_destroy_mkey(mdev, &rq->umr_mkey); |
| err_rq_drop_page: |
| mlx5e_free_mpwqe_rq_drop_page(rq); |
| break; |
| default: /* MLX5_WQ_TYPE_CYCLIC */ |
| mlx5e_free_di_list(rq); |
| err_rq_frags: |
| kvfree(rq->wqe.frags); |
| } |
| err_rq_wq_destroy: |
| mlx5_wq_destroy(&rq->wq_ctrl); |
| err_rq_xdp_prog: |
| if (params->xdp_prog) |
| bpf_prog_put(params->xdp_prog); |
| |
| return err; |
| } |
| |
| static void mlx5e_free_rq(struct mlx5e_rq *rq) |
| { |
| struct bpf_prog *old_prog; |
| int i; |
| |
| if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) { |
| old_prog = rcu_dereference_protected(rq->xdp_prog, |
| lockdep_is_held(&rq->priv->state_lock)); |
| if (old_prog) |
| bpf_prog_put(old_prog); |
| } |
| |
| switch (rq->wq_type) { |
| case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: |
| kvfree(rq->mpwqe.info); |
| mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey); |
| mlx5e_free_mpwqe_rq_drop_page(rq); |
| break; |
| default: /* MLX5_WQ_TYPE_CYCLIC */ |
| kvfree(rq->wqe.frags); |
| mlx5e_free_di_list(rq); |
| } |
| |
| for (i = rq->page_cache.head; i != rq->page_cache.tail; |
| i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) { |
| struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i]; |
| |
| /* With AF_XDP, page_cache is not used, so this loop is not |
| * entered, and it's safe to call mlx5e_page_release_dynamic |
| * directly. |
| */ |
| mlx5e_page_release_dynamic(rq, dma_info, false); |
| } |
| |
| xdp_rxq_info_unreg(&rq->xdp_rxq); |
| page_pool_destroy(rq->page_pool); |
| mlx5_wq_destroy(&rq->wq_ctrl); |
| } |
| |
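/* Create the HW RQ object from the prepared RQ context and WQ pages; the RQ
 * is created in the RST state and moved to RDY later.
 */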
| int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param) |
| { |
| struct mlx5_core_dev *mdev = rq->mdev; |
| u8 ts_format; |
| void *in; |
| void *rqc; |
| void *wq; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(create_rq_in) + |
| sizeof(u64) * rq->wq_ctrl.buf.npages; |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| ts_format = mlx5_is_real_time_rq(mdev) ? |
| MLX5_TIMESTAMP_FORMAT_REAL_TIME : |
| MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; |
| rqc = MLX5_ADDR_OF(create_rq_in, in, ctx); |
| wq = MLX5_ADDR_OF(rqc, rqc, wq); |
| |
| memcpy(rqc, param->rqc, sizeof(param->rqc)); |
| |
| MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn); |
| MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST); |
| MLX5_SET(rqc, rqc, ts_format, ts_format); |
| MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - |
| MLX5_ADAPTER_PAGE_SHIFT); |
| MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); |
| |
| mlx5_fill_page_frag_array(&rq->wq_ctrl.buf, |
| (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); |
| |
| err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state) |
| { |
| struct mlx5_core_dev *mdev = rq->mdev; |
| |
| void *in; |
| void *rqc; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_rq_in); |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY) |
| mlx5e_rqwq_reset(rq); |
| |
| rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); |
| |
| MLX5_SET(modify_rq_in, in, rq_state, curr_state); |
| MLX5_SET(rqc, rqc, state, next_state); |
| |
| err = mlx5_core_modify_rq(mdev, rq->rqn, in); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable) |
| { |
| struct mlx5_core_dev *mdev = rq->mdev; |
| |
| void *in; |
| void *rqc; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_rq_in); |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); |
| |
| MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); |
| MLX5_SET64(modify_rq_in, in, modify_bitmask, |
| MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS); |
| MLX5_SET(rqc, rqc, scatter_fcs, enable); |
| MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); |
| |
| err = mlx5_core_modify_rq(mdev, rq->rqn, in); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd) |
| { |
| struct mlx5_core_dev *mdev = rq->mdev; |
| void *in; |
| void *rqc; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_rq_in); |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx); |
| |
| MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY); |
| MLX5_SET64(modify_rq_in, in, modify_bitmask, |
| MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD); |
| MLX5_SET(rqc, rqc, vsd, vsd); |
| MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY); |
| |
| err = mlx5_core_modify_rq(mdev, rq->rqn, in); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| void mlx5e_destroy_rq(struct mlx5e_rq *rq) |
| { |
| mlx5_core_destroy_rq(rq->mdev, rq->rqn); |
| } |
| |
| int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time) |
| { |
| unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time); |
| |
| u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq)); |
| |
| do { |
| if (mlx5e_rqwq_get_cur_sz(rq) >= min_wqes) |
| return 0; |
| |
| msleep(20); |
| } while (time_before(jiffies, exp_time)); |
| |
| netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n", |
| rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes); |
| |
| mlx5e_reporter_rx_timeout(rq); |
| return -ETIMEDOUT; |
| } |
| |
| void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq) |
| { |
| struct mlx5_wq_ll *wq; |
| u16 head; |
| int i; |
| |
| if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) |
| return; |
| |
| wq = &rq->mpwqe.wq; |
| head = wq->head; |
| |
| /* Outstanding UMR WQEs (in progress) start at wq->head */ |
| for (i = 0; i < rq->mpwqe.umr_in_progress; i++) { |
| rq->dealloc_wqe(rq, head); |
| head = mlx5_wq_ll_get_wqe_next_ix(wq, head); |
| } |
| |
| rq->mpwqe.actual_wq_head = wq->head; |
| rq->mpwqe.umr_in_progress = 0; |
| rq->mpwqe.umr_completed = 0; |
| } |
| |
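/* Release all RX descriptors still owned by the driver: in-progress UMR WQEs
 * first (striding RQ only), then every WQE remaining in the work queue.
 */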
| void mlx5e_free_rx_descs(struct mlx5e_rq *rq) |
| { |
| __be16 wqe_ix_be; |
| u16 wqe_ix; |
| |
| if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { |
| struct mlx5_wq_ll *wq = &rq->mpwqe.wq; |
| |
| mlx5e_free_rx_in_progress_descs(rq); |
| |
| while (!mlx5_wq_ll_is_empty(wq)) { |
| struct mlx5e_rx_wqe_ll *wqe; |
| |
| wqe_ix_be = *wq->tail_next; |
| wqe_ix = be16_to_cpu(wqe_ix_be); |
| wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); |
| rq->dealloc_wqe(rq, wqe_ix); |
| mlx5_wq_ll_pop(wq, wqe_ix_be, |
| &wqe->next.next_wqe_index); |
| } |
| } else { |
| struct mlx5_wq_cyc *wq = &rq->wqe.wq; |
| |
| while (!mlx5_wq_cyc_is_empty(wq)) { |
| wqe_ix = mlx5_wq_cyc_get_tail(wq); |
| rq->dealloc_wqe(rq, wqe_ix); |
| mlx5_wq_cyc_pop(wq); |
| } |
| } |
| |
| } |
| |
| int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, |
| struct mlx5e_xsk_param *xsk, int node, |
| struct mlx5e_rq *rq) |
| { |
| struct mlx5_core_dev *mdev = rq->mdev; |
| int err; |
| |
| err = mlx5e_alloc_rq(params, xsk, param, node, rq); |
| if (err) |
| return err; |
| |
| err = mlx5e_create_rq(rq, param); |
| if (err) |
| goto err_free_rq; |
| |
| err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY); |
| if (err) |
| goto err_destroy_rq; |
| |
| if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev)) |
| __set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */ |
| |
| if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) |
| __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); |
| |
| if (params->rx_dim_enabled) |
| __set_bit(MLX5E_RQ_STATE_AM, &rq->state); |
| |
	/* We disable csum_complete when XDP is enabled, since XDP programs
	 * might manipulate packets, which would render skb->checksum
	 * incorrect.
	 */
| if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || params->xdp_prog) |
| __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state); |
| |
	/* For CQE compression on striding RQ, use the stride index provided
	 * by HW if the capability is supported.
	 */
| if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) && |
| MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) |
| __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state); |
| |
| return 0; |
| |
| err_destroy_rq: |
| mlx5e_destroy_rq(rq); |
| err_free_rq: |
| mlx5e_free_rq(rq); |
| |
| return err; |
| } |
| |
| void mlx5e_activate_rq(struct mlx5e_rq *rq) |
| { |
| set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); |
| if (rq->icosq) { |
| mlx5e_trigger_irq(rq->icosq); |
| } else { |
| local_bh_disable(); |
| napi_schedule(rq->cq.napi); |
| local_bh_enable(); |
| } |
| } |
| |
| void mlx5e_deactivate_rq(struct mlx5e_rq *rq) |
| { |
| clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); |
| synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ |
| } |
| |
| void mlx5e_close_rq(struct mlx5e_rq *rq) |
| { |
| cancel_work_sync(&rq->dim.work); |
| if (rq->icosq) |
| cancel_work_sync(&rq->icosq->recover_work); |
| cancel_work_sync(&rq->recover_work); |
| mlx5e_destroy_rq(rq); |
| mlx5e_free_rx_descs(rq); |
| mlx5e_free_rq(rq); |
| } |
| |
| static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) |
| { |
| kvfree(sq->db.xdpi_fifo.xi); |
| kvfree(sq->db.wqe_info); |
| } |
| |
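/* The xdpi FIFO is sized for the maximum number of data segments the whole
 * work queue can hold, an upper bound on the number of outstanding XDP
 * frames.
 */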
| static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) |
| { |
| struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; |
| int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); |
| int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; |
| |
| xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, |
| GFP_KERNEL, numa); |
| if (!xdpi_fifo->xi) |
| return -ENOMEM; |
| |
| xdpi_fifo->pc = &sq->xdpi_fifo_pc; |
| xdpi_fifo->cc = &sq->xdpi_fifo_cc; |
| xdpi_fifo->mask = dsegs_per_wq - 1; |
| |
| return 0; |
| } |
| |
| static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) |
| { |
| int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); |
| int err; |
| |
| sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, |
| GFP_KERNEL, numa); |
| if (!sq->db.wqe_info) |
| return -ENOMEM; |
| |
| err = mlx5e_alloc_xdpsq_fifo(sq, numa); |
| if (err) { |
| mlx5e_free_xdpsq_db(sq); |
| return err; |
| } |
| |
| return 0; |
| } |
| |
| static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, |
| struct mlx5e_params *params, |
| struct xsk_buff_pool *xsk_pool, |
| struct mlx5e_sq_param *param, |
| struct mlx5e_xdpsq *sq, |
| bool is_redirect) |
| { |
| void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); |
| struct mlx5_core_dev *mdev = c->mdev; |
| struct mlx5_wq_cyc *wq = &sq->wq; |
| int err; |
| |
| sq->pdev = c->pdev; |
| sq->mkey_be = c->mkey_be; |
| sq->channel = c; |
| sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; |
| sq->min_inline_mode = params->tx_min_inline_mode; |
| sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); |
| sq->xsk_pool = xsk_pool; |
| |
| sq->stats = sq->xsk_pool ? |
| &c->priv->channel_stats[c->ix].xsksq : |
| is_redirect ? |
| &c->priv->channel_stats[c->ix].xdpsq : |
| &c->priv->channel_stats[c->ix].rq_xdpsq; |
| |
| param->wq.db_numa_node = cpu_to_node(c->cpu); |
| err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); |
| if (err) |
| return err; |
| wq->db = &wq->db[MLX5_SND_DBR]; |
| |
| err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu)); |
| if (err) |
| goto err_sq_wq_destroy; |
| |
| return 0; |
| |
| err_sq_wq_destroy: |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| |
| return err; |
| } |
| |
| static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq) |
| { |
| mlx5e_free_xdpsq_db(sq); |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| } |
| |
| static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq) |
| { |
| kvfree(sq->db.wqe_info); |
| } |
| |
| static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa) |
| { |
| int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); |
| size_t size; |
| |
| size = array_size(wq_sz, sizeof(*sq->db.wqe_info)); |
| sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa); |
| if (!sq->db.wqe_info) |
| return -ENOMEM; |
| |
| return 0; |
| } |
| |
| static void mlx5e_icosq_err_cqe_work(struct work_struct *recover_work) |
| { |
| struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq, |
| recover_work); |
| |
| mlx5e_reporter_icosq_cqe_err(sq); |
| } |
| |
| static int mlx5e_alloc_icosq(struct mlx5e_channel *c, |
| struct mlx5e_sq_param *param, |
| struct mlx5e_icosq *sq) |
| { |
| void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); |
| struct mlx5_core_dev *mdev = c->mdev; |
| struct mlx5_wq_cyc *wq = &sq->wq; |
| int err; |
| |
| sq->channel = c; |
| sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; |
| sq->reserved_room = param->stop_room; |
| |
| param->wq.db_numa_node = cpu_to_node(c->cpu); |
| err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); |
| if (err) |
| return err; |
| wq->db = &wq->db[MLX5_SND_DBR]; |
| |
| err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu)); |
| if (err) |
| goto err_sq_wq_destroy; |
| |
| INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work); |
| |
| return 0; |
| |
| err_sq_wq_destroy: |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| |
| return err; |
| } |
| |
| static void mlx5e_free_icosq(struct mlx5e_icosq *sq) |
| { |
| mlx5e_free_icosq_db(sq); |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| } |
| |
| void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq) |
| { |
| kvfree(sq->db.wqe_info); |
| kvfree(sq->db.skb_fifo.fifo); |
| kvfree(sq->db.dma_fifo); |
| } |
| |
| int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa) |
| { |
| int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); |
| int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS; |
| |
| sq->db.dma_fifo = kvzalloc_node(array_size(df_sz, |
| sizeof(*sq->db.dma_fifo)), |
| GFP_KERNEL, numa); |
| sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz, |
| sizeof(*sq->db.skb_fifo.fifo)), |
| GFP_KERNEL, numa); |
| sq->db.wqe_info = kvzalloc_node(array_size(wq_sz, |
| sizeof(*sq->db.wqe_info)), |
| GFP_KERNEL, numa); |
| if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) { |
| mlx5e_free_txqsq_db(sq); |
| return -ENOMEM; |
| } |
| |
| sq->dma_fifo_mask = df_sz - 1; |
| |
| sq->db.skb_fifo.pc = &sq->skb_fifo_pc; |
| sq->db.skb_fifo.cc = &sq->skb_fifo_cc; |
| sq->db.skb_fifo.mask = df_sz - 1; |
| |
| return 0; |
| } |
| |
| static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, |
| int txq_ix, |
| struct mlx5e_params *params, |
| struct mlx5e_sq_param *param, |
| struct mlx5e_txqsq *sq, |
| int tc) |
| { |
| void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); |
| struct mlx5_core_dev *mdev = c->mdev; |
| struct mlx5_wq_cyc *wq = &sq->wq; |
| int err; |
| |
| sq->pdev = c->pdev; |
| sq->tstamp = c->tstamp; |
| sq->clock = &mdev->clock; |
| sq->mkey_be = c->mkey_be; |
| sq->netdev = c->netdev; |
| sq->mdev = c->mdev; |
| sq->priv = c->priv; |
| sq->ch_ix = c->ix; |
| sq->txq_ix = txq_ix; |
| sq->uar_map = mdev->mlx5e_res.hw_objs.bfreg.map; |
| sq->min_inline_mode = params->tx_min_inline_mode; |
| sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); |
| INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work); |
| if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert)) |
| set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state); |
| if (MLX5_IPSEC_DEV(c->priv->mdev)) |
| set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state); |
| if (param->is_mpw) |
| set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state); |
| sq->stop_room = param->stop_room; |
| sq->ptp_cyc2time = mlx5_sq_ts_translator(mdev); |
| |
| param->wq.db_numa_node = cpu_to_node(c->cpu); |
| err = mlx5_wq_cyc_create(mdev, ¶m->wq, sqc_wq, wq, &sq->wq_ctrl); |
| if (err) |
| return err; |
| wq->db = &wq->db[MLX5_SND_DBR]; |
| |
| err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); |
| if (err) |
| goto err_sq_wq_destroy; |
| |
| INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work); |
| sq->dim.mode = params->tx_cq_moderation.cq_period_mode; |
| |
| return 0; |
| |
| err_sq_wq_destroy: |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| |
| return err; |
| } |
| |
| void mlx5e_free_txqsq(struct mlx5e_txqsq *sq) |
| { |
| mlx5e_free_txqsq_db(sq); |
| mlx5_wq_destroy(&sq->wq_ctrl); |
| } |
| |
| static int mlx5e_create_sq(struct mlx5_core_dev *mdev, |
| struct mlx5e_sq_param *param, |
| struct mlx5e_create_sq_param *csp, |
| u32 *sqn) |
| { |
| u8 ts_format; |
| void *in; |
| void *sqc; |
| void *wq; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(create_sq_in) + |
| sizeof(u64) * csp->wq_ctrl->buf.npages; |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| ts_format = mlx5_is_real_time_sq(mdev) ? |
| MLX5_TIMESTAMP_FORMAT_REAL_TIME : |
| MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; |
| sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); |
| wq = MLX5_ADDR_OF(sqc, sqc, wq); |
| |
| memcpy(sqc, param->sqc, sizeof(param->sqc)); |
| MLX5_SET(sqc, sqc, tis_lst_sz, csp->tis_lst_sz); |
| MLX5_SET(sqc, sqc, tis_num_0, csp->tisn); |
| MLX5_SET(sqc, sqc, cqn, csp->cqn); |
| MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn); |
| MLX5_SET(sqc, sqc, ts_format, ts_format); |
| |
| |
| if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) |
| MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode); |
| |
| MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); |
| MLX5_SET(sqc, sqc, flush_in_error_en, 1); |
| |
| MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); |
| MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); |
| MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift - |
| MLX5_ADAPTER_PAGE_SHIFT); |
| MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); |
| |
| mlx5_fill_page_frag_array(&csp->wq_ctrl->buf, |
| (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); |
| |
| err = mlx5_core_create_sq(mdev, in, inlen, sqn); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn, |
| struct mlx5e_modify_sq_param *p) |
| { |
| u64 bitmask = 0; |
| void *in; |
| void *sqc; |
| int inlen; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_sq_in); |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); |
| |
| MLX5_SET(modify_sq_in, in, sq_state, p->curr_state); |
| MLX5_SET(sqc, sqc, state, p->next_state); |
| if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) { |
| bitmask |= 1; |
| MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index); |
| } |
| if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) { |
| bitmask |= 1 << 2; |
| MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id); |
| } |
| MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask); |
| |
| err = mlx5_core_modify_sq(mdev, sqn, in); |
| |
| kvfree(in); |
| |
| return err; |
| } |
| |
| static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn) |
| { |
| mlx5_core_destroy_sq(mdev, sqn); |
| } |
| |
| int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, |
| struct mlx5e_sq_param *param, |
| struct mlx5e_create_sq_param *csp, |
| u16 qos_queue_group_id, |
| u32 *sqn) |
| { |
| struct mlx5e_modify_sq_param msp = {0}; |
| int err; |
| |
| err = mlx5e_create_sq(mdev, param, csp, sqn); |
| if (err) |
| return err; |
| |
| msp.curr_state = MLX5_SQC_STATE_RST; |
| msp.next_state = MLX5_SQC_STATE_RDY; |
| if (qos_queue_group_id) { |
| msp.qos_update = true; |
| msp.qos_queue_group_id = qos_queue_group_id; |
| } |
| err = mlx5e_modify_sq(mdev, *sqn, &msp); |
| if (err) |
| mlx5e_destroy_sq(mdev, *sqn); |
| |
| return err; |
| } |
| |
| static int mlx5e_set_sq_maxrate(struct net_device *dev, |
| struct mlx5e_txqsq *sq, u32 rate); |
| |
| int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, |
| struct mlx5e_params *params, struct mlx5e_sq_param *param, |
| struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid) |
| { |
| struct mlx5e_create_sq_param csp = {}; |
| u32 tx_rate; |
| int err; |
| |
| err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc); |
| if (err) |
| return err; |
| |
| if (qos_queue_group_id) |
| sq->stats = c->priv->htb.qos_sq_stats[qos_qid]; |
| else |
| sq->stats = &c->priv->channel_stats[c->ix].sq[tc]; |
| |
| csp.tisn = tisn; |
| csp.tis_lst_sz = 1; |
| csp.cqn = sq->cq.mcq.cqn; |
| csp.wq_ctrl = &sq->wq_ctrl; |
| csp.min_inline_mode = sq->min_inline_mode; |
| err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn); |
| if (err) |
| goto err_free_txqsq; |
| |
| tx_rate = c->priv->tx_rates[sq->txq_ix]; |
| if (tx_rate) |
| mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); |
| |
| if (params->tx_dim_enabled) |
| sq->state |= BIT(MLX5E_SQ_STATE_AM); |
| |
| return 0; |
| |
| err_free_txqsq: |
| mlx5e_free_txqsq(sq); |
| |
| return err; |
| } |
| |
| void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq) |
| { |
| sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix); |
| set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
| netdev_tx_reset_queue(sq->txq); |
| netif_tx_start_queue(sq->txq); |
| } |
| |
| void mlx5e_tx_disable_queue(struct netdev_queue *txq) |
| { |
| __netif_tx_lock_bh(txq); |
| netif_tx_stop_queue(txq); |
| __netif_tx_unlock_bh(txq); |
| } |
| |
| void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) |
| { |
| struct mlx5_wq_cyc *wq = &sq->wq; |
| |
| clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
| synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */ |
| |
| mlx5e_tx_disable_queue(sq->txq); |
| |
	/* Last doorbell out: post a NOP so that any WQEs posted without a
	 * doorbell are flushed to HW before the SQ is destroyed.
	 */
| if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { |
| u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
| struct mlx5e_tx_wqe *nop; |
| |
| sq->db.wqe_info[pi] = (struct mlx5e_tx_wqe_info) { |
| .num_wqebbs = 1, |
| }; |
| |
| nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); |
| mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); |
| } |
| } |
| |
| void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) |
| { |
| struct mlx5_core_dev *mdev = sq->mdev; |
| struct mlx5_rate_limit rl = {0}; |
| |
| cancel_work_sync(&sq->dim.work); |
| cancel_work_sync(&sq->recover_work); |
| mlx5e_destroy_sq(mdev, sq->sqn); |
| if (sq->rate_limit) { |
| rl.rate = sq->rate_limit; |
| mlx5_rl_remove_rate(mdev, &rl); |
| } |
| mlx5e_free_txqsq_descs(sq); |
| mlx5e_free_txqsq(sq); |
| } |
| |
| void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) |
| { |
| struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq, |
| recover_work); |
| |
| mlx5e_reporter_tx_err_cqe(sq); |
| } |
| |
| int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, |
| struct mlx5e_sq_param *param, struct mlx5e_icosq *sq) |
| { |
| struct mlx5e_create_sq_param csp = {}; |
| int err; |
| |
| err = mlx5e_alloc_icosq(c, param, sq); |
| if (err) |
| return err; |
| |
| csp.cqn = sq->cq.mcq.cqn; |
| csp.wq_ctrl = &sq->wq_ctrl; |
| csp.min_inline_mode = params->tx_min_inline_mode; |
| err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); |
| if (err) |
| goto err_free_icosq; |
| |
| if (param->is_tls) { |
| sq->ktls_resync = mlx5e_ktls_rx_resync_create_resp_list(); |
| if (IS_ERR(sq->ktls_resync)) { |
| err = PTR_ERR(sq->ktls_resync); |
| goto err_destroy_icosq; |
| } |
| } |
| return 0; |
| |
| err_destroy_icosq: |
| mlx5e_destroy_sq(c->mdev, sq->sqn); |
| err_free_icosq: |
| mlx5e_free_icosq(sq); |
| |
| return err; |
| } |
| |
| void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) |
| { |
| set_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); |
| } |
| |
| void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) |
| { |
| clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); |
| synchronize_net(); /* Sync with NAPI. */ |
| } |
| |
| void mlx5e_close_icosq(struct mlx5e_icosq *sq) |
| { |
| struct mlx5e_channel *c = sq->channel; |
| |
| if (sq->ktls_resync) |
| mlx5e_ktls_rx_resync_destroy_resp_list(sq->ktls_resync); |
| mlx5e_destroy_sq(c->mdev, sq->sqn); |
| mlx5e_free_icosq_descs(sq); |
| mlx5e_free_icosq(sq); |
| } |
| |
| int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, |
| struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, |
| struct mlx5e_xdpsq *sq, bool is_redirect) |
| { |
| struct mlx5e_create_sq_param csp = {}; |
| int err; |
| |
| err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect); |
| if (err) |
| return err; |
| |
| csp.tis_lst_sz = 1; |
| csp.tisn = c->priv->tisn[c->lag_port][0]; /* tc = 0 */ |
| csp.cqn = sq->cq.mcq.cqn; |
| csp.wq_ctrl = &sq->wq_ctrl; |
| csp.min_inline_mode = sq->min_inline_mode; |
| set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
| err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn); |
| if (err) |
| goto err_free_xdpsq; |
| |
| mlx5e_set_xmit_fp(sq, param->is_mpw); |
| |
| if (!param->is_mpw) { |
| unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; |
| unsigned int inline_hdr_sz = 0; |
| int i; |
| |
| if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { |
| inline_hdr_sz = MLX5E_XDP_MIN_INLINE; |
| ds_cnt++; |
| } |
| |
		/* Pre-initialize fixed WQE fields */
| for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { |
| struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); |
| struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; |
| struct mlx5_wqe_eth_seg *eseg = &wqe->eth; |
| struct mlx5_wqe_data_seg *dseg; |
| |
| sq->db.wqe_info[i] = (struct mlx5e_xdp_wqe_info) { |
| .num_wqebbs = 1, |
| .num_pkts = 1, |
| }; |
| |
| cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); |
| eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); |
| |
| dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); |
| dseg->lkey = sq->mkey_be; |
| } |
| } |
| |
| return 0; |
| |
| err_free_xdpsq: |
| clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
| mlx5e_free_xdpsq(sq); |
| |
| return err; |
| } |
| |
| void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) |
| { |
| struct mlx5e_channel *c = sq->channel; |
| |
| clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); |
| synchronize_net(); /* Sync with NAPI. */ |
| |
| mlx5e_destroy_sq(c->mdev, sq->sqn); |
| mlx5e_free_xdpsq_descs(sq); |
| mlx5e_free_xdpsq(sq); |
| } |
| |
| static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv, |
| struct mlx5e_cq_param *param, |
| struct mlx5e_cq *cq) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| struct mlx5_core_cq *mcq = &cq->mcq; |
| int eqn_not_used; |
| unsigned int irqn; |
| int err; |
| u32 i; |
| |
| err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); |
| if (err) |
| return err; |
| |
| err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, |
| &cq->wq_ctrl); |
| if (err) |
| return err; |
| |
| mcq->cqe_sz = 64; |
| mcq->set_ci_db = cq->wq_ctrl.db.db; |
| mcq->arm_db = cq->wq_ctrl.db.db + 1; |
| *mcq->set_ci_db = 0; |
| *mcq->arm_db = 0; |
| mcq->vector = param->eq_ix; |
| mcq->comp = mlx5e_completion_event; |
| mcq->event = mlx5e_cq_error_event; |
| mcq->irqn = irqn; |
| |
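	/* Mark all CQEs as invalid and HW-owned so that stale entries are
	 * never treated as valid completions.
	 */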
| for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { |
| struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); |
| |
| cqe->op_own = 0xf1; |
| } |
| |
| cq->mdev = mdev; |
| cq->netdev = priv->netdev; |
| cq->priv = priv; |
| |
| return 0; |
| } |
| |
| static int mlx5e_alloc_cq(struct mlx5e_priv *priv, |
| struct mlx5e_cq_param *param, |
| struct mlx5e_create_cq_param *ccp, |
| struct mlx5e_cq *cq) |
| { |
| int err; |
| |
| param->wq.buf_numa_node = ccp->node; |
| param->wq.db_numa_node = ccp->node; |
| param->eq_ix = ccp->ix; |
| |
| err = mlx5e_alloc_cq_common(priv, param, cq); |
| |
| cq->napi = ccp->napi; |
| cq->ch_stats = ccp->ch_stats; |
| |
| return err; |
| } |
| |
| static void mlx5e_free_cq(struct mlx5e_cq *cq) |
| { |
| mlx5_wq_destroy(&cq->wq_ctrl); |
| } |
| |
| static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) |
| { |
| u32 out[MLX5_ST_SZ_DW(create_cq_out)]; |
| struct mlx5_core_dev *mdev = cq->mdev; |
| struct mlx5_core_cq *mcq = &cq->mcq; |
| |
| void *in; |
| void *cqc; |
| int inlen; |
| unsigned int irqn_not_used; |
| int eqn; |
| int err; |
| |
| err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); |
| if (err) |
| return err; |
| |
| inlen = MLX5_ST_SZ_BYTES(create_cq_in) + |
| sizeof(u64) * cq->wq_ctrl.buf.npages; |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); |
| |
| memcpy(cqc, param->cqc, sizeof(param->cqc)); |
| |
| mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, |
| (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); |
| |
| MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); |
| MLX5_SET(cqc, cqc, c_eqn, eqn); |
| MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); |
| MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - |
| MLX5_ADAPTER_PAGE_SHIFT); |
| MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); |
| |
| err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); |
| |
| kvfree(in); |
| |
| if (err) |
| return err; |
| |
| mlx5e_cq_arm(cq); |
| |
| return 0; |
| } |
| |
| static void mlx5e_destroy_cq(struct mlx5e_cq *cq) |
| { |
| mlx5_core_destroy_cq(cq->mdev, &cq->mcq); |
| } |
| |
| int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder, |
| struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, |
| struct mlx5e_cq *cq) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| int err; |
| |
| err = mlx5e_alloc_cq(priv, param, ccp, cq); |
| if (err) |
| return err; |
| |
| err = mlx5e_create_cq(cq, param); |
| if (err) |
| goto err_free_cq; |
| |
| if (MLX5_CAP_GEN(mdev, cq_moderation)) |
| mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); |
| return 0; |
| |
| err_free_cq: |
| mlx5e_free_cq(cq); |
| |
| return err; |
| } |
| |
| void mlx5e_close_cq(struct mlx5e_cq *cq) |
| { |
| mlx5e_destroy_cq(cq); |
| mlx5e_free_cq(cq); |
| } |
| |
| static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, |
| struct mlx5e_params *params, |
| struct mlx5e_create_cq_param *ccp, |
| struct mlx5e_channel_param *cparam) |
| { |
| int err; |
| int tc; |
| |
| for (tc = 0; tc < c->num_tc; tc++) { |
| err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp, |
| ccp, &c->sq[tc].cq); |
| if (err) |
| goto err_close_tx_cqs; |
| } |
| |
| return 0; |
| |
| err_close_tx_cqs: |
| for (tc--; tc >= 0; tc--) |
| mlx5e_close_cq(&c->sq[tc].cq); |
| |
| return err; |
| } |
| |
| static void mlx5e_close_tx_cqs(struct mlx5e_channel *c) |
| { |
| int tc; |
| |
| for (tc = 0; tc < c->num_tc; tc++) |
| mlx5e_close_cq(&c->sq[tc].cq); |
| } |
| |
| static int mlx5e_open_sqs(struct mlx5e_channel *c, |
| struct mlx5e_params *params, |
| struct mlx5e_channel_param *cparam) |
| { |
| int err, tc; |
| |
| for (tc = 0; tc < params->num_tc; tc++) { |
| int txq_ix = c->ix + tc * params->num_channels; |
| |
| err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, |
| params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0); |
| if (err) |
| goto err_close_sqs; |
| } |
| |
| return 0; |
| |
| err_close_sqs: |
| for (tc--; tc >= 0; tc--) |
| mlx5e_close_txqsq(&c->sq[tc]); |
| |
| return err; |
| } |
| |
| static void mlx5e_close_sqs(struct mlx5e_channel *c) |
| { |
| int tc; |
| |
| for (tc = 0; tc < c->num_tc; tc++) |
| mlx5e_close_txqsq(&c->sq[tc]); |
| } |
| |
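/* Apply a HW rate limit to a single SQ: release the previously used rate
 * table entry (if any), add the new rate to the table and point the SQ at
 * its index via a MODIFY_SQ command.
 */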
| static int mlx5e_set_sq_maxrate(struct net_device *dev, |
| struct mlx5e_txqsq *sq, u32 rate) |
| { |
| struct mlx5e_priv *priv = netdev_priv(dev); |
| struct mlx5_core_dev *mdev = priv->mdev; |
| struct mlx5e_modify_sq_param msp = {0}; |
| struct mlx5_rate_limit rl = {0}; |
| u16 rl_index = 0; |
| int err; |
| |
| if (rate == sq->rate_limit) |
| /* nothing to do */ |
| return 0; |
| |
| if (sq->rate_limit) { |
| rl.rate = sq->rate_limit; |
		/* remove the current rate-limit entry to free room for the next one */
| mlx5_rl_remove_rate(mdev, &rl); |
| } |
| |
| sq->rate_limit = 0; |
| |
| if (rate) { |
| rl.rate = rate; |
| err = mlx5_rl_add_rate(mdev, &rl_index, &rl); |
| if (err) { |
| netdev_err(dev, "Failed configuring rate %u: %d\n", |
| rate, err); |
| return err; |
| } |
| } |
| |
| msp.curr_state = MLX5_SQC_STATE_RDY; |
| msp.next_state = MLX5_SQC_STATE_RDY; |
| msp.rl_index = rl_index; |
| msp.rl_update = true; |
| err = mlx5e_modify_sq(mdev, sq->sqn, &msp); |
| if (err) { |
| netdev_err(dev, "Failed configuring rate %u: %d\n", |
| rate, err); |
| /* remove the rate from the table */ |
| if (rate) |
| mlx5_rl_remove_rate(mdev, &rl); |
| return err; |
| } |
| |
| sq->rate_limit = rate; |
| return 0; |
| } |
| |
| static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate) |
| { |
| struct mlx5e_priv *priv = netdev_priv(dev); |
| struct mlx5_core_dev *mdev = priv->mdev; |
| struct mlx5e_txqsq *sq = priv->txq2sq[index]; |
| int err = 0; |
| |
| if (!mlx5_rl_is_supported(mdev)) { |
| netdev_err(dev, "Rate limiting is not supported on this device\n"); |
| return -EINVAL; |
| } |
| |
| /* rate is given in Mb/sec, HW config is in Kb/sec */ |
| rate = rate << 10; |
| |
	/* Check whether the rate is in the valid range; 0 (unlimited) is always valid */
| if (rate && !mlx5_rl_is_in_range(mdev, rate)) { |
| netdev_err(dev, "TX rate %u, is not in range\n", rate); |
| return -ERANGE; |
| } |
| |
| mutex_lock(&priv->state_lock); |
| if (test_bit(MLX5E_STATE_OPENED, &priv->state)) |
| err = mlx5e_set_sq_maxrate(dev, sq, rate); |
| if (!err) |
| priv->tx_rates[index] = rate; |
| mutex_unlock(&priv->state_lock); |
| |
| return err; |
| } |
| |
| static int mlx5e_open_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *params, |
| struct mlx5e_rq_param *rq_params) |
| { |
| int err; |
| |
| err = mlx5e_init_rxq_rq(c, params, &c->rq); |
| if (err) |
| return err; |
| |
| return mlx5e_open_rq(params, rq_params, NULL, cpu_to_node(c->cpu), &c->rq); |
| } |
| |
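/* Open all per-channel queues. CQs are opened first, then the ICOSQs, the
 * per-TC TX SQs, the XDP SQ (when an XDP program is attached), the RQ and the
 * XDP-redirect SQ; on failure, teardown is performed in reverse order.
 */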
| static int mlx5e_open_queues(struct mlx5e_channel *c, |
| struct mlx5e_params *params, |
| struct mlx5e_channel_param *cparam) |
| { |
| struct dim_cq_moder icocq_moder = {0, 0}; |
| struct mlx5e_create_cq_param ccp; |
| int err; |
| |
| mlx5e_build_create_cq_param(&ccp, c); |
| |
| err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp, |
| &c->async_icosq.cq); |
| if (err) |
| return err; |
| |
| err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp, |
| &c->icosq.cq); |
| if (err) |
| goto err_close_async_icosq_cq; |
| |
| err = mlx5e_open_tx_cqs(c, params, &ccp, cparam); |
| if (err) |
| goto err_close_icosq_cq; |
| |
| err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp, |
| &c->xdpsq.cq); |
| if (err) |
| goto err_close_tx_cqs; |
| |
| err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp, |
| &c->rq.cq); |
| if (err) |
| goto err_close_xdp_tx_cqs; |
| |
| err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, |
| &ccp, &c->rq_xdpsq.cq) : 0; |
| if (err) |
| goto err_close_rx_cq; |
| |
| spin_lock_init(&c->async_icosq_lock); |
| |
| err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq); |
| if (err) |
| goto err_close_xdpsq_cq; |
| |
| err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq); |
| if (err) |
| goto err_close_async_icosq; |
| |
| err = mlx5e_open_sqs(c, params, cparam); |
| if (err) |
| goto err_close_icosq; |
| |
| if (c->xdp) { |
| err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, |
| &c->rq_xdpsq, false); |
| if (err) |
| goto err_close_sqs; |
| } |
| |
| err = mlx5e_open_rxq_rq(c, params, &cparam->rq); |
| if (err) |
| goto err_close_xdp_sq; |
| |
| err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, NULL, &c->xdpsq, true); |
| if (err) |
| goto err_close_rq; |
| |
| return 0; |
| |
| err_close_rq: |
| mlx5e_close_rq(&c->rq); |
| |
| err_close_xdp_sq: |
| if (c->xdp) |
| mlx5e_close_xdpsq(&c->rq_xdpsq); |
| |
| err_close_sqs: |
| mlx5e_close_sqs(c); |
| |
| err_close_icosq: |
| mlx5e_close_icosq(&c->icosq); |
| |
| err_close_async_icosq: |
| mlx5e_close_icosq(&c->async_icosq); |
| |
| err_close_xdpsq_cq: |
| if (c->xdp) |
| mlx5e_close_cq(&c->rq_xdpsq.cq); |
| |
| err_close_rx_cq: |
| mlx5e_close_cq(&c->rq.cq); |
| |
| err_close_xdp_tx_cqs: |
| mlx5e_close_cq(&c->xdpsq.cq); |
| |
| err_close_tx_cqs: |
| mlx5e_close_tx_cqs(c); |
| |
| err_close_icosq_cq: |
| mlx5e_close_cq(&c->icosq.cq); |
| |
| err_close_async_icosq_cq: |
| mlx5e_close_cq(&c->async_icosq.cq); |
| |
| return err; |
| } |
| |
| static void mlx5e_close_queues(struct mlx5e_channel *c) |
| { |
| mlx5e_close_xdpsq(&c->xdpsq); |
| mlx5e_close_rq(&c->rq); |
| if (c->xdp) |
| mlx5e_close_xdpsq(&c->rq_xdpsq); |
| mlx5e_close_sqs(c); |
| mlx5e_close_icosq(&c->icosq); |
| mlx5e_close_icosq(&c->async_icosq); |
| if (c->xdp) |
| mlx5e_close_cq(&c->rq_xdpsq.cq); |
| mlx5e_close_cq(&c->rq.cq); |
| mlx5e_close_cq(&c->xdpsq.cq); |
| mlx5e_close_tx_cqs(c); |
| mlx5e_close_cq(&c->icosq.cq); |
| mlx5e_close_cq(&c->async_icosq.cq); |
| } |
| |
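/* Distribute channels across the LAG ports. Non-PF functions add a
 * vhca_id-based bias so that they do not all start from the same port.
 */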
| static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix) |
| { |
| u16 port_aff_bias = mlx5_core_is_pf(mdev) ? 0 : MLX5_CAP_GEN(mdev, vhca_id); |
| |
| return (ix + port_aff_bias) % mlx5e_get_num_lag_ports(mdev); |
| } |
| |
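/* Create one channel: allocate it on the NUMA node of the first CPU in the
 * channel IRQ's affinity mask, register its NAPI context and open its queues
 * (including the XSK queues when an XSK pool is attached).
 */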
| static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, |
| struct mlx5e_params *params, |
| struct mlx5e_channel_param *cparam, |
| struct xsk_buff_pool *xsk_pool, |
| struct mlx5e_channel **cp) |
| { |
| int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix)); |
| struct net_device *netdev = priv->netdev; |
| struct mlx5e_xsk_param xsk; |
| struct mlx5e_channel *c; |
| unsigned int irq; |
| int err; |
| int eqn; |
| |
| err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); |
| if (err) |
| return err; |
| |
| c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); |
| if (!c) |
| return -ENOMEM; |
| |
| c->priv = priv; |
| c->mdev = priv->mdev; |
| c->tstamp = &priv->tstamp; |
| c->ix = ix; |
| c->cpu = cpu; |
| c->pdev = mlx5_core_dma_dev(priv->mdev); |
| c->netdev = priv->netdev; |
| c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); |
| c->num_tc = params->num_tc; |
| c->xdp = !!params->xdp_prog; |
| c->stats = &priv->channel_stats[ix].ch; |
| c->aff_mask = irq_get_effective_affinity_mask(irq); |
| c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix); |
| |
| netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); |
| |
| err = mlx5e_open_queues(c, params, cparam); |
| if (unlikely(err)) |
| goto err_napi_del; |
| |
| if (xsk_pool) { |
| mlx5e_build_xsk_param(xsk_pool, &xsk); |
| err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c); |
| if (unlikely(err)) |
| goto err_close_queues; |
| } |
| |
| *cp = c; |
| |
| return 0; |
| |
| err_close_queues: |
| mlx5e_close_queues(c); |
| |
| err_napi_del: |
| netif_napi_del(&c->napi); |
| |
| kvfree(c); |
| |
| return err; |
| } |
| |
| static void mlx5e_activate_channel(struct mlx5e_channel *c) |
| { |
| int tc; |
| |
| napi_enable(&c->napi); |
| |
| for (tc = 0; tc < c->num_tc; tc++) |
| mlx5e_activate_txqsq(&c->sq[tc]); |
| mlx5e_activate_icosq(&c->icosq); |
| mlx5e_activate_icosq(&c->async_icosq); |
| mlx5e_activate_rq(&c->rq); |
| |
| if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
| mlx5e_activate_xsk(c); |
| } |
| |
| static void mlx5e_deactivate_channel(struct mlx5e_channel *c) |
| { |
| int tc; |
| |
| if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
| mlx5e_deactivate_xsk(c); |
| |
| mlx5e_deactivate_rq(&c->rq); |
| mlx5e_deactivate_icosq(&c->async_icosq); |
| mlx5e_deactivate_icosq(&c->icosq); |
| for (tc = 0; tc < c->num_tc; tc++) |
| mlx5e_deactivate_txqsq(&c->sq[tc]); |
| mlx5e_qos_deactivate_queues(c); |
| |
| napi_disable(&c->napi); |
| } |
| |
| static void mlx5e_close_channel(struct mlx5e_channel *c) |
| { |
| if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) |
| mlx5e_close_xsk(c); |
| mlx5e_close_queues(c); |
| mlx5e_qos_close_queues(c); |
| netif_napi_del(&c->napi); |
| |
| kvfree(c); |
| } |
| |
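| /* Open all data channels, the PTP channel (when port TX timestamping or PTP |
| * RX is enabled) and the QoS queues; everything opened so far is rolled back |
| * on failure. |
| */ |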
| int mlx5e_open_channels(struct mlx5e_priv *priv, |
| struct mlx5e_channels *chs) |
| { |
| struct mlx5e_channel_param *cparam; |
| int err = -ENOMEM; |
| int i; |
| |
| chs->num = chs->params.num_channels; |
| |
| chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL); |
| cparam = kvzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL); |
| if (!chs->c || !cparam) |
| goto err_free; |
| |
| err = mlx5e_build_channel_param(priv->mdev, &chs->params, priv->q_counter, cparam); |
| if (err) |
| goto err_free; |
| |
| for (i = 0; i < chs->num; i++) { |
| struct xsk_buff_pool *xsk_pool = NULL; |
| |
| if (chs->params.xdp_prog) |
| xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i); |
| |
| err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]); |
| if (err) |
| goto err_close_channels; |
| } |
| |
| if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) { |
| err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp); |
| if (err) |
| goto err_close_channels; |
| } |
| |
| err = mlx5e_qos_open_queues(priv, chs); |
| if (err) |
| goto err_close_ptp; |
| |
| mlx5e_health_channels_update(priv); |
| kvfree(cparam); |
| return 0; |
| |
| err_close_ptp: |
| if (chs->ptp) |
| mlx5e_ptp_close(chs->ptp); |
| |
| err_close_channels: |
| for (i--; i >= 0; i--) |
| mlx5e_close_channel(chs->c[i]); |
| |
| err_free: |
| kfree(chs->c); |
| kvfree(cparam); |
| chs->num = 0; |
| return err; |
| } |
| |
| static void mlx5e_activate_channels(struct mlx5e_channels *chs) |
| { |
| int i; |
| |
| for (i = 0; i < chs->num; i++) |
| mlx5e_activate_channel(chs->c[i]); |
| |
| if (chs->ptp) |
| mlx5e_ptp_activate_channel(chs->ptp); |
| } |
| |
| #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */ |
| |
| static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs) |
| { |
| int err = 0; |
| int i; |
| |
| for (i = 0; i < chs->num; i++) { |
| int timeout = err ? 0 : MLX5E_RQ_WQES_TIMEOUT; |
| |
| err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq, timeout); |
| |
| /* Don't wait on the XSK RQ, because the newer xdpsock sample |
| * doesn't provide any Fill Ring entries at the setup stage. |
| */ |
| } |
| |
| return err ? -ETIMEDOUT : 0; |
| } |
| |
| static void mlx5e_deactivate_channels(struct mlx5e_channels *chs) |
| { |
| int i; |
| |
| if (chs->ptp) |
| mlx5e_ptp_deactivate_channel(chs->ptp); |
| |
| for (i = 0; i < chs->num; i++) |
| mlx5e_deactivate_channel(chs->c[i]); |
| } |
| |
| void mlx5e_close_channels(struct mlx5e_channels *chs) |
| { |
| int i; |
| |
| if (chs->ptp) { |
| mlx5e_ptp_close(chs->ptp); |
| chs->ptp = NULL; |
| } |
| for (i = 0; i < chs->num; i++) |
| mlx5e_close_channel(chs->c[i]); |
| |
| kfree(chs->c); |
| chs->num = 0; |
| } |
| |
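| /* Create an RQ table (RQT) with @sz entries. All entries initially point at |
| * the drop RQ and are redirected to real RQs later via mlx5e_redirect_rqt(). |
| */ |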
| static int |
| mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| void *rqtc; |
| int inlen; |
| int err; |
| u32 *in; |
| int i; |
| |
| inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); |
| |
| MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); |
| MLX5_SET(rqtc, rqtc, rqt_max_size, sz); |
| |
| for (i = 0; i < sz; i++) |
| MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); |
| |
| err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); |
| if (!err) |
| rqt->enabled = true; |
| |
| kvfree(in); |
| return err; |
| } |
| |
| void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) |
| { |
| rqt->enabled = false; |
| mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); |
| } |
| |
| int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) |
| { |
| struct mlx5e_rqt *rqt = &priv->indir_rqt; |
| int err; |
| |
| err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt); |
| if (err) |
| mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err); |
| return err; |
| } |
| |
| int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) |
| { |
| int err; |
| int ix; |
| |
| for (ix = 0; ix < n; ix++) { |
| err = mlx5e_create_rqt(priv, 1 /* size */, &tirs[ix].rqt); |
| if (unlikely(err)) |
| goto err_destroy_rqts; |
| } |
| |
| return 0; |
| |
| err_destroy_rqts: |
| mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err); |
| for (ix--; ix >= 0; ix--) |
| mlx5e_destroy_rqt(priv, &tirs[ix].rqt); |
| |
| return err; |
| } |
| |
| void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) |
| { |
| int i; |
| |
| for (i = 0; i < n; i++) |
| mlx5e_destroy_rqt(priv, &tirs[i].rqt); |
| } |
| |
| static int mlx5e_rx_hash_fn(int hfunc) |
| { |
| return (hfunc == ETH_RSS_HASH_TOP) ? |
| MLX5_RX_HASH_FN_TOEPLITZ : |
| MLX5_RX_HASH_FN_INVERTED_XOR8; |
| } |
| |
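| /* Reverse the order of the lowest @size bits of @a, e.g. a = 0b0011 with |
| * size = 4 gives 0b1100. Used to spread the RSS indirection when the XOR8 |
| * hash function is selected (see mlx5e_fill_rqt_rqns()). |
| */ |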
| int mlx5e_bits_invert(unsigned long a, int size) |
| { |
| int inv = 0; |
| int i; |
| |
| for (i = 0; i < size; i++) |
| inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; |
| |
| return inv; |
| } |
| |
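| /* Fill the RQT entries. For RSS, each entry is mapped through the |
| * indirection table to a channel RQ (with the index bit-reversed for the XOR |
| * hash function); otherwise every entry points at the single RQ in @rrp. |
| */ |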
| static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, |
| struct mlx5e_redirect_rqt_param rrp, void *rqtc) |
| { |
| int i; |
| |
| for (i = 0; i < sz; i++) { |
| u32 rqn; |
| |
| if (rrp.is_rss) { |
| int ix = i; |
| |
| if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) |
| ix = mlx5e_bits_invert(i, ilog2(sz)); |
| |
| ix = priv->rss_params.indirection_rqt[ix]; |
| rqn = rrp.rss.channels->c[ix]->rq.rqn; |
| } else { |
| rqn = rrp.rqn; |
| } |
| MLX5_SET(rqtc, rqtc, rq_num[i], rqn); |
| } |
| } |
| |
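| /* Point an existing RQT at a new set of RQs via a MODIFY_RQT command. */ |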
| int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, |
| struct mlx5e_redirect_rqt_param rrp) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| void *rqtc; |
| int inlen; |
| u32 *in; |
| int err; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); |
| |
| MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); |
| MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); |
| mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc); |
| err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); |
| |
| kvfree(in); |
| return err; |
| } |
| |
| static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, |
| struct mlx5e_redirect_rqt_param rrp) |
| { |
| if (!rrp.is_rss) |
| return rrp.rqn; |
| |
| if (ix >= rrp.rss.channels->num) |
| return priv->drop_rq.rqn; |
| |
| return rrp.rss.channels->c[ix]->rq.rqn; |
| } |
| |
| static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, |
| struct mlx5e_redirect_rqt_param rrp, |
| struct mlx5e_redirect_rqt_param *ptp_rrp) |
| { |
| u32 rqtn; |
| int ix; |
| |
| if (priv->indir_rqt.enabled) { |
| /* RSS RQ table */ |
| rqtn = priv->indir_rqt.rqtn; |
| mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); |
| } |
| |
| for (ix = 0; ix < priv->max_nch; ix++) { |
| struct mlx5e_redirect_rqt_param direct_rrp = { |
| .is_rss = false, |
| { |
| .rqn = mlx5e_get_direct_rqn(priv, ix, rrp) |
| }, |
| }; |
| |
| /* Direct RQ Tables */ |
| if (!priv->direct_tir[ix].rqt.enabled) |
| continue; |
| |
| rqtn = priv->direct_tir[ix].rqt.rqtn; |
| mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); |
| } |
| if (ptp_rrp) { |
| rqtn = priv->ptp_tir.rqt.rqtn; |
| mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp); |
| } |
| } |
| |
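| /* Steer the indirect (RSS), direct and PTP RQ tables at the RQs of the given |
| * channels. The PTP RQT falls back to the drop RQ when the PTP RQ is not |
| * available. |
| */ |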
| static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, |
| struct mlx5e_channels *chs) |
| { |
| bool rx_ptp_support = priv->profile->rx_ptp_support; |
| struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL; |
| struct mlx5e_redirect_rqt_param rrp = { |
| .is_rss = true, |
| { |
| .rss = { |
| .channels = chs, |
| .hfunc = priv->rss_params.hfunc, |
| } |
| }, |
| }; |
| struct mlx5e_redirect_rqt_param ptp_rrp; |
| |
| if (rx_ptp_support) { |
| u32 ptp_rqn; |
| |
| ptp_rrp.is_rss = false; |
| ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ? |
| priv->drop_rq.rqn : ptp_rqn; |
| ptp_rrp_p = &ptp_rrp; |
| } |
| mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p); |
| } |
| |
| static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) |
| { |
| bool rx_ptp_support = priv->profile->rx_ptp_support; |
| struct mlx5e_redirect_rqt_param drop_rrp = { |
| .is_rss = false, |
| { |
| .rqn = priv->drop_rq.rqn, |
| }, |
| }; |
| |
| mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL); |
| } |
| |
| static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { |
| [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
| .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, |
| .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
| }, |
| [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
| .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, |
| .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
| }, |
| [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
| .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, |
| .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
| }, |
| [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
| .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, |
| .rx_hash_fields = MLX5_HASH_IP_L4PORTS, |
| }, |
| [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
| }, |
| [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
| }, |
| [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
| }, |
| [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, |
| }, |
| [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP, |
| }, |
| [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, |
| .l4_prot_type = 0, |
| .rx_hash_fields = MLX5_HASH_IP, |
| }, |
| }; |
| |
| struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) |
| { |
| return tirc_default_config[tt]; |
| } |
| |
| static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) |
| { |
| if (!params->lro_en) |
| return; |
| |
| #define ROUGH_MAX_L2_L3_HDR_SZ 256 |
| |
| MLX5_SET(tirc, tirc, lro_enable_mask, |
| MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | |
| MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); |
| MLX5_SET(tirc, tirc, lro_max_ip_payload_size, |
| (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); |
| MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); |
| } |
| |
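| /* Program the TIR RX hash: the hash function, the Toeplitz key (when |
| * applicable) and the L3/L4 fields selected for this traffic type, on either |
| * the inner or the outer headers. |
| */ |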
| void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, |
| const struct mlx5e_tirc_config *ttconfig, |
| void *tirc, bool inner) |
| { |
| void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : |
| MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); |
| |
| MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); |
| if (rss_params->hfunc == ETH_RSS_HASH_TOP) { |
| void *rss_key = MLX5_ADDR_OF(tirc, tirc, |
| rx_hash_toeplitz_key); |
| size_t len = MLX5_FLD_SZ_BYTES(tirc, |
| rx_hash_toeplitz_key); |
| |
| MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); |
| memcpy(rss_key, rss_params->toeplitz_hash_key, len); |
| } |
| MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, |
| ttconfig->l3_prot_type); |
| MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, |
| ttconfig->l4_prot_type); |
| MLX5_SET(rx_hash_field_select, hfso, selected_fields, |
| ttconfig->rx_hash_fields); |
| } |
| |
| static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, |
| enum mlx5e_traffic_types tt, |
| u32 rx_hash_fields) |
| { |
| *ttconfig = tirc_default_config[tt]; |
| ttconfig->rx_hash_fields = rx_hash_fields; |
| } |
| |
| void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) |
| { |
| void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); |
| struct mlx5e_rss_params *rss = &priv->rss_params; |
| struct mlx5_core_dev *mdev = priv->mdev; |
| int ctxlen = MLX5_ST_SZ_BYTES(tirc); |
| struct mlx5e_tirc_config ttconfig; |
| int tt; |
| |
| MLX5_SET(modify_tir_in, in, bitmask.hash, 1); |
| |
| for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
| memset(tirc, 0, ctxlen); |
| mlx5e_update_rx_hash_fields(&ttconfig, tt, |
| rss->rx_hash_fields[tt]); |
| mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); |
| mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); |
| } |
| |
| /* The inner indirect TIRs are optional; skip them if they were not allocated. */ |
| if (!priv->inner_indir_tir[0].tirn) |
| return; |
| |
| for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
| memset(tirc, 0, ctxlen); |
| mlx5e_update_rx_hash_fields(&ttconfig, tt, |
| rss->rx_hash_fields[tt]); |
| mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); |
| mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); |
| } |
| } |
| |
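| /* Propagate the current LRO configuration to all indirect and direct TIRs. */ |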
| static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| |
| void *in; |
| void *tirc; |
| int inlen; |
| int err; |
| int tt; |
| int ix; |
| |
| inlen = MLX5_ST_SZ_BYTES(modify_tir_in); |
| in = kvzalloc(inlen, GFP_KERNEL); |
| if (!in) |
| return -ENOMEM; |
| |
| MLX5_SET(modify_tir_in, in, bitmask.lro, 1); |
| tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); |
| |
| mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); |
| |
| for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { |
| err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); |
| if (err) |
| goto free_in; |
| } |
| |
| for (ix = 0; ix < priv->max_nch; ix++) { |
| err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); |
| if (err) |
| goto free_in; |
| } |
| |
| free_in: |
| kvfree(in); |
| |
| return err; |
| } |
| |
| static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); |
| |
| static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, |
| struct mlx5e_params *params, u16 mtu) |
| { |
| u16 hw_mtu = MLX5E_SW2HW_MTU(params, mtu); |
| int err; |
| |
| err = mlx5_set_port_mtu(mdev, hw_mtu, 1); |
| if (err) |
| return err; |
| |
| /* Update vport context MTU */ |
| mlx5_modify_nic_vport_mtu(mdev, hw_mtu); |
| return 0; |
| } |
| |
| static void mlx5e_query_mtu(struct mlx5_core_dev *mdev, |
| struct mlx5e_params *params, u16 *mtu) |
| { |
| u16 hw_mtu = 0; |
| int err; |
| |
| err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu); |
| if (err || !hw_mtu) /* fallback to port oper mtu */ |
| mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1); |
| |
| *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); |
| } |
| |
| int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) |
| { |
| struct mlx5e_params *params = &priv->channels.params; |
| struct net_device *netdev = priv->netdev; |
| struct mlx5_core_dev *mdev = priv->mdev; |
| u16 mtu; |
| int err; |
| |
| err = mlx5e_set_mtu(mdev, params, params->sw_mtu); |
| if (err) |
| return err; |
| |
| mlx5e_query_mtu(mdev, params, &mtu); |
| if (mtu != params->sw_mtu) |
| netdev_warn(netdev, "%s: VPort MTU %d is different from netdev MTU %d\n", |
| __func__, mtu, params->sw_mtu); |
| |
| params->sw_mtu = mtu; |
| return 0; |
| } |
| |
| MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_set_dev_port_mtu); |
| |
| void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) |
| { |
| struct mlx5e_params *params = &priv->channels.params; |
| struct net_device *netdev = priv->netdev; |
| struct mlx5_core_dev *mdev = priv->mdev; |
| u16 max_mtu; |
| |
| /* MTU range: 68 - hw-specific max */ |
| netdev->min_mtu = ETH_MIN_MTU; |
| |
| mlx5_query_port_max_mtu(mdev, &max_mtu, 1); |
| netdev->max_mtu = min_t(unsigned int, MLX5E_HW2SW_MTU(params, max_mtu), |
| ETH_MAX_MTU); |
| } |
| |
| static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) |
| { |
| int tc; |
| |
| netdev_reset_tc(netdev); |
| |
| if (ntc == 1) |
| return; |
| |
| netdev_set_num_tc(netdev, ntc); |
| |
| /* Map every netdev TC to the same @nch queues at offset 0; |
| * the driver keeps its own UP-to-TXQ mapping for QoS. |
| */ |
| for (tc = 0; tc < ntc; tc++) |
| netdev_set_tc_queue(netdev, tc, nch, 0); |
| } |
| |
| int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) |
| { |
| int qos_queues, nch, ntc, num_txqs, err; |
| |
| qos_queues = mlx5e_qos_cur_leaf_nodes(priv); |
| |
| nch = priv->channels.params.num_channels; |
| ntc = priv->channels.params.num_tc; |
| num_txqs = nch * ntc + qos_queues; |
| if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) |
| num_txqs += ntc; |
| |
| mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs); |
| err = netif_set_real_num_tx_queues(priv->netdev, num_txqs); |
| if (err) |
| netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err); |
| |
| return err; |
| } |
| |
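| /* Update the netdev TC setup and the real numbers of TX and RX queues; |
| * restore the previous TX queue and TC configuration on failure. |
| */ |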
| static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) |
| { |
| struct net_device *netdev = priv->netdev; |
| int old_num_txqs, old_ntc; |
| int num_rxqs, nch, ntc; |
| int err; |
| |
| old_num_txqs = netdev->real_num_tx_queues; |
| old_ntc = netdev->num_tc ? : 1; |
| |
| nch = priv->channels.params.num_channels; |
| ntc = priv->channels.params.num_tc; |
| num_rxqs = nch * priv->profile->rq_groups; |
| |
| mlx5e_netdev_set_tcs(netdev, nch, ntc); |
| |
| err = mlx5e_update_tx_netdev_queues(priv); |
| if (err) |
| goto err_tcs; |
| err = netif_set_real_num_rx_queues(netdev, num_rxqs); |
| if (err) { |
| netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err); |
| goto err_txqs; |
| } |
| |
| return 0; |
| |
| err_txqs: |
| /* netif_set_real_num_rx_queues() can fail only when nch increases. Only |
| * one of nch and ntc is changed in this function, so the call to |
| * netif_set_real_num_tx_queues() below should not fail, because it only |
| * decreases the number of TX queues. |
| */ |
| WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); |
| |
| err_tcs: |
| mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc); |
| return err; |
| } |
| |
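| /* Set a default XPS cpumask per channel, built from the CPUs of all |
| * completion vectors that map to that channel. |
| */ |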
| static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv, |
| struct mlx5e_params *params) |
| { |
| struct mlx5_core_dev *mdev = priv->mdev; |
| int num_comp_vectors, ix, irq; |
| |
| num_comp_vectors = mlx5_comp_vectors_count(mdev); |
| |
| for (ix = 0; ix < params->num_channels; ix++) { |
| cpumask_clear(priv->scratchpad.cpumask); |
| |
| for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) { |
| int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(mdev, irq)); |
| |
| cpumask_set_cpu(cpu, priv->scratchpad.cpumask); |
| } |
| |
| netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix); |
| } |
| } |
| |
| int mlx5e_num_channels_changed(struct mlx5e_priv *priv) |
| { |
| u16 count = priv->channels.params.num_channels; |
| int err; |
| |
| err = mlx5e_update_netdev_queues(priv); |
| if (err) |
| return err; |
| |
| mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); |
| |
| if (!netif_is_rxfh_configured(priv->netdev)) |
| mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, |
| MLX5E_INDIR_RQT_SIZE, count); |
| |
| return 0; |
| } |
| |
| MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_num_channels_changed); |
| |
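| /* Rebuild the txq_ix -> SQ and (channel, tc) -> real TXQ mappings, including |
| * the PTP SQs when PTP TX port timestamping is enabled. |
| */ |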
| static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) |
| { |
| int i, ch, tc, num_tc; |
| |
| ch = priv->channels.num; |
| num_tc = priv->channels.params.num_tc; |
| |
| for (i = 0; i < ch; i++) { |
| for (tc = 0; tc < num_tc; tc++) { |
| struct mlx5e_channel *c = priv->channels.c[i]; |
| struct mlx5e_txqsq *sq = &c->sq[tc]; |
| |
| priv->txq2sq[sq->txq_ix] = sq; |
| priv->channel_tc2realtxq[i][tc] = i + tc * ch; |
| } |
| } |
| |
| if (!priv->channels.ptp) |
| return; |
| |
| if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state)) |
| return; |
| |
| for (tc = 0; tc < num_tc; tc++) { |
| struct mlx5e_ptp *c = priv->channels.ptp; |
| struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; |
| |
| priv->txq2sq[sq->txq_ix] = sq; |
| priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc; |
| } |
| } |
| |
| static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) |
| { |
| /* Sync with mlx5e_select_queue. */ |
| WRITE_ONCE(priv->num_tc_x_num_ch, |
| priv->channels.params.num_tc * priv->channels.num); |
| } |
| |
| void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) |
| { |
| mlx5e_update_num_tc_x_num_ch(priv); |
| mlx5e_build_txq_maps(priv); |
| mlx5e_activate_channels(&priv->channels); |
| mlx5e_qos_activate_queues(priv); |
| mlx5e_xdp_tx_enable(priv); |
| netif_tx_start_all_queues(priv->netdev); |
| |
| if (mlx5e_is_vport_rep(priv)) |
| mlx5e_add_sqs_fwd_rules(priv); |
| |
| mlx5e_wait_channels_min_rx_wqes(&priv->channels); |
| mlx5e_redirect_rqts_to_channels(priv, &priv->channels); |
| |
| mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels); |
| } |
| |
| void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) |
| { |
| mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels); |
| |
| mlx5e_redirect_rqts_to_drop(priv); |
| |
| if (mlx5e_is_vport_rep(priv)) |
| mlx5e_remove_sqs_fwd_rules(priv); |
| |
| /* FIXME: This is a workaround only for TX timeout watchdog false alarms |
| * raised while polling inactive TX queues. |
| */ |
| netif_tx_stop_all_queues(priv->netdev); |
| netif_tx_disable(priv->netdev); |
| mlx5e_xdp_tx_disable(priv); |
| mlx5e_deactivate_channels(&priv->channels); |
| } |
| |
| static int mlx5e_switch_priv_params(struct mlx5e_priv *priv, |
| struct mlx5e_params *new_params, |
| mlx5e_fp_preactivate preactivate, |
| void *context) |
| { |
| struct mlx5e_params old_params; |
| |
| old_params = priv->channels.params; |
| priv->channels.params = *new_params; |
| |
| if (preactivate) { |
| int err; |
| |
| err = preactivate(priv, context); |
| if (err) { |
| priv->channels.params = old_params; |
| return err; |
| } |
| } |
| |
| return 0; |
| } |
| |
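| /* Swap the active channels for @new_chs: deactivate the old set, run the |
| * preactivate hook, then activate the new set and restore the carrier state. |
| * If the preactivate hook fails, the old channels are put back in place. |
| */ |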
| static int mlx5e_switch_priv_channels(struct mlx5e_priv *priv, |
| struct mlx5e_channels *new_chs, |
| mlx5e_fp_preactivate preactivate, |
| void *context) |
| { |
| struct net_device *netdev = priv->netdev; |
| struct mlx5e_channels old_chs; |
| int carrier_ok; |
| int err = 0; |
| |
| carrier_ok = netif_carrier_ok(netdev); |
| netif_carrier_off(netdev); |
| |
| mlx5e_deactivate_priv_channels(priv); |
| |
| old_chs = priv->channels; |
| priv->channels = *new_chs; |
| |
| /* The new channels are ready; call the preactivate hook if needed to |
| * modify HW settings or update kernel parameters. |
| */ |
| if (preactivate) { |
| err = preactivate(priv, context); |
| if (err) { |
| priv->channels = old_chs; |
| goto out; |
| } |
| } |
| |
| mlx5e_close_channels(&old_chs); |
| priv->profile->update_rx(priv); |
| |
| out: |
| mlx5e_activate_priv_channels(priv); |
| |
| /* Restore the carrier if it was up before the switch. */ |
| if (carrier_ok) |
| netif_carrier_on(netdev); |
| |
| return err; |
| } |
| |
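| /* Apply @params. If the netdev is open and @reset is set, a new set of |
| * channels is opened with the new parameters and swapped in safely; |
| * otherwise only the stored parameters are updated (and the preactivate |
| * hook, if any, is run). |
| */ |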
| int mlx5e_safe_switch_params(struct mlx5e_priv *priv, |
| struct mlx5e_params *params, |
| mlx5e_fp_preactivate preactivate, |
| void *context, bool reset) |
| { |
| struct mlx5e_channels new_chs = {}; |
| int err; |
| |
| reset &= test_bit(MLX5E_STATE_OPENED, &priv->state); |
| if (!reset) |
| return mlx5e_switch_priv_params(priv, params, preactivate, context); |
| |
| new_chs.params = *params; |
| err = mlx5e_open_channels(priv, &new_chs); |
| if (err) |
| return err; |
| err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context); |
| if (err) |
| mlx5e_close_channels(&new_chs); |
| |
| return err; |
| } |
| |
| int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv) |
| { |
| return mlx5e_safe_switch_params(priv, &priv->channels.params, NULL, NULL, true); |
| } |
| |
| void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
| { |
| priv->tstamp.tx_type = HWTSTAMP_TX_OFF; |
| priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE; |
| } |
| |
| static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev, |
| enum mlx5_port_status state) |
| { |
| struct mlx5_eswitch *esw = mdev->priv.eswitch; |
| int vport_admin_state; |
| |
| mlx5_set_port_admin_status(mdev, state); |
| |
| if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS
|