/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
};
#define MLX5E_TC_MAX_SPLITS 1
/* Helper struct for accessing a struct containing a list_head array.
* Containing struct
*     |- Helper array
*        [0] Helper item 0
*            |- list_head item 0
*            |- index (0)
*        [1] Helper item 1
*            |- list_head item 1
*            |- index (1)
* To access the containing struct from one of the list_head items:
* 1. Get the helper item from the list_head item using
*    helper item =
*        container_of(list_head item, helper struct type, list_head field)
* 2. Get the containing struct from the helper item and its index in the array:
*    containing struct =
*        container_of(helper item, containing struct type, helper field[index])
*/
struct encap_flow_item {
struct mlx5e_encap_entry *e; /* attached encap instance */
struct list_head list;
int index;
};
struct mlx5e_tc_flow {
struct rhash_head node;
struct mlx5e_priv *priv;
u64 cookie;
unsigned long flags;
struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
/* flows sharing the same reformat object - currently mpls decap */
struct list_head l3_to_l2_reformat;
struct mlx5e_decap_entry *decap_reformat;
/* Flow can be associated with multiple encap IDs.
* The number of encaps is bounded by the number of supported
* destinations.
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g. due to missing route) */
struct net_device *orig_dev; /* netdev adding flow first */
int tmp_efi_index;
struct list_head tmp_list; /* temporary flow list used by neigh update */
refcount_t refcnt;
struct rcu_head rcu_head;
struct completion init_done;
int tunnel_id; /* the mapped tunnel id of this flow */
struct mlx5_flow_attr *attr;
};
struct mlx5e_tc_flow_parse_attr {
const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
struct ethhdr eth;
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
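/* Map each software "register" used by TC offloads to the hardware
* metadata field that backs it: the modify-header field id (mfield), the
* byte offset and length within that field (moffset/mlen) and, where
* matching is supported, the offset of the corresponding match field in
* fte_match_param (soffset).
*/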
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
.mlen = 2,
},
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
.moffset = 1,
.mlen = 3,
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
[ZONE_TO_REG] = zone_to_reg_ct,
[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
[CTSTATE_TO_REG] = ctstate_to_reg_ct,
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
/* For NIC rules we store the restore metadata directly
* in reg_b, which is passed to SW, since we don't
* jump between steering domains.
*/
[NIC_CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
.mlen = 2,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
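/* Program a match on the metadata register mapped to @type: shift
* @data/@mask into the register's byte length, write them at the
* register's soffset in @spec and enable matching on misc_parameters_2.
*/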
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 data,
u32 mask)
{
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
void *headers_c = spec->match_criteria;
void *headers_v = spec->match_value;
void *fmask, *fval;
fmask = headers_c + soffset;
fval = headers_v + soffset;
mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
memcpy(fmask, &mask, match_len);
memcpy(fval, &data, match_len);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
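/* Read back the data/mask currently programmed in @spec for the metadata
* register mapped to @type (the inverse of the match helper above).
*/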
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 *data,
u32 *mask)
{
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
void *headers_c = spec->match_criteria;
void *headers_v = spec->match_value;
void *fmask, *fval;
fmask = headers_c + soffset;
fval = headers_v + soffset;
memcpy(mask, fmask, match_len);
memcpy(data, fval, match_len);
*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}
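/* Append a set_action_in modify-header action that writes @data into the
* metadata register mapped to @type, growing @mod_hdr_acts as needed.
*/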
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
char *modact;
int err;
err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
if (err)
return err;
modact = mod_hdr_acts->actions +
(mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
/* Firmware has a 5-bit length field where 0 means 32 bits */
if (mlen == 4)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
MLX5_SET(set_action_in, modact, offset, moffset * 8);
MLX5_SET(set_action_in, modact, length, mlen * 8);
MLX5_SET(set_action_in, modact, data, data);
mod_hdr_acts->num_actions++;
return 0;
}
#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
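/* The CT (connection tracking) context lives on the uplink representor
* when the eswitch is in offloads mode and on the NIC TC tables otherwise.
*/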
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
if (esw_offloads_mode(esw)) {
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
return uplink_priv->ct_priv;
}
return priv->fs.tc.ct;
}
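/* Add the rule to the FDB when the eswitch is in offloads mode, otherwise
* to the NIC RX TC tables.
*/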
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (esw_offloads_mode(esw))
return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}
void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (esw_offloads_mode(esw)) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return;
}
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
struct mlx5_core_dev *func_mdev;
struct mlx5e_priv *func_priv;
u32 tdn;
u32 tirn;
int num_channels;
struct mlx5e_rqt indir_rqt;
u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_ttc_table ttc;
};
struct mlx5e_hairpin_entry {
/* a node of a hash table which keeps all the hairpin entries */
struct hlist_node hairpin_hlist;
/* protects flows list */
spinlock_t flows_lock;
/* flows sharing the same hairpin */
struct list_head flows;
/* hpe's that were not fully initialized when dead peer update event
* function traversed them.
*/
struct list_head dead_peer_wait_list;
u16 peer_vhca_id;
u8 prio;
struct mlx5e_hairpin *hp;
refcount_t refcnt;
struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
if (!flow || !refcount_inc_not_zero(&flow->refcnt))
return ERR_PTR(-EINVAL);
return flow;
}
static void mlx5e_flow_put(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (refcount_dec_and_test(&flow->refcnt)) {
mlx5e_tc_del_flow(priv, flow);
kfree_rcu(flow, rcu_head);
}
}
static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
{
/* Complete all memory stores before setting bit. */
smp_mb__before_atomic();
set_bit(flag, &flow->flags);
}
#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
unsigned long flag)
{
/* test_and_set_bit() provides all necessary barriers */
return test_and_set_bit(flag, &flow->flags);
}
#define flow_flag_test_and_set(flow, flag) \
__flow_flag_test_and_set(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
{
/* Complete all memory stores before clearing bit. */
smp_mb__before_atomic();
clear_bit(flag, &flow->flags);
}
#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
{
bool ret = test_bit(flag, &flow->flags);
/* Read fields of flow structure only after checking flags. */
smp_mb__after_atomic();
return ret;
}
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
}
static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, FT);
}
static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
}
static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
return mlx5e_is_eswitch_flow(flow) ?
MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}
static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
&esw->offloads.mod_hdr :
&priv->fs.tc.mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr)
{
struct mlx5_modify_hdr *modify_hdr;
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
get_flow_name_space(flow),
&parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
modify_hdr = mlx5e_mod_hdr_get(mh);
flow->attr->modify_hdr = modify_hdr;
flow->mh = mh;
return 0;
}
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
/* flow wasn't fully initialized */
if (!flow->mh)
return;
mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
flow->mh);
flow->mh = NULL;
}
static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
struct net_device *netdev;
struct mlx5e_priv *priv;
netdev = __dev_get_by_index(net, ifindex);
priv = netdev_priv(netdev);
return priv->mdev;
}
static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
void *tirc;
int err;
err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
if (err)
goto alloc_tdn_err;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
if (err)
goto create_tir_err;
return 0;
create_tir_err:
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
return err;
}
static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
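/* Fill the RQT with the hairpin pair RQ numbers, spreading them with the
* default indirection table and bit-inverting the index when the XOR RSS
* hash function is configured.
*/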
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
struct mlx5e_priv *priv = hp->func_priv;
int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
mlx5e_build_default_indir_rqt(indirection_rqt, sz,
hp->num_channels);
for (i = 0; i < sz; i++) {
ix = i;
if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, ilog2(sz));
ix = indirection_rqt[ix];
rqn = hp->pair->rqn[ix];
MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
void *rqtc;
u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
if (!err)
hp->indir_rqt.enabled = true;
kvfree(in);
return err;
}
static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
u32 in[MLX5_ST_SZ_DW(create_tir_in)];
int tt, i, err;
void *tirc;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);
memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);
err = mlx5_core_create_tir(hp->func_mdev, in,
&hp->indir_tirn[tt]);
if (err) {
mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
goto err_destroy_tirs;
}
}
return 0;
err_destroy_tirs:
for (i = 0; i < tt; i++)
mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
return err;
}
static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
int tt;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
struct ttc_params *ttc_params)
{
struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
ttc_params->any_tt_tirn = hp->tirn;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
struct ttc_params ttc_params;
int err;
err = mlx5e_hairpin_create_indirect_rqt(hp);
if (err)
return err;
err = mlx5e_hairpin_create_indirect_tirs(hp);
if (err)
goto err_create_indirect_tirs;
mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
if (err)
goto err_create_ttc_table;
netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
hp->num_channels, hp->ttc.ft.t->id);
return 0;
err_create_ttc_table:
mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
mlx5e_destroy_rqt(priv, &hp->indir_rqt);
return err;
}
static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
struct mlx5e_priv *priv = hp->func_priv;
mlx5e_destroy_ttc_table(priv, &hp->ttc);
mlx5e_hairpin_destroy_indirect_tirs(hp);
mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
int peer_ifindex)
{
struct mlx5_core_dev *func_mdev, *peer_mdev;
struct mlx5e_hairpin *hp;
struct mlx5_hairpin *pair;
int err;
hp = kzalloc(sizeof(*hp), GFP_KERNEL);
if (!hp)
return ERR_PTR(-ENOMEM);
func_mdev = priv->mdev;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
if (IS_ERR(pair)) {
err = PTR_ERR(pair);
goto create_pair_err;
}
hp->pair = pair;
hp->func_mdev = func_mdev;
hp->func_priv = priv;
hp->num_channels = params->num_channels;
err = mlx5e_hairpin_create_transport(hp);
if (err)
goto create_transport_err;
if (hp->num_channels > 1) {
err = mlx5e_hairpin_rss_init(hp);
if (err)
goto rss_init_err;
}
return hp;
rss_init_err:
mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
kfree(hp);
return ERR_PTR(err);
}
static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
if (hp->num_channels > 1)
mlx5e_hairpin_rss_cleanup(hp);
mlx5e_hairpin_destroy_transport(hp);
mlx5_core_hairpin_destroy(hp->pair);
kvfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
return (peer_vhca_id << 16 | prio);
}
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
u16 peer_vhca_id, u8 prio)
{
struct mlx5e_hairpin_entry *hpe;
u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
hairpin_hlist, hash_key) {
if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
refcount_inc(&hpe->refcnt);
return hpe;
}
}
return NULL;
}
static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
struct mlx5e_hairpin_entry *hpe)
{
/* no more hairpin flows for us, release the hairpin pair */
if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
return;
hash_del(&hpe->hairpin_hlist);
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
if (!IS_ERR_OR_NULL(hpe->hp)) {
netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
dev_name(hpe->hp->pair->peer_mdev->device));
mlx5e_hairpin_destroy(hpe->hp);
}
WARN_ON(!list_empty(&hpe->flows));
kfree(hpe);
}
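/* Hairpin pairs are keyed by the VLAN priority the flow matches on. Flows
* that don't match on a specific priority fall into a dedicated "unknown
* priority" bucket; partially masked priority matches are rejected.
*/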
#define UNKNOWN_MATCH_PRIO 8
static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec, u8 *match_prio,
struct netlink_ext_ack *extack)
{
void *headers_c, *headers_v;
u8 prio_val, prio_mask = 0;
bool vlan_present;
#ifdef CONFIG_MLX5_CORE_EN_DCB
if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
NL_SET_ERR_MSG_MOD(extack,
"only PCP trust state supported for hairpin");
return -EOPNOTSUPP;
}
#endif
headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
if (vlan_present) {
prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
}
if (!vlan_present || !prio_mask) {
prio_val = UNKNOWN_MATCH_PRIO;
} else if (prio_mask != 0x7) {
NL_SET_ERR_MSG_MOD(extack,
"masked priority match not supported for hairpin");
return -EOPNOTSUPP;
}
*match_prio = prio_val;
return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
int peer_ifindex = parse_attr->mirred_ifindex[0];
struct mlx5_hairpin_params params;
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
struct mlx5e_hairpin *hp;
u64 link_speed64;
u32 link_speed;
u8 match_prio;
u16 peer_id;
int err;
peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
return -EOPNOTSUPP;
}
peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
extack);
if (err)
return err;
mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
if (hpe) {
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
wait_for_completion(&hpe->res_ready);
if (IS_ERR(hpe->hp)) {
err = -EREMOTEIO;
goto out_err;
}
goto attach_flow;
}
hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
if (!hpe) {
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
return -ENOMEM;
}
spin_lock_init(&hpe->flows_lock);
INIT_LIST_HEAD(&hpe->flows);
INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
hpe->peer_vhca_id = peer_id;
hpe->prio = match_prio;
refcount_set(&hpe->refcnt, 1);
init_completion(&hpe->res_ready);
hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
params.log_data_size = 15;
params.log_data_size = min_t(u8, params.log_data_size,
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.log_data_size = max_t(u8, params.log_data_size,
MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
params.log_num_packets = params.log_data_size -
MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
params.log_num_packets = min_t(u8, params.log_num_packets,
MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
params.q_counter = priv->q_counter;
/* allocate one hairpin channel per 50Gbps share of the link speed */
mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
link_speed = max_t(u32, link_speed, 50000);
link_speed64 = link_speed;
do_div(link_speed64, 50000);
params.num_channels = link_speed64;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
hpe->hp = hp;
complete_all(&hpe->res_ready);
if (IS_ERR(hp)) {
err = PTR_ERR(hp);
goto out_err;
}
netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
hp->tirn, hp->pair->rqn[0],
dev_name(hp->pair->peer_mdev->device),
hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
flow->hpe = hpe;
spin_lock(&hpe->flows_lock);
list_add(&flow->hairpin, &hpe->flows);
spin_unlock(&hpe->flows_lock);
return 0;
out_err:
mlx5e_hairpin_put(priv, hpe);
return err;
}
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
/* flow wasn't fully initialized */
if (!flow->hpe)
return;
spin_lock(&flow->hpe->flows_lock);
list_del(&flow->hairpin);
spin_unlock(&flow->hpe->flows_lock);
mlx5e_hairpin_put(priv, flow->hpe);
flow->hpe = NULL;
}
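/* Build the destination list for a NIC rule: an explicit destination
* table, a hairpin flow table or TIR, or, for forward actions, the
* destination chain table or the priv->fs.vlan table, plus a counter
* destination when counting is requested, then add the rule to the
* chain's flow table.
*/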
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5_flow_context *flow_context = &spec->flow_context;
struct mlx5_fs_chains *nic_chains = nic_chains(priv);
struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
};
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *ft;
int dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
flow_context->flow_tag = nic_attr->flow_tag;
if (attr->dest_ft) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = attr->dest_ft;
dest_ix++;
} else if (nic_attr->hairpin_ft) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[dest_ix].ft = nic_attr->hairpin_ft;
dest_ix++;
} else if (nic_attr->hairpin_tirn) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
dest_ix++;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
if (attr->dest_chain) {
dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
if (IS_ERR(dest[dest_ix].ft))
return ERR_CAST(dest[dest_ix].ft);
} else {
dest[dest_ix].ft = priv->fs.vlan.ft.t;
}
dest_ix++;
}
if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
dest_ix++;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_hdr = attr->modify_hdr;
mutex_lock(&tc->t_lock);
if (IS_ERR_OR_NULL(tc->t)) {
/* Create the root table here if it doesn't exist yet */
tc->t =
mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
if (IS_ERR(tc->t)) {
mutex_unlock(&tc->t_lock);
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
rule = ERR_CAST(priv->fs.tc.t);
goto err_ft_get;
}
}
mutex_unlock(&tc->t_lock);
if (attr->chain || attr->prio)
ft = mlx5_chains_get_table(nic_chains,
attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
else
ft = attr->ft;
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_ft_get;
}
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rules(ft, spec,
&flow_act, dest, dest_ix);
if (IS_ERR(rule))
goto err_rule;
return rule;
err_rule:
if (attr->chain || attr->prio)
mlx5_chains_put_table(nic_chains,
attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
err_ft_get:
if (attr->dest_chain)
mlx5_chains_put_table(nic_chains,
attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
return ERR_CAST(rule);
}
static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_fc *counter = NULL;
int err;
if (flow_flag_test(flow, HAIRPIN)) {
err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
if (err)
return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
if (flow_flag_test(flow, CT))
flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
attr, &parse_attr->mod_hdr_acts);
else
flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
attr);
return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
struct mlx5_fs_chains *nic_chains = nic_chains(priv);
mlx5_del_flow_rules(rule);
if (attr->chain || attr->prio)
mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
MLX5E_TC_FT_LEVEL);
if (attr->dest_chain)
mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_tc_table *tc = &priv->fs.tc;
flow_flag_clear(flow, OFFLOADED);
if (flow_flag_test(flow, CT))
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
else if (!IS_ERR_OR_NULL(flow->rule[0]))
mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
/* Remove root table if no rules are left to avoid
* extra steering hops.
*/
mutex_lock(&priv->fs.tc.t_lock);
if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
!IS_ERR_OR_NULL(tc->t)) {
mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
kvfree(attr->parse_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
mlx5_fc_destroy(priv->mdev, attr->counter);
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
kfree(flow->attr);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow, int out_index);
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *mirred_dev,
int out_index,
struct netlink_ext_ack *extack,
struct net_device **encap_dev,
bool *encap_valid);
static int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack);
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
}
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
if (IS_ERR(rule))
return rule;
if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
return flow->rule[1];
}
}
return rule;
}
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
if (flow_flag_test(flow, CT)) {
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
}
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
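/* Offload the flow using a temporary copy of its attributes that is
* restricted to a forward action and flagged as slow path, so traffic
* keeps going through the slow-path table until the fast path (e.g. a
* resolved encap neighbour) becomes usable.
*/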
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr)
return ERR_PTR(-ENOMEM);
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
kfree(slow_attr);
return rule;
}
static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *slow_attr;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr) {
mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
return;
}
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
* function.
*/
static void unready_flow_add(struct mlx5e_tc_flow *flow,
struct list_head *unready_flows)
{
flow_flag_set(flow, NOT_READY);
list_add_tail(&flow->unready, unready_flows);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
* function.
*/
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
list_del(&flow->unready);
flow_flag_clear(flow, NOT_READY);
}
static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
mutex_lock(&uplink_priv->unready_flows_lock);
unready_flow_add(flow, &uplink_priv->unready_flows);
mutex_unlock(&uplink_priv->unready_flows_lock);
}
static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &rpriv->uplink_priv;
mutex_lock(&uplink_priv->unready_flows_lock);
unready_flow_del(flow);
mutex_unlock(&uplink_priv->unready_flows_lock);
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
bool encap_valid = true;
u32 max_prio, max_chain;
int err = 0;
int out_index;
if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
}
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
return -EOPNOTSUPP;
}
max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
return -EOPNOTSUPP;
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
err = mlx5e_attach_decap(priv, flow, extack);
if (err)
return err;
}
parse_attr = attr->parse_attr;
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
out_dev = __dev_get_by_index(dev_net(priv->netdev),
mirred_ifindex);
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
return err;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[out_index].rep = rpriv->rep;
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
return err;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw_attr->counter_dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
attr->counter = counter;
}
/* we get here if one of the following takes place:
* (1) there's no error
* (2) there's an encap action and we don't have a valid neigh
*/
if (!encap_valid)
flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
if (IS_ERR(flow->rule[0]))
return PTR_ERR(flow->rule[0]);
else
flow_flag_set(flow, OFFLOADED);
return 0;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
headers_v,
geneve_tlv_option_0_data);
return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
int out_index;
mlx5e_put_flow_tunnel_id(flow);
if (flow_flag_test(flow, NOT_READY))
remove_unready_flow(flow);
if (mlx5e_is_offloaded_flow(flow)) {
if (flow_flag_test(flow, SLOW))
mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
}
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
kvfree(attr->parse_attr);
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
kfree(flow->attr);
}
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
e->reformat_type,
e->encap_size, e->encap_header,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
PTR_ERR(e->pkt_reformat));
return;
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(priv);
list_for_each_entry(flow, flow_list, tmp_list) {
bool all_flow_encaps_valid = true;
int i;
if (!mlx5e_is_offloaded_flow(flow))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
spec = &attr->parse_attr->spec;
esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Flow can be associated with multiple encap entries.
* Before offloading the flow verify that all of them have
* a valid neighbour.
*/
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
continue;
if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
all_flow_encaps_valid = false;
break;
}
}
/* Do not offload flows with unresolved neighbors */
if (!all_flow_encaps_valid)
continue;
/* update from slow path rule to encap rule */
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
continue;
}
mlx5e_tc_unoffload_from_slow_path(esw, flow);
flow->rule[0] = rule;
/* was unset when slow path rule removed */
flow_flag_set(flow, OFFLOADED);
}
}
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
spec = &attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
err);
continue;
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
}
/* we know that the encap is valid */
e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
return flow->attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
* flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
*/
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
{
struct encap_flow_item *efi;
struct mlx5e_tc_flow *flow;
list_for_each_entry(efi, &e->flows, list) {
flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
if (IS_ERR(mlx5e_flow_get(flow)))
continue;
wait_for_completion(&flow->init_done);
flow->tmp_efi_index = efi->index;
list_add(&flow->tmp_list, flow_list);
}
}
/* Iterate over the tmp_list of flows attached to flow_list head and
* release the reference taken on each one.
*/
void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
struct mlx5e_tc_flow *flow, *tmp;
list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
mlx5e_flow_put(priv, flow);
}
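/* Walk nhe->encap_list under RCU and return the next encap entry that
* could be referenced and has reached a valid state, releasing the
* previously returned entry. Returns NULL when the list is exhausted.
*/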
static struct mlx5e_encap_entry *
mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
struct mlx5e_encap_entry *e)
{
struct mlx5e_encap_entry *next = NULL;
retry:
rcu_read_lock();
/* find encap with non-zero reference counter value */
for (next = e ?
list_next_or_null_rcu(&nhe->encap_list,
&e->encap_list,
struct mlx5e_encap_entry,
encap_list) :
list_first_or_null_rcu(&nhe->encap_list,
struct mlx5e_encap_entry,
encap_list);
next;
next = list_next_or_null_rcu(&nhe->encap_list,
&next->encap_list,
struct mlx5e_encap_entry,
encap_list))
if (mlx5e_encap_take(next))
break;
rcu_read_unlock();
/* release starting encap */
if (e)
mlx5e_encap_put(netdev_priv(e->out_dev), e);
if (!next)
return next;
/* wait for encap to be fully initialized */
wait_for_completion(&next->res_ready);
/* continue searching if encap entry is not in valid state after completion */
if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
e = next;
goto retry;
}
return next;
}
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
struct mlx5e_encap_entry *e = NULL;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
struct neighbour *n;
u64 lastuse;
if (m_neigh->family == AF_INET)
tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
else if (m_neigh->family == AF_INET6)
tbl = ipv6_stub->nd_tbl;
#endif
else
return;
/* mlx5e_get_next_valid_encap() releases previous encap before returning
* next one.
*/
while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
struct mlx5e_priv *priv = netdev_priv(e->out_dev);
struct encap_flow_item *efi, *tmp;
struct mlx5_eswitch *esw;
LIST_HEAD(flow_list);
esw = priv->mdev->priv.eswitch;
mutex_lock(&esw->offloads.encap_tbl_lock);
list_for_each_entry_safe(efi, tmp, &e->flows, list) {
flow = container_of(efi, struct mlx5e_tc_flow,
encaps[efi->index]);
if (IS_ERR(mlx5e_flow_get(flow)))
continue;
list_add(&flow->tmp_list, &flow_list);
if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
break;
}
}
}
mutex_unlock(&esw->offloads.encap_tbl_lock);
mlx5e_put_encap_flow_list(priv, &flow_list);
if (neigh_used) {
/* release current encap before breaking the loop */
mlx5e_encap_put(priv, e);
break;
}
}
trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
if (neigh_used) {
nhe->reported_lastuse = jiffies;
/* find the relevant neigh according to the cached device and
* dst ip pair
*/
n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
if (!n)
return;
neigh_event_send(n, NULL);
neigh_release(n);
}
}
static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
WARN_ON(!list_empty(&e->flows));
if (e->compl_result > 0) {
mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
if (e->flags & MLX5_ENCAP_ENTRY_VALID)
mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
}
kfree(e->tun_info);
kfree(e->encap_header);
kfree_rcu(e, rcu);
}
static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
struct mlx5e_decap_entry *d)
{
WARN_ON(!list_empty(&d->flows));
if (!d->compl_result)
mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
kfree_rcu(d, rcu);
}
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
return;
hash_del_rcu(&e->encap_hlist);
mutex_unlock(&esw->offloads.encap_tbl_lock);
mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
return;
hash_del_rcu(&d->hlist);
mutex_unlock(&esw->offloads.decap_tbl_lock);
mlx5e_decap_dealloc(priv, d);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow, int out_index)
{
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
/* flow wasn't fully initialized */
if (!e)
return;
mutex_lock(&esw->offloads.encap_tbl_lock);
list_del(&flow->encaps[out_index].list);
flow->encaps[out_index].e = NULL;
if (!refcount_dec_and_test(&e->refcnt)) {
mutex_unlock(&esw->offloads.encap_tbl_lock);
return;
}
hash_del_rcu(&e->encap_hlist);
mutex_unlock(&esw->offloads.encap_tbl_lock);
mlx5e_encap_dealloc(priv, e);
}
static void mlx5e_detach_decap(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_decap_entry *d = flow->decap_reformat;
if (!d)
return;
mutex_lock(&esw->offloads.decap_tbl_lock);
list_del(&flow->l3_to_l2_reformat);
flow->decap_reformat = NULL;
if (!refcount_dec_and_test(&d->refcnt)) {
mutex_unlock(&esw->offloads.decap_tbl_lock);
return;
}
hash_del_rcu(&d->hlist);
mutex_unlock(&esw->offloads.decap_tbl_lock);
mlx5e_decap_dealloc(priv, d);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
if (!flow_flag_test(flow, ESWITCH) ||
!flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
list_del(&flow->peer);
mutex_unlock(&esw->offloads.peer_mutex);
flow_flag_clear(flow, DUP);
if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
kfree(flow->peer_flow);
}
flow->peer_flow = NULL;
}
static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_core_dev *dev = flow->priv->mdev;
struct mlx5_devcom *devcom = dev->priv.devcom;
struct mlx5_eswitch *peer_esw;
peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
if (!peer_esw)
return;
__mlx5e_tc_del_fdb_peer_flow(flow);
mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (mlx5e_is_eswitch_flow(flow)) {
mlx5e_tc_del_fdb_peer_flow(flow);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
mlx5e_tc_del_nic_flow(priv, flow);
}
}
static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_action *flow_action = &rule->action;
const struct flow_action_entry *act;
int i;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_GOTO:
return true;
default:
continue;
}
}
return false;
}
static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
struct flow_dissector_key_enc_opts *opts,
struct netlink_ext_ack *extack,
bool *dont_care)
{
struct geneve_opt *opt;
int off = 0;
*dont_care = true;
while (opts->len > off) {
opt = (struct geneve_opt *)&opts->data[off];
if (!(*dont_care) || opt->opt_class || opt->type ||
memchr_inv(opt->opt_data, 0, opt->length * 4)) {
*dont_care = false;
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
NL_SET_ERR_MSG(extack,
"Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
}
}
off += sizeof(struct geneve_opt) + opt->length * 4;
}
return 0;
}
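/* Copy the dissector key identified by diss_key from the rule's flow
* match into dst.
*/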
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
struct flow_rule *__rule = (rule);\
typeof(dst) __dst = dst;\
\
memcpy(__dst,\
skb_flow_dissector_target(__rule->match.dissector,\
diss_key,\
__rule->match.key),\
sizeof(*__dst));\
})
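/* Map the flow's tunnel match (and its tunnel options, unless they are a
* don't-care) to ids and combine them into a tunnel id. On chain 0 the id
* is written to TUNNEL_TO_REG with a modify-header action; on higher
* chains it is matched against instead.
*/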
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
bool enc_opts_is_dont_care = true;
u32 tun_id, enc_opts_id = 0;
struct mlx5_eswitch *esw;
u32 value, mask;
int err;
esw = priv->mdev->priv.eswitch;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
memset(&tunnel_key, 0, sizeof(tunnel_key));
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
&tunnel_key.enc_control);
if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
&tunnel_key.enc_ipv4);
else
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
&tunnel_key.enc_ipv6);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
&tunnel_key.enc_tp);
COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
&tunnel_key.enc_key_id);
tunnel_key.filter_ifindex = filter_dev->ifindex;
err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
if (err)
return err;
flow_rule_match_enc_opts(rule, &enc_opts_match);
err = enc_opts_is_dont_care_or_full_match(priv,
enc_opts_match.mask,
extack,
&enc_opts_is_dont_care);
if (err)
goto err_enc_opts;
if (!enc_opts_is_dont_care) {
memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
memcpy(&tun_enc_opts.key, enc_opts_match.key,
sizeof(*enc_opts_match.key));
memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
sizeof(*enc_opts_match.mask));
err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
&tun_enc_opts, &enc_opts_id);
if (err)
goto err_enc_opts;
}
value = tun_id << ENC_OPTS_BITS | enc_opts_id;
mask = enc_opts_id ? TUNNEL_ID_MASK :
(TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
if (attr->chain) {
mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
TUNNEL_TO_REG, value, mask);
} else {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
TUNNEL_TO_REG, value);
if (err)
goto err_set;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
}
flow->tunnel_id = value;
return 0;
err_set:
if (enc_opts_id)
mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
enc_opts_id);
err_enc_opts:
mapping_remove(uplink_priv->tunnel_mapping, tun_id);
return err;
}
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
struct mlx5_rep_uplink_priv *uplink_priv;
struct mlx5e_rep_priv *uplink_rpriv;
struct mlx5_eswitch *esw;
esw = flow->priv->mdev->priv.eswitch;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
if (tun_id)
mapping_remove(uplink_priv->tunnel_mapping, tun_id);
if (enc_opts_id)
mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
enc_opts_id);
}
u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
return flow->tunnel_id;
}
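/* Prefer matching on ip_version when the device supports it and the
* filter matches exactly on the IPv4/IPv6 ethertype; otherwise fall back
* to matching on the ethertype field itself.
*/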
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
void *headers_c, void *headers_v)
{
bool ip_version_cap;
ip_version_cap = outer ?
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.outer_ip_version) :
MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
ft_field_support.inner_ip_version);
if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
(match->key->n_proto == htons(ETH_P_IP) ||
match->key->n_proto == htons(ETH_P_IPV6))) {
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
ntohs(match->mask->n_proto));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
ntohs(match->key->n_proto));
}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
u8 *match_level,
bool *match_inner)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
bool needs_mapping, sets_mapping;
int err;
if (!mlx5e_is_eswitch_flow(flow))
return -EOPNOTSUPP;
needs_mapping = !!flow->attr->chain;
sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
*match_inner = !needs_mapping;
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
NL_SET_ERR_MSG(extack,
"Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
}
if (!flow->attr->chain) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"Failed to parse tunnel attributes");
netdev_warn(priv->netdev,
"Failed to parse tunnel attributes");
return err;
}
/* With MPLS over UDP we decapsulate using a packet reformat
* object
*/
if (!netif_is_bareudp(filter_dev))
flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
return 0;
return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
inner_headers);
}
static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_value,
inner_headers);
}
static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
}
static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
return MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
}
static void *get_match_headers_value(u32 flags,
struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_value(spec) :
get_match_outer_headers_value(spec);
}
static void *get_match_headers_criteria(u32 flags,
struct mlx5_flow_spec *spec)
{
return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
get_match_inner_headers_criteria(spec) :
get_match_outer_headers_criteria(spec);
}
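/* Validate the optional meta key: the only supported ingress ifindex
* is the filter device itself, matched with a full mask. Illustrative
* flower rule (hypothetical interface name) that produces this key:
*   tc filter add dev eth0 ingress flower indev eth0 ip_proto tcp action drop
*/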
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
struct flow_cls_offload *f)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
struct net_device *ingress_dev;
struct flow_match_meta match;
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
return 0;
flow_rule_match_meta(rule, &match);
if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EOPNOTSUPP;
}
ingress_dev = __dev_get_by_index(dev_net(filter_dev),
match.key->ingress_ifindex);
if (!ingress_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't find the ingress port to match on");
return -ENOENT;
}
if (ingress_dev != filter_dev) {
NL_SET_ERR_MSG_MOD(extack,
"Can't match on the ingress filter port");
return -EOPNOTSUPP;
}
return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
struct flow_cls_offload *f)
{
/* When doing mpls over udp decap, the user needs to provide
* MPLS_UC as the protocol in order to be able to match on mpls
* label fields. However, the actual ethertype is IP so we want to
* avoid matching on this, otherwise we'll fail the match.
*/
if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
return true;
return false;
}
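/* Core flower parser: translate the supported dissector keys (meta,
* basic, VLAN/CVLAN, Ethernet/IPv4/IPv6 addresses, IP TOS/TTL, TCP/UDP
* ports, TCP flags and the tunnel keys) into the mlx5 match
* criteria/value buffers, tracking the deepest protocol layer matched
* in *inner_match_level and *outer_match_level.
*/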
static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev,
u8 *inner_match_level, u8 *outer_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers);
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
match_level = outer_match_level;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_META) |
BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_CVLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_TCP) |
BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_CT) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
BIT(FLOW_DISSECTOR_KEY_MPLS))) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
if (mlx5e_get_tc_tun(filter_dev)) {
bool match_inner = false;
err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
outer_match_level, &match_inner);
if (err)
return err;
if (match_inner) {
/* header pointers should point to the inner headers
* if the packet was decapsulated already.
* outer headers are set by parse_tunnel_attr.
*/
match_level = inner_match_level;
headers_c = get_match_inner_headers_criteria(spec);
headers_v = get_match_inner_headers_value(spec);
}
}
err = mlx5e_flower_parse_meta(filter_dev, f);
if (err)
return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
!skip_key_basic(filter_dev, f)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
mlx5e_tc_set_ethertype(priv->mdev, &match,
match_level == outer_match_level,
headers_c, headers_v);
if (match.mask->n_proto)
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
is_vlan_dev(filter_dev)) {
struct flow_dissector_key_vlan filter_dev_mask;
struct flow_dissector_key_vlan filter_dev_key;
struct flow_match_vlan match;
if (is_vlan_dev(filter_dev)) {
match.key = &filter_dev_key;
match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
match.key->vlan_priority = 0;
match.mask = &filter_dev_mask;
memset(match.mask, 0xff, sizeof(*match.mask));
match.mask->vlan_priority = 0;
} else {
flow_rule_match_vlan(rule, &match);
}
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
svlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
cvlan_tag, 1);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
match.mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
match.key->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
match.mask->vlan_priority);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
}
} else if (*match_level != MLX5_MATCH_NONE) {
/* cvlan_tag enabled in match criteria and
* disabled in match value means neither S- nor C-tag
* is present, i.e. the packet is untagged
*/
MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
struct flow_match_vlan match;
flow_rule_match_cvlan(rule, &match);
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_svlan_tag, 1);
} else {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_cvlan_tag, 1);
MLX5_SET(fte_match_set_misc, misc_v,
outer_second_cvlan_tag, 1);
}
MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
match.mask->vlan_id);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
match.key->vlan_id);
MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
match.mask->vlan_priority);
MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
flow_rule_match_eth_addrs(rule, &match);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dmac_47_16),
match.mask->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dmac_47_16),
match.key->dst);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
smac_47_16),
match.mask->src);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
smac_47_16),
match.key->src);
if (!is_zero_ether_addr(match.mask->src) ||
!is_zero_ether_addr(match.mask->dst))
*match_level = MLX5_MATCH_L2;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
/* the HW doesn't support matching on the first/later fragment flags */
if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
return -EOPNOTSUPP;
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
match.key->flags & FLOW_DIS_IS_FRAGMENT);
/* the HW doesn't need L3 inline to match on frag=no */
if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
*match_level = MLX5_MATCH_L2;
/* *** L2 attributes parsing up to here *** */
else
*match_level = MLX5_MATCH_L3;
}
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
flow_rule_match_basic(rule, &match);
ip_proto = match.key->ip_proto;
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
match.mask->ip_proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
match.key->ip_proto);
if (match.mask->ip_proto)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&match.key->dst, sizeof(match.key->dst));
if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L3;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
struct flow_match_ipv6_addrs match;
flow_rule_match_ipv6_addrs(rule, &match);
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&match.mask->src, sizeof(match.mask->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
&match.key->src, sizeof(match.key->src));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&match.mask->dst, sizeof(match.mask->dst));
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
&match.key->dst, sizeof(match.key->dst));
if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
*match_level = MLX5_MATCH_L3;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
struct flow_match_ip match;
flow_rule_match_ip(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
match.mask->tos & 0x3);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
match.key->tos & 0x3);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
match.mask->tos >> 2);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
match.key->tos >> 2);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
match.mask->ttl);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
match.key->ttl);
if (match.mask->ttl &&
!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
ft_field_support.outer_ipv4_ttl)) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on TTL is not supported");
return -EOPNOTSUPP;
}
if (match.mask->tos || match.mask->ttl)
*match_level = MLX5_MATCH_L3;
}
/* *** L3 attributes parsing up to here *** */
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
flow_rule_match_ports(rule, &match);
switch (ip_proto) {
case IPPROTO_TCP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
tcp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
tcp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
tcp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
tcp_dport, ntohs(match.key->dst));
break;
case IPPROTO_UDP:
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_sport, ntohs(match.mask->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_sport, ntohs(match.key->src));
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_dport, ntohs(match.mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_dport, ntohs(match.key->dst));
break;
default:
NL_SET_ERR_MSG_MOD(extack,
"Only UDP and TCP transports are supported for L4 matching");
netdev_err(priv->netdev,
"Only UDP and TCP transport are supported\n");
return -EINVAL;
}
if (match.mask->src || match.mask->dst)
*match_level = MLX5_MATCH_L4;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
struct flow_match_tcp match;
flow_rule_match_tcp(rule, &match);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
ntohs(match.mask->flags));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
ntohs(match.key->flags));
if (match.mask->flags)
*match_level = MLX5_MATCH_L4;
}
return 0;
}
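/* Wrapper around __parse_cls_flower(): for eswitch flows attached to a
* non-uplink vport representor, verify that the e-switch min-inline
* mode covers the deepest non-tunnel header layer the rule matches on,
* then record the match levels in the flow attributes.
*/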
static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct net_device *filter_dev)
{
u8 inner_match_level, outer_match_level, non_tunnel_match_level;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
bool is_eswitch_flow;
int err;
inner_match_level = MLX5_MATCH_NONE;
outer_match_level = MLX5_MATCH_NONE;
err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
&inner_match_level, &outer_match_level);
non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
outer_match_level : inner_match_level;
is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
if (!err && is_eswitch_flow) {
rep = rpriv->rep;
if (rep->vport != MLX5_VPORT_UPLINK &&
(esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
esw->offloads.inline_mode < non_tunnel_match_level)) {
NL_SET_ERR_MSG_MOD(extack,
"Flow is not offloaded due to min inline setting");
netdev_warn(priv->netdev,
"Flow is not offloaded due to min inline setting, required %d actual %d\n",
non_tunnel_match_level, esw->offloads.inline_mode);
return -EOPNOTSUPP;
}
}
flow->attr->inner_match_level = inner_match_level;
flow->attr->outer_match_level = outer_match_level;
return err;
}
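/* Pedit (header rewrite) parsing state: per-protocol-header masks and
* values accumulated from TC pedit actions before being translated
* into mlx5 modify-header actions.
*/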
struct pedit_headers {
struct ethhdr eth;
struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
struct udphdr udp;
};
struct pedit_headers_action {
struct pedit_headers vals;
struct pedit_headers masks;
u32 pedits;
};
static int pedit_header_offsets[] = {
[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
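/* Record a single 32-bit pedit write of @val/@mask at @offset within
* the given header type. Writing twice to the same bits is rejected so
* that the accumulated mask/value pairs stay unambiguous.
*/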
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
struct pedit_headers_action *hdrs)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
if (*curr_pmask & mask) /* disallow acting twice on the same location */
goto out_err;
*curr_pmask |= mask;
*curr_pval |= (val & mask);
return 0;
out_err:
return -EOPNOTSUPP;
}
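/* Describes how a rewritten header field maps to firmware: the
* MLX5_ACTION_IN_FIELD_OUT_* modify-header field, its bit size and
* mask, its offset within struct pedit_headers, and the byte offset of
* the corresponding field in the match parameters.
*/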
struct mlx5_fields {
u8 field;
u8 field_bsize;
u32 field_mask;
u32 offset;
u32 match_offset;
};
#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
offsetof(struct pedit_headers, field) + (off), \
MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
/* True when the masked rewrite value equals the masked match value and
* the rewrite mask does not cover any bits that the match mask does not
* also cover, i.e. every rewritten bit is backed by a match.
*/
#define SAME_VAL_MASK(type, valp, maskp<