| // SPDX-License-Identifier: GPL-2.0-or-later |
| /* |
| * NET3 Protocol independent device support routines. |
| * |
| * Derived from the non IP parts of dev.c 1.0.19 |
| * Authors: Ross Biro |
| * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
| * Mark Evans, <evansmp@uhura.aston.ac.uk> |
| * |
| * Additional Authors: |
| * Florian la Roche <rzsfl@rz.uni-sb.de> |
| * Alan Cox <gw4pts@gw4pts.ampr.org> |
| * David Hinds <dahinds@users.sourceforge.net> |
| * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> |
| * Adam Sulmicki <adam@cfar.umd.edu> |
| * Pekka Riikonen <priikone@poesidon.pspt.fi> |
| * |
| * Changes: |
| * D.J. Barrow : Fixed bug where dev->refcnt gets set |
| * to 2 if register_netdev gets called |
| * before net_dev_init & also removed a |
| * few lines of code in the process. |
| * Alan Cox : device private ioctl copies fields back. |
| * Alan Cox : Transmit queue code does relevant |
| * stunts to keep the queue safe. |
| * Alan Cox : Fixed double lock. |
| * Alan Cox : Fixed promisc NULL pointer trap |
| * ???????? : Support the full private ioctl range |
| * Alan Cox : Moved ioctl permission check into |
| * drivers |
| * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI |
| * Alan Cox : 100 backlog just doesn't cut it when |
| * you start doing multicast video 8) |
| * Alan Cox : Rewrote net_bh and list manager. |
| * Alan Cox : Fix ETH_P_ALL echoback lengths. |
| * Alan Cox : Took out transmit every packet pass |
| * Saved a few bytes in the ioctl handler |
| * Alan Cox : Network driver sets packet type before |
| * calling netif_rx. Saves a function |
| * call a packet. |
| * Alan Cox : Hashed net_bh() |
| * Richard Kooijman: Timestamp fixes. |
| * Alan Cox : Wrong field in SIOCGIFDSTADDR |
| * Alan Cox : Device lock protection. |
| * Alan Cox : Fixed nasty side effect of device close |
| * changes. |
| * Rudi Cilibrasi : Pass the right thing to |
| * set_mac_address() |
| * Dave Miller : 32bit quantity for the device lock to |
| * make it work out on a Sparc. |
| * Bjorn Ekwall : Added KERNELD hack. |
| * Alan Cox : Cleaned up the backlog initialise. |
| * Craig Metz : SIOCGIFCONF fix if space for under |
| * 1 device. |
| * Thomas Bogendoerfer : Return ENODEV for dev_open, if there |
| * is no device open function. |
| * Andi Kleen : Fix error reporting for SIOCGIFCONF |
| * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF |
| * Cyrus Durgin : Cleaned for KMOD |
| * Adam Sulmicki : Bug Fix : Network Device Unload |
| * A network device unload needs to purge |
| * the backlog queue. |
| * Paul Rusty Russell : SIOCSIFNAME |
| * Pekka Riikonen : Netdev boot-time settings code |
| * Andrew Morton : Make unregister_netdevice wait |
| * indefinitely on dev->refcnt |
| * J Hadi Salim : - Backlog queue sampling |
| * - netif_rx() feedback |
| */ |
| |
| #include <linux/uaccess.h> |
| #include <linux/bitops.h> |
| #include <linux/capability.h> |
| #include <linux/cpu.h> |
| #include <linux/types.h> |
| #include <linux/kernel.h> |
| #include <linux/hash.h> |
| #include <linux/slab.h> |
| #include <linux/sched.h> |
| #include <linux/sched/mm.h> |
| #include <linux/mutex.h> |
| #include <linux/rwsem.h> |
| #include <linux/string.h> |
| #include <linux/mm.h> |
| #include <linux/socket.h> |
| #include <linux/sockios.h> |
| #include <linux/errno.h> |
| #include <linux/interrupt.h> |
| #include <linux/if_ether.h> |
| #include <linux/netdevice.h> |
| #include <linux/etherdevice.h> |
| #include <linux/ethtool.h> |
| #include <linux/skbuff.h> |
| #include <linux/kthread.h> |
| #include <linux/bpf.h> |
| #include <linux/bpf_trace.h> |
| #include <net/net_namespace.h> |
| #include <net/sock.h> |
| #include <net/busy_poll.h> |
| #include <linux/rtnetlink.h> |
| #include <linux/stat.h> |
| #include <net/dsa.h> |
| #include <net/dst.h> |
| #include <net/dst_metadata.h> |
| #include <net/gro.h> |
| #include <net/pkt_sched.h> |
| #include <net/pkt_cls.h> |
| #include <net/checksum.h> |
| #include <net/xfrm.h> |
| #include <linux/highmem.h> |
| #include <linux/init.h> |
| #include <linux/module.h> |
| #include <linux/netpoll.h> |
| #include <linux/rcupdate.h> |
| #include <linux/delay.h> |
| #include <net/iw_handler.h> |
| #include <asm/current.h> |
| #include <linux/audit.h> |
| #include <linux/dmaengine.h> |
| #include <linux/err.h> |
| #include <linux/ctype.h> |
| #include <linux/if_arp.h> |
| #include <linux/if_vlan.h> |
| #include <linux/ip.h> |
| #include <net/ip.h> |
| #include <net/mpls.h> |
| #include <linux/ipv6.h> |
| #include <linux/in.h> |
| #include <linux/jhash.h> |
| #include <linux/random.h> |
| #include <trace/events/napi.h> |
| #include <trace/events/net.h> |
| #include <trace/events/skb.h> |
| #include <trace/events/qdisc.h> |
| #include <linux/inetdevice.h> |
| #include <linux/cpu_rmap.h> |
| #include <linux/static_key.h> |
| #include <linux/hashtable.h> |
| #include <linux/vmalloc.h> |
| #include <linux/if_macvlan.h> |
| #include <linux/errqueue.h> |
| #include <linux/hrtimer.h> |
| #include <linux/netfilter_netdev.h> |
| #include <linux/crash_dump.h> |
| #include <linux/sctp.h> |
| #include <net/udp_tunnel.h> |
| #include <linux/net_namespace.h> |
| #include <linux/indirect_call_wrapper.h> |
| #include <net/devlink.h> |
| #include <linux/pm_runtime.h> |
| #include <linux/prandom.h> |
| #include <linux/once_lite.h> |
| |
| #include "dev.h" |
| #include "net-sysfs.h" |
| |
| |
| static DEFINE_SPINLOCK(ptype_lock); |
| struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; |
| struct list_head ptype_all __read_mostly; /* Taps */ |
| |
| static int netif_rx_internal(struct sk_buff *skb); |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct netdev_notifier_info *info); |
| static int call_netdevice_notifiers_extack(unsigned long val, |
| struct net_device *dev, |
| struct netlink_ext_ack *extack); |
| static struct napi_struct *napi_by_id(unsigned int napi_id); |
| |
| /* |
| * The @dev_base_head list is protected by @dev_base_lock and the rtnl |
| * semaphore. |
| * |
| * Pure readers hold dev_base_lock for reading, or rcu_read_lock() |
| * |
| * Writers must hold the rtnl semaphore while they loop through the |
| * dev_base_head list, and hold dev_base_lock for writing when they do the |
| * actual updates. This allows pure readers to access the list even |
| * while a writer is preparing to update it. |
| * |
| * To put it another way, dev_base_lock is held for writing only to |
| * protect against pure readers; the rtnl semaphore provides the |
| * protection against other writers. |
| * |
| * See, for example usages, register_netdevice() and |
| * unregister_netdevice(), which must be called with the rtnl |
| * semaphore held. |
| */ |
| DEFINE_RWLOCK(dev_base_lock); |
| EXPORT_SYMBOL(dev_base_lock); |
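
/*
 * A minimal reader sketch for the locking rules above ("eth0" and the
 * surrounding code are hypothetical, not part of this file):
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		... dev is only valid inside this RCU section ...
 *	rcu_read_unlock();
 *
 * Writers instead hold the rtnl lock and take write_lock(&dev_base_lock)
 * around the actual list update, as list_netdevice() below does.
 */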
| |
| static DEFINE_MUTEX(ifalias_mutex); |
| |
| /* protects napi_hash addition/deletion and napi_gen_id */ |
| static DEFINE_SPINLOCK(napi_hash_lock); |
| |
| static unsigned int napi_gen_id = NR_CPUS; |
| static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); |
| |
| static DECLARE_RWSEM(devnet_rename_sem); |
| |
| static inline void dev_base_seq_inc(struct net *net) |
| { |
| while (++net->dev_base_seq == 0) |
| ; |
| } |
| |
| static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) |
| { |
| unsigned int hash = full_name_hash(net, name, strnlen(name, IFNAMSIZ)); |
| |
| return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; |
| } |
| |
| static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) |
| { |
| return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; |
| } |
| |
| static inline void rps_lock_irqsave(struct softnet_data *sd, |
| unsigned long *flags) |
| { |
| if (IS_ENABLED(CONFIG_RPS)) |
| spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); |
| else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
| local_irq_save(*flags); |
| } |
| |
| static inline void rps_lock_irq_disable(struct softnet_data *sd) |
| { |
| if (IS_ENABLED(CONFIG_RPS)) |
| spin_lock_irq(&sd->input_pkt_queue.lock); |
| else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
| local_irq_disable(); |
| } |
| |
| static inline void rps_unlock_irq_restore(struct softnet_data *sd, |
| unsigned long *flags) |
| { |
| if (IS_ENABLED(CONFIG_RPS)) |
| spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); |
| else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
| local_irq_restore(*flags); |
| } |
| |
| static inline void rps_unlock_irq_enable(struct softnet_data *sd) |
| { |
| if (IS_ENABLED(CONFIG_RPS)) |
| spin_unlock_irq(&sd->input_pkt_queue.lock); |
| else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) |
| local_irq_enable(); |
| } |
| |
| static struct netdev_name_node *netdev_name_node_alloc(struct net_device *dev, |
| const char *name) |
| { |
| struct netdev_name_node *name_node; |
| |
| name_node = kmalloc(sizeof(*name_node), GFP_KERNEL); |
| if (!name_node) |
| return NULL; |
| INIT_HLIST_NODE(&name_node->hlist); |
| name_node->dev = dev; |
| name_node->name = name; |
| return name_node; |
| } |
| |
| static struct netdev_name_node * |
| netdev_name_node_head_alloc(struct net_device *dev) |
| { |
| struct netdev_name_node *name_node; |
| |
| name_node = netdev_name_node_alloc(dev, dev->name); |
| if (!name_node) |
| return NULL; |
| INIT_LIST_HEAD(&name_node->list); |
| return name_node; |
| } |
| |
| static void netdev_name_node_free(struct netdev_name_node *name_node) |
| { |
| kfree(name_node); |
| } |
| |
| static void netdev_name_node_add(struct net *net, |
| struct netdev_name_node *name_node) |
| { |
| hlist_add_head_rcu(&name_node->hlist, |
| dev_name_hash(net, name_node->name)); |
| } |
| |
| static void netdev_name_node_del(struct netdev_name_node *name_node) |
| { |
| hlist_del_rcu(&name_node->hlist); |
| } |
| |
| static struct netdev_name_node *netdev_name_node_lookup(struct net *net, |
| const char *name) |
| { |
| struct hlist_head *head = dev_name_hash(net, name); |
| struct netdev_name_node *name_node; |
| |
| hlist_for_each_entry(name_node, head, hlist) |
| if (!strcmp(name_node->name, name)) |
| return name_node; |
| return NULL; |
| } |
| |
| static struct netdev_name_node *netdev_name_node_lookup_rcu(struct net *net, |
| const char *name) |
| { |
| struct hlist_head *head = dev_name_hash(net, name); |
| struct netdev_name_node *name_node; |
| |
| hlist_for_each_entry_rcu(name_node, head, hlist) |
| if (!strcmp(name_node->name, name)) |
| return name_node; |
| return NULL; |
| } |
| |
| bool netdev_name_in_use(struct net *net, const char *name) |
| { |
| return netdev_name_node_lookup(net, name); |
| } |
| EXPORT_SYMBOL(netdev_name_in_use); |
| |
| int netdev_name_node_alt_create(struct net_device *dev, const char *name) |
| { |
| struct netdev_name_node *name_node; |
| struct net *net = dev_net(dev); |
| |
| name_node = netdev_name_node_lookup(net, name); |
| if (name_node) |
| return -EEXIST; |
| name_node = netdev_name_node_alloc(dev, name); |
| if (!name_node) |
| return -ENOMEM; |
| netdev_name_node_add(net, name_node); |
	/* The node that holds dev->name acts as the head of the per-device list. */
| list_add_tail(&name_node->list, &dev->name_node->list); |
| |
| return 0; |
| } |
| |
| static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node) |
| { |
| list_del(&name_node->list); |
| netdev_name_node_del(name_node); |
| kfree(name_node->name); |
| netdev_name_node_free(name_node); |
| } |
| |
| int netdev_name_node_alt_destroy(struct net_device *dev, const char *name) |
| { |
| struct netdev_name_node *name_node; |
| struct net *net = dev_net(dev); |
| |
| name_node = netdev_name_node_lookup(net, name); |
| if (!name_node) |
| return -ENOENT; |
| /* lookup might have found our primary name or a name belonging |
| * to another device. |
| */ |
| if (name_node == dev->name_node || name_node->dev != dev) |
| return -EINVAL; |
| |
| __netdev_name_node_alt_destroy(name_node); |
| |
| return 0; |
| } |
| |
| static void netdev_name_node_alt_flush(struct net_device *dev) |
| { |
| struct netdev_name_node *name_node, *tmp; |
| |
| list_for_each_entry_safe(name_node, tmp, &dev->name_node->list, list) |
| __netdev_name_node_alt_destroy(name_node); |
| } |
| |
| /* Device list insertion */ |
| static void list_netdevice(struct net_device *dev) |
| { |
| struct net *net = dev_net(dev); |
| |
| ASSERT_RTNL(); |
| |
| write_lock(&dev_base_lock); |
| list_add_tail_rcu(&dev->dev_list, &net->dev_base_head); |
| netdev_name_node_add(net, dev->name_node); |
| hlist_add_head_rcu(&dev->index_hlist, |
| dev_index_hash(net, dev->ifindex)); |
| write_unlock(&dev_base_lock); |
| |
| dev_base_seq_inc(net); |
| } |
| |
| /* Device list removal |
 * caller must respect an RCU grace period before freeing/reusing dev
| */ |
| static void unlist_netdevice(struct net_device *dev) |
| { |
| ASSERT_RTNL(); |
| |
| /* Unlink dev from the device chain */ |
| write_lock(&dev_base_lock); |
| list_del_rcu(&dev->dev_list); |
| netdev_name_node_del(dev->name_node); |
| hlist_del_rcu(&dev->index_hlist); |
| write_unlock(&dev_base_lock); |
| |
| dev_base_seq_inc(dev_net(dev)); |
| } |
| |
| /* |
| * Our notifier list |
| */ |
| |
| static RAW_NOTIFIER_HEAD(netdev_chain); |
| |
| /* |
| * Device drivers call our routines to queue packets here. We empty the |
| * queue in the local softnet handler. |
| */ |
| |
| DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); |
| EXPORT_PER_CPU_SYMBOL(softnet_data); |
| |
| #ifdef CONFIG_LOCKDEP |
| /* |
| * register_netdevice() inits txq->_xmit_lock and sets lockdep class |
| * according to dev->type |
| */ |
| static const unsigned short netdev_lock_type[] = { |
| ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25, |
| ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET, |
| ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM, |
| ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP, |
| ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD, |
| ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25, |
| ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP, |
| ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD, |
| ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI, |
| ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE, |
| ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET, |
| ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL, |
| ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM, |
| ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE, |
| ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE}; |
| |
| static const char *const netdev_lock_name[] = { |
| "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25", |
| "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET", |
| "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM", |
| "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP", |
| "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD", |
| "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25", |
| "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP", |
| "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD", |
| "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI", |
| "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE", |
| "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET", |
| "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL", |
| "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM", |
| "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE", |
| "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"}; |
| |
| static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)]; |
| |
| static inline unsigned short netdev_lock_pos(unsigned short dev_type) |
| { |
| int i; |
| |
| for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++) |
| if (netdev_lock_type[i] == dev_type) |
| return i; |
| /* the last key is used by default */ |
| return ARRAY_SIZE(netdev_lock_type) - 1; |
| } |
| |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev_type); |
| lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| int i; |
| |
| i = netdev_lock_pos(dev->type); |
| lockdep_set_class_and_name(&dev->addr_list_lock, |
| &netdev_addr_lock_key[i], |
| netdev_lock_name[i]); |
| } |
| #else |
| static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock, |
| unsigned short dev_type) |
| { |
| } |
| |
| static inline void netdev_set_addr_lockdep_class(struct net_device *dev) |
| { |
| } |
| #endif |
| |
| /******************************************************************************* |
| * |
| * Protocol management and registration routines |
| * |
| *******************************************************************************/ |
| |
| |
| /* |
| * Add a protocol ID to the list. Now that the input handler is |
| * smarter we can dispense with all the messy stuff that used to be |
| * here. |
| * |
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and protocol checking
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	This holds today; do not change it.
 *	Explanation: if a handler that mangles the packet were first
 *	on the list, it could not sense that the packet is cloned and
 *	should be copied-on-write, so it would change the clone in place
 *	and subsequent readers would get a broken packet.
| * --ANK (980803) |
| */ |
| |
| static inline struct list_head *ptype_head(const struct packet_type *pt) |
| { |
| if (pt->type == htons(ETH_P_ALL)) |
| return pt->dev ? &pt->dev->ptype_all : &ptype_all; |
| else |
| return pt->dev ? &pt->dev->ptype_specific : |
| &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK]; |
| } |
| |
| /** |
| * dev_add_pack - add packet handler |
| * @pt: packet type declaration |
| * |
| * Add a protocol handler to the networking stack. The passed &packet_type |
| * is linked into kernel lists and may not be freed until it has been |
| * removed from the kernel lists. |
| * |
 *	This call does not sleep and therefore cannot guarantee
 *	that all CPUs currently in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
| */ |
| |
| void dev_add_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| |
| spin_lock(&ptype_lock); |
| list_add_rcu(&pt->list, head); |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(dev_add_pack); |
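
/*
 * A hedged usage sketch for dev_add_pack()/dev_remove_pack(); "my_rcv" and
 * "my_tap" are hypothetical names, not part of this file:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	(tap every protocol)
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		(e.g. from module init)
 *	...
 *	dev_remove_pack(&my_tap);	(e.g. from module exit)
 */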
| |
| /** |
| * __dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * The packet type might still be in use by receivers |
 *	and must not be freed until after all the CPUs have gone
| * through a quiescent state. |
| */ |
| void __dev_remove_pack(struct packet_type *pt) |
| { |
| struct list_head *head = ptype_head(pt); |
| struct packet_type *pt1; |
| |
| spin_lock(&ptype_lock); |
| |
| list_for_each_entry(pt1, head, list) { |
| if (pt == pt1) { |
| list_del_rcu(&pt->list); |
| goto out; |
| } |
| } |
| |
| pr_warn("dev_remove_pack: %p not found\n", pt); |
| out: |
| spin_unlock(&ptype_lock); |
| } |
| EXPORT_SYMBOL(__dev_remove_pack); |
| |
| /** |
| * dev_remove_pack - remove packet handler |
| * @pt: packet type declaration |
| * |
| * Remove a protocol handler that was previously added to the kernel |
| * protocol handlers by dev_add_pack(). The passed &packet_type is removed |
| * from the kernel lists and can be freed or reused once this function |
| * returns. |
| * |
| * This call sleeps to guarantee that no CPU is looking at the packet |
| * type after return. |
| */ |
| void dev_remove_pack(struct packet_type *pt) |
| { |
| __dev_remove_pack(pt); |
| |
| synchronize_net(); |
| } |
| EXPORT_SYMBOL(dev_remove_pack); |
| |
| |
| /******************************************************************************* |
| * |
| * Device Interface Subroutines |
| * |
| *******************************************************************************/ |
| |
| /** |
 *	dev_get_iflink - get 'iflink' value of an interface
| * @dev: targeted interface |
| * |
| * Indicates the ifindex the interface is linked to. |
| * Physical interfaces have the same 'ifindex' and 'iflink' values. |
| */ |
| |
| int dev_get_iflink(const struct net_device *dev) |
| { |
| if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) |
| return dev->netdev_ops->ndo_get_iflink(dev); |
| |
| return dev->ifindex; |
| } |
| EXPORT_SYMBOL(dev_get_iflink); |
| |
| /** |
| * dev_fill_metadata_dst - Retrieve tunnel egress information. |
| * @dev: targeted interface |
| * @skb: The packet. |
| * |
 *	For better visibility of tunnel traffic, OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	the user to get this info.
| */ |
| int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
| { |
| struct ip_tunnel_info *info; |
| |
| if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst) |
| return -EINVAL; |
| |
| info = skb_tunnel_info_unclone(skb); |
| if (!info) |
| return -ENOMEM; |
| if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX))) |
| return -EINVAL; |
| |
| return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_fill_metadata_dst); |
| |
| static struct net_device_path *dev_fwd_path(struct net_device_path_stack *stack) |
| { |
| int k = stack->num_paths++; |
| |
| if (WARN_ON_ONCE(k >= NET_DEVICE_PATH_STACK_MAX)) |
| return NULL; |
| |
| return &stack->path[k]; |
| } |
| |
| int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr, |
| struct net_device_path_stack *stack) |
| { |
| const struct net_device *last_dev; |
| struct net_device_path_ctx ctx = { |
| .dev = dev, |
| }; |
| struct net_device_path *path; |
| int ret = 0; |
| |
| memcpy(ctx.daddr, daddr, sizeof(ctx.daddr)); |
| stack->num_paths = 0; |
| while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) { |
| last_dev = ctx.dev; |
| path = dev_fwd_path(stack); |
| if (!path) |
| return -1; |
| |
| memset(path, 0, sizeof(struct net_device_path)); |
| ret = ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path); |
| if (ret < 0) |
| return -1; |
| |
| if (WARN_ON_ONCE(last_dev == ctx.dev)) |
| return -1; |
| } |
| |
| if (!ctx.dev) |
| return ret; |
| |
| path = dev_fwd_path(stack); |
| if (!path) |
| return -1; |
| path->type = DEV_PATH_ETHERNET; |
| path->dev = ctx.dev; |
| |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(dev_fill_forward_path); |
| |
| /** |
| * __dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found, a pointer to the device
 *	is returned; if the name is not found, %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
| */ |
| |
| struct net_device *__dev_get_by_name(struct net *net, const char *name) |
| { |
| struct netdev_name_node *node_name; |
| |
| node_name = netdev_name_node_lookup(net, name); |
| return node_name ? node_name->dev : NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_name); |
| |
| /** |
| * dev_get_by_name_rcu - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
 *	Find an interface by name.
 *	If the name is found, a pointer to the device is returned;
 *	if not, %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU read lock.
| */ |
| |
| struct net_device *dev_get_by_name_rcu(struct net *net, const char *name) |
| { |
| struct netdev_name_node *node_name; |
| |
| node_name = netdev_name_node_lookup_rcu(net, name); |
| return node_name ? node_name->dev : NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_name_rcu); |
| |
| /** |
| * dev_get_by_name - find a device by its name |
| * @net: the applicable net namespace |
| * @name: name to find |
| * |
| * Find an interface by name. This can be called from any |
| * context and does its own locking. The returned handle has |
| * the usage count incremented and the caller must use dev_put() to |
| * release it when it is no longer needed. %NULL is returned if no |
| * matching device is found. |
| */ |
| |
| struct net_device *dev_get_by_name(struct net *net, const char *name) |
| { |
| struct net_device *dev; |
| |
| rcu_read_lock(); |
| dev = dev_get_by_name_rcu(net, name); |
| dev_hold(dev); |
| rcu_read_unlock(); |
| return dev; |
| } |
| EXPORT_SYMBOL(dev_get_by_name); |
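
/*
 * A hedged sketch of the hold/put contract (error handling elided; "eth0"
 * is a placeholder name):
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *
 *	if (dev) {
 *		... use dev from any context ...
 *		dev_put(dev);
 *	}
 */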
| |
| /** |
| * __dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold either the RTNL semaphore or @dev_base_lock.
| */ |
| |
| struct net_device *__dev_get_by_index(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(__dev_get_by_index); |
| |
| /** |
| * dev_get_by_index_rcu - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU read lock.
| */ |
| |
| struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| struct hlist_head *head = dev_index_hash(net, ifindex); |
| |
| hlist_for_each_entry_rcu(dev, head, index_hlist) |
| if (dev->ifindex == ifindex) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_index_rcu); |
| |
| |
| /** |
| * dev_get_by_index - find a device by its ifindex |
| * @net: the applicable net namespace |
| * @ifindex: index of device |
| * |
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The returned device has had a reference
 *	added and the pointer is safe until the user calls dev_put() to
 *	indicate they have finished with it.
| */ |
| |
| struct net_device *dev_get_by_index(struct net *net, int ifindex) |
| { |
| struct net_device *dev; |
| |
| rcu_read_lock(); |
| dev = dev_get_by_index_rcu(net, ifindex); |
| dev_hold(dev); |
| rcu_read_unlock(); |
| return dev; |
| } |
| EXPORT_SYMBOL(dev_get_by_index); |
| |
| /** |
| * dev_get_by_napi_id - find a device by napi_id |
| * @napi_id: ID of the NAPI struct |
| * |
 *	Search for an interface by NAPI ID. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not had its reference
 *	counter increased so the caller must be careful about locking. The
 *	caller must hold the RCU read lock.
| */ |
| |
| struct net_device *dev_get_by_napi_id(unsigned int napi_id) |
| { |
| struct napi_struct *napi; |
| |
| WARN_ON_ONCE(!rcu_read_lock_held()); |
| |
| if (napi_id < MIN_NAPI_ID) |
| return NULL; |
| |
| napi = napi_by_id(napi_id); |
| |
| return napi ? napi->dev : NULL; |
| } |
| EXPORT_SYMBOL(dev_get_by_napi_id); |
| |
| /** |
| * netdev_get_name - get a netdevice name, knowing its ifindex. |
| * @net: network namespace |
| * @name: a pointer to the buffer where the name will be stored. |
| * @ifindex: the ifindex of the interface to get the name from. |
| */ |
| int netdev_get_name(struct net *net, char *name, int ifindex) |
| { |
| struct net_device *dev; |
| int ret; |
| |
| down_read(&devnet_rename_sem); |
| rcu_read_lock(); |
| |
| dev = dev_get_by_index_rcu(net, ifindex); |
| if (!dev) { |
| ret = -ENODEV; |
| goto out; |
| } |
| |
| strcpy(name, dev->name); |
| |
| ret = 0; |
| out: |
| rcu_read_unlock(); |
| up_read(&devnet_rename_sem); |
| return ret; |
| } |
| |
| /** |
| * dev_getbyhwaddr_rcu - find a device by its hardware address |
| * @net: the applicable net namespace |
| * @type: media type of device |
| * @ha: hardware address |
| * |
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or %NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
| */ |
| |
| struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, |
| const char *ha) |
| { |
| struct net_device *dev; |
| |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type && |
| !memcmp(dev->dev_addr, ha, dev->addr_len)) |
| return dev; |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(dev_getbyhwaddr_rcu); |
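
/*
 * Hedged sketch (the address is a made-up locally administered MAC; the
 * caller must be inside an RCU read-side section):
 *
 *	static const char mac[ETH_ALEN] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x01};
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *	...
 *	rcu_read_unlock();
 */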
| |
| struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) |
| { |
| struct net_device *dev, *ret = NULL; |
| |
| rcu_read_lock(); |
| for_each_netdev_rcu(net, dev) |
| if (dev->type == type) { |
| dev_hold(dev); |
| ret = dev; |
| break; |
| } |
| rcu_read_unlock(); |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_getfirstbyhwtype); |
| |
| /** |
| * __dev_get_by_flags - find any device with given flags |
| * @net: the applicable net namespace |
| * @if_flags: IFF_* values |
| * @mask: bitmask of bits in if_flags to check |
| * |
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or %NULL if none is found. Must be called
 *	inside rtnl_lock(), and the result's refcount is unchanged.
| */ |
| |
| struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags, |
| unsigned short mask) |
| { |
| struct net_device *dev, *ret; |
| |
| ASSERT_RTNL(); |
| |
| ret = NULL; |
| for_each_netdev(net, dev) { |
| if (((dev->flags ^ if_flags) & mask) == 0) { |
| ret = dev; |
| break; |
| } |
| } |
| return ret; |
| } |
| EXPORT_SYMBOL(__dev_get_by_flags); |
| |
| /** |
| * dev_valid_name - check if name is okay for network device |
| * @name: name string |
| * |
| * Network device names need to be valid file names to |
| * allow sysfs to work. We also disallow any kind of |
| * whitespace. |
| */ |
| bool dev_valid_name(const char *name) |
| { |
| if (*name == '\0') |
| return false; |
| if (strnlen(name, IFNAMSIZ) == IFNAMSIZ) |
| return false; |
| if (!strcmp(name, ".") || !strcmp(name, "..")) |
| return false; |
| |
| while (*name) { |
| if (*name == '/' || *name == ':' || isspace(*name)) |
| return false; |
| name++; |
| } |
| return true; |
| } |
| EXPORT_SYMBOL(dev_valid_name); |
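
/*
 * For illustration: dev_valid_name("eth0") and dev_valid_name("veth-a2")
 * are true; "", ".", "..", "a/b", "a:b", names containing whitespace, and
 * names of IFNAMSIZ characters or more are rejected.
 */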
| |
| /** |
| * __dev_alloc_name - allocate a name for a device |
| * @net: network namespace to allocate the device name in |
| * @name: name format string |
| * @buf: scratch buffer and result name string |
| * |
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| static int __dev_alloc_name(struct net *net, const char *name, char *buf) |
| { |
| int i = 0; |
| const char *p; |
| const int max_netdevices = 8*PAGE_SIZE; |
| unsigned long *inuse; |
| struct net_device *d; |
| |
| if (!dev_valid_name(name)) |
| return -EINVAL; |
| |
| p = strchr(name, '%'); |
| if (p) { |
| /* |
| * Verify the string as this thing may have come from |
| * the user. There must be either one "%d" and no other "%" |
| * characters. |
| */ |
| if (p[1] != 'd' || strchr(p + 2, '%')) |
| return -EINVAL; |
| |
| /* Use one page as a bit array of possible slots */ |
| inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC); |
| if (!inuse) |
| return -ENOMEM; |
| |
| for_each_netdev(net, d) { |
| struct netdev_name_node *name_node; |
| list_for_each_entry(name_node, &d->name_node->list, list) { |
| if (!sscanf(name_node->name, name, &i)) |
| continue; |
| if (i < 0 || i >= max_netdevices) |
| continue; |
| |
| /* avoid cases where sscanf is not exact inverse of printf */ |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!strncmp(buf, name_node->name, IFNAMSIZ)) |
| __set_bit(i, inuse); |
| } |
| if (!sscanf(d->name, name, &i)) |
| continue; |
| if (i < 0 || i >= max_netdevices) |
| continue; |
| |
| /* avoid cases where sscanf is not exact inverse of printf */ |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!strncmp(buf, d->name, IFNAMSIZ)) |
| __set_bit(i, inuse); |
| } |
| |
| i = find_first_zero_bit(inuse, max_netdevices); |
| free_page((unsigned long) inuse); |
| } |
| |
| snprintf(buf, IFNAMSIZ, name, i); |
| if (!netdev_name_in_use(net, buf)) |
| return i; |
| |
	/* It is possible to run out of slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
| return -ENFILE; |
| } |
| |
| static int dev_alloc_name_ns(struct net *net, |
| struct net_device *dev, |
| const char *name) |
| { |
| char buf[IFNAMSIZ]; |
| int ret; |
| |
| BUG_ON(!net); |
| ret = __dev_alloc_name(net, name, buf); |
| if (ret >= 0) |
| strlcpy(dev->name, buf, IFNAMSIZ); |
| return ret; |
| } |
| |
| /** |
| * dev_alloc_name - allocate a name for a device |
| * @dev: device |
| * @name: name format string |
| * |
 *	Passed a format string, e.g. "lt%d", it will try to find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (i.e. 32K on most platforms).
| * Returns the number of the unit assigned or a negative errno code. |
| */ |
| |
| int dev_alloc_name(struct net_device *dev, const char *name) |
| { |
| return dev_alloc_name_ns(dev_net(dev), dev, name); |
| } |
| EXPORT_SYMBOL(dev_alloc_name); |
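
/*
 * Hedged usage sketch: asking for "wlan%d" assigns the lowest free unit,
 * so the first caller typically ends up with "wlan0":
 *
 *	err = dev_alloc_name(dev, "wlan%d");
 *	if (err < 0)
 *		...	(err is a negative errno)
 *	(on success, err is the unit number and dev->name is filled in)
 */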
| |
| static int dev_get_valid_name(struct net *net, struct net_device *dev, |
| const char *name) |
| { |
| BUG_ON(!net); |
| |
| if (!dev_valid_name(name)) |
| return -EINVAL; |
| |
| if (strchr(name, '%')) |
| return dev_alloc_name_ns(net, dev, name); |
| else if (netdev_name_in_use(net, name)) |
| return -EEXIST; |
| else if (dev->name != name) |
| strlcpy(dev->name, name, IFNAMSIZ); |
| |
| return 0; |
| } |
| |
| /** |
| * dev_change_name - change name of a device |
| * @dev: device |
| * @newname: name (or format string) must be at least IFNAMSIZ |
| * |
 *	Change the name of a device. Format strings such as "eth%d"
 *	may be passed for wildcarding.
| */ |
| int dev_change_name(struct net_device *dev, const char *newname) |
| { |
| unsigned char old_assign_type; |
| char oldname[IFNAMSIZ]; |
| int err = 0; |
| int ret; |
| struct net *net; |
| |
| ASSERT_RTNL(); |
| BUG_ON(!dev_net(dev)); |
| |
| net = dev_net(dev); |
| |
	/* Some auto-enslaved devices, e.g. failover slaves, are
	 * special: userspace might rename the device after the
	 * interface has been brought up and running since the
	 * point the kernel initiated auto-enslavement. Allow a
	 * live name change even when these slave devices are
	 * up and running.
	 *
	 * Typically, users of these auto-enslaving devices
	 * don't actually care about slave name changes, as
	 * they are supposed to operate on the master interface
	 * directly.
	 */
| if (dev->flags & IFF_UP && |
| likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK))) |
| return -EBUSY; |
| |
| down_write(&devnet_rename_sem); |
| |
| if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { |
| up_write(&devnet_rename_sem); |
| return 0; |
| } |
| |
| memcpy(oldname, dev->name, IFNAMSIZ); |
| |
| err = dev_get_valid_name(net, dev, newname); |
| if (err < 0) { |
| up_write(&devnet_rename_sem); |
| return err; |
| } |
| |
| if (oldname[0] && !strchr(oldname, '%')) |
| netdev_info(dev, "renamed from %s\n", oldname); |
| |
| old_assign_type = dev->name_assign_type; |
| dev->name_assign_type = NET_NAME_RENAMED; |
| |
| rollback: |
| ret = device_rename(&dev->dev, dev->name); |
| if (ret) { |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| up_write(&devnet_rename_sem); |
| return ret; |
| } |
| |
| up_write(&devnet_rename_sem); |
| |
| netdev_adjacent_rename_links(dev, oldname); |
| |
| write_lock(&dev_base_lock); |
| netdev_name_node_del(dev->name_node); |
| write_unlock(&dev_base_lock); |
| |
| synchronize_rcu(); |
| |
| write_lock(&dev_base_lock); |
| netdev_name_node_add(net, dev->name_node); |
| write_unlock(&dev_base_lock); |
| |
| ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev); |
| ret = notifier_to_errno(ret); |
| |
| if (ret) { |
| /* err >= 0 after dev_alloc_name() or stores the first errno */ |
| if (err >= 0) { |
| err = ret; |
| down_write(&devnet_rename_sem); |
| memcpy(dev->name, oldname, IFNAMSIZ); |
| memcpy(oldname, newname, IFNAMSIZ); |
| dev->name_assign_type = old_assign_type; |
| old_assign_type = NET_NAME_RENAMED; |
| goto rollback; |
| } else { |
| netdev_err(dev, "name change rollback failed: %d\n", |
| ret); |
| } |
| } |
| |
| return err; |
| } |
| |
| /** |
| * dev_set_alias - change ifalias of a device |
| * @dev: device |
| * @alias: name up to IFALIASZ |
| * @len: limit of bytes to copy from info |
| * |
 *	Set the ifalias for a device.
| */ |
| int dev_set_alias(struct net_device *dev, const char *alias, size_t len) |
| { |
| struct dev_ifalias *new_alias = NULL; |
| |
| if (len >= IFALIASZ) |
| return -EINVAL; |
| |
| if (len) { |
| new_alias = kmalloc(sizeof(*new_alias) + len + 1, GFP_KERNEL); |
| if (!new_alias) |
| return -ENOMEM; |
| |
| memcpy(new_alias->ifalias, alias, len); |
| new_alias->ifalias[len] = 0; |
| } |
| |
| mutex_lock(&ifalias_mutex); |
| new_alias = rcu_replace_pointer(dev->ifalias, new_alias, |
| mutex_is_locked(&ifalias_mutex)); |
| mutex_unlock(&ifalias_mutex); |
| |
| if (new_alias) |
| kfree_rcu(new_alias, rcuhead); |
| |
| return len; |
| } |
| EXPORT_SYMBOL(dev_set_alias); |
| |
| /** |
| * dev_get_alias - get ifalias of a device |
| * @dev: device |
| * @name: buffer to store name of ifalias |
| * @len: size of buffer |
| * |
 *	Get the ifalias for a device. The caller must make sure dev cannot
 *	go away, e.g. by holding the RCU read lock or owning a reference
 *	to the device.
| */ |
| int dev_get_alias(const struct net_device *dev, char *name, size_t len) |
| { |
| const struct dev_ifalias *alias; |
| int ret = 0; |
| |
| rcu_read_lock(); |
| alias = rcu_dereference(dev->ifalias); |
| if (alias) |
| ret = snprintf(name, len, "%s", alias->ifalias); |
| rcu_read_unlock(); |
| |
| return ret; |
| } |
| |
| /** |
| * netdev_features_change - device changes features |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed features. |
| */ |
| void netdev_features_change(struct net_device *dev) |
| { |
| call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev); |
| } |
| EXPORT_SYMBOL(netdev_features_change); |
| |
| /** |
| * netdev_state_change - device changes state |
| * @dev: device to cause notification |
| * |
| * Called to indicate a device has changed state. This function calls |
| * the notifier chains for netdev_chain and sends a NEWLINK message |
| * to the routing socket. |
| */ |
| void netdev_state_change(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| struct netdev_notifier_change_info change_info = { |
| .info.dev = dev, |
| }; |
| |
| call_netdevice_notifiers_info(NETDEV_CHANGE, |
| &change_info.info); |
| rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); |
| } |
| } |
| EXPORT_SYMBOL(netdev_state_change); |
| |
| /** |
| * __netdev_notify_peers - notify network peers about existence of @dev, |
| * to be called when rtnl lock is already held. |
| * @dev: network device |
| * |
| * Generate traffic such that interested network peers are aware of |
| * @dev, such as by generating a gratuitous ARP. This may be used when |
| * a device wants to inform the rest of the network about some sort of |
| * reconfiguration such as a failover event or virtual machine |
| * migration. |
| */ |
| void __netdev_notify_peers(struct net_device *dev) |
| { |
| ASSERT_RTNL(); |
| call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev); |
| call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev); |
| } |
| EXPORT_SYMBOL(__netdev_notify_peers); |
| |
| /** |
| * netdev_notify_peers - notify network peers about existence of @dev |
| * @dev: network device |
| * |
| * Generate traffic such that interested network peers are aware of |
| * @dev, such as by generating a gratuitous ARP. This may be used when |
| * a device wants to inform the rest of the network about some sort of |
| * reconfiguration such as a failover event or virtual machine |
| * migration. |
| */ |
| void netdev_notify_peers(struct net_device *dev) |
| { |
| rtnl_lock(); |
| __netdev_notify_peers(dev); |
| rtnl_unlock(); |
| } |
| EXPORT_SYMBOL(netdev_notify_peers); |
| |
| static int napi_threaded_poll(void *data); |
| |
| static int napi_kthread_create(struct napi_struct *n) |
| { |
| int err = 0; |
| |
| /* Create and wake up the kthread once to put it in |
| * TASK_INTERRUPTIBLE mode to avoid the blocked task |
| * warning and work with loadavg. |
| */ |
| n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d", |
| n->dev->name, n->napi_id); |
| if (IS_ERR(n->thread)) { |
| err = PTR_ERR(n->thread); |
| pr_err("kthread_run failed with err %d\n", err); |
| n->thread = NULL; |
| } |
| |
| return err; |
| } |
| |
| static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) |
| { |
| const struct net_device_ops *ops = dev->netdev_ops; |
| int ret; |
| |
| ASSERT_RTNL(); |
| dev_addr_check(dev); |
| |
| if (!netif_device_present(dev)) { |
| /* may be detached because parent is runtime-suspended */ |
| if (dev->dev.parent) |
| pm_runtime_resume(dev->dev.parent); |
| if (!netif_device_present(dev)) |
| return -ENODEV; |
| } |
| |
	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device.
	 */
| netpoll_poll_disable(dev); |
| |
| ret = call_netdevice_notifiers_extack(NETDEV_PRE_UP, dev, extack); |
| ret = notifier_to_errno(ret); |
| if (ret) |
| return ret; |
| |
| set_bit(__LINK_STATE_START, &dev->state); |
| |
| if (ops->ndo_validate_addr) |
| ret = ops->ndo_validate_addr(dev); |
| |
| if (!ret && ops->ndo_open) |
| ret = ops->ndo_open(dev); |
| |
| netpoll_poll_enable(dev); |
| |
| if (ret) |
| clear_bit(__LINK_STATE_START, &dev->state); |
| else { |
| dev->flags |= IFF_UP; |
| dev_set_rx_mode(dev); |
| dev_activate(dev); |
| add_device_randomness(dev->dev_addr, dev->addr_len); |
| } |
| |
| return ret; |
| } |
| |
| /** |
| * dev_open - prepare an interface for use. |
| * @dev: device to open |
| * @extack: netlink extended ack |
| * |
| * Takes a device from down to up state. The device's private open |
| * function is invoked and then the multicast lists are loaded. Finally |
| * the device is moved into the up state and a %NETDEV_UP message is |
| * sent to the netdev notifier chain. |
| * |
| * Calling this function on an active interface is a nop. On a failure |
| * a negative errno code is returned. |
| */ |
| int dev_open(struct net_device *dev, struct netlink_ext_ack *extack) |
| { |
| int ret; |
| |
| if (dev->flags & IFF_UP) |
| return 0; |
| |
| ret = __dev_open(dev, extack); |
| if (ret < 0) |
| return ret; |
| |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_UP, dev); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(dev_open); |
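
/*
 * Minimal calling sketch; dev_open() asserts that the rtnl lock is held:
 *
 *	rtnl_lock();
 *	err = dev_open(dev, NULL);	(NULL: no extended ack requested)
 *	rtnl_unlock();
 */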
| |
| static void __dev_close_many(struct list_head *head) |
| { |
| struct net_device *dev; |
| |
| ASSERT_RTNL(); |
| might_sleep(); |
| |
| list_for_each_entry(dev, head, close_list) { |
| /* Temporarily disable netpoll until the interface is down */ |
| netpoll_poll_disable(dev); |
| |
| call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); |
| |
| clear_bit(__LINK_STATE_START, &dev->state); |
| |
		/* Synchronize to the scheduled poll. We cannot touch the
		 * poll list; it may even be on a different CPU. So just
		 * clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
| smp_mb__after_atomic(); /* Commit netif_running(). */ |
| } |
| |
| dev_deactivate_many(head); |
| |
| list_for_each_entry(dev, head, close_list) { |
| const struct net_device_ops *ops = dev->netdev_ops; |
| |
| /* |
| * Call the device specific close. This cannot fail. |
| * Only if device is UP |
| * |
| * We allow it to be called even after a DETACH hot-plug |
| * event. |
| */ |
| if (ops->ndo_stop) |
| ops->ndo_stop(dev); |
| |
| dev->flags &= ~IFF_UP; |
| netpoll_poll_enable(dev); |
| } |
| } |
| |
| static void __dev_close(struct net_device *dev) |
| { |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| __dev_close_many(&single); |
| list_del(&single); |
| } |
| |
| void dev_close_many(struct list_head *head, bool unlink) |
| { |
| struct net_device *dev, *tmp; |
| |
| /* Remove the devices that don't need to be closed */ |
| list_for_each_entry_safe(dev, tmp, head, close_list) |
| if (!(dev->flags & IFF_UP)) |
| list_del_init(&dev->close_list); |
| |
| __dev_close_many(head); |
| |
| list_for_each_entry_safe(dev, tmp, head, close_list) { |
| rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL); |
| call_netdevice_notifiers(NETDEV_DOWN, dev); |
| if (unlink) |
| list_del_init(&dev->close_list); |
| } |
| } |
| EXPORT_SYMBOL(dev_close_many); |
| |
| /** |
| * dev_close - shutdown an interface. |
| * @dev: device to shutdown |
| * |
| * This function moves an active device into down state. A |
| * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device |
| * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier |
| * chain. |
| */ |
| void dev_close(struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| LIST_HEAD(single); |
| |
| list_add(&dev->close_list, &single); |
| dev_close_many(&single, true); |
| list_del(&single); |
| } |
| } |
| EXPORT_SYMBOL(dev_close); |
| |
| |
| /** |
| * dev_disable_lro - disable Large Receive Offload on a device |
| * @dev: device |
| * |
| * Disable Large Receive Offload (LRO) on a net device. Must be |
| * called under RTNL. This is needed if received packets may be |
| * forwarded to another interface. |
| */ |
| void dev_disable_lro(struct net_device *dev) |
| { |
| struct net_device *lower_dev; |
| struct list_head *iter; |
| |
| dev->wanted_features &= ~NETIF_F_LRO; |
| netdev_update_features(dev); |
| |
| if (unlikely(dev->features & NETIF_F_LRO)) |
| netdev_WARN(dev, "failed to disable LRO!\n"); |
| |
| netdev_for_each_lower_dev(dev, lower_dev, iter) |
| dev_disable_lro(lower_dev); |
| } |
| EXPORT_SYMBOL(dev_disable_lro); |
| |
| /** |
| * dev_disable_gro_hw - disable HW Generic Receive Offload on a device |
| * @dev: device |
| * |
| * Disable HW Generic Receive Offload (GRO_HW) on a net device. Must be |
| * called under RTNL. This is needed if Generic XDP is installed on |
| * the device. |
| */ |
| static void dev_disable_gro_hw(struct net_device *dev) |
| { |
| dev->wanted_features &= ~NETIF_F_GRO_HW; |
| netdev_update_features(dev); |
| |
| if (unlikely(dev->features & NETIF_F_GRO_HW)) |
| netdev_WARN(dev, "failed to disable GRO_HW!\n"); |
| } |
| |
| const char *netdev_cmd_to_name(enum netdev_cmd cmd) |
| { |
| #define N(val) \ |
| case NETDEV_##val: \ |
| return "NETDEV_" __stringify(val); |
| switch (cmd) { |
| N(UP) N(DOWN) N(REBOOT) N(CHANGE) N(REGISTER) N(UNREGISTER) |
| N(CHANGEMTU) N(CHANGEADDR) N(GOING_DOWN) N(CHANGENAME) N(FEAT_CHANGE) |
| N(BONDING_FAILOVER) N(PRE_UP) N(PRE_TYPE_CHANGE) N(POST_TYPE_CHANGE) |
| N(POST_INIT) N(RELEASE) N(NOTIFY_PEERS) N(JOIN) N(CHANGEUPPER) |
| N(RESEND_IGMP) N(PRECHANGEMTU) N(CHANGEINFODATA) N(BONDING_INFO) |
| N(PRECHANGEUPPER) N(CHANGELOWERSTATE) N(UDP_TUNNEL_PUSH_INFO) |
| N(UDP_TUNNEL_DROP_INFO) N(CHANGE_TX_QUEUE_LEN) |
| N(CVLAN_FILTER_PUSH_INFO) N(CVLAN_FILTER_DROP_INFO) |
| N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO) |
| N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE) |
| N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA) |
| } |
| #undef N |
| return "UNKNOWN_NETDEV_EVENT"; |
| } |
| EXPORT_SYMBOL_GPL(netdev_cmd_to_name); |
| |
| static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val, |
| struct net_device *dev) |
| { |
| struct netdev_notifier_info info = { |
| .dev = dev, |
| }; |
| |
| return nb->notifier_call(nb, val, &info); |
| } |
| |
| static int call_netdevice_register_notifiers(struct notifier_block *nb, |
| struct net_device *dev) |
| { |
| int err; |
| |
| err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev); |
| err = notifier_to_errno(err); |
| if (err) |
| return err; |
| |
| if (!(dev->flags & IFF_UP)) |
| return 0; |
| |
| call_netdevice_notifier(nb, NETDEV_UP, dev); |
| return 0; |
| } |
| |
| static void call_netdevice_unregister_notifiers(struct notifier_block *nb, |
| struct net_device *dev) |
| { |
| if (dev->flags & IFF_UP) { |
| call_netdevice_notifier(nb, NETDEV_GOING_DOWN, |
| dev); |
| call_netdevice_notifier(nb, NETDEV_DOWN, dev); |
| } |
| call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev); |
| } |
| |
| static int call_netdevice_register_net_notifiers(struct notifier_block *nb, |
| struct net *net) |
| { |
| struct net_device *dev; |
| int err; |
| |
| for_each_netdev(net, dev) { |
| err = call_netdevice_register_notifiers(nb, dev); |
| if (err) |
| goto rollback; |
| } |
| return 0; |
| |
| rollback: |
| for_each_netdev_continue_reverse(net, dev) |
| call_netdevice_unregister_notifiers(nb, dev); |
| return err; |
| } |
| |
| static void call_netdevice_unregister_net_notifiers(struct notifier_block *nb, |
| struct net *net) |
| { |
| struct net_device *dev; |
| |
| for_each_netdev(net, dev) |
| call_netdevice_unregister_notifiers(nb, dev); |
| } |
| |
| static int dev_boot_phase = 1; |
| |
| /** |
| * register_netdevice_notifier - register a network notifier block |
| * @nb: notifier |
| * |
| * Register a notifier to be called when network device events occur. |
| * The notifier passed is linked into the kernel structures and must |
| * not be reused until it has been unregistered. A negative errno code |
| * is returned on a failure. |
| * |
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it to have a race-free
 * view of the network device list.
| */ |
| |
| int register_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net *net; |
| int err; |
| |
| /* Close race with setup_net() and cleanup_net() */ |
| down_write(&pernet_ops_rwsem); |
| rtnl_lock(); |
| err = raw_notifier_chain_register(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| if (dev_boot_phase) |
| goto unlock; |
| for_each_net(net) { |
| err = call_netdevice_register_net_notifiers(nb, net); |
| if (err) |
| goto rollback; |
| } |
| |
| unlock: |
| rtnl_unlock(); |
| up_write(&pernet_ops_rwsem); |
| return err; |
| |
| rollback: |
| for_each_net_continue_reverse(net) |
| call_netdevice_unregister_net_notifiers(nb, net); |
| |
| raw_notifier_chain_unregister(&netdev_chain, nb); |
| goto unlock; |
| } |
| EXPORT_SYMBOL(register_netdevice_notifier); |
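
/*
 * Hedged registration sketch ("my_netdev_event" and "my_nb" are
 * hypothetical names, not part of this file):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			netdev_info(dev, "came up\n");
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */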
| |
| /** |
| * unregister_netdevice_notifier - unregister a network notifier block |
| * @nb: notifier |
| * |
| * Unregister a notifier previously registered by |
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
| * is returned on a failure. |
| * |
 * After unregistering, unregister and down device events are synthesized
| * for all devices on the device list to the removed notifier to remove |
| * the need for special case cleanup code. |
| */ |
| |
| int unregister_netdevice_notifier(struct notifier_block *nb) |
| { |
| struct net *net; |
| int err; |
| |
| /* Close race with setup_net() and cleanup_net() */ |
| down_write(&pernet_ops_rwsem); |
| rtnl_lock(); |
| err = raw_notifier_chain_unregister(&netdev_chain, nb); |
| if (err) |
| goto unlock; |
| |
| for_each_net(net) |
| call_netdevice_unregister_net_notifiers(nb, net); |
| |
| unlock: |
| rtnl_unlock(); |
| up_write(&pernet_ops_rwsem); |
| return err; |
| } |
| EXPORT_SYMBOL(unregister_netdevice_notifier); |
| |
| static int __register_netdevice_notifier_net(struct net *net, |
| struct notifier_block *nb, |
| bool ignore_call_fail) |
| { |
| int err; |
| |
| err = raw_notifier_chain_register(&net->netdev_chain, nb); |
| if (err) |
| return err; |
| if (dev_boot_phase) |
| return 0; |
| |
| err = call_netdevice_register_net_notifiers(nb, net); |
| if (err && !ignore_call_fail) |
| goto chain_unregister; |
| |
| return 0; |
| |
| chain_unregister: |
| raw_notifier_chain_unregister(&net->netdev_chain, nb); |
| return err; |
| } |
| |
| static int __unregister_netdevice_notifier_net(struct net *net, |
| struct notifier_block *nb) |
| { |
| int err; |
| |
| err = raw_notifier_chain_unregister(&net->netdev_chain, nb); |
| if (err) |
| return err; |
| |
| call_netdevice_unregister_net_notifiers(nb, net); |
| return 0; |
| } |
| |
| /** |
| * register_netdevice_notifier_net - register a per-netns network notifier block |
| * @net: network namespace |
| * @nb: notifier |
| * |
| * Register a notifier to be called when network device events occur. |
| * The notifier passed is linked into the kernel structures and must |
| * not be reused until it has been unregistered. A negative errno code |
| * is returned on a failure. |
| * |
 * When registered, all registration and up events are replayed
 * to the new notifier to allow it to have a race-free
 * view of the network device list.
| */ |
| |
| int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb) |
| { |
| int err; |
| |
| rtnl_lock(); |
| err = __register_netdevice_notifier_net(net, nb, false); |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(register_netdevice_notifier_net); |
| |
| /** |
| * unregister_netdevice_notifier_net - unregister a per-netns |
| * network notifier block |
| * @net: network namespace |
| * @nb: notifier |
| * |
| * Unregister a notifier previously registered by |
 * register_netdevice_notifier_net(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
| * is returned on a failure. |
| * |
 * After unregistering, unregister and down device events are synthesized
| * for all devices on the device list to the removed notifier to remove |
| * the need for special case cleanup code. |
| */ |
| |
| int unregister_netdevice_notifier_net(struct net *net, |
| struct notifier_block *nb) |
| { |
| int err; |
| |
| rtnl_lock(); |
| err = __unregister_netdevice_notifier_net(net, nb); |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(unregister_netdevice_notifier_net); |
| |
| int register_netdevice_notifier_dev_net(struct net_device *dev, |
| struct notifier_block *nb, |
| struct netdev_net_notifier *nn) |
| { |
| int err; |
| |
| rtnl_lock(); |
| err = __register_netdevice_notifier_net(dev_net(dev), nb, false); |
| if (!err) { |
| nn->nb = nb; |
| list_add(&nn->list, &dev->net_notifier_list); |
| } |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(register_netdevice_notifier_dev_net); |
| |
| int unregister_netdevice_notifier_dev_net(struct net_device *dev, |
| struct notifier_block *nb, |
| struct netdev_net_notifier *nn) |
| { |
| int err; |
| |
| rtnl_lock(); |
| list_del(&nn->list); |
| err = __unregister_netdevice_notifier_net(dev_net(dev), nb); |
| rtnl_unlock(); |
| return err; |
| } |
| EXPORT_SYMBOL(unregister_netdevice_notifier_dev_net); |
| |
| static void move_netdevice_notifiers_dev_net(struct net_device *dev, |
| struct net *net) |
| { |
| struct netdev_net_notifier *nn; |
| |
| list_for_each_entry(nn, &dev->net_notifier_list, list) { |
| __unregister_netdevice_notifier_net(dev_net(dev), nn->nb); |
| __register_netdevice_notifier_net(net, nn->nb, true); |
| } |
| } |
| |
| /** |
| * call_netdevice_notifiers_info - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @info: notifier information data |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| static int call_netdevice_notifiers_info(unsigned long val, |
| struct netdev_notifier_info *info) |
| { |
| struct net *net = dev_net(info->dev); |
| int ret; |
| |
| ASSERT_RTNL(); |
| |
| /* Run the per-netns notifier block chain first, then the global one. |
| * Hopefully, one day, the global chain can be removed once all |
| * notifier block registrants have been converted to be per-netns. |
| */ |
| ret = raw_notifier_call_chain(&net->netdev_chain, val, info); |
| if (ret & NOTIFY_STOP_MASK) |
| return ret; |
| return raw_notifier_call_chain(&netdev_chain, val, info); |
| } |
| |
| /** |
| * call_netdevice_notifiers_info_robust - call per-netns notifier blocks |
| * and roll back on error |
| * @val_up: value passed unmodified to notifier function |
| * @val_down: value passed unmodified to the notifier function when |
| * recovering from an error on @val_up |
| * @info: notifier information data |
| * |
| * Call all per-netns network notifier blocks, but not notifier blocks on |
| * the global notifier chain. Parameters and return value are as for |
| * raw_notifier_call_chain_robust(). |
| */ |
| |
| static int |
| call_netdevice_notifiers_info_robust(unsigned long val_up, |
| unsigned long val_down, |
| struct netdev_notifier_info *info) |
| { |
| struct net *net = dev_net(info->dev); |
| |
| ASSERT_RTNL(); |
| |
| return raw_notifier_call_chain_robust(&net->netdev_chain, |
| val_up, val_down, info); |
| } |
| |
| static int call_netdevice_notifiers_extack(unsigned long val, |
| struct net_device *dev, |
| struct netlink_ext_ack *extack) |
| { |
| struct netdev_notifier_info info = { |
| .dev = dev, |
| .extack = extack, |
| }; |
| |
| return call_netdevice_notifiers_info(val, &info); |
| } |
| |
| /** |
| * call_netdevice_notifiers - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| |
| int call_netdevice_notifiers(unsigned long val, struct net_device *dev) |
| { |
| return call_netdevice_notifiers_extack(val, dev, NULL); |
| } |
| EXPORT_SYMBOL(call_netdevice_notifiers); |
| |
| /** |
| * call_netdevice_notifiers_mtu - call all network notifier blocks |
| * @val: value passed unmodified to notifier function |
| * @dev: net_device pointer passed unmodified to notifier function |
| * @arg: additional u32 argument passed to the notifier function |
| * |
| * Call all network notifier blocks. Parameters and return value |
| * are as for raw_notifier_call_chain(). |
| */ |
| static int call_netdevice_notifiers_mtu(unsigned long val, |
| struct net_device *dev, u32 arg) |
| { |
| struct netdev_notifier_info_ext info = { |
| .info.dev = dev, |
| .ext.mtu = arg, |
| }; |
| |
| BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0); |
| |
| return call_netdevice_notifiers_info(val, &info.info); |
| } |
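| |
| /* Consumer-side sketch (illustrative, not part of this file): because the |
| * BUILD_BUG_ON above pins info at offset 0, a notifier callback may view |
| * its pointer argument as the extended structure to read the extra u32, |
| * e.g. the pre-change MTU passed with NETDEV_CHANGEMTU: |
| * |
| *	static int my_mtu_event(struct notifier_block *nb, |
| *				unsigned long event, void *ptr) |
| *	{ |
| *		struct netdev_notifier_info_ext *info_ext = ptr; |
| * |
| *		if (event == NETDEV_CHANGEMTU) |
| *			pr_info("mtu arg: %u\n", info_ext->ext.mtu); |
| *		return NOTIFY_DONE; |
| *	} |
| */ |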
| |
| #ifdef CONFIG_NET_INGRESS |
| static DEFINE_STATIC_KEY_FALSE(ingress_needed_key); |
| |
| void net_inc_ingress_queue(void) |
| { |
| static_branch_inc(&ingress_needed_key); |
| } |
| EXPORT_SYMBOL_GPL(net_inc_ingress_queue); |
| |
| void net_dec_ingress_queue(void) |
| { |
| static_branch_dec(&ingress_needed_key); |
| } |
| EXPORT_SYMBOL_GPL(net_dec_ingress_queue); |
| #endif |
| |
| #ifdef CONFIG_NET_EGRESS |
| static DEFINE_STATIC_KEY_FALSE(egress_needed_key); |
| |
| void net_inc_egress_queue(void) |
| { |
| static_branch_inc(&egress_needed_key); |
| } |
| EXPORT_SYMBOL_GPL(net_inc_egress_queue); |
| |
| void net_dec_egress_queue(void) |
| { |
| static_branch_dec(&egress_needed_key); |
| } |
| EXPORT_SYMBOL_GPL(net_dec_egress_queue); |
| #endif |
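| |
| /* These counting static keys let the hot paths skip the ingress/egress |
| * hooks entirely while no user holds a reference; the branch is patched |
| * to a no-op when the key is disabled. A gating sketch (illustrative, |
| * sch_handle_ingress() shown only as an example consumer): |
| * |
| *	if (static_branch_unlikely(&ingress_needed_key)) |
| *		skb = sch_handle_ingress(skb, ...); |
| */ |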
| |
| DEFINE_STATIC_KEY_FALSE(netstamp_needed_key); |
| EXPORT_SYMBOL(netstamp_needed_key); |
| #ifdef CONFIG_JUMP_LABEL |
| static atomic_t netstamp_needed_deferred; |
| static atomic_t netstamp_wanted; |
| static void netstamp_clear(struct work_struct *work) |
| { |
| int deferred = atomic_xchg(&netstamp_needed_deferred, 0); |
| int wanted; |
| |
| wanted = atomic_add_return(deferred, &netstamp_wanted); |
| if (wanted > 0) |
| static_branch_enable(&netstamp_needed_key); |
| else |
| static_branch_disable(&netstamp_needed_key); |
| } |
| static DECLARE_WORK(netstamp_work, netstamp_clear); |
| #endif |
| |
| void net_enable_timestamp(void) |
| { |
| #ifdef CONFIG_JUMP_LABEL |
| int wanted; |
| |
| while (1) { |
| wanted = atomic_read(&netstamp_wanted); |
| if (wanted <= 0) |
| break; |
| if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted + 1) == wanted) |
| return; |
| } |
| atomic_inc(&netstamp_needed_deferred); |
| schedule_work(&netstamp_work); |
| #else |
| static_branch_inc(&netstamp_needed_key); |
| #endif |
| } |
| EXPORT_SYMBOL(net_enable_timestamp); |
| |
| void net_disable_timestamp(void) |
| { |
| #ifdef CONFIG_JUMP_LABEL |
| int wanted; |
| |
| while (1) { |
| wanted = atomic_read(&netstamp_wanted); |
| if (wanted <= 1) |
| break; |
| if (atomic_cmpxchg(&netstamp_wanted, wanted, wanted - 1) == wanted) |
| return; |
| } |
| atomic_dec(&netstamp_needed_deferred); |
| schedule_work(&netstamp_work); |
| #else |
| static_branch_dec(&netstamp_needed_key); |
| #endif |
| } |
| EXPORT_SYMBOL(net_disable_timestamp); |
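| |
| /* Pairing sketch (illustrative): a feature needing packet timestamps |
| * takes a reference for its lifetime and drops it on teardown: |
| * |
| *	net_enable_timestamp(); |
| *	... |
| *	net_disable_timestamp(); |
| * |
| * Under CONFIG_JUMP_LABEL the static key is flipped from a workqueue, |
| * so both calls are intended to be safe from atomic context. |
| */ |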
| |
| static inline void net_timestamp_set(struct sk_buff *skb) |
| { |
| skb->tstamp = 0; |
| skb->mono_delivery_time = 0; |
| if (static_branch_unlikely(&netstamp_needed_key)) |
| skb->tstamp = ktime_get_real(); |
| } |
| |
| #define net_timestamp_check(COND, SKB) \ |
| if (static_branch_unlikely(&netstamp_needed_key)) { \ |
| if ((COND) && !(SKB)->tstamp) \ |
| (SKB)->tstamp = ktime_get_real(); \ |
| } \ |
| |
| bool is_skb_forwardable(const struct net_device *dev, const struct sk_buff *skb) |
| { |
| return __is_skb_forwardable(dev, skb, true); |
| } |
| EXPORT_SYMBOL_GPL(is_skb_forwardable); |
| |
| static int __dev_forward_skb2(struct net_device *dev, struct sk_buff *skb, |
| bool check_mtu) |
| { |
| int ret = ____dev_forward_skb(dev, skb, check_mtu); |
| |
| if (likely(!ret)) { |
| skb->protocol = eth_type_trans(skb, dev); |
| skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
| } |
| |
| return ret; |
| } |
| |
| int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| { |
| return __dev_forward_skb2(dev, skb, true); |
| } |
| EXPORT_SYMBOL_GPL(__dev_forward_skb); |
| |
| /** |
| * dev_forward_skb - loopback an skb to another netif |
| * |
| * @dev: destination network device |
| * @skb: buffer to forward |
| * |
| * return values: |
| * NET_RX_SUCCESS (no congestion) |
| * NET_RX_DROP (packet was dropped, but freed) |
| * |
| * dev_forward_skb can be used for injecting an skb from the |
| * start_xmit function of one device into the receive queue |
| * of another device. |
| * |
| * The receiving device may be in another namespace, so |
| * we have to clear all information in the skb that could |
| * impact namespace isolation. |
| */ |
| int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
| { |
| return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb); |
| } |
| EXPORT_SYMBOL_GPL(dev_forward_skb); |
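| |
| /* Usage sketch (illustrative, not from this file): a veth-style virtual |
| * driver can hand a packet from its own start_xmit to its peer's RX |
| * path; peer/my_priv are hypothetical names. The skb is consumed on |
| * both success and drop. |
| * |
| *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev) |
| *	{ |
| *		struct net_device *peer = my_priv(dev)->peer; |
| * |
| *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS) |
| *			dev->stats.tx_dropped++; |
| *		return NETDEV_TX_OK; |
| *	} |
| */ |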
| |
| int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb) |
| { |
| return __dev_forward_skb2(dev, skb, false) ?: netif_rx_internal(skb); |
| } |
| |
| static inline int deliver_skb(struct sk_buff *skb, |
| struct packet_type *pt_prev, |
| struct net_device *orig_dev) |
| { |
| if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) |
| return -ENOMEM; |
| refcount_inc(&skb->users); |
| return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); |
| } |
| |
| static inline void deliver_ptype_list_skb(struct sk_buff *skb, |
| struct packet_type **pt, |
| struct net_device *orig_dev, |
| __be16 type, |
| struct list_head *ptype_list) |
| { |
| struct packet_type *ptype, *pt_prev = *pt; |
| |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| if (ptype->type != type) |
| continue; |
| if (pt_prev) |
| deliver_skb(skb, pt_prev, orig_dev); |
| pt_prev = ptype; |
| } |
| *pt = pt_prev; |
| } |
| |
| static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) |
| { |
| if (!ptype->af_packet_priv || !skb->sk) |
| return false; |
| |
| if (ptype->id_match) |
| return ptype->id_match(ptype, skb->sk); |
| else if ((struct sock *)ptype->af_packet_priv == skb->sk) |
| return true; |
| |
| return false; |
| } |
| |
| /** |
| * dev_nit_active - return true if any network interface taps are in use |
| * |
| * @dev: network device to check for the presence of taps |
| */ |
| bool dev_nit_active(struct net_device *dev) |
| { |
| return !list_empty(&ptype_all) || !list_empty(&dev->ptype_all); |
| } |
| EXPORT_SYMBOL_GPL(dev_nit_active); |
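| |
| /* Caller-side pattern (sketch): the transmit path checks for taps first |
| * so the common no-tap case pays only a list_empty() test, roughly: |
| * |
| *	if (dev_nit_active(dev)) |
| *		dev_queue_xmit_nit(skb, dev); |
| */ |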
| |
| /* |
| * Support routine. Sends outgoing frames to any network |
| * taps currently in use. |
| */ |
| |
| void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) |
| { |
| struct packet_type *ptype; |
| struct sk_buff *skb2 = NULL; |
| struct packet_type *pt_prev = NULL; |
| struct list_head *ptype_list = &ptype_all; |
| |
| rcu_read_lock(); |
| again: |
| list_for_each_entry_rcu(ptype, ptype_list, list) { |
| if (ptype->ignore_outgoing) |
| continue; |
| |
| /* Never send packets back to the socket |
| * they originated from - MvS (miquels@drinkel.ow.org) |
| */ |
| if (skb_loop_sk(ptype, skb)) |
| continue; |
| |
| if (pt_prev) { |
| deliver_skb(skb2, pt_prev, skb->dev); |
| pt_prev = ptype; |
| continue; |
| } |
| |
| /* need to clone skb, done only once */ |
| skb2 = skb_clone(skb, GFP_ATOMIC); |
| if (!skb2) |
| goto out_unlock; |
| |
| net_timestamp_set(skb2); |
| |
| /* The network header (skb->nh) should already be |
| * set correctly by the sender, so the bounds check |
| * below is just protection against buggy protocols. |
| */ |
| skb_reset_mac_header(skb2); |
| |
| if (skb_network_header(skb2) < skb2->data || |
| skb_network_header(skb2) > skb_tail_pointer(skb2)) { |
| net_crit_ratelimited("protocol %04x is buggy, dev %s\n", |
| ntohs(skb2->protocol), |
| dev->name); |
| skb_reset_network_header(skb2); |
| } |
| |
| skb2->transport_header = skb2->network_header; |
| skb2->pkt_type = PACKET_OUTGOING; |
| pt_prev = ptype; |
| } |
| |
| if (ptype_list == &ptype_all) { |
| ptype_list = &dev->ptype_all; |
| goto again; |
| } |
| out_unlock: |
| if (pt_prev) { |
| if (!skb_orphan_frags_rx(skb2, GFP_ATOMIC)) |
| pt_prev->func(skb2, skb->dev, pt_prev, skb->dev); |
| else |
| kfree_skb(skb2); |
| } |
| rcu_read_unlock(); |
| } |
| EXPORT_SYMBOL_GPL(dev_queue_xmit_nit); |
| |
| /** |
| * netif_setup_tc - Handle tc mappings on real_num_tx_queues change |
| * @dev: Network device |
| * @txq: number of queues available |
| * |
| * If real_num_tx_queues is changed, the tc mappings may no longer be |
| * valid. To resolve this, verify that each tc mapping remains valid and, |
| * if not, zero the mapping. With no priorities mapping to an |
| * offset/count pair, it will no longer be used. In the worst case, TC0 |
| * is invalid and nothing can be done, so disable priority mappings. It |
| * is expected that drivers will fix this mapping, if they can, before |
| * calling netif_set_real_num_tx_queues. |
| */ |
| static void netif_setup_tc(struct net_device *dev, unsigned int txq) |
| { |
| int i; |
| struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; |
| |
| /* If TC0 is invalidated disable TC mapping */ |
| if (tc->offset + tc->count > txq) { |
| netdev_warn(dev, "Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n"); |
| dev->num_tc = 0; |
| return; |
| } |
| |
| /* Invalidated prio to tc mappings set to TC0 */ |
| for (i = 1; i < TC_BITMASK + 1; i++) { |
| int q = netdev_get_prio_tc_map(dev, i); |
| |
| tc = &dev->tc_to_txq[q]; |
| if (tc->offset + tc->count > txq) { |
| netdev_warn(dev, "Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n", |
| i, q); |
| netdev_set_prio_tc_map(dev, i, 0); |
| } |
| } |
| } |
| |
| int netdev_txq_to_tc(struct net_device *dev, unsigned int txq) |
| { |
| if (dev->num_tc) { |
| struct netdev_tc_txq *tc = &dev->tc_to_txq[0]; |
| int i; |
| |
| /* walk through the TCs and see if it falls into any of them */ |
| for (i = 0; i < TC_MAX_QUEUE; i++, tc++) { |
| if ((txq - tc->offset) < tc->count) |
| return i; |
| } |
| |
| /* didn't find it, just return -1 to indicate no match */ |
| return -1; |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_txq_to_tc); |
| |
| #ifdef CONFIG_XPS |
| static struct static_key xps_needed __read_mostly; |
| static struct static_key xps_rxqs_needed __read_mostly; |
| static DEFINE_MUTEX(xps_map_mutex); |
| #define xmap_dereference(P) \ |
| rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) |
| |
| static bool remove_xps_queue(struct xps_dev_maps *dev_maps, |
| struct xps_dev_maps *old_maps, int tci, u16 index) |
| { |
| struct xps_map *map = NULL; |
| int pos; |
| |
| if (dev_maps) |
| map = xmap_dereference(dev_maps->attr_map[tci]); |
| if (!map) |
| return false; |
| |
| for (pos = map->len; pos--;) { |
| if (map->queues[pos] != index) |
| continue; |
| |
| if (map->len > 1) { |
| map->queues[pos] = map->queues[--map->len]; |
| break; |
| } |
| |
| if (old_maps) |
| RCU_INIT_POINTER(old_maps->attr_map[tci], NULL); |
| RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); |
| kfree_rcu(map, rcu); |
| return false; |
| } |
| |
| return true; |
| } |
| |
| static bool remove_xps_queue_cpu(struct net_device *dev, |
| struct xps_dev_maps *dev_maps, |
| int cpu, u16 offset, u16 count) |
| { |
| int num_tc = dev_maps->num_tc; |
| bool active = false; |
| int tci; |
| |
| for (tci = cpu * num_tc; num_tc--; tci++) { |
| int i, j; |
| |
| for (i = count, j = offset; i--; j++) { |
| if (!remove_xps_queue(dev_maps, NULL, tci, j)) |
| break; |
| } |
| |
| active |= i < 0; |
| } |
| |
| return active; |
| } |
| |
| static void reset_xps_maps(struct net_device *dev, |
| struct xps_dev_maps *dev_maps, |
| enum xps_map_type type) |
| { |
| static_key_slow_dec_cpuslocked(&xps_needed); |
| if (type == XPS_RXQS) |
| static_key_slow_dec_cpuslocked(&xps_rxqs_needed); |
| |
| RCU_INIT_POINTER(dev->xps_maps[type], NULL); |
| |
| kfree_rcu(dev_maps, rcu); |
| } |
| |
| static void clean_xps_maps(struct net_device *dev, enum xps_map_type type, |
| u16 offset, u16 count) |
| { |
| struct xps_dev_maps *dev_maps; |
| bool active = false; |
| int i, j; |
| |
| dev_maps = xmap_dereference(dev->xps_maps[type]); |
| if (!dev_maps) |
| return; |
| |
| for (j = 0; j < dev_maps->nr_ids; j++) |
| active |= remove_xps_queue_cpu(dev, dev_maps, j, offset, count); |
| if (!active) |
| reset_xps_maps(dev, dev_maps, type); |
| |
| if (type == XPS_CPUS) { |
| for (i = offset + (count - 1); count--; i--) |
| netdev_queue_numa_node_write( |
| netdev_get_tx_queue(dev, i), NUMA_NO_NODE); |
| } |
| } |
| |
| static void netif_reset_xps_queues(struct net_device *dev, u16 offset, |
| u16 count) |
| { |
| if (!static_key_false(&xps_needed)) |
| return; |
| |
| cpus_read_lock(); |
| mutex_lock(&xps_map_mutex); |
| |
| if (static_key_false(&xps_rxqs_needed)) |
| clean_xps_maps(dev, XPS_RXQS, offset, count); |
| |
| clean_xps_maps(dev, XPS_CPUS, offset, count); |
| |
| mutex_unlock(&xps_map_mutex); |
| cpus_read_unlock(); |
| } |
| |
| static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index) |
| { |
| netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); |
| } |
| |
| static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, |
| u16 index, bool is_rxqs_map) |
| { |
| struct xps_map *new_map; |
| int alloc_len = XPS_MIN_MAP_ALLOC; |
| int i, pos; |
| |
| for (pos = 0; map && pos < map->len; pos++) { |
| if (map->queues[pos] != index) |
| continue; |
| return map; |
| } |
| |
| /* Need to add tx-queue to this CPU's/rx-queue's existing map */ |
| if (map) { |
| if (pos < map->alloc_len) |
| return map; |
| |
| alloc_len = map->alloc_len * 2; |
| } |
| |
| /* Need to allocate new map to store tx-queue on this CPU's/rx-queue's |
| * map |
| */ |
| if (is_rxqs_map) |
| new_map = kzalloc(XPS_MAP_SIZE(alloc_len), GFP_KERNEL); |
| else |
| new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL, |
| cpu_to_node(attr_index)); |
| if (!new_map) |
| return NULL; |
| |
| for (i = 0; i < pos; i++) |
| new_map->queues[i] = map->queues[i]; |
| new_map->alloc_len = alloc_len; |
| new_map->len = pos; |
| |
| return new_map; |
| } |
| |
| /* Copy xps maps at a given index */ |
| static void xps_copy_dev_maps(struct xps_dev_maps *dev_maps, |
| struct xps_dev_maps *new_dev_maps, int index, |
| int tc, bool skip_tc) |
| { |
| int i, tci = index * dev_maps->num_tc; |
| struct xps_map *map; |
| |
| /* copy maps belonging to foreign traffic classes */ |
| for (i = 0; i < dev_maps->num_tc; i++, tci++) { |
| if (i == tc && skip_tc) |
| continue; |
| |
| /* fill in the new device map from the old device map */ |
| map = xmap_dereference(dev_maps->attr_map[tci]); |
| RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); |
| } |
| } |
| |
| /* Must be called under cpus_read_lock */ |
| int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask, |
| u16 index, enum xps_map_type type) |
| { |
| struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL; |
| const unsigned long *online_mask = NULL; |
| bool active = false, copy = false; |
| int i, j, tci, numa_node_id = -2; |
| int maps_sz, num_tc = 1, tc = 0; |
| struct xps_map *map, *new_map; |
| unsigned int nr_ids; |
| |
| if (dev->num_tc) { |
| /* Do not allow XPS on subordinate device directly */ |
| num_tc = dev->num_tc; |
| if (num_tc < 0) |
| return -EINVAL; |
| |
| /* If queue belongs to subordinate dev use its map */ |
| dev = netdev_get_tx_queue(dev, index)->sb_dev ? : dev; |
| |
| tc = netdev_txq_to_tc(dev, index); |
| if (tc < 0) |
| return -EINVAL; |
| } |
| |
| mutex_lock(&xps_map_mutex); |
| |
| dev_maps = xmap_dereference(dev->xps_maps[type]); |
| if (type == XPS_RXQS) { |
| maps_sz = XPS_RXQ_DEV_MAPS_SIZE(num_tc, dev->num_rx_queues); |
| nr_ids = dev->num_rx_queues; |
| } else { |
| maps_sz = XPS_CPU_DEV_MAPS_SIZE(num_tc); |
| if (num_possible_cpus() > 1) |
| online_mask = cpumask_bits(cpu_online_mask); |
| nr_ids = nr_cpu_ids; |
| } |
| |
| if (maps_sz < L1_CACHE_BYTES) |
| maps_sz = L1_CACHE_BYTES; |
| |
| /* The old dev_maps could be larger or smaller than the one we're |
| * setting up now, as dev->num_tc or nr_ids could have been updated in |
| * between. We could try to be smart, but let's be safe instead and only |
| * copy foreign traffic classes if the two map sizes match. |
| */ |
| if (dev_maps && |
| dev_maps->num_tc == num_tc && dev_maps->nr_ids == nr_ids) |
| copy = true; |
| |
| /* allocate memory for queue storage */ |
| for (j = -1; j = netif_attrmask_next_and(j, online_mask, mask, nr_ids), |
| j < nr_ids;) { |
| if (!new_dev_maps) { |
| new_dev_maps = kzalloc(maps_sz, GFP_KERNEL); |
| if (!new_dev_maps) { |
| mutex_unlock(&xps_map_mutex); |
| return -ENOMEM; |
| } |
| |
| new_dev_maps->nr_ids = nr_ids; |
| new_dev_maps->num_tc = num_tc; |
| } |
| |
| tci = j * num_tc + tc; |
| map = copy ? xmap_dereference(dev_maps->attr_map[tci]) : NULL; |
| |
| map = expand_xps_map(map, j, index, type == XPS_RXQS); |
| if (!map) |
| goto error; |
| |
| RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); |
| } |
| |
| if (!new_dev_maps) |
| goto out_no_new_maps; |
| |
| if (!dev_maps) { |
| /* Increment static keys at most once per type */ |
| static_key_slow_inc_cpuslocked(&xps_needed); |
| if (type == XPS_RXQS) |
| static_key_slow_inc_cpuslocked(&xps_rxqs_needed); |
| } |
| |
| for (j = 0; j < nr_ids; j++) { |
| bool skip_tc = false; |
| |
| tci = j * num_tc + tc; |
| if (netif_attr_test_mask(j, mask, nr_ids) && |
| netif_attr_test_online(j, online_mask, nr_ids)) { |
| /* add tx-queue to CPU/rx-queue maps */ |
| int pos = 0; |
| |
| skip_tc = true; |
| |
| map = xmap_dereference(new_dev_maps->attr_map[tci]); |
| while ((pos < map->len) && (map->queues[pos] != index)) |
| pos++; |
| |
| if (pos == map->len) |
| map->queues[map->len++] = index; |
| #ifdef CONFIG_NUMA |
| if (type == XPS_CPUS) { |
| if (numa_node_id == -2) |
| numa_node_id = cpu_to_node(j); |
| else if (numa_node_id != cpu_to_node(j)) |
| numa_node_id = -1; |
| } |
| #endif |
| } |
| |
| if (copy) |
| xps_copy_dev_maps(dev_maps, new_dev_maps, j, tc, |
| skip_tc); |
| } |
| |
| rcu_assign_pointer(dev->xps_maps[type], new_dev_maps); |
| |
| /* Cleanup old maps */ |
| if (!dev_maps) |
| goto out_no_old_maps; |
| |
| for (j = 0; j < dev_maps->nr_ids; j++) { |
| for (i = num_tc, tci = j * dev_maps->num_tc; i--; tci++) { |
| map = xmap_dereference(dev_maps->attr_map[tci]); |
| if (!map) |
| continue; |
| |
| if (copy) { |
| new_map = xmap_dereference(new_dev_maps->attr_map[tci]); |
| if (map == new_map) |
| continue; |
| } |
| |
| RCU_INIT_POINTER(dev_maps->attr_map[tci], NULL); |
| kfree_rcu(map, rcu); |
| } |
| } |
| |
| old_dev_maps = dev_maps; |
| |
| out_no_old_maps: |
| dev_maps = new_dev_maps; |
| active = true; |
| |
| out_no_new_maps: |
| if (type == XPS_CPUS) |
| /* update Tx queue numa node */ |
| netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index), |
| (numa_node_id >= 0) ? |
| numa_node_id : NUMA_NO_NODE); |
| |
| if (!dev_maps) |
| goto out_no_maps; |
| |
| /* removes tx-queue from unused CPUs/rx-queues */ |
| for (j = 0; j < dev_maps->nr_ids; j++) { |
| tci = j * dev_maps->num_tc; |
| |
| for (i = 0; i < dev_maps->num_tc; i++, tci++) { |
| if (i == tc && |
| netif_attr_test_mask(j, mask, dev_maps->nr_ids) && |
| netif_attr_test_online(j, online_mask, dev_maps->nr_ids)) |
| continue; |
| |
| active |= remove_xps_queue(dev_maps, |
| copy ? old_dev_maps : NULL, |
| tci, index); |
| } |
| } |
| |
| if (old_dev_maps) |
| kfree_rcu(old_dev_maps, rcu); |
| |
| /* free map if not active */ |
| if (!active) |
| reset_xps_maps(dev, dev_maps, type); |
| |
| out_no_maps: |
| mutex_unlock(&xps_map_mutex); |
| |
| return 0; |
| error: |
| /* remove any maps that we added */ |
| for (j = 0; j < nr_ids; j++) { |
| for (i = num_tc, tci = j * num_tc; i--; tci++) { |
| new_map = xmap_dereference(new_dev_maps->attr_map[tci]); |
| map = copy ? |
| xmap_dereference(dev_maps->attr_map[tci]) : |
| NULL; |
| if (new_map && new_map != map) |
| kfree(new_map); |
| } |
| } |
| |
| mutex_unlock(&xps_map_mutex); |
| |
| kfree(new_dev_maps); |
| return -ENOMEM; |
| } |
| EXPORT_SYMBOL_GPL(__netif_set_xps_queue); |
| |
| int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, |
| u16 index) |
| { |
| int ret; |
| |
| cpus_read_lock(); |
| ret = __netif_set_xps_queue(dev, cpumask_bits(mask), index, XPS_CPUS); |
| cpus_read_unlock(); |
| |
| return ret; |
| } |
| EXPORT_SYMBOL(netif_set_xps_queue); |
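| |
| /* Usage sketch (illustrative): a multiqueue driver typically steers each |
| * TX queue to the CPUs that service the matching interrupt vector; |
| * vec[i].affinity_mask is a hypothetical per-vector cpumask: |
| * |
| *	for (i = 0; i < nr_queues; i++) |
| *		netif_set_xps_queue(netdev, &vec[i].affinity_mask, i); |
| */ |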
| |
| #endif |
| |
| static void netdev_unbind_all_sb_channels(struct net_device *dev) |
| { |
| struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; |
| |
| /* Unbind any subordinate channels */ |
| while (txq-- != &dev->_tx[0]) { |
| if (txq->sb_dev) |
| netdev_unbind_sb_channel(dev, txq->sb_dev); |
| } |
| } |
| |
| void netdev_reset_tc(struct net_device *dev) |
| { |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues_gt(dev, 0); |
| #endif |
| netdev_unbind_all_sb_channels(dev); |
| |
| /* Reset TC configuration of device */ |
| dev->num_tc = 0; |
| memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); |
| memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); |
| } |
| EXPORT_SYMBOL(netdev_reset_tc); |
| |
| int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) |
| { |
| if (tc >= dev->num_tc) |
| return -EINVAL; |
| |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues(dev, offset, count); |
| #endif |
| dev->tc_to_txq[tc].count = count; |
| dev->tc_to_txq[tc].offset = offset; |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_set_tc_queue); |
| |
| int netdev_set_num_tc(struct net_device *dev, u8 num_tc) |
| { |
| if (num_tc > TC_MAX_QUEUE) |
| return -EINVAL; |
| |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues_gt(dev, 0); |
| #endif |
| netdev_unbind_all_sb_channels(dev); |
| |
| dev->num_tc = num_tc; |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_set_num_tc); |
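| |
| /* Configuration sketch (illustrative): an mqprio-style carve of 8 TX |
| * queues into two traffic classes of 4 queues each, using the two |
| * helpers above: |
| * |
| *	netdev_set_num_tc(dev, 2); |
| *	netdev_set_tc_queue(dev, 0, 4, 0);	// tc 0: queues 0-3 |
| *	netdev_set_tc_queue(dev, 1, 4, 4);	// tc 1: queues 4-7 |
| */ |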
| |
| void netdev_unbind_sb_channel(struct net_device *dev, |
| struct net_device *sb_dev) |
| { |
| struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; |
| |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues_gt(sb_dev, 0); |
| #endif |
| memset(sb_dev->tc_to_txq, 0, sizeof(sb_dev->tc_to_txq)); |
| memset(sb_dev->prio_tc_map, 0, sizeof(sb_dev->prio_tc_map)); |
| |
| while (txq-- != &dev->_tx[0]) { |
| if (txq->sb_dev == sb_dev) |
| txq->sb_dev = NULL; |
| } |
| } |
| EXPORT_SYMBOL(netdev_unbind_sb_channel); |
| |
| int netdev_bind_sb_channel_queue(struct net_device *dev, |
| struct net_device *sb_dev, |
| u8 tc, u16 count, u16 offset) |
| { |
| /* Make certain the sb_dev and dev are already configured: sb_dev must |
| * already hold a subordinate channel (num_tc < 0, see |
| * netdev_set_sb_channel()) and tc must be valid on dev. |
| */ |
| if (sb_dev->num_tc >= 0 || tc >= dev->num_tc) |
| return -EINVAL; |
| |
| /* We cannot hand out queues we don't have */ |
| if ((offset + count) > dev->real_num_tx_queues) |
| return -EINVAL; |
| |
| /* Record the mapping */ |
| sb_dev->tc_to_txq[tc].count = count; |
| sb_dev->tc_to_txq[tc].offset = offset; |
| |
| /* Provide a way for Tx queue to find the tc_to_txq map or |
| * XPS map for itself. |
| */ |
| while (count--) |
| netdev_get_tx_queue(dev, count + offset)->sb_dev = sb_dev; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_bind_sb_channel_queue); |
| |
| int netdev_set_sb_channel(struct net_device *dev, u16 channel) |
| { |
| /* Do not use a multiqueue device to represent a subordinate channel */ |
| if (netif_is_multiqueue(dev)) |
| return -ENODEV; |
| |
| /* We allow channels 1 - 32767 to be used for subordinate channels. |
| * Channel 0 is meant to be "native" mode and used only to represent |
| * the main root device. We allow writing 0 to reset the device back |
| * to normal mode after being used as a subordinate channel. |
| */ |
| if (channel > S16_MAX) |
| return -EINVAL; |
| |
| dev->num_tc = -channel; |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(netdev_set_sb_channel); |
| |
| /* |
| * Routine to help set real_num_tx_queues. If the number of queues is |
| * reduced, stale skbs on the qdisc that map to the now-removed queues |
| * must be flushed. |
| */ |
| int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) |
| { |
| bool disabling; |
| int rc; |
| |
| disabling = txq < dev->real_num_tx_queues; |
| |
| if (txq < 1 || txq > dev->num_tx_queues) |
| return -EINVAL; |
| |
| if (dev->reg_state == NETREG_REGISTERED || |
| dev->reg_state == NETREG_UNREGISTERING) { |
| ASSERT_RTNL(); |
| |
| rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues, |
| txq); |
| if (rc) |
| return rc; |
| |
| if (dev->num_tc) |
| netif_setup_tc(dev, txq); |
| |
| dev_qdisc_change_real_num_tx(dev, txq); |
| |
| dev->real_num_tx_queues = txq; |
| |
| if (disabling) { |
| synchronize_net(); |
| qdisc_reset_all_tx_gt(dev, txq); |
| #ifdef CONFIG_XPS |
| netif_reset_xps_queues_gt(dev, txq); |
| #endif |
| } |
| } else { |
| dev->real_num_tx_queues = txq; |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_tx_queues); |
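| |
| /* Usage sketch (illustrative): a driver resizing its channel count, |
| * e.g. from an ethtool set_channels handler that already holds RTNL: |
| * |
| *	ASSERT_RTNL(); |
| *	err = netif_set_real_num_tx_queues(dev, new_txq); |
| *	if (err) |
| *		return err; |
| */ |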
| |
| #ifdef CONFIG_SYSFS |
| /** |
| * netif_set_real_num_rx_queues - set actual number of RX queues used |
| * @dev: Network device |
| * @rxq: Actual number of RX queues |
| * |
| * This must be called either with the rtnl_lock held or before |
| * registration of the net device. Returns 0 on success, or a |
| * negative error code. If called before registration, it always |
| * succeeds. |
| */ |
| int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq) |
| { |
| int rc; |
| |
| if (rxq < 1 || rxq > dev->num_rx_queues) |
| return -EINVAL; |
| |
| if (dev->reg_state == NETREG_REGISTERED) { |
| ASSERT_RTNL(); |
| |
| rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues, |
| rxq); |
| if (rc) |
| return rc; |
| } |
| |
| dev->real_num_rx_queues = rxq; |
| return 0; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_rx_queues); |
| #endif |
| |
| /** |
| * netif_set_real_num_queues - set actual number of RX and TX queues used |
| * @dev: Network device |
| * @txq: Actual number of TX queues |
| * @rxq: Actual number of RX queues |
| * |
| * Set the real number of both TX and RX queues. |
| * Does nothing if the number of queues is already correct. |
| */ |
| int netif_set_real_num_queues(struct net_device *dev, |
| unsigned int txq, unsigned int rxq) |
| { |
| unsigned int old_rxq = dev->real_num_rx_queues; |
| int err; |
| |
| if (txq < 1 || txq > dev->num_tx_queues || |
| rxq < 1 || rxq > dev->num_rx_queues) |
| return -EINVAL; |
| |
| /* Start from increases, so the error path only does decreases - |
| * decreases can't fail. |
| */ |
| if (rxq > dev->real_num_rx_queues) { |
| err = netif_set_real_num_rx_queues(dev, rxq); |
| if (err) |
| return err; |
| } |
| if (txq > dev->real_num_tx_queues) { |
| err = netif_set_real_num_tx_queues(dev, txq); |
| if (err) |
| goto undo_rx; |
| } |
| if (rxq < dev->real_num_rx_queues) |
| WARN_ON(netif_set_real_num_rx_queues(dev, rxq)); |
| if (txq < dev->real_num_tx_queues) |
| WARN_ON(netif_set_real_num_tx_queues(dev, txq)); |
| |
| return 0; |
| undo_rx: |
| WARN_ON(netif_set_real_num_rx_queues(dev, old_rxq)); |
| return err; |
| } |
| EXPORT_SYMBOL(netif_set_real_num_queues); |
| |
| /** |
| * netif_set_tso_max_size() - set the max size of TSO frames supported |
| * @dev: netdev to update |
| * @size: max skb->len of a TSO frame |
| * |
| * Set the limit on the size of TSO super-frames the device can handle. |
| * Unless explicitly set the stack will assume the value of |
| * %GSO_LEGACY_MAX_SIZE. |
| */ |
| void netif_set_tso_max_size(struct net_device *dev, unsigned int size) |
| { |
| dev->tso_max_size = min(GSO_MAX_SIZE, size); |
| if (size < READ_ONCE(dev->gso_max_size)) |
| netif_set_gso_max_size(dev, size); |
| } |
| EXPORT_SYMBOL(netif_set_tso_max_size); |
| |
| /** |
| * netif_set_tso_max_segs() - set the max number of segs supported for TSO |
| * @dev: netdev to update |
| * @segs: max number of TCP segments |
| * |
| * Set the limit on the number of TCP segments the device can generate from |
| * a single TSO super-frame. |
| * Unless explicitly set the stack will assume the value of %GSO_MAX_SEGS. |
| */ |
| void netif_set_tso_max_segs(struct net_device *dev, unsigned int segs) |
| { |
| dev->tso_max_segs = segs; |
| if (segs < READ_ONCE(dev->gso_max_segs)) |
| netif_set_gso_max_segs(dev, segs); |
| } |
| EXPORT_SYMBOL(netif_set_tso_max_segs); |
| |
| /** |
| * netif_inherit_tso_max() - copy all TSO limits from a lower device to an upper |
| * @to: netdev to update |
| * @from: netdev from which to copy the limits |
| */ |
| void netif_inherit_tso_max(struct net_device *to, const struct net_device *from) |
| { |
| netif_set_tso_max_size(to, from->tso_max_size); |
| netif_set_tso_max_segs(to, from->tso_max_segs); |
| } |
| EXPORT_SYMBOL(netif_inherit_tso_max); |
| |
| /** |
| * netif_get_num_default_rss_queues - default number of RSS queues |
| * |
| * The default is the number of physical cores if there are only 1 or 2, |
| * or half that number (rounded up) if there are more. |
| */ |
| int netif_get_num_default_rss_queues(void) |
| { |
| cpumask_var_t cpus; |
| int cpu, count = 0; |
| |
| if (unlikely(is_kdump_kernel() || !zalloc_cpumask_var(&cpus, GFP_KERNEL))) |
| return 1; |
| |
| cpumask_copy(cpus, cpu_online_mask); |
| for_each_cpu(cpu, cpus) { |
| ++count; |
| cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu)); |
| } |
| free_cpumask_var(cpus); |
| |
| return count > 2 ? DIV_ROUND_UP(count, 2) : count; |
| } |
| EXPORT_SYMBOL(netif_get_num_default_rss_queues); |
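| |
| /* Usage sketch (illustrative): drivers commonly clamp their advertised |
| * queue count with this helper; hw_max_queues is hypothetical: |
| * |
| *	nr = min_t(unsigned int, hw_max_queues, |
| *		   netif_get_num_default_rss_queues()); |
| */ |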
| |
| static void __netif_reschedule(struct Qdisc *q) |
| { |
| struct softnet_data *sd; |
| unsigned long flags; |
| |
| local_irq_save(flags); |
| sd = this_cpu_ptr(&softnet_data); |
| q->next_sched = NULL; |
| *sd->output_queue_tailp = q; |
| sd->output_queue_tailp = &q->next_sched; |
| raise_softirq_irqoff(NET_TX_SOFTIRQ); |
| local_irq_restore(flags); |
| } |
| |
| void __netif_schedule(struct Qdisc *q) |
| { |
| if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) |
| __netif_reschedule(q); |
| } |
| EXPORT_SYMBOL(__netif_schedule); |
| |
| struct dev_kfree_skb_cb { |
| enum skb_free_reason reason; |
| }; |
| |
| static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb) |
| { |
| return (struct dev_kfree_skb_cb *)skb->cb; |
| } |
| |
| void netif_schedule_queue(struct netdev_queue *txq) |
| { |
| rcu_read_lock(); |
| if (!netif_xmit_stopped(txq)) { |
| struct Qdisc *q = rcu_dereference(txq->qdisc); |
| |
| __netif_schedule(q); |
| } |
| rcu_read_unlock(); |
| } |
| EXPORT_SYMBOL(netif_schedule_queue); |
| |
| void netif_tx_wake_queue(struct netdev_queue *dev_queue) |
| { |
| if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) { |
| struct Qdisc *q; |
| |
| rcu_read_lock(); |
| q = rcu_dereference(dev_queue->qdisc); |
| __netif_schedule(q); |
| rcu_read_unlock(); |
| } |
| } |
| EXPORT_SYMBOL(netif_tx_wake_queue); |
| |
| void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason) |
| { |
| unsigned long flags; |
| |
| if (unlikely(!skb)) |
| return; |
| |
| if (likely(refcount_read(&skb->users) == 1)) { |
| smp_rmb(); |
| refcount_set(&skb->users, 0); |
| } else if (likely(!refcount_dec_and_test(&skb->users))) { |
| return; |
| } |
| get_kfree_skb_cb(skb)->reason = reason; |
| local_irq_save(flags); |
| skb->next = __this_cpu_read(softnet_data.completion_queue); |
| __this_cpu_write(softnet_data.completion_queue, skb); |
| raise_softirq_irqoff(NET_TX_SOFTIRQ); |
| local_irq_restore(flags); |
| } |
| EXPORT_SYMBOL(__dev_kfree_skb_irq); |
| |
| void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) |
| { |
| if (in_hardirq() || irqs_disabled()) |
| __dev_kfree_skb_irq(skb, reason); |
| else |
| dev_kfree_skb(skb); |
| } |
| EXPORT_SYMBOL(__dev_kfree_skb_any); |
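| |
| /* Usage sketch (illustrative): TX-completion handlers that may run in |
| * hardirq, softirq or process context free skbs through the _any |
| * wrappers and let them pick the safe path: |
| * |
| *	dev_consume_skb_any(skb);	// successfully transmitted |
| *	dev_kfree_skb_any(skb);		// dropped |
| */ |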
| |
| /** |
| * netif_device_detach - mark device as removed |
| * @dev: network device |
| * |
| * Mark device as removed from system and therefore no longer available. |
| */ |
| void netif_device_detach(struct net_device *dev) |
| { |
| if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_stop_all_queues(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_detach); |
| |
| /** |
| * netif_device_attach - mark device as attached |
| * @dev: network device |
| * |
| * Mark device as attached to the system and restart its transmit queues if needed. |
| */ |
| void netif_device_attach(struct net_device *dev) |
| { |
| if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
| netif_running(dev)) { |
| netif_tx_wake_all_queues(dev); |
| __netdev_watchdog_up(dev); |
| } |
| } |
| EXPORT_SYMBOL(netif_device_attach); |
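| |
| /* Usage sketch (illustrative): the classic power-management pairing in |
| * a PCI network driver; my_suspend/my_resume are hypothetical callbacks: |
| * |
| *	static int my_suspend(struct device *d) |
| *	{ |
| *		netif_device_detach(netdev); |
| *		... quiesce hardware ... |
| *		return 0; |
| *	} |
| * |
| *	static int my_resume(struct device *d) |
| *	{ |
| *		... re-initialize hardware ... |
| *		netif_device_attach(netdev); |
| *		return 0; |
| *	} |
| */ |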
| |
| /* |
| * Returns a Tx hash for the given packet descriptor, using the relevant |
| * Tx queue count as the distribution range. |
| */ |
| static u16 skb_tx_hash(const struct net_device *dev, |
| const struct net_device *sb_dev, |
| struct sk_buff *skb) |
| { |
| u32 hash; |
| u16 qoffset = 0; |
| u16 qcount = dev->real_num_tx_queues; |
| |
| if (dev->num_tc) { |
| u8 tc = netdev_get_prio_tc_map(dev, skb->priority); |
| |
| qoffset = sb_dev->tc_to_txq[tc].offset; |
| qcount = sb_dev->tc_to_txq[tc].count; |
| if (unlikely(!qcount)) { |
| net_warn_ratelimited("%s: invalid qcount, qoffset %u for tc %u\n", |
| sb_dev->name, qoffset, tc); |
| qoffset = 0; |
| qcount = dev->real_num_tx_queues; |
| } |
| } |
| |
| if (skb_rx_queue_recorded(skb)) { |
| hash = skb_get_rx_queue(skb); |
| if (hash >= qoffset) |
| hash -= qoffset; |
| while (unlikely(hash >= qcount)) |
| hash -= qcount; |
| return hash + qoffset; |
| } |
| |
| return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset; |
| } |
| |
| static void skb_warn_bad_offload(const struct sk_buff *skb) |
| { |
| static const netdev_features_t null_features; |
| struct net_device *dev = skb->dev; |
| const char *name = ""; |
| |
| if (!net_ratelimit()) |
| return; |
| |
| if (dev) { |
| if (dev->dev.parent) |
| name = dev_driver_string(dev->dev.parent); |
| else |
| name = netdev_name(dev); |
| } |
| skb_dump(KERN_WARNING, skb, false); |
| WARN(1, "%s: caps=(%pNF, %pNF)\n", |
| name, dev ? &dev->features : &null_features, |
| skb->sk ? &skb->sk->sk_route_caps : &null_features); |
| } |
| |
| /* |
| * Invalidate hardware checksum when packet is to be mangled, and |
| * complete checksum manually on outgoing path. |
| */ |
| int skb_checksum_help(struct sk_buff *skb) |
| { |
| __wsum csum; |
| int ret = 0, offset; |
| |
| if (skb->ip_summed == CHECKSUM_COMPLETE) |
| goto out_set_summed; |
| |
| if (unlikely(skb_is_gso(skb))) { |
| skb_warn_bad_offload(skb); |
| return -EINVAL; |
| } |
| |
| /* Before computing a checksum, we should make sure no frag could |
| * be modified by an external entity : checksum could be wrong. |
| */ |
| if (skb_has_shared_frag(skb)) { |
| ret = __skb_linearize(skb); |
| if (ret) |
| goto out; |
| } |
| |
| offset = skb_checksum_start_offset(skb); |
| ret = -EINVAL; |
| if (WARN_ON_ONCE(offset >= skb_headlen(skb))) { |
| DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); |
| goto out; |
| } |
| csum = skb_checksum(skb, offset, skb->len - offset, 0); |
| |
| offset += skb->csum_offset; |
| if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) { |
| DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false); |
| goto out; |
| } |
| ret = skb_ensure_writable(skb, offset + sizeof(__sum16)); |
| if (ret) |
| goto out; |
| |
| *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; |
| out_set_summed: |
| skb->ip_summed = CHECKSUM_NONE; |
| out: |
| return ret; |
| } |
| EXPORT_SYMBOL(skb_checksum_help); |
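| |
| /* Usage sketch (illustrative): a driver whose hardware cannot checksum |
| * a given packet resolves CHECKSUM_PARTIAL in software before handing |
| * the buffer to DMA; my_hw_can_csum() is a hypothetical capability test: |
| * |
| *	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) { |
| *		if (skb_checksum_help(skb)) |
| *			goto drop; |
| *	} |
| */ |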
| |
| int skb_crc32c_csum_help |