// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */
/* NOTE(review): orphan fragment — the enclosing function's header was
 * lost when this file was produced.  It references 'features' and a
 * label 'out', neither of which is declared in the visible span, so it
 * cannot stand alone.  Presumably this is the VLAN-push step of a send
 * path (push the VLAN tag into the payload when the device cannot
 * offload it, drop on allocation failure) — TODO confirm against the
 * upstream source.  Code left byte-identical.
 */
if (skb_vlan_tag_present(skb) &&
!vlan_hw_offload_capable(features, skb->vlan_proto)) {
skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { /* This is actually a packet drop, but we * don't want the code that calls this * function to try and operate on a NULL skb.
*/ goto out;
}
}
/*
 * Return 1 if the current CPU holds the xmit lock of any of @dev's Tx
 * queues (i.e. the local CPU is already inside this device's transmit
 * path), 0 otherwise.  Some drivers take the same locks in poll and
 * xmit, so polling must be skipped in that case.
 */
static int netif_local_xmit_active(struct net_device *dev)
{
	/* num_tx_queues is unsigned; keep the index unsigned to avoid a
	 * signed/unsigned comparison.
	 */
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* xmit_lock_owner is the id of the CPU holding the queue's
		 * xmit lock; READ_ONCE pairs with the lock helpers' stores.
		 */
		if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
			return 1;
	}

	return 0;
}
/*
 * Run one zero-budget poll cycle on @napi to flush its Tx path.
 * NAPI_STATE_NPSVC serializes this against napi_disable(): if the bit
 * is already set, napi is being disabled and we must back off.
 */
static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	/* With a budget of 0 the driver must report no Rx work done. */
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}
/* NOTE(review): this span appears to be two functions fused together by
 * the extraction that produced this file: the signature says poll_napi(),
 * but the body matches netpoll_poll_dev() — it uses 'ni' and 'ops',
 * neither of which is declared here, and the closing brace plus the
 * trailing poll/lock-release calls are missing.  The declared locals
 * 'napi' and 'cpu' are unused in the visible body.  Left byte-identical;
 * must be re-split against the upstream source before it can compile.
 */
staticvoid poll_napi(struct net_device *dev)
{ struct napi_struct *napi; int cpu = smp_processor_id();
/* Don't do any rx activity if the dev_lock mutex is held * the dev_open/close paths use this to block netpoll activity * while changing device state
*/ if (!ni || down_trylock(&ni->dev_lock)) return;
/* Some drivers will take the same locks in poll and xmit, * we can't poll if local CPU is already in xmit.
*/ if (!netif_running(dev) || netif_local_xmit_active(dev)) {
up(&ni->dev_lock); return;
}
/* Drivers with a poll controller get it invoked directly. */
ops = dev->netdev_ops; if (ops->ndo_poll_controller)
ops->ndo_poll_controller(dev);
/* call with IRQ disabled */
/*
 * Transmit @skb on @np's device, busy-polling the Tx queue for up to
 * one clock tick.  It is up to the caller to keep npinfo alive and to
 * have IRQs disabled.  Returns NETDEV_TX_OK once the skb has been
 * handed to the driver or queued on npinfo->txq for deferred transmit;
 * NET_XMIT_DROP if the packet had to be dropped.
 */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	netdev_tx_t ret = NET_XMIT_DROP;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	rcu_read_lock();
	npinfo = rcu_dereference_bh(dev->npinfo);

	/* NOTE(review): the 'out:' label below had no matching goto and
	 * 'ret = NET_XMIT_DROP' was dead — the guard rejecting a torn-down
	 * or absent device was evidently lost; restored here so npinfo is
	 * never dereferenced when NULL.
	 */
	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		goto out;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;
			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);
	}

	/* Device stayed busy: queue the skb for the tx_work retry path. */
	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	ret = NETDEV_TX_OK;
out:
	rcu_read_unlock();
	return ret;
}
/*
 * Compute and store the UDP checksum of @skb, whose UDP payload is
 * @len bytes, using the local/remote addresses stored in @np.
 * np->ipv6 selects between the IPv4 and IPv6 pseudo-header sums.
 */
static void netpoll_udp_checksum(struct netpoll *np, struct sk_buff *skb, int len)
{
	struct udphdr *udph;
	int udp_len;

	udp_len = len + sizeof(struct udphdr);
	udph = udp_hdr(skb);

	/* check needs to be set, since it will be consumed in csum_partial */
	udph->check = 0;
	if (np->ipv6)
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
	else
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));

	/* A computed checksum of 0 must be transmitted as all-ones,
	 * since 0 on the wire means "no checksum" for UDP (RFC 768).
	 */
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;
}
/* NOTE(review): from here to EXPORT_SYMBOL_GPL(__netpoll_free) several
 * functions have been fused by the extraction that produced this file:
 * egress_dev()'s body is truncated after its first return; then come
 * fragments that use 'err', 'ndev' and labels 'put'/'flush', none of
 * which are declared or defined in the visible span (apparently a setup
 * path); then a tx_work/txq teardown sequence; then an npinfo refcount
 * cleanup; then what looks like __netpoll_free()'s tail.  No definition
 * here is complete.  Left byte-identical; must be re-split against the
 * upstream source before it can compile.
 */
/* * Returns a pointer to a string representation of the identifier used * to select the egress interface for the given netpoll instance. buf * must be a buffer of length at least MAC_ADDR_STR_LEN + 1.
*/ staticchar *egress_dev(struct netpoll *np, char *buf)
{ if (np->dev_name[0]) return np->dev_name;
/* NOTE(review): from here on the body no longer belongs to egress_dev. */
if (!np->local_ip.ip) { if (!np->ipv6) {
err = netpoll_take_ipv4(np, ndev); if (err) goto put;
} else {
err = netpoll_take_ipv6(np, ndev); if (err) goto put;
}
ip_overwritten = true;
}
err = __netpoll_setup(np, ndev); if (err) goto flush;
rtnl_unlock();
/* Make sure all NAPI polls which started before dev->npinfo * was visible have exited before we start calling NAPI poll. * NAPI skips locking if dev->npinfo is NULL.
*/
synchronize_rcu();
/* we can't call cancel_delayed_work_sync here, as we are in softirq */
cancel_delayed_work(&npinfo->tx_work);
/* clean after last, unfinished work */
__skb_queue_purge(&npinfo->txq); /* now cancel it again */
cancel_delayed_work(&npinfo->tx_work);
kfree(npinfo);
}
npinfo = rtnl_dereference(np->dev->npinfo); if (!npinfo) return;
/* At this point, there is a single npinfo instance per netdevice, and * its refcnt tracks how many netpoll structures are linked to it. We * only perform npinfo cleanup when the refcnt decrements to zero.
*/ if (refcount_dec_and_test(&npinfo->refcnt)) { conststruct net_device_ops *ops;
ops = np->dev->netdev_ops; if (ops->ndo_netpoll_cleanup)
ops->ndo_netpoll_cleanup(np->dev);
/* Wait for transmitting packets to finish before freeing. */
synchronize_rcu();
__netpoll_cleanup(np);
kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.