/** * struct udp_tunnel_nic - UDP tunnel port offload state * @work: async work for talking to hardware from process context * @dev: netdev pointer * @lock: protects all fields * @need_sync: at least one port start changed * @need_replay: space was freed, we need a replay of all ports * @work_pending: @work is currently scheduled * @n_tables: number of tables under @entries * @missed: bitmap of tables which overflown * @entries: table of tables of ports currently offloaded
*/ struct udp_tunnel_nic { struct work_struct work;
/* We ensure all work structs are done using driver state, but not the code.
 * We need a workqueue we can flush before module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;
staticconstchar *udp_tunnel_nic_tunnel_type_name(unsignedint type)
{ switch (type) { case UDP_TUNNEL_TYPE_VXLAN: return"vxlan"; case UDP_TUNNEL_TYPE_GENEVE: return"geneve"; case UDP_TUNNEL_TYPE_VXLAN_GPE: return"vxlan-gpe"; default: return"unknown";
}
}
/* NOTE(review): fragment — the enclosing function header is missing from
 * this chunk (i, j, utn, info come from the lost header/prologue) and tokens
 * like "returnfalse" are run together, so this will not compile as-is.
 * The visible logic scans every entry of every table and reports "empty"
 * only if no entry is in use.
 */
for (i = 0; i < utn->n_tables; i++) for (j = 0; j < info->tables[i].n_entries; j++) if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j])) returnfalse; returntrue;
}
/* NOTE(review): fragment — tail of a per-entry device sync helper whose
 * header is missing from this chunk. On failure it logs the port (converted
 * from network byte order), the tunnel type name and the error code.
 */
if (err)
netdev_warn(dev, "UDP tunnel port sync failed port %d type %s: %d\n",
be16_to_cpu(entry->port),
udp_tunnel_nic_tunnel_type_name(entry->type),
err);
}
/* NOTE(review): fragment — body of the per-port sync path (header missing
 * from this chunk). Walks every entry of every table and syncs each one
 * individually to the device.
 */
for (i = 0; i < utn->n_tables; i++) for (j = 0; j < info->tables[i].n_entries; j++)
udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}
/* Sync offload state to the device one whole table at a time, via the
 * driver's ->sync_table() callback.
 *
 * NOTE(review): tokens are run together ("staticvoid", "conststruct",
 * "unsignedint") and the function's closing braces are missing from this
 * chunk — the next line belongs to a different function. For each table it
 * skips tables with nothing queued, otherwise asks the driver to sync the
 * table and warns on error.
 */
staticvoid
udp_tunnel_nic_device_sync_by_table(struct net_device *dev, struct udp_tunnel_nic *utn)
{ conststruct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info; unsignedint i, j; int err;
for (i = 0; i < utn->n_tables; i++) { /* Find something that needs sync in this table */ for (j = 0; j < info->tables[i].n_entries; j++) if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j])) break; if (j == info->tables[i].n_entries) continue;
err = info->sync_table(dev, i); if (err)
netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
i, err);
/* NOTE(review): fragment — dispatch portion of the device-sync entry point
 * (header missing from this chunk). Picks table-granularity sync when the
 * driver provides ->sync_table, per-port sync otherwise, then clears
 * need_sync and records whether a replay is required (replay is deferred to
 * avoid deadlocking inside a tunnel driver's notification path, per the
 * original comment).
 */
if (dev->udp_tunnel_nic_info->sync_table)
udp_tunnel_nic_device_sync_by_table(dev, utn); else
udp_tunnel_nic_device_sync_by_port(dev, utn);
utn->need_sync = 0; /* Can't replay directly here, in case we come from the tunnel driver's * notification - trying to replay may deadlock inside tunnel driver.
*/
utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}
/* NOTE(review): fragment — use-count adjustment logic for a table entry;
 * the enclosing function header (defining entry, use_cnt_adj, dodgy, from,
 * to) is missing from this chunk. Visible behavior: bump the use count; if
 * the entry did not cross the used/unused boundary (and is not dodgy),
 * nothing more to do. Otherwise cancel a not-yet-sent opposite-direction op
 * by clearing its flag, so commands need not be reordered.
 */
/* If not going from used to unused or vice versa - all done. * For dodgy entries make sure we try to sync again (queue the entry).
*/
entry->use_cnt += use_cnt_adj; if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj)) return;
/* Cancel the op before it was sent to the device, if possible, * otherwise we'd need to take special care to issue commands * in the same order the ports arrived.
*/ if (use_cnt_adj < 0) {
from = UDP_TUNNEL_NIC_ENTRY_ADD;
to = UDP_TUNNEL_NIC_ENTRY_DEL;
} else {
from = UDP_TUNNEL_NIC_ENTRY_DEL;
to = UDP_TUNNEL_NIC_ENTRY_ADD;
}
if (entry->flags & from) {
entry->flags &= ~from; if (!dodgy) return;
}
/* NOTE(review): header is visible but mangled ("staticbool", "conststruct",
 * "returntrue") and the function's trailing return/closing brace are missing
 * from this chunk. Scans each table that is capable of holding this tunnel
 * type and tries to adjust the use count of a matching existing entry.
 */
/* Try to find existing matching entry and adjust its use count, instead of * adding a new one. Returns true if entry was found. In case of delete the * entry may have gotten removed in the process, in which case it will be * queued for removal.
*/ staticbool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn, struct udp_tunnel_info *ti, int use_cnt_adj)
{ conststruct udp_tunnel_nic_table_info *table; unsignedint i, j;
for (i = 0; i < utn->n_tables; i++) {
table = &dev->udp_tunnel_nic_info->tables[i]; if (!udp_tunnel_nic_table_is_capable(table, ti)) continue;
for (j = 0; j < table->n_entries; j++) if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
use_cnt_adj)) returntrue;
}
/* NOTE(review): fragment — appears to be the table-overflow path of an
 * "add new entry" helper (header missing from this chunk): mark table i in
 * the missed bitmap so the port can be re-added after space frees up.
 */
/* The different table may still fit this port in, but there * are no devices currently which have multiple tables accepting * the same tunnel type, and false positives are okay.
*/
__set_bit(i, &utn->missed);
}
/* NOTE(review): fragment — body of the add-port entry point (header missing
 * from this chunk). Guard clauses: no offload state, device closed with an
 * OPEN_ONLY driver, or the IANA VXLAN port (4789) on a STATIC_IANA_VXLAN
 * device (warn if a non-vxlan tunnel tries to claim it). Then check device
 * capability, skip on port collision (utn->missed will re-add it later),
 * and finally bump an existing entry or create a new one.
 */
utn = dev->udp_tunnel_nic; if (!utn) return; if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY) return; if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
ti->port == htons(IANA_VXLAN_UDP_PORT)) { if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n"); return;
}
if (!udp_tunnel_nic_is_capable(dev, utn, ti)) return;
/* It may happen that a tunnel of one type is removed and different * tunnel type tries to reuse its port before the device was informed. * Rely on utn->missed to re-add this port later.
*/ if (udp_tunnel_nic_has_collision(dev, utn, ti)) return;
if (!udp_tunnel_nic_add_existing(dev, utn, ti))
udp_tunnel_nic_add_new(dev, utn, ti);
/* NOTE(review): fragment — part of the replay path (header missing from
 * this chunk). Freezes every in-use entry so the replay does not double
 * their refcounts, then clears the missed bitmap and the need_replay flag.
 */
/* Freeze all the ports we are already tracking so that the replay * does not double up the refcount.
*/ for (i = 0; i < utn->n_tables; i++) for (j = 0; j < info->tables[i].n_entries; j++)
udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
utn->missed = 0;
utn->need_replay = 0;
/* NOTE(review): fragment — tail of the state-allocation function (header
 * and the allocation of utn itself are missing from this chunk). Allocates
 * a zeroed entry array per table; on failure unwinds the already-allocated
 * arrays via the goto label and frees utn. kcalloc also guards the
 * count * size multiplication against overflow.
 */
for (i = 0; i < n_tables; i++) {
utn->entries[i] = kcalloc(info->tables[i].n_entries, sizeof(*utn->entries[i]), GFP_KERNEL); if (!utn->entries[i]) goto err_free_prev_entries;
}
return utn;
err_free_prev_entries: while (i--)
kfree(utn->entries[i]);
kfree(utn); return NULL;
}
/* NOTE(review): fragment — body of the registration path (header missing
 * from this chunk). Compile-time checks that the missed bitmap covers
 * MAX_TABLES and the use counter covers 2 ports per sharing device; then
 * sanity-checks the driver info: set_port/unset_port must come as a pair,
 * exactly one of set_port/sync_table must be provided, table 0 must be
 * non-empty, tables must be contiguous, and shared tables are incompatible
 * with OPEN_ONLY. Finally allocates the shared node if the table is shared.
 */
BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
UDP_TUNNEL_NIC_MAX_TABLES); /* Expect use count of at most 2 (IPv4, IPv6) per device */
BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);
/* Check that the driver info is sane */ if (WARN_ON(!info->set_port != !info->unset_port) ||
WARN_ON(!info->set_port == !info->sync_table) ||
WARN_ON(!info->tables[0].n_entries)) return -EINVAL;
if (WARN_ON(info->shared &&
info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) return -EINVAL;
n_tables = 1; for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) { if (!info->tables[i].n_entries) continue;
n_tables++; if (WARN_ON(!info->tables[i - 1].n_entries)) return -EINVAL;
}
/* Create UDP tunnel state structures */ if (info->shared) {
node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM;
/* NOTE(review): fragment — body of the unregister path (header missing from
 * this chunk). For a shared table: find this dev's node in the sharing
 * list (bail out if not found), remove and free it; if other devices
 * remain, hand the state over to the first of them and detach. Otherwise
 * flush all offloaded ports, and if work is still pending keep the netdev
 * reference so netdev core retries the unregister later.
 */
/* For a shared table remove this dev from the list of sharing devices * and if there are other devices just detach.
*/ if (info->shared) { struct udp_tunnel_nic_shared_node *node, *first;
list_for_each_entry(node, &info->shared->devices, list) if (node->dev == dev) break; if (list_entry_is_head(node, &info->shared->devices, list)) {
udp_tunnel_nic_unlock(dev); return;
}
list_del(&node->list);
kfree(node);
first = list_first_entry_or_null(&info->shared->devices,
typeof(*first), list); if (first) {
udp_tunnel_drop_rx_info(dev);
utn->dev = first->dev;
udp_tunnel_nic_unlock(dev); goto release_dev;
}
info->shared->udp_tunnel_nic_info = NULL;
}
/* Flush before we check work, so we don't waste time adding entries * from the work which we will boot immediately.
*/
udp_tunnel_nic_flush(dev, utn);
udp_tunnel_nic_unlock(dev);
/* Wait for the work to be done using the state, netdev core will * retry unregister until we give up our reference on this device.
*/ if (utn->work_pending) return;
/* NOTE(review): fragment — body of the netdevice notifier callback (header
 * missing from this chunk). Ignores devices without tunnel offload info;
 * on NETDEV_REGISTER sets up the offload state (warn + propagate errno on
 * failure); on NETDEV_UNREGISTER tears it down; all other events are only
 * relevant for OPEN_ONLY devices.
 */
info = dev->udp_tunnel_nic_info; if (!info) return NOTIFY_DONE;
if (event == NETDEV_REGISTER) { int err;
err = udp_tunnel_nic_register(dev); if (err)
netdev_warn(dev, "failed to register for UDP tunnel offloads: %d", err); return notifier_from_errno(err);
} /* All other events will need the udp_tunnel_nic state */
utn = dev->udp_tunnel_nic; if (!utn) return NOTIFY_DONE;
if (event == NETDEV_UNREGISTER) {
udp_tunnel_nic_unregister(dev, utn); return NOTIFY_OK;
}
/* All other events only matter if NIC has to be programmed open */ if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)) return NOTIFY_DONE;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.