/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, 0444);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment; fixed for the module's lifetime (0444) */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, 0444);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;
val |= qid & DB_EQ_RING_ID_MASK;
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (be_check_error(adapter, BE_ERROR_HW)) return;
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT; if (clear_int)
val |= 1 << DB_EQ_CLR_SHIFT;
val |= 1 << DB_EQ_EVNT_SHIFT;
val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
val |= qid & DB_CQ_RING_ID_MASK;
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
if (be_check_error(adapter, BE_ERROR_HW)) return;
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
staticint be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{ int i;
/* Check if mac has already been added as part of uc-list */ for (i = 0; i < adapter->uc_macs; i++) { if (ether_addr_equal(adapter->uc_list[i].mac, mac)) { /* mac already added, skip addition */
adapter->pmac_id[0] = adapter->pmac_id[i + 1]; return 0;
}
}
/* Delete the MAC filter programmed at @pmac_id, unless that same filter id
 * is still referenced by an entry in the adapter's cached uc-list (in which
 * case deleting it would break unicast filtering for that address).
 */
static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		/* pmac_id[0] is the primary MAC; uc-list entries start at 1 */
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}

	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL;
/* Proceed further only if, User provided MAC is different * from active MAC
*/ if (ether_addr_equal(addr->sa_data, adapter->dev_mac)) return 0;
/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC * address
*/ if (BEx_chip(adapter) && be_virtfn(adapter) &&
!check_privilege(adapter, BE_PRIV_FILTMGMT)) return -EPERM;
/* if device is not running, copy MAC to netdev->dev_addr */ if (!netif_running(netdev)) goto done;
/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT * privilege or if PF did not provision the new MAC address. * On BE3, this cmd will always fail if the VF doesn't have the * FILTMGMT privilege. This failure is OK, only if the PF programmed * the MAC for the VF.
*/
mutex_lock(&adapter->rx_filter_lock);
status = be_dev_mac_add(adapter, (u8 *)addr->sa_data); if (!status) {
/* Delete the old programmed MAC. This call may fail if the * old MAC was already deleted by the PF driver.
*/ if (adapter->pmac_id[0] != old_pmac_id)
be_dev_mac_del(adapter, old_pmac_id);
}
mutex_unlock(&adapter->rx_filter_lock); /* Decide if the new MAC is successfully activated only after * querying the FW
*/
status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
adapter->if_handle, true, 0); if (status) goto err;
/* The MAC change did not happen, either due to lack of privilege * or PF didn't pre-provision.
*/ if (!ether_addr_equal(addr->sa_data, mac)) {
status = -EPERM; goto err;
}
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
eth_hw_addr_set(netdev, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data); return 0;
err:
dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); return status;
}
/* BE2 supports only v0 cmd */ staticvoid *hw_stats_from_cmd(struct be_adapter *adapter)
{ if (BE2_chip(adapter)) { struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
/* receiver fifo overrun */ /* drops_no_pbuf is no per i/f, it's per BE card */
stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
drvs->rx_input_fifo_overflow_drop +
drvs->rx_drops_no_pbuf;
}
/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
vlan_tag = skb_vlan_tag_get(skb);
vlan_prio = skb_vlan_tag_get_prio(skb); /* If vlan priority provided by OS is NOT in available bmap */ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
adapter->recommended_prio_bits;
return vlan_tag;
}
/* Used only for IP tunnel packets: returns the L4 protocol number of the
 * inner IP header (IPv4 'protocol' field or IPv6 'nexthdr' field).
 */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}
/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this * hack is not needed, the evt bit is set while ringing DB.
*/
SET_TX_WRB_HDR_BITS(event, hdr,
BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
SET_TX_WRB_HDR_BITS(vlan, hdr,
BE_WRB_F_GET(wrb_params->features, VLAN));
SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 *
 * @head:       producer index to restore (value before enqueue started)
 * @map_single: true if the first data WRB was mapped with dma_map_single()
 * @copied:     total byte count of the WRBs that must be unmapped
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag may have been single-mapped */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index now that all frags are unmapped */
	txq->head = head;
}
/* Enqueue the given packet for transmit. This routine allocates WRBs for the * packet, dma maps the packet buffers and sets up the WRBs. Returns the number * of WRBs used up by the packet.
*/ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, struct sk_buff *skb, struct be_wrb_params *wrb_params)
{
u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb); struct device *dev = &adapter->pdev->dev; bool map_single = false;
u32 head;
dma_addr_t busaddr; int len;
head = be_tx_get_wrb_hdr(txo);
if (skb->len > skb->data_len) {
len = skb_headlen(skb);
/* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. * For padded packets, Lancer computes incorrect checksum.
*/
eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
VLAN_ETH_HLEN : ETH_HLEN; if (skb->len <= 60 &&
(lancer_chip(adapter) || BE3_chip(adapter) ||
skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) {
ip = (struct iphdr *)ip_hdr(skb); if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)))) goto tx_drop;
}
/* If vlan tag is already inlined in the packet, skip HW VLAN * tagging in pvid-tagging mode
*/ if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
/* HW has a bug wherein it will calculate CSUM for VLAN * pkts even though it is disabled. * Manually insert VLAN in pkt.
*/ if (skb->ip_summed != CHECKSUM_PARTIAL &&
skb_vlan_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err;
}
/* HW may lockup when VLAN HW tagging is requested on * certain ipv6 packets. Drop such pkts if the HW workaround to * skip HW tagging is not enabled by FW.
*/ if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
(adapter->pvid || adapter->qnq_vid) &&
!qnq_async_evt_rcvd(adapter))) goto tx_drop;
/* Manual VLAN tag insertion to prevent: * ASIC lockup when the ASIC inserts VLAN tag into * certain ipv6 packets. Insert VLAN tags in driver, * and set event, completion, vlan bits accordingly * in the Tx WRB.
*/ if (be_ipv6_tx_stall_chk(adapter, skb) &&
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params); if (unlikely(!skb)) goto err;
}
/* Lancer, SH and BE3 in SRIOV mode have a bug wherein * packets that are 32b or less may cause a transmit stall * on that port. The workaround is to pad such packets * (len <= 32 bytes) to a minimum length of 36b.
*/ if (skb->len <= 32) { if (skb_put_padto(skb, 36)) return NULL;
}
if (BEx_chip(adapter) || lancer_chip(adapter)) {
skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params); if (!skb) return NULL;
}
/* The stack can send us skbs with length greater than * what the HW can handle. Trim the extra bytes.
*/
WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
err = pskb_trim(skb, BE_MAX_GSO_SIZE);
WARN_ON(err);
/* Mark the last request eventable if it hasn't been marked already */ if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
/* compose a dummy wrb if there are odd set of wrbs to notify */ if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
wrb_fill_dummy(queue_head_node(txq));
queue_head_inc(txq);
atomic_inc(&txq->used);
txo->pend_wrb_cnt++;
hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
TX_HDR_WRB_NUM_SHIFT);
hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
TX_HDR_WRB_NUM_SHIFT);
}
be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
txo->pend_wrb_cnt = 0;
}
if (is_udp_pkt((*skb))) { struct udphdr *udp = udp_hdr((*skb));
switch (ntohs(udp->dest)) { case DHCP_CLIENT_PORT:
os2bmc = is_dhcp_client_filt_enabled(adapter); goto done; case DHCP_SERVER_PORT:
os2bmc = is_dhcp_srvr_filt_enabled(adapter); goto done; case NET_BIOS_PORT1: case NET_BIOS_PORT2:
os2bmc = is_nbios_filt_enabled(adapter); goto done; case DHCPV6_RAS_PORT:
os2bmc = is_ipv6_ras_filt_enabled(adapter); goto done; default: break;
}
}
done: /* For packets over a vlan, which are destined * to BMC, asic expects the vlan to be inline in the packet.
*/ if (os2bmc)
*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) goto drop_skb;
/* if os2bmc is enabled and if the pkt is destined to bmc, * enqueue the pkt a 2nd time with mgmt bit set.
*/ if (be_send_pkt_to_bmc(adapter, &skb)) {
BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params); if (unlikely(!wrb_cnt)) goto drop_skb; else
skb_get(skb);
}
if (be_is_txq_full(txo)) {
netif_stop_subqueue(netdev, q_idx);
tx_stats(txo)->tx_stops++;
}
if (flush || __netif_subqueue_stopped(netdev, q_idx))
be_xmit_flush(adapter, txo);
return NETDEV_TX_OK;
drop_skb:
dev_kfree_skb_any(skb);
drop:
tx_stats(txo)->tx_drv_drops++; /* Flush the already enqueued tx requests */ if (flush && txo->pend_wrb_cnt)
be_xmit_flush(adapter, txo);
status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF); if (!status) {
dev_info(dev, "Disabling VLAN promiscuous mode\n");
adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
} return status;
}
/* * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE. * If the user configures more, place BE in vlan promiscuous mode.
*/ staticint be_vid_config(struct be_adapter *adapter)
{ struct device *dev = &adapter->pdev->dev;
u16 vids[BE_NUM_VLANS_SUPPORTED];
u16 num = 0, i = 0; int status = 0;
/* No need to change the VLAN state if the I/F is in promiscuous */ if (adapter->netdev->flags & IFF_PROMISC) return 0;
if (adapter->vlans_added > be_max_vlans(adapter)) return be_set_vlan_promisc(adapter);
if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
status = be_clear_vlan_promisc(adapter); if (status) return status;
} /* Construct VLAN Table to give to HW */
for_each_set_bit(i, adapter->vids, VLAN_N_VID)
vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0); if (status) {
dev_err(dev, "Setting HW VLAN filtering failed\n"); /* Set to VLAN promisc mode as setting VLAN filter failed */ if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
addl_status(status) ==
MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES) return be_set_vlan_promisc(adapter);
} return status;
}
staticint be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{ struct be_adapter *adapter = netdev_priv(netdev); int status = 0;
mutex_lock(&adapter->rx_filter_lock);
/* Packets with VID 0 are always received by Lancer by default */ if (lancer_chip(adapter) && vid == 0) goto done;
/* Put the interface into multicast-promiscuous mode (no-op if already set).
 * The cached if_flags are updated only if the FW command succeeds.
 */
static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}
/* Put the interface into (unicast) promiscuous mode (no-op if already set).
 * The cached if_flags are updated only if the FW command succeeds.
 */
static void be_set_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
}
/* Take the interface out of (unicast) promiscuous mode (no-op if not set).
 * The cached if_flags are updated only if the FW command succeeds.
 */
static void be_clear_uc_promisc(struct be_adapter *adapter)
{
	int status;

	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
	if (!status)
		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
}
/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync(). * We use a single callback function for both sync and unsync. We really don't * add/remove addresses through this callback. But, we use it to detect changes * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
*/ staticint be_uc_list_update(struct net_device *netdev, constunsignedchar *addr)
{ struct be_adapter *adapter = netdev_priv(netdev);
if (netdev->flags & IFF_PROMISC) {
adapter->update_mc_list = false;
} elseif (netdev->flags & IFF_ALLMULTI ||
netdev_mc_count(netdev) > be_max_mc(adapter)) { /* Enable multicast promisc if num configured exceeds * what we support
*/
mc_promisc = true;
adapter->update_mc_list = false;
} elseif (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) { /* Update mc-list unconditionally if the iface was previously * in mc-promisc mode and now is out of that mode.
*/
adapter->update_mc_list = true;
}
if (adapter->update_mc_list) { int i = 0;
/* cache the mc-list in adapter */
netdev_for_each_mc_addr(ha, netdev) {
ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
i++;
}
adapter->mc_count = netdev_mc_count(netdev);
}
netif_addr_unlock_bh(netdev);
if (mc_promisc) {
be_set_mc_promisc(adapter);
} elseif (adapter->update_mc_list) {
status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON); if (!status)
adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS; else
be_set_mc_promisc(adapter);
if (netdev->flags & IFF_PROMISC) {
adapter->update_uc_list = false;
} elseif (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
uc_promisc = true;
adapter->update_uc_list = false;
} elseif (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) { /* Update uc-list unconditionally if the iface was previously * in uc-promisc mode and now is out of that mode.
*/
adapter->update_uc_list = true;
}
if (adapter->update_uc_list) { /* cache the uc-list in adapter array */
i = 0;
netdev_for_each_uc_addr(ha, netdev) {
ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
i++;
}
curr_uc_macs = netdev_uc_count(netdev);
}
netif_addr_unlock_bh(netdev);
if (uc_promisc) {
be_set_uc_promisc(adapter);
} elseif (adapter->update_uc_list) {
be_clear_uc_promisc(adapter);
for (i = 0; i < adapter->uc_macs; i++)
be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
for (i = 0; i < curr_uc_macs; i++)
be_uc_mac_add(adapter, i);
adapter->uc_macs = curr_uc_macs;
adapter->update_uc_list = false;
}
}
if (netdev->flags & IFF_PROMISC) { if (!be_in_all_promisc(adapter))
be_set_all_promisc(adapter);
} elseif (be_in_all_promisc(adapter)) { /* We need to re-program the vlan-list or clear * vlan-promisc mode (if needed) when the interface * comes out of promisc mode.
*/
be_vid_config(adapter);
}
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF number @vf.
 * Also clears any guest-programmed VLAN filters on the VF and revokes the
 * VF's FILTMGMT privilege so it cannot program filters while TVT is on.
 * Returns 0 on success or a negative/FW error code if enabling TVT fails;
 * failures of the follow-up cleanup steps are deliberately not propagated.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
/* Disable Transparent VLAN Tagging on VF number @vf and restore the VF's
 * FILTMGMT privilege so it may program its own VLAN filters again.
 * Returns 0 on success or an error code if resetting TVT fails; a failure
 * to restore the privilege is logged-only and not propagated.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev, "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
if (vlan_proto != htons(ETH_P_8021Q)) return -EPROTONOSUPPORT;
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
status = be_set_vf_tvt(adapter, vf, vlan);
} else {
status = be_clear_vf_tvt(adapter, vf);
}
if (status) {
dev_err(&adapter->pdev->dev, "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
status); return be_cmd_status(status);
}
vf_cfg->vlan_tag = vlan; return 0;
}
staticint be_set_vf_tx_rate(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate)
{ struct be_adapter *adapter = netdev_priv(netdev); struct device *dev = &adapter->pdev->dev; int percent_rate, status = 0;
u16 link_speed = 0;
u8 link_status;
if (!sriov_enabled(adapter)) return -EPERM;
if (vf >= adapter->num_vfs) return -EINVAL;
if (min_tx_rate) return -EINVAL;
if (!max_tx_rate) goto config_qos;
status = be_cmd_link_status_query(adapter, &link_speed,
&link_status, 0); if (status) goto err;
if (!link_status) {
dev_err(dev, "TX-rate setting not allowed when link is down\n");
status = -ENETDOWN; goto err;
}
if (max_tx_rate < 100 || max_tx_rate > link_speed) {
dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
link_speed);
status = -EINVAL; goto err;
}
/* On Skyhawk the QOS setting must be done only as a % value */
percent_rate = link_speed / 100; if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
percent_rate);
status = -EINVAL; goto err;
}
config_qos:
status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1); if (status) goto err;
/* Throwaway the data in the Rx completion: release the page of every
 * fragment that the completion @rxcp covers and reset its page-info slot.
 */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		/* clear the slot so stale page pointers are never reused */
		memset(page_info, 0, sizeof(*page_info));
	}
}
/* * skb_fill_rx_data forms a complete skb for an ether frame * indicated by rxcp.
*/ staticvoid skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{ struct be_rx_page_info *page_info;
u16 i, j;
u16 hdr_len, curr_frag_len, remaining;
u8 *start;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.