/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 *     msi = 2: choose from among all three options
 *     msi = 1: only consider MSI and INTx interrupts
 *     msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue. Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;

module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue, "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
switch (p->link_cfg.speed) { case 100:
s = "100Mbps"; break; case 1000:
s = "1Gbps"; break; case 10000:
s = "10Gbps"; break; case 25000:
s = "25Gbps"; break; case 40000:
s = "40Gbps"; break; case 50000:
s = "50Gbps"; break; case 100000:
s = "100Gbps"; break; default:
pr_info("%s: unsupported speed: %d\n",
dev->name, p->link_cfg.speed); return;
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device.
 * When @enable is non-zero, TX Queue i is mapped to DCB Priority i; when
 * it is zero, the priority mapping is cleared (0xffffffff sentinel).
 */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev, "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = enable ? value : 0;
	}
}
/* NOTE(review): extraction damage suspected.  This function is declared as
 * int cxgb4_dcb_enabled() but the visible body only updates
 * link_cfg.redo_l1cfg and never returns a value -- callers reading the
 * result would hit undefined behaviour.  The body text (redoing "sticky"
 * link parameters for a new transceiver module) looks like it belongs to a
 * different function that was merged in here; reconcile against the
 * upstream cxgb4_main.c before relying on this code.  Also note the fused
 * "conststruct" token below (should be "const struct").
 */
int cxgb4_dcb_enabled(conststruct net_device *dev)
{ struct port_info *pi = netdev_priv(dev);
/* If the interface is running, then we'll need any "sticky" Link * Parameters redone with a new Transceiver Module.
 */
pi->link_cfg.redo_l1cfg = netif_running(dev);
}
/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;

module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/* Calculate the hash vector for the updated list and program it */
list_for_each_entry(entry, &adap->mac_hlist, list) {
ucast |= is_unicast_ether_addr(entry->addr);
vec |= (1ULL << hash_mac_addr(entry->addr));
} return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
vec, false);
}
/* NOTE(review): extraction damage suspected.  The body below references
 * identifiers that are never declared in the visible code (free, maclist,
 * idx, ucast, new_entry) -- the declarations/assignments between the local
 * variable block and the cxgb4_alloc_mac_filt() call appear to have been
 * lost.  Also note the fused "staticint" token (should be "static int").
 * Reconcile against the upstream cxgb4_main.c before use.
 */
staticint cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{ struct port_info *pi = netdev_priv(netdev); struct adapter *adap = pi->adapter; int ret;
u64 mhash = 0;
u64 uhash = 0; /* idx stores the index of allocated filters, * its size should be modified based on the number of * MAC addresses that we allocate filters for
 */
ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
idx, ucast ? &uhash : &mhash, false); if (ret < 0) goto out; /* if hash != 0, then add the addr to hash addr list * so on the end we will calculate the hash for the * list and program it
 */ if (uhash || mhash) {
new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); if (!new_entry) return -ENOMEM;
ether_addr_copy(new_entry->addr, mac_addr);
list_add_tail(&new_entry->list, &adap->mac_hlist);
ret = cxgb4_set_addr_hash(pi);
}
out: return ret < 0 ? ret : 0;
}
/* If the MAC address to be removed is in the hash addr * list, delete it from the list and update hash vector
*/
list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { if (ether_addr_equal(entry->addr, mac_addr)) {
list_del(&entry->list);
kfree(entry); return cxgb4_set_addr_hash(pi);
}
}
ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false); return ret < 0 ? -EINVAL : 0;
}
/* * Set Rx properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged.
*/ staticint set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{ struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter;
/**
 * cxgb4_change_mac - Update match filter for a MAC address.
 * @pi: the port_info
 * @viid: the VI id
 * @tcam_idx: TCAM index of existing filter for old value of MAC address,
 *	      or -1
 * @addr: the new MAC address value
 * @persist: whether a new MAC allocation should be persistent
 * @smt_idx: the destination to store the new SMT index.
 *
 * Modifies an MPS filter and sets it to the new MAC address if
 * @tcam_idx >= 0, or adds the MAC address to a new filter if
 * @tcam_idx < 0. In the latter case the address is added persistently
 * if @persist is %true.
 * Addresses are programmed to hash region, if tcam runs out of entries.
 */
int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
		     int *tcam_idx, const u8 *addr, bool persist,
		     u8 *smt_idx)
{
	struct adapter *adapter = pi->adapter;
	struct hash_mac_addr *entry, *new_entry;
	int ret;

	ret = t4_change_mac(adapter, adapter->mbox, viid,
			    *tcam_idx, addr, persist, smt_idx);
	/* We ran out of TCAM entries. try programming hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4_set_addr_hash(pi);
	} else if (ret >= 0) {
		/* t4_change_mac() returns the allocated TCAM index on
		 * success; remember it and report success to the caller.
		 */
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}
/*
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port: set the Rx
 * mode (MTU/VLAN stripping), program the MAC address filter, apply the
 * link configuration, and finally enable the Virtual Interface.
 */
static int link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->mbox;
	int ret;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
			    dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0)
		ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
					    dev->dev_addr, true, &pi->smt_idx);
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		/* Issue the VI enable with bottom halves disabled. */
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true, true,
					  CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}
/* NOTE(review): extraction damage suspected.  new_dcb_enabled below is
 * declared but never assigned before it is compared against
 * old_dcb_enabled -- reading it is undefined behaviour.  The lines that
 * process the firmware DCB update and compute new_dcb_enabled appear to
 * have been lost; reconcile against upstream cxgb4_main.c.  Also note the
 * fused "staticvoid"/"conststruct" tokens.
 */
#ifdef CONFIG_CHELSIO_T4_DCB /* Handle a Data Center Bridging update message from the firmware. */ staticvoid dcb_rpl(struct adapter *adap, conststruct fw_port_cmd *pcmd)
{ int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid)); struct net_device *dev = adap->port[adap->chan_map[port]]; int old_dcb_enabled = cxgb4_dcb_enabled(dev); int new_dcb_enabled;
/* If the DCB has become enabled or disabled on the port then we're * going to need to set up/tear down DCB Priority parameters for the * TX Queues associated with the port.
 */ if (new_dcb_enabled != old_dcb_enabled)
dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
} #endif/* CONFIG_CHELSIO_T4_DCB */
/* NOTE(review): this function appears truncated by extraction -- the body
 * ends inside the query-failure path and the remaining logic described in
 * the comment (checking @val and issuing the enabling write command) plus
 * the closing brace are missing.  The "¶m" token below is mojibake for
 * "&param" ("&" + "param" rendered as the HTML entity "&para;").  Also note
 * the fused "staticint"/"unsignedint" tokens.  Reconcile against upstream
 * cxgb4_main.c before use.
 */
staticint setup_ppod_edram(struct adapter *adap)
{ unsignedint param, val; int ret;
/* Driver sends FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command to check * if firmware supports ppod edram feature or not. If firmware * returns 1, then driver can enable this feature by sending * FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with value 1 to * enable ppod edram feature.
 */
param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); if (ret < 0) {
dev_warn(adap->pdev_dev, "querying PPOD_EDRAM support failed: %d\n",
ret); return -1;
}
/* Ask the firmware to enable the High Priority filter region.  Failure is
 * tolerated: older firmware doesn't know the parameter and will reject it.
 */
static void adap_config_hpfilter(struct adapter *adapter)
{
	u32 param, val = 0;
	int ret;

	/* Enable HP filter region. Older fw will fail this request and
	 * it is fine.
	 */
	param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
	ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
			    1, &param, &val);

	/* An error means FW doesn't know about HP filter support,
	 * it's not a problem, don't return an error.
	 */
	if (ret < 0)
		dev_err(adapter->pdev_dev,
			"HP filter region isn't supported by FW\n");
}
ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
rss_size); if (ret) return ret;
/* If Tunnel All Lookup isn't specified in the global RSS * Configuration, then we need to specify a default Ingress * Queue for any ingress packets which aren't hashed. We'll * use our first ingress queue ...
*/ return t4_config_vi_rss(adap, adap->mbox, viid,
FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
FW_RSS_VI_CONFIG_CMD_UDPEN_F,
rss[0]);
}
/** * cxgb4_write_rss - write the RSS table for a given port * @pi: the port * @queues: array of queue indices for RSS * * Sets up the portion of the HW RSS table for the port's VI to distribute * packets to the Rx queues in @queues. * Should never be called before setting up sge eth rx queues
*/ int cxgb4_write_rss(conststruct port_info *pi, const u16 *queues)
{ struct adapter *adapter = pi->adapter; conststruct sge_eth_rxq *rxq; int i, err;
u16 *rss;
/* * Return the channel of the ingress queue with the given qid.
*/ staticunsignedint rxq_to_chan(conststruct sge *p, unsignedint qid)
{
qid -= p->ingr_start; return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/* Quiesce an Rx response queue: if it has a handler installed, disable
 * its NAPI instance; queues without a handler are left untouched.
 */
void cxgb4_quiesce_rx(struct sge_rspq *q)
{
	if (!q->handler)
		return;

	napi_disable(&q->napi);
}
/*
 * Wait until all NAPI handlers are descheduled by quiescing every
 * allocated ingress queue on the adapter.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}
/* Disable interrupt and napi handler */ staticvoid disable_interrupts(struct adapter *adap)
{ struct sge *s = &adap->sge;
/** * setup_sge_queues - configure SGE Tx/Rx/response queues * @adap: the adapter * * Determines how many sets of SGE queues to use and initializes them. * We support multiple queue sets per port if we have MSI-X, otherwise * just one queue set per port.
*/ staticint setup_sge_queues(struct adapter *adap)
{ struct sge_uld_rxq_info *rxq_info = NULL; struct sge *s = &adap->sge; unsignedint cmplqid = 0; int err, i, j, msix = 0;
if (is_uld(adap))
rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
if (!(adap->flags & CXGB4_USING_MSIX))
msix = -((int)s->intrq.abs_id + 1);
for_each_port(adap, i) { /* Note that cmplqid below is 0 if we don't * have RDMA queues, and that's the right value.
*/ if (rxq_info)
cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
#ifdef CONFIG_CHELSIO_T4_DCB /* If a Data Center Bridging has been successfully negotiated on this * link then we'll use the skb's priority to map it to a TX Queue. * The skb's priority is determined via the VLAN Tag Priority Code * Point field.
*/ if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
u16 vlan_tci; int err;
err = vlan_get_tag(skb, &vlan_tci); if (unlikely(err)) { if (net_ratelimit())
netdev_warn(dev, "TX Packet without VLAN Tag on DCB Link\n");
txq = 0;
} else {
txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; #ifdef CONFIG_CHELSIO_T4_FCOE if (skb->protocol == htons(ETH_P_FCOE))
txq = skb->priority & 0x7; #endif/* CONFIG_CHELSIO_T4_FCOE */
} return txq;
} #endif/* CONFIG_CHELSIO_T4_DCB */
staticint closest_timer(conststruct sge *s, int time)
{ int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
delta = time - s->timer_val[i]; if (delta < 0)
delta = -delta; if (delta < min_delta) {
min_delta = delta;
match = i;
}
} return match;
}
staticint closest_thres(conststruct sge *s, int thres)
{ int i, delta, match = 0, min_delta = INT_MAX;
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
delta = thres - s->counter_val[i]; if (delta < 0)
delta = -delta; if (delta < min_delta) {
min_delta = delta;
match = i;
}
} return match;
}
/** * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters * @q: the Rx queue * @us: the hold-off time in us, or 0 to disable timer * @cnt: the hold-off packet count, or 0 to disable counter * * Sets an Rx queue's interrupt hold-off time and packet count. At least * one of the two needs to be enabled for the queue to generate interrupts.
*/ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsignedint us, unsignedint cnt)
{ struct adapter *adap = q->adap;
if ((us | cnt) == 0)
cnt = 1;
if (cnt) { int err;
u32 v, new_idx;
new_idx = closest_thres(&adap->sge, cnt); if (q->desc && q->pktcnt_idx != new_idx) { /* the queue has already been created, update it */
v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&v, &new_idx); if (err) return err;
}
q->pktcnt_idx = new_idx;
}
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); return 0;
}
/* Mirror VIs can be created dynamically after stack had * already setup Rx modes like MTU, promisc, allmulti, etc. * on main VI. So, parse what the stack had setup on the * main VI and update the same on the mirror VI.
*/
ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
!!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); if (ret) {
dev_err(adap->pdev_dev, "Failed start up Rx mode for Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret); return ret;
}
/* Enable replication bit for the device's MAC address * in MPS TCAM, so that the packets for the main VI are * replicated to mirror VI.
*/
ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
dev->dev_addr, true, NULL); if (ret) {
dev_err(adap->pdev_dev, "Failed updating MAC filter for Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret); return ret;
}
/* Enabling a Virtual Interface can result in an interrupt * during the processing of the VI Enable command and, in some * paths, result in an attempt to issue another command in the * interrupt context. Thus, we disable interrupts during the * course of the VI Enable command ...
*/
local_bh_disable();
ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true, false);
local_bh_enable(); if (ret)
dev_err(adap->pdev_dev, "Failed starting Mirror VI 0x%x, ret: %d\n",
pi->viid_mirror, ret);
/* * Queue a TID release request and if necessary schedule a work queue to * process it.
*/ staticvoid cxgb4_queue_tid_release(struct tid_info *t, unsignedint chan, unsignedint tid)
{ struct adapter *adap = container_of(t, struct adapter, tids); void **p = &t->tid_tab[tid - t->tid_base];
/* * Process the list of pending TID release requests.
*/ staticvoid process_tid_release_list(struct work_struct *work)
{ struct sk_buff *skb; struct adapter *adap;
/* * Release a TID and inform HW. If we are unable to allocate the release * message we defer to a work queue.
*/ void cxgb4_remove_tid(struct tid_info *t, unsignedint chan, unsignedint tid, unsignedshort family)
{ struct adapter *adap = container_of(t, struct adapter, tids); struct sk_buff *skb;
if (tid_out_of_range(&adap->tids, tid)) {
dev_err(adap->pdev_dev, "tid %d out of range\n", tid); return;
}
if (t->tid_tab[tid - adap->tids.tid_base]) {
t->tid_tab[tid - adap->tids.tid_base] = NULL;
atomic_dec(&t->conns_in_use); if (t->hash_base && (tid >= t->hash_base)) { if (family == AF_INET6)
atomic_sub(2, &t->hash_tids_in_use); else
atomic_dec(&t->hash_tids_in_use);
} else { if (family == AF_INET6)
atomic_sub(2, &t->tids_in_use); else
atomic_dec(&t->tids_in_use);
}
}
/* Setup the free list for atid_tab and clear the stid bitmap. */ if (natids) { while (--natids)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
if (is_offload(adap)) {
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); /* Reserve stid 0 for T4/T5 adapters */ if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
if (t->neotids)
bitmap_zero(t->eotid_bmap, t->neotids);
}
if (t->nhpftids)
bitmap_zero(t->hpftid_bmap, t->nhpftids);
bitmap_zero(t->ftid_bmap, t->nftids); return 0;
}
/** * cxgb4_create_server - create an IP server * @dev: the device * @stid: the server TID * @sip: local IP address to bind server to * @sport: the server's TCP port * @vlan: the VLAN header information * @queue: queue to direct messages from this server to * * Create an IP server for the given port and address. * Returns <0 on error and one of the %NET_XMIT_* values on success.
*/ int cxgb4_create_server(conststruct net_device *dev, unsignedint stid,
__be32 sip, __be16 sport, __be16 vlan, unsignedint queue)
{ unsignedint chan; struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req *req; int ret;
skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) return -ENOMEM;
/* cxgb4_create_server6 - create an IPv6 server * @dev: the device * @stid: the server TID * @sip: local IPv6 address to bind server to * @sport: the server's TCP port * @queue: queue to direct messages from this server to * * Create an IPv6 server for the given port and address. * Returns <0 on error and one of the %NET_XMIT_* values on success.
*/ int cxgb4_create_server6(conststruct net_device *dev, unsignedint stid, conststruct in6_addr *sip, __be16 sport, unsignedint queue)
{ unsignedint chan; struct sk_buff *skb; struct adapter *adap; struct cpl_pass_open_req6 *req; int ret;
skb = alloc_skb(sizeof(*req), GFP_KERNEL); if (!skb) return -ENOMEM;
/** * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU * @mtus: the HW MTU table * @mtu: the target MTU * @idx: index of selected entry in the MTU table * * Returns the index and the value in the HW MTU table that is closest to * but does not exceed @mtu, unless @mtu is smaller than any value in the * table, in which case that smallest available value is selected.
*/ unsignedint cxgb4_best_mtu(constunsignedshort *mtus, unsignedshort mtu, unsignedint *idx)
{ unsignedint i = 0;
while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
++i; if (idx)
*idx = i; return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
/** * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned * @mtus: the HW MTU table * @header_size: Header Size * @data_size_max: maximum Data Segment Size * @data_size_align: desired Data Segment Size Alignment (2^N) * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL) * * Similar to cxgb4_best_mtu() but instead of searching the Hardware * MTU Table based solely on a Maximum MTU parameter, we break that * parameter up into a Header Size and Maximum Data Segment Size, and * provide a desired Data Segment Size Alignment. If we find an MTU in * the Hardware MTU Table which will result in a Data Segment Size with * the requested alignment _and_ that MTU isn't "too far" from the * closest MTU, then we'll return that rather than the closest MTU.
*/ unsignedint cxgb4_best_aligned_mtu(constunsignedshort *mtus, unsignedshort header_size, unsignedshort data_size_max, unsignedshort data_size_align, unsignedint *mtu_idxp)
{ unsignedshort max_mtu = header_size + data_size_max; unsignedshort data_size_align_mask = data_size_align - 1; int mtu_idx, aligned_mtu_idx;
/* Scan the MTU Table till we find an MTU which is larger than our * Maximum MTU or we reach the end of the table. Along the way, * record the last MTU found, if any, which will result in a Data * Segment Length matching the requested alignment.
*/ for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { unsignedshort data_size = mtus[mtu_idx] - header_size;
/* If this MTU minus the Header Size would result in a * Data Segment Size of the desired alignment, remember it.
*/ if ((data_size & data_size_align_mask) == 0)
aligned_mtu_idx = mtu_idx;
/* If we're not at the end of the Hardware MTU Table and the * next element is larger than our Maximum MTU, drop out of * the loop.
*/ if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu) break;
}
/* If we fell out of the loop because we ran to the end of the table, * then we just have to use the last [largest] entry.
*/ if (mtu_idx == NMTUS)
mtu_idx--;
/* If we found an MTU which resulted in the requested Data Segment * Length alignment and that's "not far" from the largest MTU which is * less than or equal to the maximum MTU, then use that.
*/ if (aligned_mtu_idx >= 0 &&
mtu_idx - aligned_mtu_idx <= 1)
mtu_idx = aligned_mtu_idx;
/* If the caller has passed in an MTU Index pointer, pass the * MTU Index back. Return the MTU value.
*/ if (mtu_idxp)
*mtu_idxp = mtu_idx; return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
/** * cxgb4_port_chan - get the HW channel of a port * @dev: the net device for the port * * Return the HW Tx channel of the given port.
*/ unsignedint cxgb4_port_chan(conststruct net_device *dev)
{ return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
/** * cxgb4_port_e2cchan - get the HW c-channel of a port * @dev: the net device for the port * * Return the HW RX c-channel of the given port.
*/ unsignedint cxgb4_port_e2cchan(conststruct net_device *dev)
{ return netdev2pinfo(dev)->rx_cchan;
}
EXPORT_SYMBOL(cxgb4_port_e2cchan);
/** * cxgb4_port_viid - get the VI id of a port * @dev: the net device for the port * * Return the VI id of the given port.
*/ unsignedint cxgb4_port_viid(conststruct net_device *dev)
{ return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);
/** * cxgb4_port_idx - get the index of a port * @dev: the net device for the port * * Return the index of the given port.
*/ unsignedint cxgb4_port_idx(conststruct net_device *dev)
{ return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
/* Figure out where the offset lands in the Memory Type/Address scheme. * This code assumes that the memory is laid out starting at offset 0 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have * MC0, and some have both MC0 and MC1.
*/
size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
edc0_size = EDRAM0_SIZE_G(size) << 20;
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
edc1_size = EDRAM1_SIZE_G(size) << 20;
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
mc0_size = EXT_MEM0_SIZE_G(size) << 20;
staticvoid enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
spin_lock_irq(&q->db_lock); if (q->db_pidx_inc) { /* Make sure that all writes to the TX descriptors * are committed before we tell HW about them.
*/
/*
 * NOTE: the original source was truncated at this point by the extraction
 * tool ("maximum size reached"); the remainder of enable_txq_db() and the
 * rest of the file are missing.  German measurement/disclaimer text that
 * followed here was extraction-tool residue, not part of the driver.
 */