/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
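/* Ring the EQ doorbell: the low 24 bits carry the current consumer
 * index, and bit 31, when req_not is set, asks the HCA to generate an
 * interrupt for the next event.
 */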
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	wmb();
}
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor,
				u8 eqe_size)
{
	/* (entry & (eq->nent - 1)) gives us a cyclic array */
	unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;

	/* CX3 is capable of extending the EQE from 32 to 64 bytes with
	 * strides of 64B, 128B and 256B.
	 * When a 64B EQE is used, the first 32 bytes (in the lower
	 * addresses) of the 64-byte EQE are reserved and the next 32
	 * bytes contain the legacy EQE information.
	 * In all other cases, the first 32B contain the legacy EQE info.
	 */
	return eq->page_list[offset / PAGE_SIZE].buf +
		(offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE;
}
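/* Worked example (a sketch, assuming 4 KB pages, eqe_size == 32 and
 * eq->nent == 512): entry 515 wraps to slot 3, so offset = 3 * 32 = 96
 * and the EQE lives at page_list[0].buf + 96.  With 64-byte EQEs on
 * CX3 (eqe_factor != 0, eqe_size == 64), the offset doubles and
 * MLX4_EQ_ENTRY_SIZE (32) is added to skip the reserved first half of
 * each entry.
 */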
			if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state)
				goto consume;

			if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state)
				goto consume;
		}
		/* All active slaves need to receive the event */
		if (slave == ALL_SLAVES) {
			for (i = 0; i <= dev->persist->num_vfs; i++) {
				phys_port = 0;
				if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT &&
				    eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) {
					phys_port = eqe->event.port_mgmt_change.port;
					slave_port = mlx4_phys_to_slave_port(dev, i, phys_port);
					if (slave_port < 0) /* VF doesn't have this port */
						continue;
					eqe->event.port_mgmt_change.port = slave_port;
				}
				if (mlx4_GEN_EQE(dev, i, eqe))
					mlx4_warn(dev, "Failed to generate event for slave %d\n",
						  i);
				if (phys_port)
					eqe->event.port_mgmt_change.port = phys_port;
			}
		} else {
			if (mlx4_GEN_EQE(dev, slave, eqe))
				mlx4_warn(dev, "Failed to generate event for slave %d\n",
					  slave);
		}
consume:
		++slave_eq->cons;
	}
}
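/* Producer side of the slave event queue.  An entry's ownership bit
 * (0x80 in s_eqe->owner), XORed with the producer's "lap" bit
 * (prod & SLAVE_EVENT_EQ_SIZE), tells whether the slot can be reused;
 * if not, the queue is full and the event is dropped with a warning.
 * The owner byte is written last, after dma_wmb(), so the consumer
 * never observes a half-written EQE.
 */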
spin_lock_irqsave(&slave_eq->event_lock, flags);
	s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
	if ((!!(s_eqe->owner & 0x80)) ^
	    (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
		mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
			  slave);
		spin_unlock_irqrestore(&slave_eq->event_lock, flags);
		return;
	}

	memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
	s_eqe->slave_id = slave;
	/* ensure all information is written before setting the ownership bit */
	dma_wmb();
	s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
++slave_eq->prod;
	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return -1;
}
s_state[slave].port_state[port] = state;
return 0;
}
static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
{
	int i;
	enum slave_port_gen_event gen_event;
	struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
									  port);

	for (i = 0; i < dev->persist->num_vfs + 1; i++)
		if (test_bit(i, slaves_pport.slaves))
			set_and_calc_slave_port_state(dev, i, port,
						      event, &gen_event);
}

/**************************************************************************
	The function gets the new event for a port as input and, based on
	the previous state, changes the slave's port state.  The events are:
		MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
		MLX4_PORT_STATE_DEV_EVENT_PORT_UP
		MLX4_PORT_STATE_IB_EVENT_GID_VALID
		MLX4_PORT_STATE_IB_EVENT_GID_INVALID
***************************************************************************/
int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
				  u8 port, int event,
				  enum slave_port_gen_event *gen_event)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *ctx = NULL;
	unsigned long flags;
	int ret = -1;
	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
	enum slave_port_state cur_state =
		mlx4_get_slave_port_state(dev, slave, port);
*gen_event = SLAVE_PORT_GEN_EVENT_NONE;
	if (slave >= dev->num_slaves || port > dev->caps.num_ports ||
	    port <= 0 || !test_bit(port - 1, actv_ports.ports)) {
		pr_err("%s: Error: asking for slave:%d, port:%d\n",
		       __func__, slave, port);
		return ret;
}
		if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
			mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
				 i);
			/* In case of 'Reset flow' FLR can be generated for
			 * a slave before mlx4_load_one is done.
			 * Make sure the interface is up before trying to delete
			 * slave resources which weren't allocated yet.
			 */
			if (dev->persist->interface_state &
			    MLX4_INTERFACE_STATE_UP)
				mlx4_delete_all_resources_for_slave(dev, i);
			/* return the slave to running mode */
			spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
			slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
			slave_state[i].is_slave_going_down = 0;
			spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
			/* notify the FW */
			err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
				       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (err)
				mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
					  i);
		}
	}
}
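/* next_eqe_sw() is used by mlx4_eq_int() below but is not part of this
 * excerpt.  A minimal sketch of the ownership test it performs, based
 * on the owner-bit convention used elsewhere in this file (a sketch,
 * not necessarily the exact upstream body):
 */
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor,
				    u8 size)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size);

	/* entry is software-owned when owner bit XOR consumer "lap" is 0 */
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ?
		NULL : eqe;
}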
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;
	int port;
	int slave = 0;
	int ret;
	int flr_slave;
	u8 update_slave_state;
	int i;
	enum slave_port_gen_event gen_event;
	unsigned long flags;
	struct mlx4_vport_state *s_info;
	int eqe_size = dev->caps.eqe_size;

	while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();
		switch (eqe->type) {
		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_dbg(dev, "event %d arrived\n", eqe->type);
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the QP */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_QP,
						be32_to_cpu(eqe->event.qp.qpn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}
		case MLX4_EVENT_TYPE_SRQ_LIMIT:
			mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
				 __func__, be32_to_cpu(eqe->event.srq.srqn),
				 eq->eqn);
			fallthrough;
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			if (mlx4_is_master(dev)) {
				/* forward only to slave owning the SRQ */
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_SRQ,
						be32_to_cpu(eqe->event.srq.srqn)
						& 0xffffff,
						&slave);
				if (ret && ret != -ENOENT) {
					mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						  eqe->type, eqe->subtype,
						  eq->eqn, eq->cons_index, ret);
					break;
				}
				if (eqe->type ==
				    MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
					mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
						  __func__, slave,
						  be32_to_cpu(eqe->event.srq.srqn),
						  eqe->type, eqe->subtype);
		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;
		case MLX4_EVENT_TYPE_PORT_CHANGE: {
			struct mlx4_slaves_pport slaves_port;

			port = be32_to_cpu(eqe->event.port_change.port) >> 28;
			slaves_port = mlx4_phys_to_slaves_pport(dev, port);
			if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
				mlx4_dispatch_event(
					dev, MLX4_DEV_EVENT_PORT_DOWN, &port);
				mlx4_priv(dev)->sense.do_sense_port[port] = 1;
				if (!mlx4_is_master(dev))
					break;
				for (i = 0; i < dev->persist->num_vfs + 1;
				     i++) {
					int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

					if (!test_bit(i, slaves_port.slaves) &&
					    !mlx4_is_bonded(dev))
						continue;
					if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
						if (i == mlx4_master_func_num(dev))
							continue;
						mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
							 __func__, i, port);
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					} else {  /* IB port */
						set_and_calc_slave_port_state(dev, i, port,
									      MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
									      &gen_event);
						/* we can be in pending state, then do not send port_down event */
						if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
							if (i == mlx4_master_func_num(dev))
								continue;
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (mlx4_phys_to_slave_port(dev, i, port) << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				}
			} else {
				mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
						    &port);

				mlx4_priv(dev)->sense.do_sense_port[port] = 0;

				if (!mlx4_is_master(dev))
					break;
				if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
					for (i = 0;
					     i < dev->persist->num_vfs + 1;
					     i++) {
						int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port);

						if (!test_bit(i, slaves_port.slaves) &&
						    !mlx4_is_bonded(dev))
							continue;
						if (i == mlx4_master_func_num(dev))
							continue;
						s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
						if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
							eqe->event.port_change.port =
								cpu_to_be32(
								(be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF)
								| (reported_port << 28));
							mlx4_slave_event(dev, i, eqe);
						}
					}
				else /* IB port */
					/* port-up event will be sent to a slave when the
					 * slave's alias-guid is set. This is done in alias_GUID.c
					 */
					set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
			}
			break;
		}
		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			if (mlx4_is_master(dev)) {
				ret = mlx4_get_slave_from_resource_id(dev,
						RES_CQ,
						be32_to_cpu(eqe->event.cq_err.cqn)
						& 0xffffff, &slave);
				if (ret && ret != -ENOENT) {
					mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
						 eqe->type, eqe->subtype,
						 eq->eqn, eq->cons_index, ret);
					break;
				}
		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_OP_REQUIRED:
			atomic_inc(&priv->opreq_count);
			/* FW commands can't be executed from interrupt context;
			 * run them from a deferred task.
			 */
			queue_work(mlx4_wq, &priv->opreq_task);
			break;
		case MLX4_EVENT_TYPE_COMM_CHANNEL:
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Received comm channel event for non master device\n");
				break;
			}
			memcpy(&priv->mfunc.master.comm_arm_bit_vector,
			       eqe->event.comm_channel_arm.bit_vec,
			       sizeof(eqe->event.comm_channel_arm.bit_vec));
			queue_work(priv->mfunc.master.comm_wq,
				   &priv->mfunc.master.comm_work);
			break;
		case MLX4_EVENT_TYPE_FLR_EVENT:
			flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
			if (!mlx4_is_master(dev)) {
				mlx4_warn(dev, "Non-master function received FLR event\n");
				break;
			}
			mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
		case MLX4_EVENT_TYPE_FATAL_WARNING:
			if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
				if (mlx4_is_master(dev))
					for (i = 0; i < dev->num_slaves; i++) {
						mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
							 __func__, i);
						if (i == dev->caps.function)
							continue;
						mlx4_slave_event(dev, i, eqe);
					}
				mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
					 be16_to_cpu(eqe->event.warming.warning_threshold),
					 be16_to_cpu(eqe->event.warming.current_temperature));
			} else
				mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
					  eqe->type, eqe->subtype, eq->eqn,
					  eq->cons_index, eqe->owner, eq->nent,
					  eqe->slave_id,
					  !!(eqe->owner & 0x80) ^
					  !!(eq->cons_index & eq->nent) ? "HW" : "SW");

			break;
		case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
			mlx4_dispatch_event(
				dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, eqe);
			break;
		case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT:
			switch (eqe->subtype) {
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE:
				mlx4_warn(dev, "Bad cable detected on port %u\n",
					  eqe->event.bad_cable.port);
				break;
			case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE:
				mlx4_warn(dev, "Unsupported cable detected\n");
				break;
			default:
				mlx4_dbg(dev, "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n",
					 eqe->type, eqe->subtype, eq->eqn,
					 eq->cons_index, eqe->owner, eq->nent,
					 !!(eqe->owner & 0x80) ^
					 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
				break;
			}
			break;
		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
				  eqe->type, eqe->subtype, eq->eqn,
				  eq->cons_index, eqe->owner, eq->nent,
				  eqe->slave_id,
				  !!(eqe->owner & 0x80) ^
				  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
			break;
		}
++eq->cons_index;
eqes_found = 1;
++set_ci;
		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
}
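	/* Final doorbell: publish the consumer index and, with req_not
	 * set, re-arm the EQ so the HCA raises an interrupt for the
	 * next event.
	 */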
eq_set_ci(eq, 1);
return eqes_found;
}
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
{
	/*
	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
	 * we need to map, take the difference of highest index and
	 * the lowest index we'll use and add 1.
	 */
	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
		dev->caps.reserved_eqs / 4 + 1;
}
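/* For instance (a hypothetical configuration, just evaluating the
 * formula above): with 24 reserved EQs and 3 completion vectors,
 * (3 + 1 + 24) / 4 - 24 / 4 + 1 = 7 - 6 + 1 = 2 UARs must be mapped.
 */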
	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
		if (priv->eq_table.uar_map[i]) {
iounmap(priv->eq_table.uar_map[i]);
priv->eq_table.uar_map[i] = NULL;
}
}
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
			  u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev  = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE;

	eq->page_list = kmalloc_array(npages, sizeof(*eq->page_list),
				      GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc_array(npages, sizeof(*dma_list), GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
							  pdev->dev,
							  PAGE_SIZE, &t,
							  GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;
err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);
static void mlx4_free_eq(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;
	/* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with
	 * strides of 64B, 128B and 256B.
	 */
	int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE;

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
				  eq->page_list[i].buf,
				  eq->page_list[i].map);
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	/* arm ASYNC eq */
	eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
err_out_unmap:
	while (i > 0)
		mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
	for (i = 1; i <= dev->caps.num_ports; i++) {
		if (mlx4_priv(dev)->port[i].rmap) {
			free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
			mlx4_priv(dev)->port[i].rmap = NULL;
		}
	}
#endif
mlx4_free_irqs(dev);
err_out_clr_int:
	if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
/* A test that verifies that we can accept interrupts
 * on the vector allocated for asynchronous events
 */
int mlx4_test_async(struct mlx4_dev *dev)
{
	return mlx4_NOP(dev);
}
EXPORT_SYMBOL(mlx4_test_async);
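/* A minimal usage sketch (hypothetical caller, not part of this file):
 * a consumer driver could run this as a self-test, e.g.
 *
 *	if (mlx4_test_async(mdev->dev))
 *		mlx4_err(mdev, "Failed async EQ self-test\n");
 */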
/* A test that verifies that we can accept interrupts
 * on the given irq vector of the tested port.
 * Interrupts are checked using the NOP command.
 */
int mlx4_test_interrupt(struct mlx4_dev *dev, int vector)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* Temporarily use polling for command completions */
	mlx4_cmd_use_polling(dev);
/* Map the new eq to handle all asynchronous events */
	err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
			  priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn);
	if (err) {
		mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
		goto out;
}
/* Go back to using events */
mlx4_cmd_use_events(dev);
err = mlx4_NOP(dev);
	/* Once an EQ has been allocated, we don't release it, because it
	 * might be bound to cpu_rmap.
	 */
mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);