/* A reboot/assertion causes the MCDI status word to be set after the
 * command word is set or a REBOOT event is sent. If we notice a reboot
 * via these mechanisms then wait 250ms for the status word to be set.
 */
#define MCDI_STATUS_DELAY_US		100
#define MCDI_STATUS_DELAY_COUNT		2500
#define MCDI_STATUS_SLEEP_MS						\
	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx); if (rc) goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver * is loaded. We should do this before we reset the NIC.
*/
rc = efx_mcdi_drv_attach(efx, true, &already_attached); if (rc) {
pci_err(efx->pci_dev, "Unable to register driver with MCPU\n"); goto fail2;
} if (already_attached) /* Not a fatal error */
pci_err(efx->pci_dev, "Host already registered with MCPU\n");
if (efx->mcdi->fn_flags &
(1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
efx->primary = efx;
#ifdef CONFIG_SFC_MCDI_LOGGING if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) { int bytes = 0; int i; /* Lengths should always be a whole number of dwords, so scream * if they're not.
*/
WARN_ON_ONCE(hdr_len % 4);
WARN_ON_ONCE(inlen % 4);
/* We own the logging buffer, as only one MCDI can be in * progress on a NIC at any one time. So no need for locking.
*/ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, " %08x",
le32_to_cpu(hdr[i].u32[0]));
for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, " %08x",
le32_to_cpu(inbuf[i].u32[0]));
#ifdef CONFIG_SFC_MCDI_LOGGING if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
size_t hdr_len, data_len; int bytes = 0; int i;
WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
hdr_len = mcdi->resp_hdr_len / 4; /* MCDI_DECLARE_BUF ensures that underlying buffer is padded * to dword size, and the MCDI buffer is always dword size
*/
data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
/* We own the logging buffer, as only one MCDI can be in * progress on a NIC at any one time. So no need for locking.
*/ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, " %08x", le32_to_cpu(hdr.u32[0]));
}
for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
efx->type->mcdi_read_response(efx, &hdr,
mcdi->resp_hdr_len + (i * 4), 4);
bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes, " %08x", le32_to_cpu(hdr.u32[0]));
}
/* Poll shared memory until the current MCDI request completes.
 *
 * Returns 0 on completion (an MC reboot is also treated as completion,
 * with the error recorded in mcdi->resprc), or -ETIMEDOUT if the MC
 * does not respond within MCDI_RPC_TIMEOUT.
 */
static int efx_mcdi_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
	unsigned long time, finish;
	unsigned int spins;
	int rc;

	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
	rc = efx_mcdi_poll_reboot(efx);
	if (rc) {
		spin_lock_bh(&mcdi->iface_lock);
		mcdi->resprc = rc;
		mcdi->resp_hdr_len = 0;
		mcdi->resp_data_len = 0;
		spin_unlock_bh(&mcdi->iface_lock);
		return 0;
	}

	/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
	 * because generally mcdi responses are fast. After that, back off
	 * and poll once a jiffy (approximately)
	 */
	spins = USER_TICK_USEC;
	finish = jiffies + MCDI_RPC_TIMEOUT;

	while (1) {
		if (spins != 0) {
			--spins;
			udelay(1);
		} else {
			schedule_timeout_uninterruptible(1);
		}

		time = jiffies;

		if (efx_mcdi_poll_once(efx))
			break;

		if (time_after(time, finish))
			return -ETIMEDOUT;
	}

	/* Return rc=0 like wait_event_timeout() */
	return 0;
}
/* Test and clear MC-rebooted flag for this port/function; reset * software state as necessary.
*/ int efx_mcdi_poll_reboot(struct efx_nic *efx)
{ if (!efx->mcdi) return 0;
/* Acquire the MCDI interface for a synchronous request: wait until the
 * interface becomes QUIESCENT and we win the race (via cmpxchg) to mark
 * it RUNNING_SYNC.
 */
static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
	wait_event(mcdi->wq,
		   cmpxchg(&mcdi->state,
			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
		   MCDI_STATE_QUIESCENT);
}
if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
MCDI_RPC_TIMEOUT) == 0) return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions. * In which case, poll for completions directly. If efx_mcdi_ev_cpl() * completed the request first, then we'll just end up completing the * request again, which is safe. * * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which * wait_event_timeout() implicitly provides.
*/ if (mcdi->mode == MCDI_MODE_POLL) return efx_mcdi_poll(efx);
return 0;
}
/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the * requester. Return whether this was done. Does not take any locks.
*/ staticbool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{ if (cmpxchg(&mcdi->state,
MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
MCDI_STATE_RUNNING_SYNC) {
wake_up(&mcdi->wq); returntrue;
}
/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the * asynchronous completion function, and release the interface. * Return whether this was done. Must be called in bh-disabled * context. Will take iface_lock and async_lock.
*/ staticbool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
{ struct efx_nic *efx = mcdi->efx; struct efx_mcdi_async_param *async;
size_t hdr_len, data_len, err_len;
efx_dword_t *outbuf;
MCDI_DECLARE_BUF_ERR(errbuf); int rc;
if (cmpxchg(&mcdi->state,
MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
MCDI_STATE_RUNNING_ASYNC) returnfalse;
spin_lock(&mcdi->iface_lock); if (timeout) { /* Ensure that if the completion event arrives later, * the seqno check in efx_mcdi_ev_cpl() will fail
*/
++mcdi->seqno;
++mcdi->credits;
rc = -ETIMEDOUT;
hdr_len = 0;
data_len = 0;
} else {
rc = mcdi->resprc;
hdr_len = mcdi->resp_hdr_len;
data_len = mcdi->resp_data_len;
}
spin_unlock(&mcdi->iface_lock);
/* Stop the timer. In case the timer function is running, we * must wait for it to return so that there is no possibility * of it aborting the next request.
*/ if (!timeout)
timer_delete_sync(&mcdi->async_timer);
if ((seqno ^ mcdi->seqno) & SEQ_MASK) { if (mcdi->credits) /* The request has been cancelled */
--mcdi->credits; else
netif_err(efx, hw, efx->net_dev, "MC response mismatch tx seq 0x%x rx " "seq 0x%x\n", seqno, mcdi->seqno);
} else { if (efx->type->mcdi_max_ver >= 2) { /* MCDI v2 responses don't fit in an event */
efx_mcdi_read_response_header(efx);
} else {
mcdi->resprc = efx_mcdi_errno(mcdi_err);
mcdi->resp_hdr_len = 4;
mcdi->resp_data_len = datalen;
}
wake = true;
}
spin_unlock(&mcdi->iface_lock);
if (wake) { if (!efx_mcdi_complete_async(mcdi, false))
(void) efx_mcdi_complete_sync(mcdi);
/* If the interface isn't RUNNING_ASYNC or * RUNNING_SYNC then we've received a duplicate * completion after we've already transitioned back to * QUIESCENT. [A subsequent invocation would increment * seqno, so would have failed the seqno check].
*/
}
}
if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
netif_err(efx, hw, efx->net_dev, "MCDI request was completed without an event\n");
rc = 0;
}
efx_mcdi_abandon(efx);
/* Close the race with efx_mcdi_ev_cpl() executing just too late * and completing a request we've just cancelled, by ensuring * that the seqno check therein fails.
*/
spin_lock_bh(&mcdi->iface_lock);
++mcdi->seqno;
++mcdi->credits;
spin_unlock_bh(&mcdi->iface_lock);
}
if (proxy_handle)
*proxy_handle = 0;
if (rc != 0) { if (outlen_actual)
*outlen_actual = 0;
} else {
size_t hdr_len, data_len, err_len;
/* At the very least we need a memory barrier here to ensure * we pick up changes from efx_mcdi_ev_cpl(). Protect against * a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
rc = mcdi->resprc; if (raw_rc)
*raw_rc = mcdi->resprc_raw;
hdr_len = mcdi->resp_hdr_len;
data_len = mcdi->resp_data_len;
err_len = min(sizeof(errbuf), data_len);
spin_unlock_bh(&mcdi->iface_lock);
BUG_ON(rc > 0);
efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(outlen, data_len)); if (outlen_actual)
*outlen_actual = data_len;
mcdi->proxy_rx_status = efx_mcdi_errno(status); /* Ensure the status is written before we update the handle, since the * latter is used to check if we've finished.
*/
wmb();
mcdi->proxy_rx_handle = handle;
wake_up(&mcdi->proxy_rx_wq);
}
if (proxy_handle) { /* Handle proxy authorisation. This allows approval of MCDI * operations to be delegated to the admin function, allowing * fine control over (eg) multicast subscriptions.
*/ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
efx->type->is_vf) { /* If the EVB port isn't available within a VF this may * mean the PF is still bringing the switch up. We should * retry our request shortly.
*/ unsignedlong abort_time = jiffies + MCDI_RPC_TIMEOUT; unsignedint delay_us = 10000;
netif_dbg(efx, hw, efx->net_dev, "%s: NO_EVB_PORT; will retry request\n",
__func__);
/**
 * efx_mcdi_rpc - Issue an MCDI command and wait for completion
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes.  Must be a multiple
 *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
 * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
 * @outlen: Length of response buffer, in bytes.  If the actual
 *	response is longer than @outlen & ~3, it will be truncated
 *	to that length.
 * @outlen_actual: Pointer through which to return the actual response
 *	length.  May be %NULL if this is not needed.
 *
 * This function may sleep and therefore must be called in an appropriate
 * context.
 *
 * Return: A negative error code, or zero if successful.  The error
 *	code may come from the MCDI response or may indicate a failure
 *	to communicate with the MC.  In the former case, the response
 *	will still be copied to @outbuf and *@outlen_actual will be
 *	set accordingly.  In the latter case, *@outlen_actual will be
 *	set to zero.
 */
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
		 const efx_dword_t *inbuf, size_t inlen,
		 efx_dword_t *outbuf, size_t outlen,
		 size_t *outlen_actual)
{
	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
				       outlen_actual, false);
}
/* Normally, on receiving an error code in the MCDI response, * efx_mcdi_rpc will log an error message containing (among other * things) the raw error code, by means of efx_mcdi_display_error. * This _quiet version suppresses that; if the caller wishes to log * the error conditionally on the return code, it should call this * function and is then responsible for calling efx_mcdi_display_error * as needed.
*/ int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{ return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
outlen_actual, true);
}
int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf, size_t inlen)
{ struct efx_mcdi_iface *mcdi = efx_mcdi(efx); int rc;
rc = efx_mcdi_check_supported(efx, cmd, inlen); if (rc) return rc;
if (efx->mc_bist_for_other_fn) return -ENETDOWN;
if (mcdi->mode == MCDI_MODE_FAIL) return -ENETDOWN;
if (mcdi->mode == MCDI_MODE_EVENTS) {
list_add_tail(&async->list, &mcdi->async_list);
/* If this is at the front of the queue, try to start it * immediately
*/ if (mcdi->async_list.next == &async->list &&
efx_mcdi_acquire_async(mcdi)) {
efx_mcdi_send_request(efx, cmd, inbuf, inlen);
mod_timer(&mcdi->async_timer,
jiffies + MCDI_RPC_TIMEOUT);
}
} else {
kfree(async);
rc = -ENETDOWN;
}
spin_unlock_bh(&mcdi->async_lock);
return rc;
}
/**
 * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
 * @efx: NIC through which to issue the command
 * @cmd: Command type number
 * @inbuf: Command parameters
 * @inlen: Length of command parameters, in bytes
 * @outlen: Length to allocate for response buffer, in bytes
 * @complete: Function to be called on completion or cancellation.
 * @cookie: Arbitrary value to be passed to @complete.
 *
 * This function does not sleep and therefore may be called in atomic
 * context.  It will fail if event queues are disabled or if MCDI
 * event completions have been disabled due to an error.
 *
 * If it succeeds, the @complete function will be called exactly once
 * in atomic context, when one of the following occurs:
 * (a) the completion event is received (in NAPI context)
 * (b) event queues are disabled (in the process that disables them)
 * (c) the request times-out (in timer context)
 */
int
efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
		   efx_mcdi_async_completer *complete, unsigned long cookie)
{
	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
				   cookie, false);
}
/* Switch to polled MCDI completions.  This can be called in various
 * error conditions with various locks held, so it must be lockless.
 * Caller is responsible for flushing asynchronous requests later.
 */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);
	/* If already in polling mode, nothing to do.
	 * If in fail-fast state, don't switch to polled completion.
	 * FLR recovery will do that later.
	 */
	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
		return;

	/* We can switch from event completion to polled completion, because
	 * mcdi requests are always completed in shared memory. We do this by
	 * switching the mode to POLL'd then completing the request.
	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
	 *
	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
	 * which efx_mcdi_complete_sync() provides for us.
	 */
	mcdi->mode = MCDI_MODE_POLL;

	efx_mcdi_complete_sync(mcdi);
}
/* Flush any running or queued asynchronous requests, after event processing
 * is stopped
 */
void efx_mcdi_flush_async(struct efx_nic *efx)
{
	struct efx_mcdi_async_param *async, *next;
	struct efx_mcdi_iface *mcdi;

	if (!efx->mcdi)
		return;

	mcdi = efx_mcdi(efx);

	/* We must be in poll or fail mode so no more requests can be queued */
	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);

	timer_delete_sync(&mcdi->async_timer);

	/* If a request is still running, make sure we give the MC
	 * time to complete it so that the response won't overwrite our
	 * next request.
	 */
	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
		efx_mcdi_poll(efx);
		mcdi->state = MCDI_STATE_QUIESCENT;
	}

	/* Nothing else will access the async list now, so it is safe
	 * to walk it without holding async_lock.  If we hold it while
	 * calling a completer then lockdep may warn that we have
	 * acquired locks in the wrong order.
	 */
	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
		if (async->complete)
			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
		list_del(&async->list);
		kfree(async);
	}
}
mcdi = efx_mcdi(efx); /* If already in event completion mode, nothing to do. * If in fail-fast state, don't switch to event completion. FLR * recovery will do that later.
*/ if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL) return;
/* We can't switch from polled to event completion in the middle of a * request, because the completion method is specified in the request. * So acquire the interface to serialise the requestors. We don't need * to acquire the iface_lock to change the mode here, but we do need a * write memory barrier ensure that efx_mcdi_rpc() sees it, which * efx_mcdi_acquire() provides.
*/
efx_mcdi_acquire_sync(mcdi);
mcdi->mode = MCDI_MODE_EVENTS;
efx_mcdi_release(mcdi);
}
/* If there is an outstanding MCDI request, it has been terminated * either by a BADASSERT or REBOOT event. If the mcdi interface is * in polled mode, then do nothing because the MC reboot handler will * set the header correctly. However, if the mcdi interface is waiting * for a CMDDONE event it won't receive it [and since all MCDI events * are sent to the same queue, we can't be racing with * efx_mcdi_ev_cpl()] * * If there is an outstanding asynchronous request, we can't * complete it now (efx_mcdi_complete() would deadlock). The * reset process will take care of this. * * There's a race here with efx_mcdi_send_request(), because * we might receive a REBOOT event *before* the request has * been copied out. In polled mode (during startup) this is * irrelevant, because efx_mcdi_complete_sync() is ignored. In * event mode, this condition is just an edge-case of * receiving a REBOOT event after posting the MCDI * request. Did the mc reboot before or after the copyout? The * best we can do always is just return failure. * * If there is an outstanding proxy response expected it is not going * to arrive. We should thus abort it.
*/
spin_lock(&mcdi->iface_lock);
efx_mcdi_proxy_abort(mcdi);
if (efx_mcdi_complete_sync(mcdi)) { if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
mcdi->resp_hdr_len = 0;
mcdi->resp_data_len = 0;
++mcdi->credits;
}
} else { int count;
/* Consume the status word since efx_mcdi_rpc_finish() won't */ for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
rc = efx_mcdi_poll_reboot(efx); if (rc) break;
udelay(MCDI_STATUS_DELAY_US);
}
/* On EF10, a CODE_MC_REBOOT event can be received without the * reboot detection in efx_mcdi_poll_reboot() being triggered. * If zero was returned from the final call to * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the * MC has definitely rebooted so prepare for the reset.
*/ if (!rc && efx->type->mcdi_reboot_detected)
efx->type->mcdi_reboot_detected(efx);
mcdi->new_epoch = true;
/* Nobody was waiting for an MCDI request, so trigger a reset */
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
}
spin_unlock(&mcdi->iface_lock);
}
/* The MC is going down in to BIST mode. set the BIST flag to block * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset * (which doesn't actually execute a reset, it waits for the controlling * function to reset it).
*/ staticvoid efx_mcdi_ev_bist(struct efx_nic *efx)
{ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
 * to recover.
 */
static void efx_mcdi_abandon(struct efx_nic *efx)
{
	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);

	/* xchg makes the transition to MODE_FAIL one-shot even under
	 * concurrent callers; only the first caller schedules the reset.
	 */
	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
		return; /* it had already been done */
	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
}
/* A queue flush has completed: decrement the active-queue count and wake
 * the flush waiter when it reaches zero.
 */
static void efx_handle_drain_event(struct efx_nic *efx)
{
	if (atomic_dec_and_test(&efx->active_queues))
		wake_up(&efx->flush_wq);

	/* Going negative would mean more flush completions than queues */
	WARN_ON(atomic_read(&efx->active_queues) < 0);
}
/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
			    efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);

	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		netif_err(efx, hw, efx->net_dev,
			  "MC watchdog or assertion failure at 0x%x\n", data);
		efx_mcdi_ev_death(efx, -EINTR);
		break;
	case MCDI_EVENT_CODE_PMNOTICE:
		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
		break;
	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(efx,
				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
		break;
	case MCDI_EVENT_CODE_LINKCHANGE:
		efx_mcdi_process_link_change(efx, event);
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		efx_sensor_event(efx, event);
		break;
	case MCDI_EVENT_CODE_SCHEDERR:
		netif_dbg(efx, hw, efx->net_dev,
			  "MC Scheduler alert (0x%x)\n", data);
		break;
	case MCDI_EVENT_CODE_REBOOT:
	case MCDI_EVENT_CODE_MC_REBOOT:
		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
		efx_mcdi_ev_death(efx, -EIO);
		break;
	case MCDI_EVENT_CODE_MC_BIST:
		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
		efx_mcdi_ev_bist(efx);
		break;
	case MCDI_EVENT_CODE_MAC_STATS_DMA:
		/* MAC stats are gather lazily.  We can ignore this. */
		break;
	case MCDI_EVENT_CODE_PTP_FAULT:
	case MCDI_EVENT_CODE_PTP_PPS:
		efx_ptp_event(efx, event);
		break;
	case MCDI_EVENT_CODE_PTP_TIME:
		efx_time_sync_event(channel, event);
		break;
	case MCDI_EVENT_CODE_TX_FLUSH:
	case MCDI_EVENT_CODE_RX_FLUSH:
		/* Two flush events will be sent: one to the same event
		 * queue as completions, and one to event queue 0.
		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
		 * flag will be set, and we should ignore the event
		 * because we want to wait for all completions.
		 */
		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
			efx_handle_drain_event(efx);
		break;
	case MCDI_EVENT_CODE_TX_ERR:
	case MCDI_EVENT_CODE_RX_ERR:
		netif_err(efx, hw, efx->net_dev,
			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
			  EFX_QWORD_VAL(*event));
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		efx_mcdi_ev_proxy_response(efx,
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
		break;
	default:
		netif_err(efx, hw, efx->net_dev,
			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
			  EFX_QWORD_VAL(*event));
	}
}
/************************************************************************** * * Specific request functions * **************************************************************************
*/
if (efx->type->print_additional_fwver)
offset += efx->type->print_additional_fwver(efx, buf + offset,
len - offset);
/* It's theoretically possible for the string to exceed 31 * characters, though in practice the first three version * components are short enough that this doesn't happen.
*/ if (WARN_ON(offset >= len))
buf[0] = 0;
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen); /* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID * specified will fail with EPERM, and we have to tell the MC we don't * care what firmware we get.
*/ if (rc == -EPERM) {
pci_dbg(efx->pci_dev, "%s with fw-variant setting failed EPERM, trying without it\n",
__func__);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
MC_CMD_FW_DONT_CARE);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
&outlen);
} if (rc) {
efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
outbuf, outlen, rc); goto fail;
} if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
rc = -EIO; goto fail;
}
if (driver_operating) { if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
efx->mcdi->fn_flags =
MCDI_DWORD(outbuf,
DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
} else { /* Synthesise flags for Siena */
efx->mcdi->fn_flags =
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
(efx_port_num(efx) == 0) <<
MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
}
}
/* We currently assume we have control of the external link * and are completely trusted by firmware. Abort probing * if that's not true for this function.
*/
if (was_attached != NULL)
*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); return 0;
/* This function finds types using the new NVRAM_PARTITIONS mcdi. */ staticint efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
u32 *nvram_types)
{
efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
GFP_KERNEL);
size_t outlen; int rc;
switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { case MC_CMD_NVRAM_TEST_PASS: case MC_CMD_NVRAM_TEST_NOTSUPP: return 0; default: return -EIO;
}
}
/* This function tests nvram partitions using the new mcdi partition lookup
 * scheme.  Returns 0 if every testable partition passed, -EAGAIN if no
 * partition was tested, or the first failure/lookup error otherwise.
 */
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
{
	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
				   GFP_KERNEL);
	unsigned int number;
	int rc, i;

	if (!nvram_types)
		return -ENOMEM;

	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
	if (rc)
		goto fail;

	/* Require at least one check */
	rc = -EAGAIN;

	for (i = 0; i < number; i++) {
		/* The partition map and dynamic config are not testable */
		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
			continue;

		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
		if (rc)
			goto fail;
	}

fail:
	kfree(nvram_types);
	return rc;
}
int efx_mcdi_nvram_test_all(struct efx_nic *efx)
{
u32 nvram_types; unsignedint type; int rc;
rc = efx_mcdi_nvram_types(efx, &nvram_types); if (rc) goto fail1;
type = 0; while (nvram_types != 0) { if (nvram_types & 1) {
rc = efx_mcdi_nvram_test(efx, type); if (rc) goto fail2;
}
type++;
nvram_types >>= 1;
}
/* Returns 1 if an assertion was read, 0 if no assertion had fired,
 * negative on error.
 */
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
	unsigned int flags, index;
	/* NOTE(review): 'reason' is declared but never used in this extract;
	 * the assertion-reason report that should consume it appears to have
	 * been lost -- confirm against the upstream source.
	 */
	const char *reason;
	size_t outlen;
	int retry;
	int rc;

	/* Attempt to read any stored assertion state before we reboot
	 * the mcfw out of the assertion handler. Retry twice, once
	 * because a boot-time assertion might cause this command to fail
	 * with EINTR. And once again because GET_ASSERTS can race with
	 * MC_CMD_REBOOT running on the other port.
	 */
	retry = 2;
	do {
		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
					outbuf, sizeof(outbuf), &outlen);
		if (rc == -EPERM)
			return 0;
	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

	if (rc) {
		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
				       outlen, rc);
		return rc;
	}
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -EIO;

	/* Print out any recorded assertion state */
	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;

	/* Print out the registers */
	for (index = 0;
	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	     index++)
		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
			  1 + index,
			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
					   index));

	return 1;
}
/* Reboot the MC out of its assertion handler.  Returns 0 on success
 * (including the expected -EIO from the reboot itself) or a negative
 * error code.
 */
static int efx_mcdi_exit_assertion(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	/* If the MC is running debug firmware, it might now be
	 * waiting for a debugger to attach, but we just want it to
	 * reboot.  We set a flag that makes the command a no-op if it
	 * has already done so.
	 * The MCDI will thus return either 0 or -EIO.
	 */
	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
				NULL, 0, NULL);
	if (rc == -EIO)
		rc = 0;
	if (rc)
		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
				       NULL, 0, rc);
	return rc;
}
/* Read and clear any stored MC assertion, then reboot the MC out of its
 * assertion handler if one had fired.  Returns 0 if no assertion fired,
 * negative on error.
 */
int efx_mcdi_handle_assertion(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_read_assertion(efx);
	if (rc <= 0)
		return rc;

	return efx_mcdi_exit_assertion(efx);
}
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
/* Reboot the MC.  A successful reboot kills the MCDI connection, so the
 * expected "success" result is -EIO; a clean 0 return means the reboot
 * did not actually happen.
 */
static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
	int rc;

	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	/* White is black, and up is down */
	if (rc == -EIO)
		return 0;
	if (rc == 0)
		rc = -EIO;
	return rc;
}
if (impl_out)
*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
if (enabled_out)
*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
return 0;
fail: /* Older firmware lacks GET_WORKAROUNDS and this isn't especially * terrifying. The call site will have to deal with it though.
*/
netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err, "%s: failed rc=%d\n", __func__, rc); return rc;
}
/* Failure to read a privilege mask is never fatal, because we can always * carry on as though we didn't have the privilege we were interested in. * So use efx_mcdi_rpc_quiet().
*/ int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
{
MCDI_DECLARE_BUF(fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
MCDI_DECLARE_BUF(pm_inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
MCDI_DECLARE_BUF(pm_outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
size_t outlen;
u16 pf, vf; int rc;
if (!efx || !mask) return -EINVAL;
/* Get our function number */
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
fi_outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN,
&outlen); if (rc != 0) return rc; if (outlen < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) return -EIO;
/* Old firmware doesn't support background update finish and abort * operations. Fallback to waiting if the requested mode is not * supported.
*/ if (!efx_has_cap(efx, NVRAM_UPDATE_POLL_VERIFY_RESULT) ||
(!efx_has_cap(efx, NVRAM_UPDATE_ABORT_SUPPORTED) &&
mode == EFX_UPDATE_FINISH_ABORT))
mode = EFX_UPDATE_FINISH_WAIT;
/*
 * NOTE(review): the following German-language website disclaimer is not part
 * of the driver source; it appears to have been appended by whatever tool
 * extracted this file.  It is kept here as a comment (so it cannot break
 * compilation) pending confirmation that it can be removed entirely.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */