/* Is transition required? */ if (ipc->in_d0ix == enable) return 0;
ret = avs_dsp_op(adev, set_d0ix, enable); if (ret) { /* Prevent further d0ix attempts on conscious IPC failure. */ if (ret == -AVS_EIPC)
atomic_inc(&ipc->d0ix_disable_depth);
if (!atomic_read(&ipc->d0ix_disable_depth)) {
cancel_delayed_work_sync(&ipc->d0ix_work); return avs_dsp_set_d0ix(adev, false);
}
return 0;
}
/*
 * Block D0ix (power-gated) entry for the DSP.
 *
 * Calls nest: a disable-depth counter tracks outstanding blockers and only
 * the very first disable actually cancels pending power gating and forces
 * the device back to full power.
 *
 * Returns 0 on success or a negative error code from avs_dsp_set_d0ix().
 */
int avs_dsp_disable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Nested disables only bump the depth; first one does the work. */
	if (atomic_inc_return(&ipc->d0ix_disable_depth) != 1)
		return 0;

	cancel_delayed_work_sync(&ipc->d0ix_work);
	return avs_dsp_set_d0ix(adev, false);
}
/*
 * Re-allow D0ix (power-gated) entry for the DSP.
 *
 * Decrements the disable-depth counter established by
 * avs_dsp_disable_d0ix(); once the last blocker is released, delayed
 * power-gating work is scheduled on the power-efficient workqueue.
 *
 * Always returns 0.
 */
int avs_dsp_enable_d0ix(struct avs_dev *adev)
{
	struct avs_ipc *ipc = adev->ipc;

	/* Other blockers still outstanding - nothing to schedule yet. */
	if (!atomic_dec_and_test(&ipc->d0ix_disable_depth))
		return 0;

	queue_delayed_work(system_power_efficient_wq, &ipc->d0ix_work,
			   msecs_to_jiffies(AVS_D0IX_DELAY_MS));
	return 0;
}
substream = pcm->streams[dir].substream; if (!substream || !substream->runtime) continue;
/* No need for _irq() as we are in nonatomic context. */
snd_pcm_stream_lock(substream);
snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
snd_pcm_stream_unlock(substream);
}
}
}
mutex_unlock(&adev->comp_list_mutex);
/* Avoid deadlock as the exception may be the response to SET_D0IX. */ if (current_work() != &ipc->d0ix_work.work)
cancel_delayed_work_sync(&ipc->d0ix_work);
ipc->in_d0ix = false; /* Re-enabled on recovery completion. */
pm_runtime_disable(adev->dev);
/* Process received notification. */
avs_dsp_op(adev, coredump, msg);
case AVS_NOTIFY_PHRASE_DETECTED:
data_size = sizeof(struct avs_notify_voice_data); break;
case AVS_NOTIFY_RESOURCE_EVENT:
data_size = sizeof(struct avs_notify_res_data); break;
case AVS_NOTIFY_LOG_BUFFER_STATUS: case AVS_NOTIFY_EXCEPTION_CAUGHT: break;
case AVS_NOTIFY_MODULE_EVENT: /* To know the total payload size, header needs to be read first. */
memcpy_fromio(&mod_data, avs_uplink_addr(adev), sizeof(mod_data));
data_size = sizeof(mod_data) + mod_data.data_size; break;
/* * Response may either be solicited - a reply for a request that has * been sent beforehand - or unsolicited (notification).
*/ if (avs_msg_is_reply(header)) { /* Response processing is invoked from IRQ thread. */
spin_lock_irq(&ipc->rx_lock);
avs_dsp_receive_rx(adev, header);
ipc->rx_completed = true;
spin_unlock_irq(&ipc->rx_lock);
} else {
avs_dsp_process_notification(adev, header);
}
/*
 * Wait for the BUSY completion signalling that a reply to an outstanding
 * IPC request has arrived and been processed.
 *
 * A notification may complete ->busy_completion before the actual reply
 * does; in that case the completion is re-armed and the wait is retried
 * (bounded by repeats_left to avoid looping forever).
 *
 * Returns 0 once the reply is fully processed, -EPERM if the DSP stopped
 * being ready (e.g. recovery in progress), -ETIMEDOUT when no reply
 * arrived in time.
 */
static int avs_ipc_wait_busy_completion(struct avs_ipc *ipc, int timeout)
{
	u32 repeats_left = 128; /* to avoid infinite looping */
	int ret;

again:
	ret = wait_for_completion_timeout(&ipc->busy_completion, msecs_to_jiffies(timeout));

	/* DSP could be unresponsive at this point. */
	if (!ipc->ready)
		return -EPERM;

	if (!ret) {
		if (!avs_ipc_is_busy(ipc))
			return -ETIMEDOUT;
		/*
		 * Firmware did its job, either notification or reply
		 * has been received - now wait until it's processed.
		 */
		wait_for_completion_killable(&ipc->busy_completion);
	}

	/* Ongoing notification's bottom-half may cause early wakeup */
	spin_lock(&ipc->rx_lock);
	if (!ipc->rx_completed) {
		if (repeats_left) {
			/* Reply delayed due to notification. */
			repeats_left--;
			reinit_completion(&ipc->busy_completion);
			spin_unlock(&ipc->rx_lock);
			goto again;
		}

		/* Retries exhausted - give up on this reply. */
		spin_unlock(&ipc->rx_lock);
		return -ETIMEDOUT;
	}

	spin_unlock(&ipc->rx_lock);
	return 0;
}
ret = avs_ipc_wait_busy_completion(ipc, timeout); if (ret) { if (ret == -ETIMEDOUT) { union avs_notify_msg msg = AVS_NOTIFICATION(EXCEPTION_CAUGHT);
/* Same treatment as on exception, just stack_dump=0. */
avs_dsp_exception_caught(adev, &msg);
} gotoexit;
}
ret = ipc->rx.rsp.status; /* * If IPC channel is blocked e.g.: due to ongoing recovery, * -EPERM error code is expected and thus it's not an actual error. * * Unsupported IPCs are of no harm either.
*/ if (ret == -EPERM || ret == AVS_IPC_NOT_SUPPORTED)
dev_dbg(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
name, request->glb.primary, request->glb.ext.val, ret); elseif (ret)
dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
name, request->glb.primary, request->glb.ext.val, ret);
if (reply) {
reply->header = ipc->rx.header;
reply->size = ipc->rx.size; if (reply->data && ipc->rx.size)
memcpy(reply->data, ipc->rx.data, reply->size);
}
staticint avs_dsp_do_send_rom_msg(struct avs_dev *adev, struct avs_ipc_msg *request, int timeout, constchar *name)
{ struct avs_ipc *ipc = adev->ipc; int ret;
mutex_lock(&ipc->msg_mutex);
spin_lock(&ipc->rx_lock);
avs_ipc_msg_init(ipc, NULL); /* * with hw still stalled, memory windows may not be * configured properly so avoid accessing SRAM
*/
avs_dsp_send_tx(adev, request, false);
spin_unlock(&ipc->rx_lock);
/* ROM messages must be sent before main core is unstalled */
ret = avs_dsp_op(adev, stall, AVS_MAIN_CORE_MASK, false); if (!ret) {
ret = wait_for_completion_timeout(&ipc->done_completion, msecs_to_jiffies(timeout));
ret = ret ? 0 : -ETIMEDOUT;
} if (ret)
dev_err(adev->dev, "%s (0x%08x 0x%08x) failed: %d\n",
name, request->glb.primary, request->glb.ext.val, ret);
/* * No particular bit setting order. All of these are required * to have a functional SW <-> FW communication.
*/
value = enable ? AVS_ADSP_ADSPIC_IPC : 0;
snd_hdac_adsp_updatel(adev, AVS_ADSP_REG_ADSPIC, AVS_ADSP_ADSPIC_IPC, value);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.