/* Deselect all cards: "select" no card at all (NULL card => RCA 0). */
int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune raise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependant.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	/*
	 * NOTE(review): this body looks like an extraction merge of several
	 * functions (mmc_go_idle(), a CMD1 busy poll and a raw CXD read):
	 * 'err', 'busy', 'ocr' and 'cxd' are used without being declared,
	 * and 'cmd' is accessed both as a struct ('cmd.resp') and through a
	 * pointer ('cmd->resp'). Code left byte-identical; reconstruct
	 * against the original sources before compiling.
	 */

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode. Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	err = __mmc_go_idle(host);	/* NOTE(review): 'err' is undeclared here */

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	/* NOTE(review): 'cmd' is passed by value where '&cmd' is expected. */
	err = mmc_wait_for_cmd(host, cmd, 0);
	if (err)
		return err;

	/* Report "not busy" as soon as the card has left its busy/idle state. */
	if (mmc_host_is_spi(host)) {
		if (!(cmd->resp[0] & R1_SPI_IDLE)) {
			*busy = false;
			return 0;
		}
	} else {
		if (cmd->resp[0] & MMC_CARD_BUSY) {
			*busy = false;
			return 0;
		}
	}

	*busy = true;

	/*
	 * According to eMMC specification v5.1 section 6.4.3, we
	 * should issue CMD1 repeatedly in the idle state until
	 * the eMMC is ready. Otherwise some eMMC devices seem to enter
	 * the inactive mode after mmc_init_card() issued CMD0 when
	 * the eMMC device is busy.
	 */
	if (!ocr && !mmc_host_is_spi(host))
		cmd->arg = cmd->resp[0] | BIT(30);

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	/* Copy the four 32-bit response words (raw CSD/CID) to the caller. */
	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}
/*
 * NOTE: void *buf, caller for the buf is required to use DMA-capable
 * buffer or on-stack buffer (with some overhead in callee).
 */
int mmc_send_adtc_data(struct mmc_card *card, struct mmc_host *host, u32 opcode,
		u32 args, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = args;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID. Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	/*
	 * NOTE(review): the mmc_data buffer setup (blksz/blocks/flags and
	 * the sg_init_one()/data.sg hookup using 'buf', 'len' and 'sg')
	 * appears to have been lost in extraction — 'data' has no buffer
	 * attached when the request is issued below.
	 */
	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	/*
	 * NOTE(review): truncated — the final 'return 0;' and the closing
	 * brace are missing; the next definition starts immediately after.
	 */
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	/* Reject NULL arguments before touching anything. */
	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_card_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;
	/*
	 * NOTE(review): truncated — the actual EXT_CSD read, its error
	 * unwinding (kfree on failure), the store to *new_ext_csd, the
	 * final return and the closing brace are missing from this chunk;
	 * an unrelated busy-poll fragment follows instead.
	 */
/*
 * NOTE(review): orphaned fragment of a busy-polling loop (looks like
 * __mmc_poll_for_busy()): 'timeout', 'timeout_ms', 'expired', 'busy',
 * 'busy_cb', 'cb_data', 'udelay', 'udelay_max' and 'host' are all
 * undeclared here and the enclosing function header is missing.
 */
timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
do {
	/*
	 * Due to the possibility of being preempted while polling,
	 * check the expiration time first.
	 */
	expired = time_after(jiffies, timeout);

	err = (*busy_cb)(cb_data, &busy);
	if (err)
		return err;

	/* Timeout if the device still remains busy. */
	if (expired && busy) {
		pr_err("%s: Card stuck being busy! %s\n",
			mmc_hostname(host), __func__);
		return -ETIMEDOUT;
	}

	/* Throttle the polling rate to avoid hogging the CPU. */
	if (busy) {
		usleep_range(udelay, udelay * 2);
		if (udelay < udelay_max)
			udelay *= 2;
	}
} while (busy);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd, unsignedint timeout_ms)
{ /* * If the max_busy_timeout of the host is specified, make sure it's * enough to fit the used timeout_ms. In case it's not, let's instruct * the host to avoid HW busy detection, by converting to a R1 response * instead of a R1B. Note, some hosts requires R1B, which also means * they are on their own when it comes to deal with the busy timeout.
*/ if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
(timeout_ms > host->max_busy_timeout)) {
cmd->flags = MMC_CMD_AC | MMC_RSP_SPI_R1 | MMC_RSP_R1; returnfalse;
}
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *                   timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry when CRC errors when polling with CMD13 for busy
 * @retries: number of retries
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, unsignedint timeout_ms, unsignedchar timing, bool send_status, bool retry_crc_err, unsignedint retries)
{
	/*
	 * NOTE(review): 'unsignedint'/'unsignedchar' are fused tokens
	 * (missing space) in the signature and locals — left byte-identical
	 * per review policy; must be fixed before this compiles.
	 */
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp;	/* NOTE(review): never assigned in this chunk */
	unsignedchar old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/* Fall back to the card's generic CMD6 timeout when none was given. */
	if (!timeout_ms) {
		pr_warn("%s: unspecified timeout for CMD6 - use generic\n",
			mmc_hostname(host));
		timeout_ms = card->ext_csd.generic_cmd6_time;
	}

	/*
	 * NOTE(review): the CMD6 setup (cmd.opcode = MMC_SWITCH, cmd.arg
	 * built from @set/@index/@value, and the busy-cmd preparation that
	 * assigns use_r1b_resp) appears to be missing before the command
	 * is issued below.
	 */
	err = mmc_wait_for_cmd(host, &cmd, retries);
	if (err)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
			mmc_host_is_spi(host))
		goto out_tim;

	/*
	 * If the host doesn't support HW polling via the ->card_busy() ops and
	 * when it's not allowed to poll by using CMD13, then we need to rely on
	 * waiting the stated timeout to be sufficient.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		goto out_tim;
	}

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, retry_crc_err, MMC_BUSY_CMD6);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before check switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card, true);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);
	/*
	 * NOTE(review): truncated — the final 'return err;' and the closing
	 * brace are missing; unrelated tuning code follows this line.
	 */
/*
 * NOTE(review): this span starts mid-function and appears to merge pieces
 * of mmc_send_tuning()/mmc_abort_tuning() (the 150 ms tuning timeout and
 * the MMC_SEND_TUNING_BLOCK_HS200 check) into the tail of
 * mmc_send_bus_test(): 'data', 'cmd', 'mrq', 'opcode', 'len', 'data_buf',
 * 'test_buf', 'sg', 'i', 'err', 'card' and 'host' are all undeclared here.
 */
/*
 * According to the tuning specs, Tuning process
 * is normally shorter 40 executions of CMD19,
 * and timeout value should be shorter than 150 ms
 */
data.timeout_ns = 150 * NSEC_PER_MSEC;

/*
 * eMMC specification specifies that CMD12 can be used to stop a tuning
 * command, but SD specification does not, so do nothing unless it is
 * eMMC.
 */
if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
	return 0;

/* dma onto stack is unsafe/nonportable, but callers to this
 * routine normally provide temporary on-stack buffers ...
 */
data_buf = kmalloc(len, GFP_KERNEL);
if (!data_buf)
	return -ENOMEM;

/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
 * rely on callers to never use this with "native" calls for reading
 * CSD or CID. Native versions of those commands use the R2 type,
 * not R1 plus a data block.
 */
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

data.sg = &sg;
data.sg_len = 1;
mmc_set_data_timeout(&data, card);
sg_init_one(&sg, data_buf, len);

mmc_wait_for_req(host, &mrq);

/* For BUS_TEST_R the card returns the bit-inverted test pattern. */
err = 0;
if (opcode == MMC_BUS_TEST_R) {
	for (i = 0; i < len / 4; i++)
		if ((test_buf[i] ^ data_buf[i]) != 0xff) {
			err = -EIO;
			break;
		}
}
kfree(data_buf);

if (cmd.error)
	return cmd.error;
if (data.error)
	return data.error;

return err;
}
/*
 * mmc_bus_test - verify the data bus at a given bus width
 * @card: card under test
 * @bus_width: MMC_BUS_WIDTH_* value to exercise
 *
 * Issues a BUS_TEST_W/BUS_TEST_R pair at the requested width and returns
 * 0 on success, -EINVAL for an unknown width, or the BUS_TEST_R error.
 *
 * Fix vs. the reviewed text: the fused 'elseif' tokens did not compile.
 */
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
/*
 * NOTE(review): orphaned tail of a busy-handling helper (looks like
 * mmc_send_hpi_cmd()): 'host', 'use_r1b_resp', 'card' and
 * 'busy_timeout_ms' are undeclared and the function header is missing.
 */
/* No need to poll when using HW busy detection. */
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY && use_r1b_resp)
	return 0;

/* Let's poll to find out when the HPI request completes. */
return mmc_poll_for_busy(card, busy_timeout_ms, false, MMC_BUSY_HPI);
}
/**
 * mmc_interrupt_hpi - Issue for High priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issued High Priority Interrupt, and check for card status
 * until out-of prg-state.
 *
 * Fixes vs. the reviewed text: the fused 'staticint' token did not
 * compile, and the body jumped to an 'out' label that did not exist —
 * the HPI send, the label and the return were lost in extraction.
 */
static int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	/* Card is in PRG state: actually issue the HPI command. */
	err = mmc_send_hpi_cmd(card);
out:
	return err;
}
/**
 * mmc_run_bkops - Run BKOPS for supported cards
 * @card: MMC card to run BKOPS for
 *
 * Run background operations synchronously for cards having manual BKOPS
 * enabled and in case it reports urgent BKOPS level.
 *
 * Fixes vs. the reviewed text: the fused 'elseif' token did not compile,
 * and the function was never closed — mmc_retune_hold() was taken without
 * a matching mmc_retune_release(), which would pin retuning forever.
 */
void mmc_run_bkops(struct mmc_card *card)
{
	int err;

	if (!card->ext_csd.man_bkops_en)
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status ||
	    card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2)
		return;

	mmc_retune_hold(card->host);

	/*
	 * For urgent BKOPS status, LEVEL_2 and higher, let's execute
	 * synchronously. Future wise, we may consider to start BKOPS, for less
	 * urgent levels by using an asynchronous background task, when idle.
	 */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS);
	/*
	 * If the BKOPS timed out, the card is probably still busy in the
	 * R1_STATE_PRG. Rather than continue to wait, let's try to abort
	 * it with a HPI command to get back into R1_STATE_TRAN.
	 */
	if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
		pr_warn("%s: BKOPS aborted\n", mmc_hostname(card->host));
	else if (err)
		pr_warn("%s: Error %d running bkops\n",
			mmc_hostname(card->host), err);

	/* Balance the mmc_retune_hold() above. */
	mmc_retune_release(card->host);
}
/*
 * NOTE(review): orphaned fragment — 'err' and 'host' are undeclared at
 * this point and the "Sanitize aborted" message suggests this belongs to
 * mmc_sanitize(), whose surrounding body is missing from this chunk.
 */
/*
 * If the sanitize operation timed out, the card is probably still busy
 * in the R1_STATE_PRG. Rather than continue to wait, let's try to abort
 * it with a HPI command to get back into R1_STATE_TRAN.
 */
if (err == -ETIMEDOUT && !mmc_interrupt_hpi(card))
	pr_warn("%s: Sanitize aborted\n", mmc_hostname(host));
/*
 * NOTE(review): extraction residue — a German website disclaimer, not part
 * of this source file. English translation kept for the record:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */