// SPDX-License-Identifier: GPL-2.0 /* * Marvell NAND flash controller driver * * Copyright (C) 2017 Marvell * Author: Miquel RAYNAL <miquel.raynal@free-electrons.com> * * * This NAND controller driver handles two versions of the hardware, * one is called NFCv1 and is available on PXA SoCs and the other is * called NFCv2 and is available on Armada SoCs. * * The main visible difference is that NFCv1 only has Hamming ECC * capabilities, while NFCv2 also embeds a BCH ECC engine. Also, DMA * is not used with NFCv2. * * The ECC layouts are depicted in details in Marvell AN-379, but here * is a brief description. * * When using Hamming, the data is split in 512B chunks (either 1, 2 * or 4) and each chunk will have its own ECC "digest" of 6B at the * beginning of the OOB area and eventually the remaining free OOB * bytes (also called "spare" bytes in the driver). This engine * corrects up to 1 bit per chunk and detects reliably an error if * there are at most 2 bitflips. Here is the page layout used by the * controller when Hamming is chosen: * * +-------------------------------------------------------------+ * | Data 1 | ... | Data N | ECC 1 | ... | ECCN | Free OOB bytes | * +-------------------------------------------------------------+ * * When using the BCH engine, there are N identical (data + free OOB + * ECC) sections and potentially an extra one to deal with * configurations where the chosen (data + free OOB + ECC) sizes do * not align with the page (data + OOB) size. ECC bytes are always * 30B per ECC chunk. Here is the page layout used by the controller * when BCH is chosen: * * +----------------------------------------- * | Data 1 | Free OOB bytes 1 | ECC 1 | ... * +----------------------------------------- * * ------------------------------------------- * ... 
| Data N | Free OOB bytes N | ECC N | * ------------------------------------------- * * --------------------------------------------+ * Last Data | Last Free OOB bytes | Last ECC | * --------------------------------------------+ * * In both cases, the layout seen by the user is always: all data * first, then all free OOB bytes and finally all ECC bytes. With BCH, * ECC bytes are 30B long and are padded with 0xFF to align on 32 * bytes. * * The controller has certain limitations that are handled by the * driver: * - It can only read 2k at a time. To overcome this limitation, the * driver issues data cycles on the bus, without issuing new * CMD + ADDR cycles. The Marvell term is "naked" operations. * - The ECC strength in BCH mode cannot be tuned. It is fixed 16 * bits. What can be tuned is the ECC block size as long as it * stays between 512B and 2kiB. It's usually chosen based on the * chip ECC requirements. For instance, using 2kiB ECC chunks * provides 4b/512B correctability. * - The controller will always treat data bytes, free OOB bytes * and ECC bytes in that order, no matter what the real layout is * (which is usually all data then all OOB bytes). The * marvell_nfc_layouts array below contains the currently * supported layouts. * - Because of these weird layouts, the Bad Block Markers can be * located in data section. In this case, the NAND_BBT_NO_OOB_BBM * option must be set to prevent scanning/writing bad block * markers.
*/
/* Data FIFO granularity, FIFO reads/writes must be a multiple of this length */
#define FIFO_DEPTH		8
/* Number of 32-bit FIFO accesses needed to move x bytes (x must be 4-aligned) */
#define FIFO_REP(x)		((x) / sizeof(u32))
/* With BCH, NDSR.RDDREQ must be re-checked after every 32 bytes drained */
#define BCH_SEQ_READS		(32 / FIFO_DEPTH)
/* NFC does not support transfers of larger chunks at a time */
#define MAX_CHUNK_SIZE		2112
/* NFCv1 cannot read more than 7 bytes of ID */
#define NFCV1_READID_LEN	7
/* Polling is done at a pace of POLL_PERIOD us until POLL_TIMEOUT is reached */
#define POLL_PERIOD		0
#define POLL_TIMEOUT		100000
/* Interrupt maximum wait period in ms */
#define IRQ_TIMEOUT		1000
/* Latency in clock cycles between SoC pins and NFC logic */
#define MIN_RD_DEL_CNT		3
/* Maximum number of contiguous address cycles */
#define MAX_ADDRESS_CYC_NFCV1	5
#define MAX_ADDRESS_CYC_NFCV2	7
/* System control registers/bits to enable the NAND controller on some SoCs */
#define GENCONF_SOC_DEVICE_MUX			0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN		BIT(0)
#define GENCONF_SOC_DEVICE_MUX_ECC_CLK_RST	BIT(20)
#define GENCONF_SOC_DEVICE_MUX_ECC_CORE_RST	BIT(21)
#define GENCONF_SOC_DEVICE_MUX_NFC_INT_EN	BIT(25)
#define GENCONF_SOC_DEVICE_MUX_NFC_DEVBUS_ARB_EN BIT(27)
#define GENCONF_CLK_GATING_CTRL			0x220
#define GENCONF_CLK_GATING_CTRL_ND_GATE		BIT(2)
#define GENCONF_ND_CLK_CTRL			0x700
#define GENCONF_ND_CLK_CTRL_EN			BIT(0)
/** * struct marvell_hw_ecc_layout - layout of Marvell ECC * * Marvell ECC engine works differently than the others, in order to limit the * size of the IP, hardware engineers chose to set a fixed strength at 16 bits * per subpage, and depending on a the desired strength needed by the NAND chip, * a particular layout mixing data/spare/ecc is defined, with a possible last * chunk smaller that the others. * * @writesize: Full page size on which the layout applies * @chunk: Desired ECC chunk size on which the layout applies * @strength: Desired ECC strength (per chunk size bytes) on which the * layout applies * @nchunks: Total number of chunks * @full_chunk_cnt: Number of full-sized chunks, which is the number of * repetitions of the pattern: * (data_bytes + spare_bytes + ecc_bytes). * @data_bytes: Number of data bytes per chunk * @spare_bytes: Number of spare bytes per chunk * @ecc_bytes: Number of ecc bytes per chunk * @last_data_bytes: Number of data bytes in the last chunk * @last_spare_bytes: Number of spare bytes in the last chunk * @last_ecc_bytes: Number of ecc bytes in the last chunk
*/ struct marvell_hw_ecc_layout { /* Constraints */ int writesize; int chunk; int strength; /* Corresponding layout */ int nchunks; int full_chunk_cnt; int data_bytes; int spare_bytes; int ecc_bytes; int last_data_bytes; int last_spare_bytes; int last_ecc_bytes;
};
/**
 * struct marvell_nand_chip_sel - CS line description
 *
 * The Nand Flash Controller has up to 4 CE and 2 RB pins. The CE selection
 * is made by a field in NDCB0 register, and in another field in NDCB2 register.
 * The datasheet describes the logic with an error: ADDR5 field is once
 * declared at the beginning of NDCB2, and another time at its end. Because the
 * ADDR5 field of NDCB2 may be used by other bytes, it would be more logical
 * to use the last bit of this field instead of the first ones.
 *
 * @cs:			Wanted CE lane.
 * @ndcb0_csel:		Value of the NDCB0 register with or without the flag
 *			selecting the wanted CE lane. This is set once when
 *			the Device Tree is probed.
 * @rb:			Ready/Busy pin for the flash chip
 */
struct marvell_nand_chip_sel {
	unsigned int cs;
	u32 ndcb0_csel;
	unsigned int rb;
};
/** * struct marvell_nand_chip - stores NAND chip device related information * * @chip: Base NAND chip structure * @node: Used to store NAND chips into a list * @layout: NAND layout when using hardware ECC * @ndcr: Controller register value for this NAND chip * @ndtr0: Timing registers 0 value for this NAND chip * @ndtr1: Timing registers 1 value for this NAND chip * @addr_cyc: Amount of cycles needed to pass column address * @selected_die: Current active CS * @nsels: Number of CS lines required by the NAND chip * @sels: Array of CS lines descriptions
*/ struct marvell_nand_chip { struct nand_chip chip; struct list_head node; conststruct marvell_hw_ecc_layout *layout;
u32 ndcr;
u32 ndtr0;
u32 ndtr1; int addr_cyc; int selected_die; unsignedint nsels; struct marvell_nand_chip_sel sels[] __counted_by(nsels);
};
/**
 * struct marvell_nfc_caps - NAND controller capabilities for distinction
 *                           between compatible strings
 *
 * @max_cs_nb:		Number of Chip Select lines available
 * @max_rb_nb:		Number of Ready/Busy lines available
 * @need_system_controller: Indicates if the SoC needs to have access to the
 *			system controller (ie. to enable the NAND controller)
 * @legacy_of_bindings:	Indicates if DT parsing must be done using the old
 *			fashion way
 * @is_nfcv2:		NFCv2 has numerous enhancements compared to NFCv1, ie.
 *			BCH error detection and correction algorithm,
 *			NDCB3 register has been added
 * @use_dma:		Use dma for data transfers
 * @max_mode_number:	Maximum timing mode supported by the controller
 */
struct marvell_nfc_caps {
	unsigned int max_cs_nb;
	unsigned int max_rb_nb;
	bool need_system_controller;
	bool legacy_of_bindings;
	bool is_nfcv2;
	bool use_dma;
	unsigned int max_mode_number;
};
/**
 * struct marvell_nfc - stores Marvell NAND controller information
 *
 * @controller:		Base controller structure
 * @dev:		Parent device (used to print error messages)
 * @regs:		NAND controller registers
 * @core_clk:		Core clock
 * @reg_clk:		Registers clock
 * @complete:		Completion object to wait for NAND controller events
 * @assigned_cs:	Bitmask describing already assigned CS lines
 * @chips:		List containing all the NAND chips attached to
 *			this NAND controller
 * @selected_chip:	Currently selected target chip
 * @caps:		NAND controller capabilities for each compatible string
 * @use_dma:		Whether DMA is used
 * @dma_chan:		DMA channel (NFCv1 only)
 * @dma_buf:		32-bit aligned buffer for DMA transfers (NFCv1 only)
 */
struct marvell_nfc {
	struct nand_controller controller;
	struct device *dev;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk *reg_clk;
	struct completion complete;
	unsigned long assigned_cs;
	struct list_head chips;
	struct nand_chip *selected_chip;
	const struct marvell_nfc_caps *caps;

	/*
	 * NOTE(review): the original text was truncated after @caps; the DMA
	 * members below are reconstructed from this struct's own kernel-doc
	 * (@use_dma/@dma_chan/@dma_buf) and from later uses of nfc->use_dma,
	 * nfc->dma_chan and nfc->dma_buf in this file — confirm against the
	 * reference driver.
	 */
	bool use_dma;
	struct dma_chan *dma_chan;
	u8 *dma_buf;
};
/**
 * struct marvell_nfc_timings - NAND controller timings expressed in NAND
 *                              Controller clock cycles
 *
 * @tRP:		ND_nRE pulse width
 * @tRH:		ND_nRE high duration
 * @tWP:		ND_nWE pulse time
 * @tWH:		ND_nWE high duration
 * @tCS:		Enable signal setup time
 * @tCH:		Enable signal hold time
 * @tADL:		Address to write data delay
 * @tAR:		ND_ALE low to ND_nRE low delay
 * @tWHR:		ND_nWE high to ND_nRE low for status read
 * @tRHW:		ND_nRE high duration, read to write delay
 * @tR:			ND_nWE high to ND_nRE low for read
 */
struct marvell_nfc_timings {
	/* NDTR0 fields */
	unsigned int tRP;
	unsigned int tRH;
	unsigned int tWP;
	unsigned int tWH;
	unsigned int tCS;
	unsigned int tCH;
	unsigned int tADL;
	/* NDTR1 fields */
	unsigned int tAR;
	unsigned int tWHR;
	unsigned int tRHW;
	unsigned int tR;
};
/**
 * TO_CYCLES() - Derives a duration in numbers of clock cycles.
 *
 * @ps: Duration in pico-seconds
 * @period_ns: Clock period in nano-seconds
 *
 * Convert the duration in nano-seconds, then divide by the period and
 * return the number of clock periods.
 */
/* Arguments are parenthesized to keep macro expansion safe for expressions */
#define TO_CYCLES(ps, period_ns) (DIV_ROUND_UP((ps) / 1000, (period_ns)))
#define TO_CYCLES64(ps, period_ns) (DIV_ROUND_UP_ULL(div_u64(ps, 1000), \
						     (period_ns)))
/**
 * struct marvell_nfc_op - filled during the parsing of the ->exec_op()
 *                         subop subset of instructions.
 *
 * @ndcb:		Array of values written to NDCBx registers
 * @cle_ale_delay_ns:	Optional delay after the last CMD or ADDR cycle
 * @rdy_timeout_ms:	Timeout for waits on Ready/Busy pin
 * @rdy_delay_ns:	Optional delay after waiting for the RB pin
 * @data_delay_ns:	Optional delay after the data xfer
 * @data_instr_idx:	Index of the data instruction in the subop
 * @data_instr:		Pointer to the data instruction in the subop
 */
struct marvell_nfc_op {
	u32 ndcb[4];
	unsigned int cle_ale_delay_ns;
	unsigned int rdy_timeout_ms;
	unsigned int rdy_delay_ns;
	unsigned int data_delay_ns;
	unsigned int data_instr_idx;
	const struct nand_op_instr *data_instr;
};
/*
 * Internal helper to conditionally apply a delay (from the above structure,
 * most of the time).
 */
static void cond_delay(unsigned int ns)
{
	/* A zero duration means "no delay requested" */
	if (!ns)
		return;

	/* Busy-wait for short delays, sleep-free udelay otherwise */
	if (ns < 10000)
		ndelay(ns);
	else
		udelay(DIV_ROUND_UP(ns, 1000));
}
/* * The controller has many flags that could generate interrupts, most of them * are disabled and polling is used. For the very slow signals, using interrupts * may relax the CPU charge.
*/ staticvoid marvell_nfc_disable_int(struct marvell_nfc *nfc, u32 int_mask)
{
u32 reg;
/* * Callers of this function do not verify if the NAND is using a 16-bit * an 8-bit bus for normal operations, so we need to take care of that * here by leaving the configuration unchanged if the NAND does not have * the NAND_BUSWIDTH_16 flag set.
*/ if (!(chip->options & NAND_BUSWIDTH_16)) return;
/* * The command is being processed, wait for the ND_RUN bit to be * cleared by the NFC. If not, we must clear it by hand.
*/
ret = readl_relaxed_poll_timeout(nfc->regs + NDCR, val,
(val & NDCR_ND_RUN) == 0,
POLL_PERIOD, POLL_TIMEOUT); if (ret) {
dev_err(nfc->dev, "Timeout on NAND controller run mode\n");
writel_relaxed(readl(nfc->regs + NDCR) & ~NDCR_ND_RUN,
nfc->regs + NDCR); return ret;
}
return 0;
}
/* * Any time a command has to be sent to the controller, the following sequence * has to be followed: * - call marvell_nfc_prepare_cmd() * -> activate the ND_RUN bit that will kind of 'start a job' * -> wait the signal indicating the NFC is waiting for a command * - send the command (cmd and address cycles) * - enventually send or receive the data * - call marvell_nfc_end_cmd() with the corresponding flag * -> wait the flag to be triggered or cancel the job with a timeout * * The following helpers are here to factorize the code a bit so that * specialized functions responsible for executing the actual NAND * operations do not have to replicate the same code blocks.
*/ staticint marvell_nfc_prepare_cmd(struct nand_chip *chip)
{ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 ndcr, val; int ret;
/* Poll ND_RUN and clear NDSR before issuing any command */
ret = marvell_nfc_wait_ndrun(chip); if (ret) {
dev_err(nfc->dev, "Last operation did not succeed\n"); return ret;
}
/* Assert ND_RUN bit and wait the NFC to be ready */
writel_relaxed(ndcr | NDCR_ND_RUN, nfc->regs + NDCR);
ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
val & NDSR_WRCMDREQ,
POLL_PERIOD, POLL_TIMEOUT); if (ret) {
dev_err(nfc->dev, "Timeout on WRCMDRE\n"); return -ETIMEDOUT;
}
/* Command may be written, clear WRCMDREQ status bit */
writel_relaxed(NDSR_WRCMDREQ, nfc->regs + NDSR);
/* * Write NDCB0 four times only if LEN_OVRD is set or if ADDR6 or ADDR7 * fields are used (only available on NFCv2).
*/ if (nfc_op->ndcb[0] & NDCB0_LEN_OVRD ||
NDCB0_ADDR_GET_NUM_CYC(nfc_op->ndcb[0]) >= 6) { if (!WARN_ON_ONCE(!nfc->caps->is_nfcv2))
writel(nfc_op->ndcb[3], nfc->regs + NDCB0);
}
}
/*
 * NOTE(review): this span is corrupted by extraction — the body of
 * marvell_nfc_end_cmd() is fused with fragments of at least four other
 * helpers (a wait-for-ready helper, a chip/target selection helper, the
 * interrupt service routine and a hardware-ECC enable helper). Identifiers
 * such as 'pending', 'ndcr_generic', 'marvell_nand', 'die_nr', 'st' and
 * 'ien' are never declared here, and 'staticint'/'constchar' are fused
 * keywords. The original functions must be re-split before this compiles.
 */
staticint marvell_nfc_end_cmd(struct nand_chip *chip, int flag, constchar *label)
{ struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
u32 val; int ret;
/* Poll NDSR until 'flag' rises; on timeout, abort any in-flight DMA */
ret = readl_relaxed_poll_timeout(nfc->regs + NDSR, val,
val & flag,
POLL_PERIOD, POLL_TIMEOUT);
if (ret) {
dev_err(nfc->dev, "Timeout on %s (NDSR: 0x%08x)\n",
label, val); if (nfc->dma_chan)
dmaengine_terminate_all(nfc->dma_chan); return ret;
}
/* * DMA function uses this helper to poll on CMDD bits without wanting * them to be cleared.
*/ if (nfc->use_dma && (readl_relaxed(nfc->regs + NDCR) & NDCR_DMA_EN)) return 0;
/* NOTE(review): 'pending' below belongs to a wait-for-ready helper, not to end_cmd() */
/* * In case the interrupt was not served in the required time frame, * check if the ISR was not served or if something went actually wrong.
*/ if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n"); return -ETIMEDOUT;
}
/* NOTE(review): fragment of the chip-select/target-selection helper starts here */
/* * Reset the NDCR register to a clean state for this particular chip, * also clear ND_RUN bit.
*/
ndcr_generic = readl_relaxed(nfc->regs + NDCR) &
NDCR_GENERIC_FIELDS_MASK & ~NDCR_ND_RUN;
writel_relaxed(ndcr_generic | marvell_nand->ndcr, nfc->regs + NDCR);
/* Also reset the interrupt status register */
marvell_nfc_clear_int(nfc, NDCR_ALL_INT);
if (chip == nfc->selected_chip && die_nr == marvell_nand->selected_die) return;
/* NOTE(review): fragment of the interrupt service routine starts here */
/* * RDY interrupt mask is one bit in NDCR while there are two status * bit in NDSR (RDY[cs0/cs2] and RDY[cs1/cs3]).
*/ if (st & NDSR_RDY(1))
st |= NDSR_RDY(0);
if (!(st & ien)) return IRQ_NONE;
marvell_nfc_disable_int(nfc, st & NDCR_ALL_INT);
if (st & (NDSR_RDY(0) | NDSR_RDY(1)))
complete(&nfc->complete);
/* NOTE(review): fragment of the hardware-ECC enable helper starts here */
/* * When enabling BCH, set threshold to 0 to always know the * number of corrected bitflips.
*/ if (chip->ecc.algo == NAND_ECC_ALGO_BCH)
writel_relaxed(NDECCCTRL_BCH_EN, nfc->regs + NDECCCTRL);
}
}
/*
 * Re-qualify a chunk that the ECC engine declared uncorrectable: blank pages
 * (all 0xFF) that have not been written may be recognized as bad if bitflips
 * occur, so check whether the chunk (data + spare + ECC) is actually erased.
 * On a genuinely bad chunk, ->ecc_stats.failed is incremented; on an erased
 * chunk with bitflips, ->ecc_stats.corrected and *max_bitflips are updated.
 */
static void marvell_nfc_check_empty_chunk(struct nand_chip *chip,
					  u8 *data, int data_len,
					  u8 *spare, int spare_len,
					  u8 *ecc, int ecc_len,
					  unsigned int *max_bitflips)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int bf;

	/* A NULL buffer means the corresponding region must be ignored */
	if (!data)
		data_len = 0;
	if (!spare)
		spare_len = 0;
	if (!ecc)
		ecc_len = 0;

	/*
	 * The corrupted original used 'bf' uninitialized: the erased-chunk
	 * verification was missing. Check the whole chunk against an erased
	 * pattern, tolerating up to chip->ecc.strength bitflips.
	 */
	bf = nand_check_erased_ecc_chunk(data, data_len, ecc, ecc_len,
					 spare, spare_len,
					 chip->ecc.strength);
	if (bf < 0) {
		mtd->ecc_stats.failed++;
		return;
	}

	/* Update the stats and max_bitflips */
	mtd->ecc_stats.corrected += bf;
	*max_bitflips = max_t(unsigned int, *max_bitflips, bf);
}
/*
 * NOTE(review): in this corrupted span, the second half of the bitflip-check
 * helper was replaced by a fragment of a Hamming read-page helper
 * ('nfc_op', 'ret', 'data_buf', 'oob_buf', 'oob_bytes' and 'lt' are never
 * declared here). The fused keywords 'staticint'/'unsignedint' also need
 * re-splitting.
 */
/* * Check if a chunk is correct or not according to the hardware ECC engine. * mtd->ecc_stats.corrected is updated, as well as max_bitflips, however * mtd->ecc_stats.failure is not, the function will instead return a non-zero * value indicating that a check on the emptyness of the subpage must be * performed before actually declaring the subpage as "corrupted".
*/ staticint marvell_nfc_hw_ecc_check_bitflips(struct nand_chip *chip, unsignedint *max_bitflips)
{ struct mtd_info *mtd = nand_to_mtd(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); int bf = 0;
u32 ndsr;
ndsr = readl_relaxed(nfc->regs + NDSR);
/* Check uncorrectable error flag */ if (ndsr & NDSR_UNCERR) {
writel_relaxed(ndsr, nfc->regs + NDSR);
/* * Do not increment ->ecc_stats.failed now, instead, return a * non-zero value to indicate that this chunk was apparently * bad, and it should be check to see if it empty or not. If * the chunk (with ECC bytes) is not declared empty, the calling * function must increment the failure count.
*/ return -EBADMSG;
}
/* Check correctable error flag */ if (ndsr & NDSR_CORERR) {
writel_relaxed(ndsr, nfc->regs + NDSR);
/* NOTE(review): fragment of a Hamming read-page helper starts here */
/* NFCv2 needs more information about the operation being executed */ if (nfc->caps->is_nfcv2)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW);
ret = marvell_nfc_prepare_cmd(chip); if (ret) return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ while draining FIFO (data/oob)"); if (ret) return ret;
/* * Read the page then the OOB area. Unlike what is shown in current * documentation, spare bytes are protected by the ECC engine, and must * be at the beginning of the OOB area or running this driver on legacy * systems will prevent the discovery of the BBM/BBT.
*/ if (nfc->use_dma) {
marvell_nfc_xfer_data_dma(nfc, DMA_FROM_DEVICE,
lt->data_bytes + oob_bytes);
memcpy(data_buf, nfc->dma_buf, lt->data_bytes);
memcpy(oob_buf, nfc->dma_buf + lt->data_bytes, oob_bytes);
} else {
marvell_nfc_xfer_data_in_pio(nfc, data_buf, lt->data_bytes);
marvell_nfc_xfer_data_in_pio(nfc, oob_buf, oob_bytes);
}
ret = marvell_nfc_wait_cmdd(chip); return ret;
}
/*
 * Raw Hamming page read: transfer data and OOB with the ECC engine bypassed
 * (the 'true' argument selects the raw path of the do_read_page helper).
 */
static int marvell_nfc_hw_ecc_hmg_read_page_raw(struct nand_chip *chip, u8 *buf,
						int oob_required, int page)
{
	marvell_nfc_select_target(chip, chip->cur_cs);
	return marvell_nfc_hw_ecc_hmg_do_read_page(chip, buf, chip->oob_poi,
						   true, page);
}
/*
 * NOTE(review): orphaned fragment — 'raw_buf' and 'full_sz' are not declared
 * in this span; this belongs to the error path of the non-raw Hamming
 * read-page helper, whose surrounding body was lost during extraction.
 */
/* * When ECC failures are detected, check if the full page has been * written or not. Ignore the failure if it is actually empty.
*/
raw_buf = kmalloc(full_sz, GFP_KERNEL); if (!raw_buf) return -ENOMEM;
/* * Spare area in Hamming layouts is not protected by the ECC engine (even if * it appears before the ECC bytes when reading), the ->read_oob_raw() function * also stands for ->read_oob().
*/ staticint marvell_nfc_hw_ecc_hmg_read_oob_raw(struct nand_chip *chip, int page)
{
u8 *buf = nand_get_data_buf(chip);
/* * Spare area in Hamming layouts is not protected by the ECC engine (even if * it appears before the ECC bytes when reading), the ->write_oob_raw() function * also stands for ->write_oob().
*/ staticint marvell_nfc_hw_ecc_hmg_write_oob_raw(struct nand_chip *chip, int page)
{ struct mtd_info *mtd = nand_to_mtd(chip);
u8 *buf = nand_get_data_buf(chip);
/*
 * NOTE(review): this is the interior of the BCH read-chunk helper
 * (referenced later as marvell_nfc_hw_ecc_bch_read_chunk()); its signature
 * and the 'nfc_op' initialization are missing from the corrupted text, and
 * 'elseif' is a fused keyword. 'chunk', 'lt', 'nfc', 'data', 'spare',
 * 'data_len', 'spare_len' and 'i' come from the lost function head.
 */
/* * Trigger the monolithic read on the first chunk, then naked read on * intermediate chunks and finally a last naked read on the last chunk.
*/ if (chunk == 0)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW); elseif (chunk < lt->nchunks - 1)
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_NAKED_RW); else
nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW);
marvell_nfc_send_cmd(chip, &nfc_op);
/* * According to the datasheet, when reading from NDDB * with BCH enabled, after each 32 bytes reads, we * have to make sure that the NDSR.RDDREQ bit is set. * * Drain the FIFO, 8 32-bit reads at a time, and skip * the polling on the last read. * * Length is a multiple of 32 bytes, hence it is a multiple of 8 too.
*/ for (i = 0; i < data_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ while draining FIFO (data)");
marvell_nfc_xfer_data_in_pio(nfc, data,
FIFO_DEPTH * BCH_SEQ_READS);
data += FIFO_DEPTH * BCH_SEQ_READS;
}
for (i = 0; i < spare_len; i += FIFO_DEPTH * BCH_SEQ_READS) {
marvell_nfc_end_cmd(chip, NDSR_RDDREQ, "RDDREQ while draining FIFO (OOB)");
marvell_nfc_xfer_data_in_pio(nfc, spare,
FIFO_DEPTH * BCH_SEQ_READS);
spare += FIFO_DEPTH * BCH_SEQ_READS;
}
}
/*
 * NOTE(review): BCH page read with on-the-fly ECC check. The first half
 * (per-chunk read + bitflip accounting) is intact modulo fused keywords
 * ('staticint', 'conststruct'); the recovery second half is truncated: the
 * offsets declared in the re-read loop (data_off_in_page etc.) are never
 * assigned in the visible text, and the function never closes in this span
 * (the empty-chunk check and final return are missing).
 */
staticint marvell_nfc_hw_ecc_bch_read_page(struct nand_chip *chip,
u8 *buf, int oob_required, int page)
{ struct mtd_info *mtd = nand_to_mtd(chip); conststruct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; int data_len = lt->data_bytes, spare_len = lt->spare_bytes;
u8 *data = buf, *spare = chip->oob_poi; int max_bitflips = 0;
u32 failure_mask = 0; int chunk, ret;
marvell_nfc_select_target(chip, chip->cur_cs);
/* * With BCH, OOB is not fully used (and thus not read entirely), not * expected bytes could show up at the end of the OOB buffer if not * explicitly erased.
*/ if (oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
marvell_nfc_enable_hw_ecc(chip);
/* Per-chunk read loop: failed chunks are recorded in failure_mask */
for (chunk = 0; chunk < lt->nchunks; chunk++) { /* Update length for the last chunk */ if (chunk >= lt->full_chunk_cnt) {
data_len = lt->last_data_bytes;
spare_len = lt->last_spare_bytes;
}
/* Read the chunk and detect number of bitflips */
marvell_nfc_hw_ecc_bch_read_chunk(chip, chunk, data, data_len,
spare, spare_len, page);
ret = marvell_nfc_hw_ecc_check_bitflips(chip, &max_bitflips); if (ret)
failure_mask |= BIT(chunk);
data += data_len;
spare += spare_len;
}
marvell_nfc_disable_hw_ecc(chip);
if (!failure_mask) return max_bitflips;
/* NOTE(review): recovery path below is incomplete — offset computations are missing */
/* * Please note that dumping the ECC bytes during a normal read with OOB * area would add a significant overhead as ECC bytes are "consumed" by * the controller in normal mode and must be re-read in raw mode. To * avoid dropping the performances, we prefer not to include them. The * user should re-read the page in raw mode if ECC bytes are required.
*/
/* * In case there is any subpage read error, we usually re-read only ECC * bytes in raw mode and check if the whole page is empty. In this case, * it is normal that the ECC check failed and we just ignore the error. * * However, it has been empirically observed that for some layouts (e.g * 2k page, 8b strength per 512B chunk), the controller tries to correct * bits and may create itself bitflips in the erased area. To overcome * this strange behavior, the whole page is re-read in raw mode, not * only the ECC bytes.
*/ for (chunk = 0; chunk < lt->nchunks; chunk++) { int data_off_in_page, spare_off_in_page, ecc_off_in_page; int data_off, spare_off, ecc_off; int data_len, spare_len, ecc_len;
/* No failure reported for this chunk, move to the next one */ if (!(failure_mask & BIT(chunk))) continue;
/* * Only re-read the ECC bytes, unless we are using the 2k/8b * layout which is buggy in the sense that the ECC engine will * try to correct data bytes anyway, creating bitflips. In this * case, re-read the entire page.
*/ if (lt->writesize == 2048 && lt->strength == 8) {
nand_change_read_column_op(chip, data_off_in_page,
buf + data_off, data_len, false);
nand_change_read_column_op(chip, spare_off_in_page,
chip->oob_poi + spare_off, spare_len, false);
}
/*
 * NOTE(review): interior of the BCH write-chunk helper — its signature and
 * the 'nfc_op' setup using 'xtype' are missing from the corrupted text
 * ('chunk', 'lt', 'nfc', 'data', 'spare', 'data_len', 'spare_len', 'xtype'
 * and 'ret' come from the lost function head).
 */
/* * First operation dispatches the CMD_SEQIN command, issue the address * cycles and asks for the first chunk of data. * All operations in the middle (if any) will issue a naked write and * also ask for data. * Last operation (if any) asks for the last chunk of data through a * last naked write.
*/ if (chunk == 0) { if (lt->nchunks == 1)
xtype = XTYPE_MONOLITHIC_RW; else
xtype = XTYPE_WRITE_DISPATCH;
/* Always dispatch the PAGEPROG command on the last chunk */ if (chunk == lt->nchunks - 1)
nfc_op.ndcb[0] |= NDCB0_CMD2(NAND_CMD_PAGEPROG) | NDCB0_DBC;
ret = marvell_nfc_prepare_cmd(chip); if (ret) return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_WRDREQ, "WRDREQ while loading FIFO (data)"); if (ret) return ret;
/* Transfer the contents */
iowrite32_rep(nfc->regs + NDDB, data, FIFO_REP(data_len));
iowrite32_rep(nfc->regs + NDDB, spare, FIFO_REP(spare_len));
return 0;
}
/*
 * NOTE(review): BCH page write — the per-chunk write loop is missing from
 * the corrupted text (the body jumps from the OOB memset straight to a
 * loop tail), and after the status check the span continues with fragments
 * of an unrelated raw-access helper that references an undeclared 'nfc_op'.
 * Fused keywords: 'staticint', 'conststruct'.
 */
staticint marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page)
{ conststruct nand_sdr_timings *sdr =
nand_get_sdr_timings(nand_get_interface_config(chip)); struct mtd_info *mtd = nand_to_mtd(chip); conststruct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const u8 *data = buf; const u8 *spare = chip->oob_poi; int data_len = lt->data_bytes; int spare_len = lt->spare_bytes; int chunk, ret;
u8 status;
marvell_nfc_select_target(chip, chip->cur_cs);
/* Spare data will be written anyway, so clear it to avoid garbage */ if (!oob_required)
memset(chip->oob_poi, 0xFF, mtd->oobsize);
/* NOTE(review): the per-chunk write loop body is missing before this wait */
/* * Waiting only for CMDD or PAGED is not enough, ECC are * partially written. No flag is set once the operation is * really finished but the ND_RUN bit is cleared, so wait for it * before stepping into the next command.
*/
marvell_nfc_wait_ndrun(chip);
}
ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max));
marvell_nfc_disable_hw_ecc(chip);
if (ret) return ret;
/* Check write status on the chip side */
ret = nand_status_op(chip, &status); if (ret) return ret;
/* NOTE(review): fragment of a raw-access helper starts here ('nfc_op' undeclared) */
ret = marvell_nfc_prepare_cmd(chip); if (ret) return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ, "RDDREQ/WRDREQ while draining raw data"); if (ret) return ret;
/*
 * NOTE(review): interior of the monolithic ->exec_op() helper — its
 * signature is missing ('reading', 'subop', 'nfc_op', 'ret' come from the
 * lost function head). The sequencing (optional RB wait, PIO transfer,
 * CMDD wait, optional post-data delay) is intact.
 */
cond_delay(nfc_op.cle_ale_delay_ns);
/* For read operations, the RB wait happens before the data transfer */
if (reading) { if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); if (ret) return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
}
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip); if (ret) return ret;
cond_delay(nfc_op.data_delay_ns);
/* For write operations, the RB wait happens after the data transfer */
if (!reading) { if (nfc_op.rdy_timeout_ms) {
ret = marvell_nfc_wait_op(chip, nfc_op.rdy_timeout_ms); if (ret) return ret;
}
cond_delay(nfc_op.rdy_delay_ns);
}
/*
 * NOTE(review): this span fuses the tail of the monolithic ->exec_op()
 * helper with most of the naked-access ->exec_op() helper; both signatures
 * are missing ('reading', 'subop', 'nfc_op', 'ret' come from the lost
 * function heads) and both ND_RUN workaround blocks are left unclosed.
 */
/* * NDCR ND_RUN bit should be cleared automatically at the end of each * operation but experience shows that the behavior is buggy when it * comes to writes (with LEN_OVRD). Clear it by hand in this case.
*/ if (!reading) { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
/* NOTE(review): naked-access helper fragment starts here */
/* * Naked access are different in that they need to be flagged as naked * by the controller. Reset the controller registers fields that inform * on the type and refill them according to the ongoing operation.
*/
/* Map the first instruction of the subop to the matching NDCB0 type/xtype */
nfc_op.ndcb[0] &= ~(NDCB0_CMD_TYPE(TYPE_MASK) |
NDCB0_CMD_XTYPE(XTYPE_MASK)); switch (subop->instrs[0].type) { case NAND_OP_CMD_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_CMD); break; case NAND_OP_ADDR_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_NAKED_ADDR); break; case NAND_OP_DATA_IN_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_READ) |
NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); break; case NAND_OP_DATA_OUT_INSTR:
nfc_op.ndcb[0] |= NDCB0_CMD_TYPE(TYPE_WRITE) |
NDCB0_CMD_XTYPE(XTYPE_LAST_NAKED_RW); break; default: /* This should never happen */ break;
}
ret = marvell_nfc_prepare_cmd(chip); if (ret) return ret;
marvell_nfc_send_cmd(chip, &nfc_op);
/* Pure CMD/ADDR cycles: no data phase, just wait for command completion */
if (!nfc_op.data_instr) {
ret = marvell_nfc_wait_cmdd(chip);
cond_delay(nfc_op.cle_ale_delay_ns); return ret;
}
ret = marvell_nfc_end_cmd(chip, NDSR_RDDREQ | NDSR_WRDREQ, "RDDREQ/WRDREQ while draining raw data"); if (ret) return ret;
marvell_nfc_xfer_data_pio(chip, subop, &nfc_op);
ret = marvell_nfc_wait_cmdd(chip); if (ret) return ret;
/* * NDCR ND_RUN bit should be cleared automatically at the end of each * operation but experience shows that the behavior is buggy when it * comes to writes (with LEN_OVRD). Clear it by hand in this case.
*/ if (subop->instrs[0].type == NAND_OP_DATA_OUT_INSTR) { struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
/*
 * NOTE(review): the following paragraph is web-page boilerplate (German)
 * that leaked into the file during extraction and is unrelated to this
 * driver. Translated for reference; it should be removed entirely:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed. Remark: the colored
 * syntax highlighting and the measurement are still experimental."
 */