/*
 * HPNFC can work in 3 modes:
 * - PIO - can work in master or slave DMA
 * - CDMA - needs Master DMA for accessing command descriptors.
 * - Generic mode - can use only slave DMA.
 * CDMA and PIO modes can be used to execute only base commands.
 * Generic mode can be used to execute any command
 * on NAND flash memory. Driver uses CDMA mode for
 * block erasing, page reading, page programming.
 * Generic mode is used for executing rest of commands.
 */
/* Register definition. */

/*
 * Command register 0.
 * Writing data to this register will initiate a new transaction
 * of the NF controller.
 */
#define CMD_REG0			0x0000
/* Command type field mask. */
#define   CMD_REG0_CT			GENMASK(31, 30)
/* Command type CDMA. */
#define   CMD_REG0_CT_CDMA		0uL
/* Command type generic. */
#define   CMD_REG0_CT_GEN		3uL
/* Command thread number field mask. */
#define   CMD_REG0_TN			GENMASK(27, 24)

/* Command register 2. */
#define CMD_REG2			0x0008
/* Command register 3. */
#define CMD_REG3			0x000C
/* Pointer register to select which thread status will be selected. */
#define CMD_STATUS_PTR			0x0010
/* Command status register for selected thread. */
#define CMD_STATUS			0x0014
/*
 * Transfer config 0 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_0			0x0400
/* Offset value from the beginning of the page. */
#define   TRAN_CFG_0_OFFSET		GENMASK(31, 16)
/* Numbers of sectors to transfer within a single NF device's page. */
#define   TRAN_CFG_0_SEC_CNT		GENMASK(7, 0)

/*
 * Transfer config 1 register.
 * Configures data transfer parameters.
 */
#define TRAN_CFG_1			0x0404
/* Size of last data sector. */
#define   TRAN_CFG_1_LAST_SEC_SIZE	GENMASK(31, 16)
/* Size of not-last data sector. */
#define   TRAN_CFG_1_SECTOR_SIZE	GENMASK(15, 0)

/* Transferred data block size for the slave DMA module. */
#define SDMA_SIZE			0x0440

/*
 * Thread number associated with transferred data block
 * for the slave DMA module.
 */
#define SDMA_TRD_NUM			0x0444
/* Thread number mask. */
#define   SDMA_TRD_NUM_SDMA_TRD		GENMASK(2, 0)
/* Available hardware features of the controller. */
#define CTRL_FEATURES			0x804
/* Support for NV-DDR2/3 work mode. */
#define   CTRL_FEATURES_NVDDR_2_3	BIT(28)
/* Support for NV-DDR work mode. */
#define   CTRL_FEATURES_NVDDR		BIT(27)
/* Support for asynchronous work mode. */
#define   CTRL_FEATURES_ASYNC		BIT(26)
/* Number of banks supported by the hardware. */
#define   CTRL_FEATURES_N_BANKS		GENMASK(25, 24)
/* Slave and Master DMA data width. */
#define   CTRL_FEATURES_DMA_DWITH64	BIT(21)
/* Availability of Control Data feature. */
#define   CTRL_FEATURES_CONTROL_DATA	BIT(10)
/* Register controlling the gate and loopback control related timing. */
#define PHY_GATE_LPBK_CTRL		0x2008
#define   PHY_GATE_LPBK_CTRL_RDS	GENMASK(24, 19)

/* Register holds the control for the master DLL logic. */
#define PHY_DLL_MASTER_CTRL		0x200C
#define   PHY_DLL_MASTER_CTRL_BYPASS_MODE BIT(23)

/* Register holds the control for the slave DLL logic. */
#define PHY_DLL_SLAVE_CTRL		0x2010

/* This register handles the global control settings for the PHY. */
#define PHY_CTRL			0x2080
#define   PHY_CTRL_SDR_DQS		BIT(14)
#define   PHY_CTRL_PHONY_DQS		GENMASK(9, 4)

/*
 * This register handles the global control settings
 * for the termination selects for reads.
 */
#define PHY_TSEL			0x2084
/* Generic command layout. */
/* Chip select field of the generic command layout. */
#define GCMD_LAY_CS			GENMASK_ULL(11, 8)
/*
 * This bit informs the minicontroller if it has to wait for tWB
 * after sending the last CMD/ADDR/DATA in the sequence.
 */
#define GCMD_LAY_TWB			BIT_ULL(6)
/* Type of generic instruction. */
#define GCMD_LAY_INSTR			GENMASK_ULL(5, 0)

/* Transfer direction field of generic command data sequence. */
#define GCMD_DIR			BIT_ULL(11)
/* Read transfer direction of generic command data sequence. */
#define GCMD_DIR_READ			0
/* Write transfer direction of generic command data sequence. */
#define GCMD_DIR_WRITE			1

/* ECC enabled flag of generic command data sequence - ECC enabled. */
#define GCMD_ECC_EN			BIT_ULL(12)
/* Generic command data sequence - sector size. */
#define GCMD_SECT_SIZE			GENMASK_ULL(31, 16)
/* Generic command data sequence - sector count. */
#define GCMD_SECT_CNT			GENMASK_ULL(39, 32)
/* Generic command data sequence - last sector size. */
#define GCMD_LAST_SIZE			GENMASK_ULL(55, 40)
/* CDMA descriptor fields. */

/* Erase command type of CDMA descriptor. */
#define CDMA_CT_ERASE			0x1000
/* Program page command type of CDMA descriptor. */
#define CDMA_CT_WR			0x2100
/* Read page command type of CDMA descriptor. */
#define CDMA_CT_RD			0x2200

/*
 * Command DMA descriptor flags. If set causes issue interrupt after
 * the completion of descriptor processing.
 */
#define CDMA_CF_INT			BIT(8)
/*
 * Command DMA descriptor flags - the next descriptor
 * address field is valid and descriptor processing should continue.
 */
#define CDMA_CF_CONT			BIT(9)
/* DMA master flag of command DMA descriptor. */
#define CDMA_CF_DMA_MASTER		BIT(10)

/* Operation complete status of command descriptor. */
#define CDMA_CS_COMP			BIT(15)
/* Command descriptor status - operation fail. */
#define CDMA_CS_FAIL			BIT(14)
/* Command descriptor status - page erased. */
#define CDMA_CS_ERP			BIT(11)
/* Command descriptor status - timeout occurred. */
#define CDMA_CS_TOUT			BIT(10)
/*
 * Maximum amount of correction applied to one ECC sector.
 * It is part of command descriptor status.
 */
#define CDMA_CS_MAXERR			GENMASK(9, 2)
/* Command descriptor status - uncorrectable ECC error. */
#define CDMA_CS_UNCE			BIT(1)
/* Command descriptor status - descriptor error. */
#define CDMA_CS_ERR			BIT(0)
/* Status of operation - OK. */
#define STAT_OK				0
/* Status of operation - FAIL. */
#define STAT_FAIL			2
/* Status of operation - uncorrectable ECC error. */
#define STAT_ECC_UNCORR			3
/* Status of operation - page erased. */
#define STAT_ERASED			5
/* Status of operation - correctable ECC error. */
#define STAT_ECC_CORR			6
/* Status of operation - unexpected state. */
#define STAT_UNKNOWN			7
/* Status of operation - operation is not completed yet. */
#define STAT_BUSY			0xFF
/* Cadence NAND flash controller capabilities obtained from driver data. */
struct cadence_nand_dt_devdata {
	/* Skew value of the output signals of the NAND Flash interface. */
	u32 if_skew;
	/* It informs if slave DMA interface is connected to DMA engine. */
	unsigned int has_dma:1;
};
/* Cadence NAND flash controller capabilities read from registers. */
struct cdns_nand_caps {
	/* Maximum number of banks supported by hardware. */
	u8 max_banks;
	/* Slave and Master DMA data width in bytes (4 or 8). */
	u8 data_dma_width;
	/* Control Data feature supported. */
	bool data_control_supp;
	/* Is PHY type DLL. */
	bool is_phy_type_dll;
};
/*
 * NOTE(review): extraction artifact — the fields below appear to be the
 * interior of a larger controller-state struct (presumably the driver's
 * main NAND-controller context; confirm against the full source) whose
 * opening "struct ... {" and closing "};" lines were lost, and several
 * declarations are fused onto single lines. Code left byte-identical.
 */
/* IRQ number plus the completion and lock-protected IRQ bookkeeping. */
int irq; /* Interrupts that have happened. */ struct cadence_nand_irq_status irq_status; /* Interrupts we are waiting for. */ struct cadence_nand_irq_status irq_mask; struct completion complete; /* Protect irq_mask and irq_status. */
spinlock_t irq_lock;
/* ECC capability tables exposed through ecc_caps. */
int ecc_strengths[BCH_MAX_NUM_CORR_CAPS]; struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES]; struct nand_ecc_caps ecc_caps;
/* Currently selected transfer type. */
int curr_trans_type;
/* DMA channel handle — presumably used by the slave DMA path; confirm. */
struct dma_chan *dmac;
/* NF clock rate — presumed Hz from naming convention; confirm. */
u32 nf_clk_rate; /* * Estimated Board delay. The value includes the total * round trip delay for the signals and is used for deciding on values * associated with data read capture.
*/
u32 board_delay;
/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device. It acknowledges the detected interrupts,
 * accumulates their status bits into the device context (protected by
 * irq_lock) and wakes any waiter blocked on the completion.
 *
 * NOTE(review): the original extracted text was truncated before the
 * final return; the canonical "return result;" and closing brace have
 * been restored.
 */
static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
{
	struct cdns_nand_ctrl *cdns_ctrl = dev_id;
	struct cadence_nand_irq_status irq_status;
	irqreturn_t result = IRQ_NONE;

	spin_lock(&cdns_ctrl->irq_lock);

	if (irq_detected(cdns_ctrl, &irq_status)) {
		/* Handle interrupt. */
		/* First acknowledge it. */
		cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
		/* Status in the device context for someone to read. */
		cdns_ctrl->irq_status.status |= irq_status.status;
		cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
		cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
		/* Notify anyone who cares that it happened. */
		complete(&cdns_ctrl->complete);
		/* Tell the OS that we've handled this. */
		result = IRQ_HANDLED;
	}
	spin_unlock(&cdns_ctrl->irq_lock);

	return result;
}
/*
 * NOTE(review): extraction artifact — the lines below are spliced
 * fragments of several different functions (ECC/BCH capability
 * discovery, controller initialization, and a bad-block-marker read)
 * whose enclosing definitions and some closing braces were lost;
 * multiple statements are fused onto single lines and one #ifndef
 * directive is fused with code. Code left byte-identical pending
 * recovery of the full source.
 */
/* Count the non-zero entries of the supported ECC strength table. */
nstrengths = 0; for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) { if (cdns_ctrl->ecc_strengths[i] != 0)
nstrengths++;
}
/* Attach the shared strength list to each step-size entry and count
 * the entries with a non-zero step size.
 */
ecc_caps->nstepinfos = 0; for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) { /* ECC strengths are common for all step infos. */
cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
cdns_ctrl->ecc_stepinfos[i].strengths =
cdns_ctrl->ecc_strengths;
if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
ecc_caps->nstepinfos++;
/* Clear all interrupts. */
writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
/* Read controller capabilities; fail the init if BCH caps are bad. */
cadence_nand_get_caps(cdns_ctrl); if (cadence_nand_read_bch_caps(cdns_ctrl)) return -EIO;
/* Reject 64-bit DMA data width on 32-bit builds.
 * NOTE(review): "#ifndef CONFIG_64BIT" was fused with the following
 * statement by the extraction; it belongs on its own line.
 */
#ifndef CONFIG_64BIT if (cdns_ctrl->caps2.data_dma_width == 8) {
dev_err(cdns_ctrl->dev, "cannot access 64-bit dma on !64-bit architectures"); return -EIO;
} #endif
/* * Set IO width access to 8. * It is because during SW device discovering width access * is expected to be 8.
*/
status = cadence_nand_set_access_width16(cdns_ctrl, false);
/* * Read only bad block marker from offset * defined by a memory manufacturer.
*/
/* CDMA read of oobsize bytes into the bounce buffer for the BBM. */
status = cadence_nand_cdma_transfer(cdns_ctrl,
cdns_chip->cs[chip->cur_cs],
page, cdns_ctrl->buf, NULL,
mtd->oobsize,
0, DMA_FROM_DEVICE, false); if (status) {
dev_err(cdns_ctrl->dev, "read BBM failed\n"); return -EIO;
}
/*
 * NOTE(review): extraction artifact — this region starts with the
 * signature and locals of a raw page-write function ("staticint"
 * should read "static int", "elseif" should read "else if"), but the
 * interior is spliced with the tail of a page-READ path (the
 * switch (status) updating mtd->ecc_stats and returning
 * ecc_err_count) and the start of an OOB-read function. Several loop
 * bodies are missing their closing braces. Code is left byte-identical
 * pending recovery of the full source; do not trust control flow here.
 */
staticint cadence_nand_write_page_raw(struct nand_chip *chip, const u8 *buf, int oob_required, int page)
{ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip); struct mtd_info *mtd = nand_to_mtd(chip); int writesize = mtd->writesize; int oobsize = mtd->oobsize; int ecc_steps = chip->ecc.steps; int ecc_size = chip->ecc.size; int ecc_bytes = chip->ecc.bytes; void *tmp_buf = cdns_ctrl->buf; int oob_skip = cdns_chip->bbm_len;
size_t size = writesize + oobsize; int i, pos, len; int status = 0;
status = cadence_nand_select_target(chip); if (status) return status;
/* * Fill the buffer with 0xff first except the full page transfer. * This simplifies the logic.
*/ if (!buf || !oob_required)
memset(tmp_buf, 0xff, size);
/* Payload sectors: interleave data with per-step ECC bytes,
 * skipping the BBM area at the start of the OOB.
 */
/* Arrange the buffer for syndrome payload/ecc layout. */ if (buf) { for (i = 0; i < ecc_steps; i++) {
pos = i * (ecc_size + ecc_bytes);
len = ecc_size;
if (pos >= writesize)
pos += oob_skip; elseif (pos + len > writesize)
len = writesize - pos;
/* OOB ECC. */ for (i = 0; i < ecc_steps; i++) {
pos = ecc_size + i * (ecc_size + ecc_bytes); if (i == (ecc_steps - 1))
pos += cdns_chip->avail_oob_size;
len = ecc_bytes;
if (pos >= writesize)
pos += oob_skip; elseif (pos + len > writesize)
len = writesize - pos;
/* * If data buffer can be accessed by DMA and data_control feature * is supported then transfer data and oob directly.
*/ if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
cdns_ctrl->caps2.data_control_supp) {
u8 *oob;
/* NOTE(review): from here down the text belongs to a page-READ
 * completion path from a different function: it translates the
 * CDMA status into MTD ECC statistics and returns the
 * corrected-bit count.
 */
switch (status) { case STAT_ECC_UNCORR:
mtd->ecc_stats.failed++;
ecc_err_count++; break; case STAT_ECC_CORR:
ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
cdns_ctrl->cdma_desc->status);
mtd->ecc_stats.corrected += ecc_err_count; break; case STAT_ERASED: case STAT_OK: break; default:
dev_err(cdns_ctrl->dev, "read page failed\n"); return -EIO;
}
if (oob_required) if (cadence_nand_read_bbm(chip, page, chip->oob_poi)) return -EIO;
return ecc_err_count;
}
/* NOTE(review): start of an OOB-read wrapper fused with raw-page-read
 * fragments below.
 */
/* Reads OOB data from the device. */ staticint cadence_nand_read_oob(struct nand_chip *chip, int page)
{ struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
switch (status) { case STAT_ERASED: case STAT_OK: break; default:
dev_err(cdns_ctrl->dev, "read raw page failed\n"); return -EIO;
}
/* Arrange the buffer for syndrome payload/ecc layout. */ if (buf) { for (i = 0; i < ecc_steps; i++) {
pos = i * (ecc_size + ecc_bytes);
len = ecc_size;
if (pos >= writesize)
pos += oob_skip; elseif (pos + len > writesize)
len = writesize - pos;
/* Error path of yet another (DMA) function fragment. */
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
return -EIO;
}
/*
 * Read @len bytes from the controller's slave DMA interface into @buf.
 *
 * Waits until the slave DMA interface signals a ready data block, then:
 * - without a DMA engine (caps1->has_dma clear): PIO-reads the data in
 *   32- or 64-bit words (per caps2.data_dma_width), draining any excess
 *   beyond @len into the bounce buffer and copying the unaligned tail;
 * - with a DMA engine: tries a direct slave DMA transfer into @buf and
 *   falls back to a transfer into the bounce buffer plus memcpy when the
 *   buffer is not DMA-capable or the direct transfer fails.
 *
 * Returns 0 on success or a negative error code from the helpers.
 *
 * NOTE(review): fixed the extraction artifact "staticint" and restored
 * one-statement-per-line formatting; logic is unchanged.
 */
static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
				 u8 *buf, int len)
{
	u8 thread_nr = 0;
	u32 sdma_size;
	int status;

	/* Wait until slave DMA interface is ready to data transfer. */
	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
	if (status)
		return status;

	if (!cdns_ctrl->caps1->has_dma) {
		u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
		int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;

		/* read alignment data */
		if (data_dma_width == 4)
			ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
#ifdef CONFIG_64BIT
		else
			readsq(cdns_ctrl->io.virt, buf, len_in_words);
#endif

		if (sdma_size > len) {
			int read_bytes = (data_dma_width == 4) ?
				len_in_words << 2 : len_in_words << 3;

			/* read rest data from slave DMA interface if any */
			if (data_dma_width == 4)
				ioread32_rep(cdns_ctrl->io.virt,
					     cdns_ctrl->buf,
					     sdma_size / 4 - len_in_words);
#ifdef CONFIG_64BIT
			else
				readsq(cdns_ctrl->io.virt, cdns_ctrl->buf,
				       sdma_size / 8 - len_in_words);
#endif

			/* copy rest of data */
			memcpy(buf + read_bytes, cdns_ctrl->buf,
			       len - read_bytes);
		}
		return 0;
	}

	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
		status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
							 cdns_ctrl->io.dma,
							 len, DMA_FROM_DEVICE);
		if (status == 0)
			return 0;

		dev_warn(cdns_ctrl->dev, "Slave DMA transfer failed. Try again using bounce buffer.");
	}

	/* If DMA transfer is not possible or failed then use bounce buffer. */
	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
						 cdns_ctrl->io.dma,
						 sdma_size, DMA_FROM_DEVICE);

	if (status) {
		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
		return status;
	}

	memcpy(buf, cdns_ctrl->buf, len);

	return 0;
}
/*
 * NOTE(review): extraction artifact — this region opens with the
 * signature and first statements of a slave-DMA write-buffer function
 * ("staticint" should read "static int", "conststruct" should read
 * "const struct"), but after the PIO-width setup the text jumps into
 * fragments of unrelated functions: a bus-width guard, an
 * address-to-page fold feeding an erase call, and a loop issuing each
 * sub-operation instruction through exec_op. Code is left
 * byte-identical pending recovery of the full source.
 */
staticint cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl, const u8 *buf, int len)
{
u8 thread_nr = 0;
u32 sdma_size; int status;
/* Wait until slave DMA interface is ready to data transfer. */
status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size); if (status) return status;
if (!cdns_ctrl->caps1->has_dma) {
u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
int len_in_words = (data_dma_width == 4) ? len >> 2 : len >> 3;
/* NOTE(review): the remainder belongs to different functions. */
/* * Callers of this function do not verify if the NAND is using a 16-bit * an 8-bit bus for normal operations, so we need to take care of that * here by leaving the configuration unchanged if the NAND does not have * the NAND_BUSWIDTH_16 flag set.
*/ if (!(chip->options & NAND_BUSWIDTH_16)) return 0;
/* Fold the address cycles into a page number, then erase. */
for (i = 0; i < naddrs; i++)
page |= (u32)addrs[i] << (8 * i);
return cadence_nand_erase(chip, page);
}
/* * If it is not an erase operation then handle operation * by calling exec_op function.
*/ for (op_id = 0; op_id < subop->ninstrs; op_id++) { int ret; conststruct nand_operation nand_op = {
.cs = chip->cur_cs,
.instrs = &subop->instrs[op_id],
.ninstrs = 1};
ret = chip->controller->ops->exec_op(chip, &nand_op, false); if (ret) return ret;
}
/*
 * NOTE(review): extraction artifact — fragment of an interface-timing
 * setup function (reads the SDR timing set and derives the DQS
 * sampling resolution); the enclosing definition was lost and the
 * chunk is cut off mid-comment at the end ("maximum size reached" in
 * the scrape). Code left byte-identical.
 */
sdr = nand_get_sdr_timings(conf); if (IS_ERR(sdr)) return PTR_ERR(sdr);
memset(t, 0, sizeof(*t)); /* Sampling point calculation. */
/* DLL-type PHYs halve the phony-DQS sampling period. */
if (cdns_ctrl->caps2.is_phy_type_dll)
phony_dqs_mod = 2; else
phony_dqs_mod = 1;
dqs_sampl_res = clk_period / phony_dqs_mod;
tdvw_min = sdr->tREA_max + board_delay_skew_max; /* * The idea of those calculation is to get the optimum value * for tRP and tRH timings. If it is NOT possible to sample data * with optimal tRP/tRH settings, the parameters will be extended.
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.27 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.