/**
 * struct emif_data - Per device static data for driver's use
 * @duplicate:			Whether the DDR devices attached to this EMIF
 *				instance are exactly same as that on EMIF1. In
 *				this case we can save some memory and processing
 * @temperature_level:		Maximum temperature of LPDDR2 devices attached
 *				to this EMIF - read from MR4 register. If there
 *				are two devices attached to this EMIF, this
 *				value is the maximum of the two temperature
 *				levels.
 * @lpmode:			Chosen low power mode
 * @node:			node in the device list
 * @base:			base address of memory-mapped IO registers.
 * @dev:			device pointer.
 * @regs_cache:			An array of 'struct emif_regs' that stores
 *				calculated register values for different
 *				frequencies, to avoid re-calculating them on
 *				each DVFS transition.
 * @curr_regs:			The set of register values used in the last
 *				frequency change (i.e. corresponding to the
 *				frequency in effect at the moment)
 * @plat_data:			Pointer to saved platform data.
 * @debugfs_root:		dentry to the root folder for EMIF in debugfs
 * @np_ddr:			Pointer to ddr device tree node
 */
struct emif_data {
	u8				duplicate;
	u8				temperature_level;
	u8				lpmode;
	struct list_head		node;
	void __iomem			*base;
	struct device			*dev;
	struct emif_regs		*regs_cache[EMIF_MAX_NUM_FREQUENCIES];
	struct emif_regs		*curr_regs;
	struct emif_platform_data	*plat_data;
	struct dentry			*debugfs_root;
	struct device_node		*np_ddr;
};
/*
 * Get bus width used by EMIF. Note that this may be different from the
 * bus width of the DDR devices used. For instance two 16-bit DDR devices
 * may be connected to a given CS of EMIF. In this case bus width as far
 * as EMIF is concerned is 32, where as the DDR bus width is 16 bits.
 */
static u32 get_emif_bus_width(struct emif_data *emif)
{
	u32 width;
	void __iomem *base = emif->base;

	/*
	 * NOTE(review): everything below appears spliced in from other parts
	 * of the driver during extraction: `width` is never assigned or
	 * returned, `base` is unused, `lpmode` is referenced without any
	 * declaration, and the errata handling iterates the global
	 * device_list rather than computing this EMIF's bus width. The real
	 * body of get_emif_bus_width() seems to have been lost; restore it,
	 * and move the code below back to the low-power-mode /
	 * frequency-update paths it belongs to. TODO: confirm against the
	 * original driver source.
	 */

	/*
	 * Workaround for errata i743 - LPDDR2 Power-Down State is Not
	 * Efficient
	 *
	 * i743 DESCRIPTION:
	 * The EMIF supports power-down state for low power. The EMIF
	 * automatically puts the SDRAM into power-down after the memory is
	 * not accessed for a defined number of cycles and the
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set to 0x4.
	 * As the EMIF supports automatic output impedance calibration, a ZQ
	 * calibration long command is issued every time it exits active
	 * power-down and precharge power-down modes. The EMIF waits and
	 * blocks any other command during this calibration.
	 * The EMIF does not allow selective disabling of ZQ calibration upon
	 * exit of power-down mode. Due to very short periods of power-down
	 * cycles, ZQ calibration overhead creates bandwidth issues and
	 * increases overall system power consumption. On the other hand,
	 * issuing ZQ calibration long commands when exiting self-refresh is
	 * still required.
	 *
	 * WORKAROUND
	 * Because there is no power consumption benefit of the power-down due
	 * to the calibration and there is a performance risk, the guideline
	 * is to not allow power-down state and, therefore, to not have set
	 * the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field to 0x4.
	 */
	if ((emif->plat_data->ip_rev == EMIF_4D) &&
		(lpmode == EMIF_LP_MODE_PWR_DN)) {
		WARN_ONCE(1, "REG_LP_MODE = LP_MODE_PWR_DN(4) is prohibited by erratum i743 switch to LP_MODE_SELF_REFRESH(2)\n");
		/* rollback LP_MODE to Self-refresh mode */
		lpmode = EMIF_LP_MODE_SELF_REFRESH;
	}

	/*
	 * Workaround for errata i728: Disable LPMODE during FREQ_UPDATE
	 *
	 * i728 DESCRIPTION:
	 * The EMIF automatically puts the SDRAM into self-refresh mode
	 * after the EMIF has not performed accesses during
	 * EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM number of DDR clock cycles
	 * and the EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE bit field is set
	 * to 0x2. If during a small window the following three events
	 * occur:
	 * - The SR_TIMING counter expires
	 * - And frequency change is requested
	 * - And OCP access is requested
	 * Then it causes an unstable clock on the DDR interface.
	 *
	 * WORKAROUND
	 * To avoid the occurrence of the three events, the workaround
	 * is to disable the self-refresh when requesting a frequency
	 * change. Before requesting a frequency change the software must
	 * program EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0. When the
	 * frequency change has been done, the software can reprogram
	 * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2
	 */
	list_for_each_entry(emif, &device_list, node) {
		if (emif->lpmode == EMIF_LP_MODE_SELF_REFRESH)
			set_lpmode(emif, EMIF_LP_MODE_DISABLE);
	}

	/*
	 * TODO: Do FREQ_UPDATE here when an API
	 * is available for this as part of the new
	 * clock framework
	 */
/* Find addressing table entry based on the device's type and density */ staticconststruct lpddr2_addressing *get_addressing_table( conststruct ddr_device_info *device_info)
{
u32 index, type, density;
type = device_info->type;
density = device_info->density;
switch (type) { case DDR_TYPE_LPDDR2_S4:
index = density - 1; break; case DDR_TYPE_LPDDR2_S2: switch (density) { case DDR_DENSITY_1Gb: case DDR_DENSITY_2Gb:
index = density + 3; break; default:
index = density - 1;
} break; default: return NULL;
}
/* * sdram_io_width is in 'log2(x) - 1' form. Convert emif_bus_width * also to this form and subtract to get TA_DEVCNT, which is * in log2(x) form.
*/
emif_bus_width = __fls(emif_bus_width) - 1;
devcnt = emif_bus_width - sdram_io_width;
alert |= devcnt << TA_DEVCNT_SHIFT;
/* DEVWDT is in 'log2(x) - 3' form */
alert |= (sdram_io_width - 2) << TA_DEVWDT_SHIFT;
/* Timeout based on DDR frequency */
timeout = freq >= freq_threshold ? timeout_perf : timeout_pwr;
/* * The value to be set in register is "log2(timeout) - 3" * if timeout < 16 load 0 in register * if timeout is not a power of 2, round to next highest power of 2
*/ if (timeout < 16) {
timeout = 0;
} else { if (timeout & (timeout - 1))
timeout <<= 1;
timeout = __fls(timeout) - 3;
}
switch (lpmode) { case EMIF_LP_MODE_CLOCK_STOP:
shift = CS_TIM_SHIFT;
mask = CS_TIM_MASK; break; case EMIF_LP_MODE_SELF_REFRESH: /* Workaround for errata i735 */ if (timeout < 6)
timeout = 6;
shift = SR_TIM_SHIFT;
mask = SR_TIM_MASK; break; case EMIF_LP_MODE_PWR_DN:
shift = PD_TIM_SHIFT;
mask = PD_TIM_MASK; break; case EMIF_LP_MODE_DISABLE: default:
mask = 0;
shift = 0; break;
} /* Round to maximum in case of overflow, BUT warn! */ if (lpmode != EMIF_LP_MODE_DISABLE && timeout > mask >> shift) {
pr_err("TIMEOUT Overflow - lpmode=%d perf=%d pwr=%d freq=%d\n",
lpmode,
timeout_perf,
timeout_pwr,
freq_threshold);
WARN(1, "timeout=0x%02x greater than 0x%02x. Using max\n",
timeout, mask >> shift);
timeout = mask >> shift;
}
/* Setup required timing */
pwr_mgmt_ctrl = (timeout << shift) & mask; /* setup a default mask for rest of the modes */
pwr_mgmt_ctrl |= (SR_TIM_MASK | CS_TIM_MASK | PD_TIM_MASK) &
~mask;
/* No CS_TIM in EMIF_4D5 */ if (ip_rev == EMIF_4D5)
pwr_mgmt_ctrl &= ~CS_TIM_MASK;
pwr_mgmt_ctrl |= lpmode << LP_MODE_SHIFT;
return pwr_mgmt_ctrl;
}
/*
 * Get the temperature level of the EMIF instance:
 * Reads the MR4 register of attached SDRAM parts to find out the temperature
 * level. If there are two parts attached(one on each CS), then the temperature
 * level for the EMIF instance is the higher of the two temperatures.
 */
static void get_temperature_level(struct emif_data *emif)
{
	u32 temp, temperature_level;
	void __iomem *base;

	/*
	 * NOTE(review): `base` is never initialized and neither `temp` nor
	 * `temperature_level` is assigned before being read below - the MR4
	 * read sequence described in the comment above appears to have been
	 * lost from this extract. The only code change made here is
	 * restoring the whitespace in `static void`, which did not compile
	 * as written. Restore the missing reads from the original source.
	 */

	/* treat everything less than nominal(3) in MR4 as nominal */
	if (unlikely(temperature_level < SDRAM_TEMP_NOMINAL))
		temperature_level = SDRAM_TEMP_NOMINAL;

	/* if we get reserved value in MR4 persist with the existing value */
	if (likely(temperature_level != SDRAM_TEMP_RESERVED_4))
		emif->temperature_level = temperature_level;
}
/*
 * setup_temperature_sensitive_regs() - set the timings for temperature
 * sensitive registers. This happens once at initialisation time based
 * on the temperature at boot time and subsequently based on the temperature
 * alert interrupt. Temperature alert can happen when the temperature
 * increases or drops. So this function can have the effect of either
 * derating the timings or going back to nominal values.
 */
static void setup_temperature_sensitive_regs(struct emif_data *emif,
		struct emif_regs *regs)
{
	u32 tim1, tim3, ref_ctrl, type;
	void __iomem *base = emif->base;
	u32 temperature;

	/*
	 * NOTE(review): the logic that selects nominal vs de-rated values
	 * for tim1/tim3/ref_ctrl (based on `type` and `temperature`, per the
	 * function comment) is missing from this extract - only the final
	 * shadow-register writes survive, so the values written below are
	 * uninitialized, and the `out:` label has lost its matching goto.
	 * The only code change made here is restoring the whitespace in
	 * `static void`, which did not compile as written. Restore the
	 * selection logic from the original source.
	 */
out:
	/* Latch the chosen timings into the shadow registers */
	writel(tim1, base + EMIF_SDRAM_TIMING_1_SHDW);
	writel(tim3, base + EMIF_SDRAM_TIMING_3_SHDW);
	writel(ref_ctrl, base + EMIF_SDRAM_REFRESH_CTRL_SHDW);
}
if (unlikely(emif->temperature_level == old_temp_level)) { return IRQ_HANDLED;
} elseif (!emif->curr_regs) {
dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n"); return IRQ_HANDLED;
}
custom_configs = emif->plat_data->custom_configs;
/* * IF we detect higher than "nominal rating" from DDR sensor * on an unsupported DDR part, shutdown system
*/ if (custom_configs && !(custom_configs->mask &
EMIF_CUSTOM_CONFIG_EXTENDED_TEMP_PART)) { if (emif->temperature_level >= SDRAM_TEMP_HIGH_DERATE_REFRESH) {
dev_err(emif->dev, "%s:NOT Extended temperature capable memory. Converting MR4=0x%02x as shutdown event\n",
__func__, emif->temperature_level); /* * Temperature far too high - do kernel_power_off() * from thread context
*/
emif->temperature_level = SDRAM_TEMP_VERY_HIGH_SHUTDOWN; return IRQ_WAKE_THREAD;
}
}
if (emif->temperature_level < old_temp_level ||
emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) { /* * Temperature coming down - defer handling to thread OR * Temperature far too high - do kernel_power_off() from * thread context
*/
ret = IRQ_WAKE_THREAD;
} else { /* Temperature is going up - handle immediately */
setup_temperature_sensitive_regs(emif, emif->curr_regs);
do_freq_update();
ret = IRQ_HANDLED;
}
	/*
	 * NOTE(review): fragment of a hard-IRQ dispatch routine - the
	 * enclosing function header is missing from this extract, so
	 * `interrupts`, `base`, `dev` and `ret` are declared elsewhere.
	 * TODO: restore the surrounding function from the original source.
	 */

	/* Save the status and clear it */
	interrupts = readl(base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_STATUS);

	/*
	 * Handle temperature alert
	 * Temperature alert should be same for all ports
	 * So, it's enough to process it only for one of the ports
	 */
	if (interrupts & TA_SYS_MASK)
		ret = handle_temp_alert(base, emif);

	if (interrupts & ERR_SYS_MASK)
		dev_err(dev, "Access error from SYS port - %x\n", interrupts);

	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* Save the status and clear it */
		interrupts = readl(base + EMIF_LL_OCP_INTERRUPT_STATUS);
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_STATUS);

		if (interrupts & ERR_LL_MASK)
			dev_err(dev, "Access error from LL port - %x\n",
				interrupts);
	}
	/*
	 * NOTE(review): fragment of the threaded IRQ handler - the enclosing
	 * function header is missing from this extract; `irq_state` and
	 * `emif_lock` are declared elsewhere in the file. TODO: restore the
	 * surrounding function from the original source.
	 */
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN) {
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

		/* If we have Power OFF ability, use it, else try restarting */
		if (kernel_can_power_off()) {
			kernel_power_off();
		} else {
			WARN(1, "FIXME: NO pm_power_off!!! trying restart\n");
			kernel_restart("SDRAM Over-temp Emergency restart");
		}
		return IRQ_HANDLED;
	}

	spin_lock_irqsave(&emif_lock, irq_state);

	if (emif->curr_regs) {
		/* Re-derate (or restore) timings and trigger the update */
		setup_temperature_sensitive_regs(emif, emif->curr_regs);
		do_freq_update();
	} else {
		dev_err(emif->dev, "temperature alert before registers are calculated, not de-rating timings\n");
	}
	/*
	 * NOTE(review): tail of a one-time initialization routine - the
	 * enclosing function header is missing from this extract, so
	 * `interrupts`, `type`, `base`, `pwr_mgmt_ctrl`, `temp_alert_cfg`,
	 * `addressing` and `device_info` are declared in the lost portion.
	 * TODO: restore the surrounding function from the original source.
	 */

	/*
	 * Enable interrupts for SYS interface; temperature alert is only
	 * enabled for LPDDR2 parts (the only types with MR4 polling here)
	 */
	interrupts = EN_ERR_SYS_MASK;
	if (type == DDR_TYPE_LPDDR2_S2 || type == DDR_TYPE_LPDDR2_S4)
		interrupts |= EN_TA_SYS_MASK;
	writel(interrupts, base + EMIF_SYSTEM_OCP_INTERRUPT_ENABLE_SET);

	/* Enable interrupts for LL interface */
	if (emif->plat_data->hw_caps & EMIF_HW_CAPS_LL_INTERFACE) {
		/* TA need not be enabled for LL */
		interrupts = EN_ERR_LL_MASK;
		writel(interrupts, base + EMIF_LL_OCP_INTERRUPT_ENABLE_SET);
	}

	/*
	 * Init power management settings
	 * We don't know the frequency yet. Use a high frequency
	 * value for a conservative timeout setting
	 */
	pwr_mgmt_ctrl = get_pwr_mgmt_ctrl(1000000000, emif,
			emif->plat_data->ip_rev);
	emif->lpmode = (pwr_mgmt_ctrl & LP_MODE_MASK) >> LP_MODE_SHIFT;
	writel(pwr_mgmt_ctrl, base + EMIF_POWER_MANAGEMENT_CONTROL);

	/* Check temperature level */
	get_temperature_level(emif);
	if (emif->temperature_level == SDRAM_TEMP_VERY_HIGH_SHUTDOWN)
		dev_emerg(emif->dev, "SDRAM temperature exceeds operating limit.. Needs shut down!!!\n");

	/* Init temperature polling */
	temp_alert_cfg = get_temp_alert_config(addressing,
		emif->plat_data->custom_configs, device_info->cs1_used,
		device_info->io_width, get_emif_bus_width(emif));
	writel(temp_alert_cfg, base + EMIF_TEMPERATURE_ALERT_CONFIG);

	/*
	 * Program external PHY control registers that are not frequency
	 * dependent (registers 2-4 are skipped, presumably because they are
	 * frequency dependent and programmed elsewhere - TODO confirm)
	 */
	if (emif->plat_data->phy_type != EMIF_PHY_TYPE_INTELLIPHY)
		return;
	writel(EMIF_EXT_PHY_CTRL_1_VAL, base + EMIF_EXT_PHY_CTRL_1_SHDW);
	writel(EMIF_EXT_PHY_CTRL_5_VAL, base + EMIF_EXT_PHY_CTRL_5_SHDW);
	writel(EMIF_EXT_PHY_CTRL_6_VAL, base + EMIF_EXT_PHY_CTRL_6_SHDW);
	writel(EMIF_EXT_PHY_CTRL_7_VAL, base + EMIF_EXT_PHY_CTRL_7_SHDW);
	writel(EMIF_EXT_PHY_CTRL_8_VAL, base + EMIF_EXT_PHY_CTRL_8_SHDW);
	writel(EMIF_EXT_PHY_CTRL_9_VAL, base + EMIF_EXT_PHY_CTRL_9_SHDW);
	writel(EMIF_EXT_PHY_CTRL_10_VAL, base + EMIF_EXT_PHY_CTRL_10_SHDW);
	writel(EMIF_EXT_PHY_CTRL_11_VAL, base + EMIF_EXT_PHY_CTRL_11_SHDW);
	writel(EMIF_EXT_PHY_CTRL_12_VAL, base + EMIF_EXT_PHY_CTRL_12_SHDW);
	writel(EMIF_EXT_PHY_CTRL_13_VAL, base + EMIF_EXT_PHY_CTRL_13_SHDW);
	writel(EMIF_EXT_PHY_CTRL_14_VAL, base + EMIF_EXT_PHY_CTRL_14_SHDW);
	writel(EMIF_EXT_PHY_CTRL_15_VAL, base + EMIF_EXT_PHY_CTRL_15_SHDW);
	writel(EMIF_EXT_PHY_CTRL_16_VAL, base + EMIF_EXT_PHY_CTRL_16_SHDW);
	writel(EMIF_EXT_PHY_CTRL_17_VAL, base + EMIF_EXT_PHY_CTRL_17_SHDW);
	writel(EMIF_EXT_PHY_CTRL_18_VAL, base + EMIF_EXT_PHY_CTRL_18_SHDW);
	writel(EMIF_EXT_PHY_CTRL_19_VAL, base + EMIF_EXT_PHY_CTRL_19_SHDW);
	writel(EMIF_EXT_PHY_CTRL_20_VAL, base + EMIF_EXT_PHY_CTRL_20_SHDW);
	writel(EMIF_EXT_PHY_CTRL_21_VAL, base + EMIF_EXT_PHY_CTRL_21_SHDW);
	writel(EMIF_EXT_PHY_CTRL_22_VAL, base + EMIF_EXT_PHY_CTRL_22_SHDW);
	writel(EMIF_EXT_PHY_CTRL_23_VAL, base + EMIF_EXT_PHY_CTRL_23_SHDW);
	writel(EMIF_EXT_PHY_CTRL_24_VAL, base + EMIF_EXT_PHY_CTRL_24_SHDW);
}
	/*
	 * NOTE(review): tail of a DDR-info helper - the enclosing function
	 * header (declaring `density`, `io_width` and `dev_info`) is missing
	 * from this extract. TODO: restore from the original source.
	 */

	/*
	 * Convert from density in Mb to the density encoding in jedc_ddr.h;
	 * non-power-of-two values get encoding 0
	 */
	if (density & (density - 1))
		dev_info->density = 0;
	else
		dev_info->density = __fls(density) - 5;

	/*
	 * Convert from io_width in bits to io_width encoding in jedc_ddr.h;
	 * non-power-of-two values get encoding 0
	 */
	if (io_width & (io_width - 1))
		dev_info->io_width = 0;
	else
		dev_info->io_width = __fls(io_width) - 1;
}
if (of_property_read_bool(np_emif, "hw-caps-ll-interface"))
pd->hw_caps |= EMIF_HW_CAPS_LL_INTERFACE;
of_get_ddr_info(np_emif, np_ddr, dev_info); if (!is_dev_data_valid(pd->device_info->type, pd->device_info->density,
pd->device_info->io_width, pd->phy_type, pd->ip_rev,
emif->dev)) {
dev_err(dev, "%s: invalid device data!!\n", __func__); goto error;
} /* * For EMIF instances other than EMIF1 see if the devices connected * are exactly same as on EMIF1(which is typically the case). If so, * mark it as a duplicate of EMIF1. This will save some memory and * computation.
*/ if (emif1 && emif1->np_ddr == np_ddr) {
emif->duplicate = true; goto out;
} elseif (emif1) {
dev_warn(emif->dev, "%s: Non-symmetric DDR geometry\n",
__func__);
}
/* * For EMIF instances other than EMIF1 see if the devices connected * are exactly same as on EMIF1(which is typically the case). If so, * mark it as a duplicate of EMIF1 and skip copying timings data. * This will save some memory and some computation later.
*/
emif->duplicate = emif1 && (memcmp(dev_info,
emif1->plat_data->device_info, sizeof(struct ddr_device_info)) == 0);
/* * Copy custom configs - ignore allocation error, if any, as * custom_configs is not very critical
*/
cust_cfgs = pd->custom_configs; if (cust_cfgs && is_custom_config_valid(cust_cfgs, dev)) {
temp = devm_kzalloc(dev, sizeof(*cust_cfgs), GFP_KERNEL); if (temp)
memcpy(temp, cust_cfgs, sizeof(*cust_cfgs));
pd->custom_configs = temp;
}
/* * Copy timings and min-tck values from platform data. If it is not * available or if memory allocation fails, use JEDEC defaults
*/
size = sizeof(struct lpddr2_timings) * pd->timings_arr_size; if (pd->timings) {
temp = devm_kzalloc(dev, size, GFP_KERNEL); if (temp) {
memcpy(temp, pd->timings, size);
pd->timings = temp;
} else {
get_default_timings(emif);
}
} else {
get_default_timings(emif);
}