/*
 * By setting it, the histogram internal buffer is being cleared at the
 * same time it's being read. This bit must be cleared afterwards.
 */
isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
/*
 * We'll clear 4 words at each iteration for optimization. It avoids
 * 3/4 of the jumps. We also know HIST_MEM_SIZE is divisible by 4.
 */
for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
}
isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
/*
 * By setting it, the histogram internal buffer is being cleared at the
 * same time it's being read. This bit must be cleared just after all
 * data is acquired.
 */
isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);
/*
 * We'll read 4 times a 4-bytes-word at each iteration for
 * optimization. It avoids 3/4 of the jumps. We also know buf_size is
 * divisible by 16.
 */
for (i = hist->buf_size / 16; i > 0; i--) {
*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
*buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
}
isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
ISPHIST_CNT_CLEAR);
return STAT_BUF_DONE;
}
/*
 * hist_comp_params - Check if the user-given parameters differ from the
 *		      currently applied configuration.
 * @hist: Pointer to the histogram module state.
 * @user_cfg: Pointer to the user configuration to compare.
 *
 * Returns 1 if the configurations differ (reconfiguration needed),
 * 0 if they match.
 *
 * NOTE(review): the corrupted text carried a "hist_buf_process" header and
 * declared unrelated locals (user_cfg from hist->priv, ret), but the body
 * compares a current configuration against a user one, and the visible
 * caller in hist_set_params() invokes hist_comp_params(hist, user_cfg) —
 * the definition has been restored under that name with matching locals.
 */
static int hist_comp_params(struct ispstat *hist,
			    struct omap3isp_hist_config *user_cfg)
{
	struct omap3isp_hist_config *cur_cfg = hist->priv;
	int c;

	if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
		return 1;

	if (cur_cfg->hist_bins != user_cfg->hist_bins)
		return 1;

	/* Weights: the 4th weight is not used with the Foveon X3 CFA. */
	for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
		if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
			break;
		else if (cur_cfg->wg[c] != user_cfg->wg[c])
			return 1;
	}

	if (cur_cfg->num_regions != user_cfg->num_regions)
		return 1;

	/* Regions */
	for (c = 0; c < user_cfg->num_regions; c++) {
		if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
			return 1;
		if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
			return 1;
		if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
			return 1;
		if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
			return 1;
	}

	return 0;
}
/*
 * hist_update_params - Helper function to check and store user given params.
 * @new_conf: Pointer to user configuration structure.
 *
 * If the new configuration differs from the current one (or nothing has been
 * configured yet), it is copied into hist->priv and an update is flagged.
 */
static void hist_set_params(struct ispstat *hist, void *new_conf)
{
	struct omap3isp_hist_config *user_cfg = new_conf;
	struct omap3isp_hist_config *cur_cfg = hist->priv;

	if (!hist->configured || hist_comp_params(hist, user_cfg)) {
		memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
		/*
		 * Zero accumulation frames makes no sense; clamp to 1.
		 * NOTE(review): only the user's copy is clamped, after the
		 * memcpy, so cur_cfg keeps the raw value — this matches the
		 * apparent original ordering; confirm it is intentional.
		 */
		if (user_cfg->num_acc_frames == 0)
			user_cfg->num_acc_frames = 1;
		hist->inc_config++;
		hist->update = 1;
		/*
		 * User might be asked for a bigger buffer than necessary for
		 * this configuration. In order to return the right amount of
		 * data during buffer request, let's calculate the size here
		 * instead of stick with user_cfg->buf_size.
		 */
		cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
	}
}
hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL); if (hist_cfg == NULL) return -ENOMEM;
hist->isp = isp;
if (HIST_CONFIG_DMA) {
dma_cap_mask_t mask;
/*
 * We need slave capable channel without DMA request line for
 * reading out the data.
 * For this we can use dma_request_chan_by_mask() as we are
 * happy with any channel as long as it is capable of slave
 * configuration.
 */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
hist->dma_ch = dma_request_chan_by_mask(&mask); if (IS_ERR(hist->dma_ch)) {
ret = PTR_ERR(hist->dma_ch); if (ret == -EPROBE_DEFER) goto err;
/*
 * NOTE(review): the German text that followed here is extraction residue
 * from a web page, not driver source. Translation: "The information on
 * this website was carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax display
 * and the measurement are still experimental." It should be removed from
 * this file entirely.
 */