/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 *
 * One instance is kept per open file descriptor (file->private_data),
 * so each opener can select its own access mode independently.
 */
struct mtd_file_info {
	struct mtd_info *mtd;		/* underlying MTD device for this fd */
	enum mtd_file_modes mode;	/* per-fd access mode; the ioctl paths
					 * below switch on OTP_FACTORY /
					 * OTP_USER / RAW variants */
};
staticint mtdchar_open(struct inode *inode, struct file *file)
{ int minor = iminor(inode); int devnum = minor >> 1; int ret = 0; struct mtd_info *mtd; struct mtd_file_info *mfi;
pr_debug("MTD_open\n");
/* You can't open the RO devices RW */ if ((file->f_mode & FMODE_WRITE) && (minor & 1)) return -EACCES;
mtd = get_mtd_device(NULL, devnum);
if (IS_ERR(mtd)) return PTR_ERR(mtd);
if (mtd->type == MTD_ABSENT) {
ret = -ENODEV; goto out1;
}
/* You can't open it RW if it's not a writeable device */ if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
ret = -EACCES; goto out1;
}
/* Back in June 2001, dwmw2 wrote: * * FIXME: This _really_ needs to die. In 2.5, we should lock the * userspace buffer down and use it directly with readv/writev. * * The implementation below, using mtd_kmalloc_up_to, mitigates * allocation failures when the system is under low-memory situations * or if memory is highly fragmented at the cost of reducing the * performance of the requested transfer due to a smaller buffer size. * * A more complex but more memory-efficient implementation based on * get_user_pages and iovecs to cover extents of those pages is a * longer-term goal, as intimated by dwmw2 above. However, for the * write case, this requires yet more complex head and tail transfer * handling when those head and tail offsets and sizes are such that * alignment requirements are not met in the NAND subdriver.
*/
ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen; break;
} default:
ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
} /* Nand returns -EBADMSG on ECC errors, but it returns * the data. For our userspace tools it is important * to dump areas with ECC errors! * For kernel internal usage it also might return -EUCLEAN * to signal the caller that a bitflip has occurred and has * been corrected by the ECC algorithm. * Userspace software which accesses NAND this way * must be aware of the fact that it deals with NAND
*/ if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
*ppos += retlen; if (copy_to_user(buf, kbuf, retlen)) {
kfree(kbuf); return -EFAULT;
} else
total_retlen += retlen;
kbuf = mtd_kmalloc_up_to(mtd, &size); if (!kbuf) return -ENOMEM;
while (count) {
len = min_t(size_t, count, size);
if (copy_from_user(kbuf, buf, len)) {
kfree(kbuf); return -EFAULT;
}
switch (mfi->mode) { case MTD_FILE_MODE_OTP_FACTORY:
ret = -EROFS; break; case MTD_FILE_MODE_OTP_USER:
ret = mtd_write_user_prot_reg(mtd, *ppos, len,
&retlen, kbuf); break;
case MTD_FILE_MODE_RAW:
{ struct mtd_oob_ops ops = {};
ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen; break;
}
default:
ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
/* * Return -ENOSPC only if no data could be written at all. * Otherwise just return the number of bytes that actually * have been written.
*/ if ((ret == -ENOSPC) && (total_retlen)) break;
if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs)) return -EINVAL;
ops.oobbuf = kmalloc(length, GFP_KERNEL); if (!ops.oobbuf) return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT; elseif (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
ops.oobretlen))
ret = -EFAULT;
kfree(ops.oobbuf);
/* * NAND returns -EBADMSG on ECC errors, but it returns the OOB * data. For our userspace tools it is important to dump areas * with ECC errors! * For kernel internal usage it also might return -EUCLEAN * to signal the caller that a bitflip has occurred and has * been corrected by the ECC algorithm. * * Note: currently the standard NAND function, nand_read_oob_std, * does not calculate ECC for the OOB area, so do not rely on * this behavior unless you have replaced it with your own.
*/ if (mtd_is_bitflip_or_eccerr(ret)) return 0;
return ret;
}
/* * Copies (and truncates, if necessary) OOB layout information to the * deprecated layout struct, nand_ecclayout_user. This is necessary only to * support the deprecated API ioctl ECCGETLAYOUT while allowing all new * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops * can describe any kind of OOB layout with almost zero overhead from a * memory usage point of view).
*/ staticint shrink_ecclayout(struct mtd_info *mtd, struct nand_ecclayout_user *to)
{ struct mtd_oob_region oobregion; int i, section = 0, ret;
if (!mtd || !to) return -EINVAL;
memset(to, 0, sizeof(*to));
to->eccbytes = 0; for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
u32 eccpos;
ret = mtd_ooblayout_ecc(mtd, section++, &oobregion); if (ret < 0) { if (ret != -ERANGE) return ret;
/* * Shorten non-page-aligned, eraseblock-sized writes so that * the write ends on an eraseblock boundary. This is necessary * for adjust_oob_length() to properly handle non-page-aligned * writes.
*/ if (ops.len == mtd->erasesize)
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
/* * For writes which are not OOB-only, adjust the amount of OOB * data written according to the number of data pages written. * This is necessary to prevent OOB data from being skipped * over in data+OOB writes requiring multiple mtd_write_oob() * calls to be completed.
*/
adjust_oob_length(mtd, req.start, &ops);
if (copy_from_user(datbuf, usr_data, ops.len) ||
copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
ret = -EFAULT; break;
}
ret = mtd_write_oob(mtd, req.start, &ops); if (ret) break;
/* * Shorten non-page-aligned, eraseblock-sized reads so that the * read ends on an eraseblock boundary. This is necessary in * order to prevent OOB data for some pages from being * duplicated in the output of non-page-aligned reads requiring * multiple mtd_read_oob() calls to be completed.
*/ if (ops.len == mtd->erasesize)
ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
/* * As multiple iterations of the above loop (and therefore multiple * mtd_read_oob() calls) may be necessary to complete the read request, * adjust the final return code to ensure it accounts for all detected * ECC errors.
*/ if (!ret || mtd_is_bitflip(ret)) { if (req.ecc_stats.uncorrectable_errors > 0)
ret = -EBADMSG; elseif (req.ecc_stats.corrected_bitflips > 0)
ret = -EUCLEAN;
}
/* * Check the file mode to require "dangerous" commands to have write * permissions.
*/ switch (cmd) { /* "safe" commands */ case MEMGETREGIONCOUNT: case MEMGETREGIONINFO: case MEMGETINFO: case MEMREADOOB: case MEMREADOOB64: case MEMREAD: case MEMISLOCKED: case MEMGETOOBSEL: case MEMGETBADBLOCK: case OTPSELECT: case OTPGETREGIONCOUNT: case OTPGETREGIONINFO: case ECCGETLAYOUT: case ECCGETSTATS: case MTDFILEMODE: case BLKPG: case BLKRRPART: break;
/* "dangerous" commands */ case MEMERASE: case MEMERASE64: case MEMLOCK: case MEMUNLOCK: case MEMSETBADBLOCK: case MEMWRITEOOB: case MEMWRITEOOB64: case MEMWRITE: case OTPLOCK: case OTPERASE: if (!(file->f_mode & FMODE_WRITE)) return -EPERM; break;
default: return -ENOTTY;
}
switch (cmd) { case MEMGETREGIONCOUNT: if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int))) return -EFAULT; break;
default:
ret = mtdchar_ioctl(file, cmd, (unsignedlong)argp);
}
mutex_unlock(&master->master.chrdev_lock);
return ret;
}
#endif/* CONFIG_COMPAT */
/* * try to determine where a shared mapping can be made * - only supported for NOMMU at the moment (MMU can't doesn't copy private * mappings)
*/ #ifndef CONFIG_MMU staticunsignedlong mtdchar_get_unmapped_area(struct file *file, unsignedlong addr, unsignedlong len, unsignedlong pgoff, unsignedlong flags)
{ struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; unsignedlong offset; int ret;
/*
 * set up a mapping for shared memory segments
 *
 * Returns 0 on success or a negative errno. On MMU systems the mapping
 * path is intentionally disabled (see the FIXME below) and -ENODEV is
 * returned; on NOMMU systems only shared mappings are permitted.
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/* This is broken because it assumes the MTD device is map-based
	   and that mtd->priv is a valid struct map_info. It should be
	   replaced with something that uses the mtd_get_unmapped_area()
	   operation properly. */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		/* Uncached mapping for O_DSYNC opens or physical addresses
		   beyond the kernel's direct map. */
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	/* NOMMU: private mappings cannot be supported, shared ones can. */
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}
ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd", &mtd_fops); if (ret < 0) {
pr_err("Can't allocate major number %d for MTD\n",
MTD_CHAR_MAJOR); return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.