/*
 * Out-of-line versions of the i/o routines that redirect into the
 * platform-specific version.  Note that "platform-specific" may mean
 * "generic", which bumps through the machine vector.
 */
/* * The _relaxed functions must be ordered w.r.t. each other, but they don't * have to be ordered w.r.t. other memory accesses.
*/
u8 readb_relaxed(constvolatilevoid __iomem *addr)
{
mb(); return __raw_readb(addr);
}
/* * Read COUNT 16-bit words from port PORT into memory starting at * SRC. SRC must be at least short aligned. This is used by the * IDE driver to read disk sectors. Performance is important, but * the interfaces seems to be slow: just using the inlined version * of the inw() breaks things.
*/ void ioread16_rep(constvoid __iomem *port, void *dst, unsignedlong count)
{ if (unlikely((unsignedlong)dst & 0x3)) { if (!count) return;
BUG_ON((unsignedlong)dst & 0x1);
count--;
*(unsignedshort *)dst = ioread16(port);
dst += 2;
}
while (count >= 2) { unsignedint w;
count -= 2;
w = ioread16(port);
w |= ioread16(port) << 16;
*(unsignedint *)dst = w;
dst += 4;
}
if (count) {
*(unsignedshort*)dst = ioread16(port);
}
}
/* * Read COUNT 32-bit words from port PORT into memory starting at * SRC. Now works with any alignment in SRC. Performance is important, * but the interfaces seems to be slow: just using the inlined version * of the inl() breaks things.
*/ void ioread32_rep(constvoid __iomem *port, void *dst, unsignedlong count)
{ if (unlikely((unsignedlong)dst & 0x3)) { while (count--) { struct S { int x __attribute__((packed)); };
((struct S *)dst)->x = ioread32(port);
dst += 4;
}
} else { /* Buffer 32-bit aligned. */ while (count--) {
*(unsignedint *)dst = ioread32(port);
dst += 4;
}
}
}
/*
 * Like insb but in the opposite direction.
 * Don't worry as much about doing aligned memory transfers:
 * doing byte reads the "slow" way isn't nearly as slow as
 * doing byte writes the slow way (no r-m-w cycle).
 */
void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
{
	const unsigned char *src = xsrc;
	while (count--)
		iowrite8(*src++, port);
}
/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interfaces seems to be slow: just using the inlined version of the
 * outw() breaks things.
 */
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)src & 0x1);
		/* Consume one word to bring SRC up to 32-bit alignment. */
		iowrite16(*(unsigned short *)src, port);
		src += 2;
		--count;
	}
	/* Main loop: load one aligned 32-bit quantity and emit it as
	   two port words (low half first). */
	while (count >= 2) {
		unsigned int w = *(unsigned int *)src;
		count -= 2;
		iowrite16(w, port);
		iowrite16(w >> 16, port);
		src += 4;
	}
	/* Trailing odd word, if any. */
	if (count) {
		iowrite16(*(unsigned short *)src, port);
	}
}
/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interfaces seems to be slow:
 * just using the inlined version of the outl() breaks things.
 */
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		/* Misaligned buffer: go through a packed struct so the
		   compiler emits alignment-safe loads. */
		while (count--) {
			struct S { int x __attribute__((packed)); };
			iowrite32(((struct S *)src)->x, port);
			src += 4;
		}
	} else {
		/* Buffer 32-bit aligned.  */
		while (count--) {
			iowrite32(*(unsigned int *)src, port);
			src += 4;
		}
	}
}
/* * Copy data from IO memory space to "real" memory space. * This needs to be optimized.
*/ void memcpy_fromio(void *to, constvolatilevoid __iomem *from, long count)
{ /* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */
if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
count -= 8; do {
*(u64 *)to = __raw_readq(from);
count -= 8;
to += 8;
from += 8;
} while (count >= 0);
count += 8;
}
if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4; do {
*(u32 *)to = __raw_readl(from);
count -= 4;
to += 4;
from += 4;
} while (count >= 0);
count += 4;
}
if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2; do {
*(u16 *)to = __raw_readw(from);
count -= 2;
to += 2;
from += 2;
} while (count >= 0);
count += 2;
}
while (count > 0) {
*(u8 *) to = __raw_readb(from);
count--;
to++;
from++;
}
mb();
}
EXPORT_SYMBOL(memcpy_fromio);
/* * Copy data from "real" memory space to IO memory space. * This needs to be optimized.
*/ void memcpy_toio(volatilevoid __iomem *to, constvoid *from, long count)
{ /* Optimize co-aligned transfers. Everything else gets handled
a byte at a time. */ /* FIXME -- align FROM. */
if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
count -= 8; do {
__raw_writeq(*(const u64 *)from, to);
count -= 8;
to += 8;
from += 8;
} while (count >= 0);
count += 8;
}
if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
count -= 4; do {
__raw_writel(*(const u32 *)from, to);
count -= 4;
to += 4;
from += 4;
} while (count >= 0);
count += 4;
}
if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
count -= 2; do {
__raw_writew(*(const u16 *)from, to);
count -= 2;
to += 2;
from += 2;
} while (count >= 0);
count += 2;
}
/* * "memset" on IO memory space.
*/ void _memset_c_io(volatilevoid __iomem *to, unsignedlong c, long count)
{ /* Handle any initial odd byte */ if (count > 0 && ((u64)to & 1)) {
__raw_writeb(c, to);
to++;
count--;
}
/* Handle any initial odd halfword */ if (count >= 2 && ((u64)to & 2)) {
__raw_writew(c, to);
to += 2;
count -= 2;
}
/* Handle any initial odd word */ if (count >= 4 && ((u64)to & 4)) {
__raw_writel(c, to);
to += 4;
count -= 4;
}
/* Handle all full-sized quadwords: we're aligned
(or have a small count) */
count -= 8; if (count >= 0) { do {
__raw_writeq(c, to);
to += 8;
count -= 8;
} while (count >= 0);
}
count += 8;
/* The tail is word-aligned if we still have count >= 4 */ if (count >= 4) {
__raw_writel(c, to);
to += 4;
count -= 4;
}
/* The tail is half-word aligned if we have count >= 2 */ if (count >= 2) {
__raw_writew(c, to);
to += 2;
count -= 2;
}
/* And finally, one last byte.. */ if (count) {
__raw_writeb(c, to);
}
mb();
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.