// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * Copyright 2009 Jonathan Corbet <corbet@lwn.net>
*/
/* * Core code for the Via multifunction framebuffer device.
*/ #include <linux/aperture.h> #include <linux/export.h> #include <linux/via-core.h> #include <linux/via_i2c.h> #include"via-gpio.h" #include"global.h"
/* * We currently only support one viafb device (will there ever be * more than one?), so just declare it globally here.
*/ staticstruct viafb_dev global_dev;
/* ---------------------------------------------------------------------- */ /* * Interrupt management. We have a single IRQ line for a lot of * different functions, so we need to share it. The design here * is that we don't want to reimplement the shared IRQ code here; * we also want to avoid having contention for a single handler thread. * So each subdev driver which needs interrupts just requests * them directly from the kernel. We just have what's needed for * overall access to the interrupt control register.
*/
/*
 * Which interrupts are enabled now?
 */
static u32 viafb_enabled_ints;

/*
 * Allow subdevs to ask for specific interrupts to be enabled.  These
 * functions must be called with reg_lock held.
 */
void viafb_irq_enable(u32 mask)
{
	/* Accumulate the mask so re-enables from several subdevs stack. */
	viafb_enabled_ints |= mask;
	viafb_mmio_write(VDE_INTERRUPT, viafb_enabled_ints | VDE_I_ENABLE);
}
EXPORT_SYMBOL_GPL(viafb_irq_enable);
/* ---------------------------------------------------------------------- */
/*
 * Currently, the camera driver is the only user of the DMA code, so we
 * only compile it in if the camera driver is being built.  Chances are,
 * most viafb systems will not need to have this extra code for a while.
 * As soon as another user comes long, the ifdef can be removed.
 */
#if IS_ENABLED(CONFIG_VIDEO_VIA_CAMERA)
/*
 * Access to the DMA engine.  This currently provides what the camera
 * driver needs (i.e. outgoing only) but is easily expandable if need
 * be.
 */

/*
 * There are four DMA channels in the vx855.  For now, we only
 * use one of them, though.  Most of the time, the DMA channel
 * will be idle, so we keep the IRQ handler unregistered except
 * when some subsystem has indicated an interest.
 */
static int viafb_dma_users;
static DECLARE_COMPLETION(viafb_dma_completion);
/*
 * This mutex protects viafb_dma_users and our global interrupt
 * registration state; it also serializes access to the DMA
 * engine.
 */
static DEFINE_MUTEX(viafb_dma_lock);
/*
 * The VX855 DMA descriptor (used for s/g transfers) looks
 * like this.  The layout is fixed by the hardware; do not
 * reorder or resize the fields.
 */
struct viafb_vx855_dma_descr {
	u32	addr_low;	/* Low part of phys addr */
	u32	addr_high;	/* High 12 bits of addr */
	u32	fb_offset;	/* Offset into FB memory */
	u32	seg_size;	/* Size, 16-byte units */
	u32	tile_mode;	/* "tile mode" setting */
	u32	next_desc_low;	/* Next descriptor addr */
	u32	next_desc_high;
	u32	pad;		/* Fill out to 64 bytes */
};
/*
 * Flags added to the "next descriptor low" pointers
 */
#define VIAFB_DMA_MAGIC		0x01	/* ??? Just has to be there */
#define VIAFB_DMA_FINAL_SEGMENT	0x02	/* Final segment */
/*
 * The completion IRQ handler.  The IRQ line is shared with other
 * functions, so only claim the interrupt when our DMA-done bit
 * is actually set.
 */
static irqreturn_t viafb_dma_irq(int irq, void *data)
{
	int csr;
	irqreturn_t ret = IRQ_NONE;

	spin_lock(&global_dev.reg_lock);
	csr = viafb_mmio_read(VDMA_CSR0);
	if (csr & VDMA_C_DONE) {
		/* Ack the interrupt by writing the DONE bit back. */
		viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
		complete(&viafb_dma_completion);
		ret = IRQ_HANDLED;
	}
	spin_unlock(&global_dev.reg_lock);
	return ret;
}
/*
 * Indicate a need for DMA functionality.  Returns 0 on success,
 * -ENODEV on unsupported hardware, or the request_irq() error.
 */
int viafb_request_dma(void)
{
	int ret = 0;

	/*
	 * Only VX855 is supported currently.
	 */
	if (global_dev.chip_type != UNICHROME_VX855)
		return -ENODEV;
	/*
	 * Note the new user and set up our interrupt handler
	 * if need be.
	 */
	mutex_lock(&viafb_dma_lock);
	viafb_dma_users++;
	if (viafb_dma_users == 1) {
		ret = request_irq(global_dev.pdev->irq, viafb_dma_irq,
				IRQF_SHARED, "via-dma", &viafb_dma_users);
		if (ret)
			/* Roll back the user count on failure. */
			viafb_dma_users--;
		else
			viafb_irq_enable(VDE_I_DMA0TDEN);
	}
	mutex_unlock(&viafb_dma_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(viafb_request_dma);
/* * Do a scatter/gather DMA copy from FB memory. You must have done * a successful call to viafb_request_dma() first.
*/ int viafb_dma_copy_out_sg(unsignedint offset, struct scatterlist *sg, int nsg)
{ struct viafb_vx855_dma_descr *descr; void *descrpages;
dma_addr_t descr_handle; unsignedlong flags; int i; struct scatterlist *sgentry;
dma_addr_t nextdesc;
/* * Get a place to put the descriptors.
*/
descrpages = dma_alloc_coherent(&global_dev.pdev->dev,
nsg*sizeof(struct viafb_vx855_dma_descr),
&descr_handle, GFP_KERNEL); if (descrpages == NULL) {
dev_err(&global_dev.pdev->dev, "Unable to get descr page.\n"); return -ENOMEM;
}
mutex_lock(&viafb_dma_lock); /* * Fill them in.
*/
descr = descrpages;
nextdesc = descr_handle + sizeof(struct viafb_vx855_dma_descr);
for_each_sg(sg, sgentry, nsg, i) {
dma_addr_t paddr = sg_dma_address(sgentry);
descr->addr_low = paddr & 0xfffffff0;
descr->addr_high = ((u64) paddr >> 32) & 0x0fff;
descr->fb_offset = offset;
descr->seg_size = sg_dma_len(sgentry) >> 4;
descr->tile_mode = 0;
descr->next_desc_low = (nextdesc&0xfffffff0) | VIAFB_DMA_MAGIC;
descr->next_desc_high = ((u64) nextdesc >> 32) & 0x0fff;
descr->pad = 0xffffffff; /* VIA driver does this */
offset += sg_dma_len(sgentry);
nextdesc += sizeof(struct viafb_vx855_dma_descr);
descr++;
}
descr[-1].next_desc_low = VIAFB_DMA_FINAL_SEGMENT|VIAFB_DMA_MAGIC; /* * Program the engine.
*/
spin_lock_irqsave(&global_dev.reg_lock, flags);
init_completion(&viafb_dma_completion);
viafb_mmio_write(VDMA_DQWCR0, 0);
viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_DONE);
viafb_mmio_write(VDMA_MR0, VDMA_MR_TDIE | VDMA_MR_CHAIN);
viafb_mmio_write(VDMA_DPRL0, descr_handle | VIAFB_DMA_MAGIC);
viafb_mmio_write(VDMA_DPRH0,
(((u64)descr_handle >> 32) & 0x0fff) | 0xf0000);
(void) viafb_mmio_read(VDMA_CSR0);
viafb_mmio_write(VDMA_CSR0, VDMA_C_ENABLE|VDMA_C_START);
spin_unlock_irqrestore(&global_dev.reg_lock, flags); /* * Now we just wait until the interrupt handler says * we're done. Except that, actually, we need to wait a little * longer: the interrupts seem to jump the gun a little and we * get corrupted frames sometimes.
*/
wait_for_completion_timeout(&viafb_dma_completion, 1);
msleep(1); if ((viafb_mmio_read(VDMA_CSR0)&VDMA_C_DONE) == 0)
printk(KERN_ERR "VIA DMA timeout!\n"); /* * Clean up and we're done.
*/
viafb_mmio_write(VDMA_CSR0, VDMA_C_DONE);
viafb_mmio_write(VDMA_MR0, 0); /* Reset int enable */
mutex_unlock(&viafb_dma_lock);
dma_free_coherent(&global_dev.pdev->dev,
nsg*sizeof(struct viafb_vx855_dma_descr), descrpages,
descr_handle); return 0;
}
EXPORT_SYMBOL_GPL(viafb_dma_copy_out_sg); #endif/* CONFIG_VIDEO_VIA_CAMERA */
/* ---------------------------------------------------------------------- */
/*
 * Figure out how big our framebuffer memory is.  Kind of ugly,
 * but evidently we can't trust the information found in the
 * fbdev configuration area.
 */
/* PCI device IDs of the "function 3" device on each supported chipset. */
static u16 via_function3[] = {
	CLE266_FUNCTION3, KM400_FUNCTION3, CN400_FUNCTION3, CN700_FUNCTION3,
	CX700_FUNCTION3, KM800_FUNCTION3, KM890_FUNCTION3, P4M890_FUNCTION3,
	P4M900_FUNCTION3, VX800_FUNCTION3, VX855_FUNCTION3, VX900_FUNCTION3,
};
/* Get the BIOS-configured framebuffer size from PCI configuration space
* of function 3 in the respective chipset */ staticint viafb_get_fb_size_from_pci(int chip_type)
{ int i;
u8 offset = 0;
u32 FBSize;
u32 VideoMemSize;
/* search for the "FUNCTION3" device in this chipset */ for (i = 0; i < ARRAY_SIZE(via_function3); i++) { struct pci_dev *pdev;
pdev = pci_get_device(PCI_VENDOR_ID_VIA, via_function3[i],
NULL); if (!pdev) continue;
DEBUG_MSG(KERN_INFO "Device ID = %x\n", pdev->device);
switch (pdev->device) { case CLE266_FUNCTION3: case KM400_FUNCTION3:
offset = 0xE0; break; case CN400_FUNCTION3: case CN700_FUNCTION3: case CX700_FUNCTION3: case KM800_FUNCTION3: case KM890_FUNCTION3: case P4M890_FUNCTION3: case P4M900_FUNCTION3: case VX800_FUNCTION3: case VX855_FUNCTION3: case VX900_FUNCTION3: /*case CN750_FUNCTION3: */
offset = 0xA0; break;
}
/*
 * Figure out and map our MMIO regions.  Returns 0 on success or a
 * negative errno if the framebuffer aperture cannot be mapped; a
 * failure to map the engine registers is tolerated (logged only).
 */
static int via_pci_setup_mmio(struct viafb_dev *vdev)
{
	int ret;

	/*
	 * Hook up to the device registers.  Note that we soldier
	 * on if it fails; the framebuffer can operate (without
	 * acceleration) without this region.
	 */
	vdev->engine_start = pci_resource_start(vdev->pdev, 1);
	vdev->engine_len = pci_resource_len(vdev->pdev, 1);
	vdev->engine_mmio = ioremap(vdev->engine_start,
			vdev->engine_len);
	if (vdev->engine_mmio == NULL)
		dev_err(&vdev->pdev->dev,
				"Unable to map engine MMIO; operation will be "
				"slow and crippled.\n");

	/*
	 * Map in framebuffer memory.  For now, failure here is
	 * fatal.  Unfortunately, in the absence of significant
	 * vmalloc space, failure here is also entirely plausible.
	 * Eventually we want to move away from mapping this
	 * entire region.
	 */
	if (vdev->chip_type == UNICHROME_VX900)
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 2);
	else
		vdev->fbmem_start = pci_resource_start(vdev->pdev, 0);
	ret = vdev->fbmem_len = viafb_get_fb_size_from_pci(vdev->chip_type);
	if (ret < 0)
		goto out_unmap;

	/* try to map less memory on failure, 8 MB should be still enough */
	for (; vdev->fbmem_len >= 8 << 20; vdev->fbmem_len /= 2) {
		vdev->fbmem = ioremap_wc(vdev->fbmem_start, vdev->fbmem_len);
		if (vdev->fbmem)
			break;
	}
	if (vdev->fbmem == NULL) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	return 0;
out_unmap:
	iounmap(vdev->engine_mmio);
	return ret;
}
/*
 * Create and register one platform device for a viafb subdev.
 * Returns 0 on success; on failure the platdev pointer is left NULL.
 */
static int via_create_subdev(struct viafb_dev *vdev,
			     struct viafb_subdev_info *info)
{
	int ret;

	info->platdev = platform_device_alloc(info->name, -1);
	if (!info->platdev) {
		dev_err(&vdev->pdev->dev, "Unable to allocate pdev %s\n",
			info->name);
		return -ENOMEM;
	}
	info->platdev->dev.parent = &vdev->pdev->dev;
	info->platdev->dev.platform_data = vdev;
	ret = platform_device_add(info->platdev);
	if (ret) {
		dev_err(&vdev->pdev->dev, "Unable to add pdev %s\n",
			info->name);
		/* Drop the reference taken by platform_device_alloc(). */
		platform_device_put(info->platdev);
		info->platdev = NULL;
	}
	return ret;
}
/*
 * Create all of the viafb sub-devices.  Always returns 0.
 */
static int via_setup_subdevs(struct viafb_dev *vdev)
{
	int i;

	/*
	 * Ignore return values.  Even if some of the devices
	 * fail to be created, we'll still be able to use some
	 * of the rest.
	 */
	for (i = 0; i < N_SUBDEVS; i++)
		via_create_subdev(vdev, viafb_subdevs + i);
	return 0;
}
/*
 * Unregister every sub-device that was successfully created.
 */
static void via_teardown_subdevs(void)
{
	int i;

	for (i = 0; i < N_SUBDEVS; i++)
		if (viafb_subdevs[i].platdev) {
			/* Clear platform_data before the device goes away. */
			viafb_subdevs[i].platdev->dev.platform_data = NULL;
			platform_device_unregister(viafb_subdevs[i].platdev);
		}
}
/* * "I've occasionally hit a few drivers that caused suspend * failures, and each and every time it was a driver bug, and * the right thing to do was to just ignore the error and suspend * anyway - returning an error code and trying to undo the suspend * is not what anybody ever really wants, even if our model *_allows_ for it." * -- Linus Torvalds, Dec. 7, 2009
*/
mutex_lock(&viafb_pm_hooks_lock);
list_for_each_entry_reverse(hooks, &viafb_pm_hooks, list)
hooks->suspend(hooks->private);
mutex_unlock(&viafb_pm_hooks_lock);
/* Now bring back any subdevs */
mutex_lock(&viafb_pm_hooks_lock);
list_for_each_entry(hooks, &viafb_pm_hooks, list)
hooks->resume(hooks->private);
mutex_unlock(&viafb_pm_hooks_lock);
return 0;
}
staticint via_pci_probe(struct pci_dev *pdev, conststruct pci_device_id *ent)
{ int ret;
ret = aperture_remove_conflicting_pci_devices(pdev, "viafb"); if (ret) return ret;
ret = pci_enable_device(pdev); if (ret) return ret;
spin_lock_init(&global_dev.reg_lock);
ret = via_pci_setup_mmio(&global_dev); if (ret) goto out_disable; /* * Set up interrupts and create our subdevices. Continue even if * some things fail.
*/
viafb_int_init();
via_setup_subdevs(&global_dev); /* * Set up the framebuffer device
*/
ret = via_fb_pci_probe(&global_dev); if (ret) goto out_subdevs; return 0;
if (fb_modesetting_disabled("viafb")) return -ENODEV;
ret = viafb_init(); if (ret) return ret;
viafb_i2c_init();
viafb_gpio_init();
ret = pci_register_driver(&via_driver); if (ret) {
viafb_gpio_exit();
viafb_i2c_exit(); return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.