@@ -88,12 +88,21 @@ DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr
#define TS_DMA_PACKETS (8)
#define TS_DMA_BYTES (188 * TS_DMA_PACKETS)
+#define TS_DMA_BUFFERS (2)
+#if TS_DMA_BYTES + 188 > PAGE_SIZE
+#error "DMA buffer larger than one page"
+#endif
#define I2C_ADDR_TDA10046 0x10
#define I2C_ADDR_TUA6034 0xc2
#define NHWFILTERS 8
struct pluto {
+ /* DMA buffers, placed first in the struct so they are aligned. Each
+ * buffer gets a full page to itself even though TS_DMA_BYTES is smaller. */
+ u8 dma_buf[TS_DMA_BUFFERS][PAGE_SIZE];
+ struct work_struct work;
+
/* pci */
struct pci_dev *pdev;
u8 __iomem *io_mem;
@@ -118,9 +127,12 @@ struct pluto {
unsigned int overflow;
/* dma */
- dma_addr_t dma_addr;
- u8 dma_buf[TS_DMA_BYTES];
- u8 dummy[4096];
+ dma_addr_t dma_addr[TS_DMA_BUFFERS];
+ atomic_t npackets[TS_DMA_BUFFERS];
+ unsigned int dma_buf_num, dmx_buf_num;
+
+ struct workqueue_struct *dmaq;
+ char wqname[16];
};
static inline struct pluto *feed_to_pluto(struct dvb_demux_feed *feed)
@@ -232,26 +244,36 @@ static void pluto_reset_ts(struct pluto
}
}
-static void pluto_set_dma_addr(struct pluto *pluto)
-{
- pluto_writereg(pluto, REG_PCAR, pluto->dma_addr);
+static void pluto_set_dma_addr(struct pluto *pluto, unsigned int num)
+{
+ pluto_writereg(pluto, REG_PCAR, pluto->dma_addr[num]);
}
static int __devinit pluto_dma_map(struct pluto *pluto)
{
- pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
+ int r;
+
+ pluto->dma_addr[0] = pci_map_single(pluto->pdev, pluto->dma_buf[0],
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
- return pci_dma_mapping_error(pluto->dma_addr);
-#else
- return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
-#endif
+ r = pci_dma_mapping_error(pluto->pdev, pluto->dma_addr[0]);
+ if (r)
+ return r;
+
+ pluto->dma_addr[1] = pci_map_single(pluto->pdev, pluto->dma_buf[1],
+ TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ r = pci_dma_mapping_error(pluto->pdev, pluto->dma_addr[1]);
+ if (r)
+ pci_unmap_single(pluto->pdev, pluto->dma_addr[0],
+ TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+
+ return r;
}
static void pluto_dma_unmap(struct pluto *pluto)
{
- pci_unmap_single(pluto->pdev, pluto->dma_addr,
+ pci_unmap_single(pluto->pdev, pluto->dma_addr[0],
+ TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ pci_unmap_single(pluto->pdev, pluto->dma_addr[1],
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
}
@@ -289,9 +311,12 @@ static int pluto_stop_feed(struct dvb_de
static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
{
+ unsigned int bnum = pluto->dma_buf_num % TS_DMA_BUFFERS;
+ unsigned int newbnum;
+
/* synchronize the DMA transfer with the CPU
* first so that we see updated contents. */
- pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
+ pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr[bnum],
TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
/* Workaround for broken hardware:
@@ -305,28 +330,73 @@ static void pluto_dma_end(struct pluto *
* has been transfered. Only a reset seems to solve this
*/
if ((nbpackets == 0) || (nbpackets > TS_DMA_PACKETS)) {
- unsigned int i = 0;
- while (pluto->dma_buf[i] == 0x47)
- i += 188;
- nbpackets = i / 188;
- if (i == 0) {
+ unsigned int i;
+
+ for (i = 0, nbpackets = 0;
+ i < sizeof(pluto->dma_buf[bnum]) && pluto->dma_buf[bnum][i] == 0x47;
+ i += 188, nbpackets++)
+ ;
+
+ if (nbpackets == 0) {
pluto_reset_ts(pluto, 1);
- dev_printk(KERN_DEBUG, &pluto->pdev->dev, "resetting TS because of invalid packet counter\n");
+ dev_dbg(&pluto->pdev->dev,
+ "resetting TS because of invalid packet counter\n");
}
}
- dvb_dmx_swfilter_packets(&pluto->demux, pluto->dma_buf, nbpackets);
+ /* Mark buffer as used */
+ atomic_set(&pluto->npackets[bnum], nbpackets);
+
+ if (nbpackets) {
+ /* Switch to the next buffer */
+ pluto->dma_buf_num++;
+
+ /* queue sending buffer to demuxer */
+ queue_work(pluto->dmaq, &pluto->work);
+ }
+ newbnum = pluto->dma_buf_num % TS_DMA_BUFFERS;
+
+ if (atomic_read(&pluto->npackets[newbnum]))
+ dev_warn(&pluto->pdev->dev, "Overflow, buffer not ready\n");
+ /* TODO: new buffer still holds unconsumed packets; the card will now DMA over them */
+
+ /* Sync the new buffer and give it back to the card */
+ pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr[newbnum],
+ TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+
+ /* reset the dma address */
+ pluto_set_dma_addr(pluto, newbnum);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static void pluto_dmx_buffer(void *_pluto)
+#else
+static void pluto_dmx_buffer(struct work_struct *work)
+#endif
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ struct pluto *pluto = _pluto;
+#else
+ struct pluto *pluto = container_of(work, struct pluto, work);
+#endif
+ unsigned int bnum = pluto->dmx_buf_num % TS_DMA_BUFFERS;
+ unsigned int npackets = atomic_read(&pluto->npackets[bnum]);
+ unsigned int i;
+
+ if (!npackets)
+ return;
+
+ dvb_dmx_swfilter_packets(&pluto->demux, pluto->dma_buf[bnum], npackets);
/* clear the dma buffer. this is needed to be able to identify
* new valid ts packets above */
- memset(pluto->dma_buf, 0, nbpackets * 188);
-
- /* reset the dma address */
- pluto_set_dma_addr(pluto);
-
- /* sync the buffer and give it back to the card */
- pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ for (i = 0, npackets++; npackets; npackets--, i += 188)
+ pluto->dma_buf[bnum][i] = 0;
+
+ /* Mark buffer as empty */
+ atomic_set(&pluto->npackets[bnum], 0);
+
+ pluto->dmx_buf_num++;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
@@ -349,20 +419,19 @@ static irqreturn_t pluto_irq(int irq, vo
return IRQ_HANDLED;
}
+ /* overflow interrupt */
+ if (tscr & TSCR_OVR)
+ pluto->overflow++;
+
/* dma end interrupt */
if (tscr & TSCR_DE) {
pluto_dma_end(pluto, (tscr & TSCR_NBPACKETS) >> 24);
- /* overflow interrupt */
- if (tscr & TSCR_OVR)
- pluto->overflow++;
if (pluto->overflow) {
dev_err(&pluto->pdev->dev, "overflow irq (%d)\n",
pluto->overflow);
pluto_reset_ts(pluto, 1);
pluto->overflow = 0;
}
- } else if (tscr & TSCR_OVR) {
- pluto->overflow++;
}
/* ACK the interrupt */
@@ -412,7 +481,7 @@ static int __devinit pluto_hw_init(struc
#endif
/* map DMA and set address */
pluto_dma_map(pluto);
- pluto_set_dma_addr(pluto);
+ pluto_set_dma_addr(pluto, 0);
/* enable interrupts */
pluto_enable_irqs(pluto);
@@ -610,7 +679,7 @@ static int __devinit pluto2_probe(struct
pluto = kzalloc(sizeof(struct pluto), GFP_KERNEL);
if (!pluto)
- goto out;
+ return -ENOMEM;
pluto->pdev = pdev;
@@ -639,13 +708,9 @@ static int __devinit pluto2_probe(struct
pci_set_drvdata(pdev, pluto);
- ret = request_irq(pdev->irq, pluto_irq, IRQF_SHARED, DRIVER_NAME, pluto);
+ ret = pluto_hw_init(pluto);
if (ret < 0)
goto err_pci_iounmap;
-
- ret = pluto_hw_init(pluto);
- if (ret < 0)
- goto err_free_irq;
/* i2c */
i2c_set_adapdata(&pluto->i2c_adap, pluto);
@@ -721,9 +786,27 @@ static int __devinit pluto2_probe(struct
goto err_disconnect_frontend;
dvb_net_init(dvb_adapter, &pluto->dvbnet, dmx);
-out:
- return ret;
-
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+ INIT_WORK(&pluto->work, pluto_dmx_buffer, pluto);
+#else
+ INIT_WORK(&pluto->work, pluto_dmx_buffer);
+#endif
+ snprintf(pluto->wqname, sizeof(pluto->wqname), "%s[%d] dvb", dvb_adapter->name, dvb_adapter->num);
+ pluto->dmaq = create_singlethread_workqueue(pluto->wqname);
+ if (!pluto->dmaq)
+ goto err_dvb_net;
+
+ ret = request_irq(pdev->irq, pluto_irq, IRQF_SHARED, DRIVER_NAME, pluto);
+ if (ret < 0)
+ goto err_workqueue;
+
+ return 0;
+
+err_workqueue:
+ destroy_workqueue(pluto->dmaq);
+err_dvb_net:
+ dvb_net_release(&pluto->dvbnet);
err_disconnect_frontend:
dmx->disconnect_frontend(dmx);
err_remove_mem_frontend:
@@ -740,8 +823,6 @@ err_i2c_del_adapter:
i2c_del_adapter(&pluto->i2c_adap);
err_pluto_hw_exit:
pluto_hw_exit(pluto);
-err_free_irq:
- free_irq(pdev->irq, pluto);
err_pci_iounmap:
pci_iounmap(pdev, pluto->io_mem);
err_pci_release_regions:
@@ -751,7 +832,7 @@ err_kfree:
err_kfree:
pci_set_drvdata(pdev, NULL);
kfree(pluto);
- goto out;
+ return ret;
}
static void __devexit pluto2_remove(struct pci_dev *pdev)
@@ -421,4 +421,8 @@ static inline int usb_endpoint_type(cons
} while (0)
#endif
-#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
+#define pci_dma_mapping_error(dev, addr) pci_dma_mapping_error(addr)
+#endif
+
+#endif