
/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 */
17
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <asm/div64.h>

#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"
37
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

/* Module option: debug verbosity consumed by the dprintk() macro below. */
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

/* Module option: per-device board type override (UNSET = autodetect). */
static unsigned int card[]  = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card,  int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

/* Log at KERN_DEBUG when the "debug" module parameter is >= level.
 * Relies on a "dev" pointer with a ->name member being in scope. */
#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG "%s: " fmt, dev->name, ## arg);\
	} while (0)

/* Count of devices probed so far; used to number dev->name instances. */
static unsigned int cx23885_devcount;

/* Sentinel: RISC program generation should emit no sync instruction. */
#define NO_SYNC_LINE (-1U)
59
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions:
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */
79
/* SRAM channel map for the CX23885 bridge.  Channels with cmds_start == 0
 * are unused placeholders; cx23885_sram_channel_setup() only clears their
 * DMA registers.
 * NOTE(review): SRAM_CH06 reuses the DMA5_* registers (same as SRAM_CH05)
 * — confirm against the datasheet that this is intended. */
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
190
/* SRAM channel map for the CX23887/8 bridges — same register layout as the
 * CX23885 table but different SRAM offsets, and with a real VBI channel
 * (SRAM_CH02).  Channels with cmds_start == 0 are unused placeholders.
 * NOTE(review): SRAM_CH06 reuses the DMA5_* registers (same as SRAM_CH05)
 * — confirm against the datasheet that this is intended. */
static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};
301
302static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
303{
304	unsigned long flags;
305	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
306
307	dev->pci_irqmask |= mask;
308
309	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
310}
311
312void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
313{
314	unsigned long flags;
315	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
316
317	dev->pci_irqmask |= mask;
318	cx_set(PCI_INT_MSK, mask);
319
320	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
321}
322
323void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
324{
325	u32 v;
326	unsigned long flags;
327	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
328
329	v = mask & dev->pci_irqmask;
330	if (v)
331		cx_set(PCI_INT_MSK, v);
332
333	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
334}
335
/* Unmask every interrupt previously registered via cx23885_irq_add*();
 * cx23885_irq_enable() filters the all-ones mask against pci_irqmask. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
340
341void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
342{
343	unsigned long flags;
344	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
345
346	cx_clear(PCI_INT_MSK, mask);
347
348	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
349}
350
/* Mask all interrupts in hardware without forgetting which bits the
 * driver manages (see cx23885_irq_disable()). */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
355
356void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
357{
358	unsigned long flags;
359	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
360
361	dev->pci_irqmask &= ~mask;
362	cx_clear(PCI_INT_MSK, mask);
363
364	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
365}
366
367static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
368{
369	u32 v;
370	unsigned long flags;
371	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
372
373	v = cx_read(PCI_INT_MSK);
374
375	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
376	return v;
377}
378
379static int cx23885_risc_decode(u32 risc)
380{
381	static char *instr[16] = {
382		[RISC_SYNC    >> 28] = "sync",
383		[RISC_WRITE   >> 28] = "write",
384		[RISC_WRITEC  >> 28] = "writec",
385		[RISC_READ    >> 28] = "read",
386		[RISC_READC   >> 28] = "readc",
387		[RISC_JUMP    >> 28] = "jump",
388		[RISC_SKIP    >> 28] = "skip",
389		[RISC_WRITERM >> 28] = "writerm",
390		[RISC_WRITECM >> 28] = "writecm",
391		[RISC_WRITECR >> 28] = "writecr",
392	};
393	static int incr[16] = {
394		[RISC_WRITE   >> 28] = 3,
395		[RISC_JUMP    >> 28] = 3,
396		[RISC_SKIP    >> 28] = 1,
397		[RISC_SYNC    >> 28] = 1,
398		[RISC_WRITERM >> 28] = 3,
399		[RISC_WRITECM >> 28] = 3,
400		[RISC_WRITECR >> 28] = 4,
401	};
402	static char *bits[] = {
403		"12",   "13",   "14",   "resync",
404		"cnt0", "cnt1", "18",   "19",
405		"20",   "21",   "22",   "23",
406		"irq1", "irq2", "eol",  "sol",
407	};
408	int i;
409
410	printk("0x%08x [ %s", risc,
411	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
412	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
413		if (risc & (1 << (i + 12)))
414			printk(" %s", bits[i]);
415	printk(" count=%d ]\n", risc & 0xfff);
416	return incr[risc >> 28] ? incr[risc >> 28] : 1;
417}
418
419static void cx23885_wakeup(struct cx23885_tsport *port,
420			   struct cx23885_dmaqueue *q, u32 count)
421{
422	struct cx23885_dev *dev = port->dev;
423	struct cx23885_buffer *buf;
424
425	if (list_empty(&q->active))
426		return;
427	buf = list_entry(q->active.next,
428			 struct cx23885_buffer, queue);
429
430	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
431	buf->vb.v4l2_buf.sequence = q->count++;
432	dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
433		count, q->count);
434	list_del(&buf->queue);
435	vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
436}
437
/*
 * Program one SRAM DMA channel: build its cluster descriptor table (CDT),
 * fill the 80-byte CMDS block, then load the channel's pointer/count
 * registers.  A channel whose cmds_start is 0 is treated as unused and
 * only has its registers cleared.
 *
 * bpl (bytes per line) is rounded up to an 8-byte multiple and the FIFO
 * is split into at most 6 lines of bpl bytes each (see the layout
 * assumptions documented above cx23885_sram_channels).  Returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
				      struct sram_channel *ch,
				      unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* A RISC jump instruction at SRAM address 8; jumponly channels
	 * point their CMDS program counter at it (cmds_start + 0 = 8
	 * below). */
	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte descriptor per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS: initial RISC PC, CDT base/size, IQ base/size */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS block */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
509
/*
 * Dump one SRAM DMA channel's state to the kernel log for debugging:
 * the CMDS block fields, the four initial RISC instructions, the
 * instruction queue contents and the channel's pointer/count registers.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
				      struct sram_channel *ch)
{
	/* Field names for the first 14 dwords of the CMDS block. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s:   cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* Dwords 14..17 of the CMDS block hold RISC instructions. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s:   risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	/* Walk the 64-byte instruction queue; cx23885_risc_decode()
	 * returns the instruction length so arguments can be listed. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		printk(KERN_WARNING "%s:   (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s:   iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s:   ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s:   ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s:   cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s:   cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
571
572static void cx23885_risc_disasm(struct cx23885_tsport *port,
573				struct cx23885_riscmem *risc)
574{
575	struct cx23885_dev *dev = port->dev;
576	unsigned int i, j, n;
577
578	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
579	       dev->name, risc->cpu, (unsigned long)risc->dma);
580	for (i = 0; i < (risc->size >> 2); i += n) {
581		printk(KERN_INFO "%s:   %04d: ", dev->name, i);
582		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
583		for (j = 1; j < n; j++)
584			printk(KERN_INFO "%s:   %04d: 0x%08x [ arg #%d ]\n",
585			       dev->name, i + j, risc->cpu[i + j], j);
586		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
587			break;
588	}
589}
590
/*
 * Quiesce the bridge: stop the RISC controller, halt every DMA engine
 * (IR, video A/B/C, audio, UART) and mask all interrupt sources.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);

}
620
/*
 * Bring the bridge to a known state: shut everything down, acknowledge
 * all pending interrupt status bits, then program every SRAM channel
 * (unused ones get erased by cx23885_sram_channel_setup) and the GPIOs.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Write-1-to-clear all latched interrupt status. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* Allow the hardware to settle before reprogramming SRAM. */
	mdelay(100);

	/* Video uses 720*4 bytes/line, TS ports 188*4; placeholder
	 * channels are configured with a nominal 128. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
653
654
655static int cx23885_pci_quirks(struct cx23885_dev *dev)
656{
657	dprintk(1, "%s()\n", __func__);
658
659	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
660	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
661	 * occur on the cx23887 bridge.
662	 */
663	if (dev->bridge == CX23885_BRIDGE_885)
664		cx_clear(RDR_TLCTL0, 1 << 4);
665
666	return 0;
667}
668
669static int get_resources(struct cx23885_dev *dev)
670{
671	if (request_mem_region(pci_resource_start(dev->pci, 0),
672			       pci_resource_len(dev->pci, 0),
673			       dev->name))
674		return 0;
675
676	printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
677		dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));
678
679	return -EBUSY;
680}
681
/*
 * Initialise one MPEG transport port (portno 1 = VID_B, 2 = VID_C):
 * common DMA/interrupt defaults, locks and queues, then the per-port
 * register map.  Other port numbers are a programming error (BUG).
 * Returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
	struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue  - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	/* 0x47 = MPEG-TS sync byte; 188 = TS packet length */
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* Default to a single frontend attachment on this tsport unless
	 * the board setup already requested more, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno          = SRAM_CH03; /* VID_B */
		port->pci_irqmask        = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0; /* VID_C has no source select */
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno          = SRAM_CH06; /* VID_C */
		port->pci_irqmask        = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}
752
/*
 * Map the low byte of RDR_CFG2 (plus the PCI device id for the shared
 * 0x03/0x04 codes) to a hardware revision and store it in
 * dev->hwrevision, logging the result.
 */
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		/* NOTE(review): this logs dev->hwrevision, which is not
		 * assigned on this path — logging the raw RDR_CFG2 value
		 * would be more informative; confirm before changing. */
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
			__func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
			__func__, dev->hwrevision);
}
805
806/* Find the first v4l2_subdev member of the group id in hw */
807struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
808{
809	struct v4l2_subdev *result = NULL;
810	struct v4l2_subdev *sd;
811
812	spin_lock(&dev->v4l2_dev.lock);
813	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
814		if (sd->grp_id == hw) {
815			result = sd;
816			break;
817		}
818	}
819	spin_unlock(&dev->v4l2_dev.lock);
820	return result;
821}
822
823static int cx23885_dev_setup(struct cx23885_dev *dev)
824{
825	int i;
826
827	spin_lock_init(&dev->pci_irqmask_lock);
828
829	mutex_init(&dev->lock);
830	mutex_init(&dev->gpio_lock);
831
832	atomic_inc(&dev->refcount);
833
834	dev->nr = cx23885_devcount++;
835	sprintf(dev->name, "cx23885[%d]", dev->nr);
836
837	/* Configure the internal memory */
838	if (dev->pci->device == 0x8880) {
839		/* Could be 887 or 888, assume a default */
840		dev->bridge = CX23885_BRIDGE_887;
841		/* Apply a sensible clock frequency for the PCIe bridge */
842		dev->clk_freq = 25000000;
843		dev->sram_channels = cx23887_sram_channels;
844	} else
845	if (dev->pci->device == 0x8852) {
846		dev->bridge = CX23885_BRIDGE_885;
847		/* Apply a sensible clock frequency for the PCIe bridge */
848		dev->clk_freq = 28000000;
849		dev->sram_channels = cx23885_sram_channels;
850	} else
851		BUG();
852
853	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
854		__func__, dev->bridge);
855
856	/* board config */
857	dev->board = UNSET;
858	if (card[dev->nr] < cx23885_bcount)
859		dev->board = card[dev->nr];
860	for (i = 0; UNSET == dev->board  &&  i < cx23885_idcount; i++)
861		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
862		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
863			dev->board = cx23885_subids[i].card;
864	if (UNSET == dev->board) {
865		dev->board = CX23885_BOARD_UNKNOWN;
866		cx23885_card_list(dev);
867	}
868
869	/* If the user specific a clk freq override, apply it */
870	if (cx23885_boards[dev->board].clk_freq > 0)
871		dev->clk_freq = cx23885_boards[dev->board].clk_freq;
872
873	dev->pci_bus  = dev->pci->bus->number;
874	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
875	cx23885_irq_add(dev, 0x001f00);
876
877	/* External Master 1 Bus */
878	dev->i2c_bus[0].nr = 0;
879	dev->i2c_bus[0].dev = dev;
880	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
881	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
882	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
883	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
884	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
885	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */
886
887	/* External Master 2 Bus */
888	dev->i2c_bus[1].nr = 1;
889	dev->i2c_bus[1].dev = dev;
890	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
891	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
892	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
893	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
894	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
895	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */
896
897	/* Internal Master 3 Bus */
898	dev->i2c_bus[2].nr = 2;
899	dev->i2c_bus[2].dev = dev;
900	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
901	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
902	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
903	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
904	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
905	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */
906
907	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
908		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
909		cx23885_init_tsport(dev, &dev->ts1, 1);
910
911	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
912		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
913		cx23885_init_tsport(dev, &dev->ts2, 2);
914
915	if (get_resources(dev) < 0) {
916		printk(KERN_ERR "CORE %s No more PCIe resources for "
917		       "subsystem: %04x:%04x\n",
918		       dev->name, dev->pci->subsystem_vendor,
919		       dev->pci->subsystem_device);
920
921		cx23885_devcount--;
922		return -ENODEV;
923	}
924
925	/* PCIe stuff */
926	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
927			     pci_resource_len(dev->pci, 0));
928
929	dev->bmmio = (u8 __iomem *)dev->lmmio;
930
931	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
932	       dev->name, dev->pci->subsystem_vendor,
933	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
934	       dev->board, card[dev->nr] == dev->board ?
935	       "insmod option" : "autodetected");
936
937	cx23885_pci_quirks(dev);
938
939	/* Assume some sensible defaults */
940	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
941	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
942	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
943	dev->radio_type = cx23885_boards[dev->board].radio_type;
944	dev->radio_addr = cx23885_boards[dev->board].radio_addr;
945
946	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
947		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
948	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
949		__func__, dev->radio_type, dev->radio_addr);
950
951	/* The cx23417 encoder has GPIO's that need to be initialised
952	 * before DVB, so that demodulators and tuners are out of
953	 * reset before DVB uses them.
954	 */
955	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
956		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
957			cx23885_mc417_init(dev);
958
959	/* init hardware */
960	cx23885_reset(dev);
961
962	cx23885_i2c_register(&dev->i2c_bus[0]);
963	cx23885_i2c_register(&dev->i2c_bus[1]);
964	cx23885_i2c_register(&dev->i2c_bus[2]);
965	cx23885_card_setup(dev);
966	call_all(dev, core, s_power, 0);
967	cx23885_ir_init(dev);
968
969	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
970		if (cx23885_video_register(dev) < 0) {
971			printk(KERN_ERR "%s() Failed to register analog "
972				"video adapters on VID_A\n", __func__);
973		}
974	}
975
976	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
977		if (cx23885_boards[dev->board].num_fds_portb)
978			dev->ts1.num_frontends =
979				cx23885_boards[dev->board].num_fds_portb;
980		if (cx23885_dvb_register(&dev->ts1) < 0) {
981			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
982			       __func__);
983		}
984	} else
985	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
986		if (cx23885_417_register(dev) < 0) {
987			printk(KERN_ERR
988				"%s() Failed to register 417 on VID_B\n",
989			       __func__);
990		}
991	}
992
993	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
994		if (cx23885_boards[dev->board].num_fds_portc)
995			dev->ts2.num_frontends =
996				cx23885_boards[dev->board].num_fds_portc;
997		if (cx23885_dvb_register(&dev->ts2) < 0) {
998			printk(KERN_ERR
999				"%s() Failed to register dvb on VID_C\n",
1000			       __func__);
1001		}
1002	} else
1003	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
1004		if (cx23885_417_register(dev) < 0) {
1005			printk(KERN_ERR
1006				"%s() Failed to register 417 on VID_C\n",
1007			       __func__);
1008		}
1009	}
1010
1011	cx23885_dev_checkrevision(dev);
1012
1013	/* disable MSI for NetUP cards, otherwise CI is not working */
1014	if (cx23885_boards[dev->board].ci_type > 0)
1015		cx_clear(RDR_RDRCTL1, 1 << 8);
1016
1017	switch (dev->board) {
1018	case CX23885_BOARD_TEVII_S470:
1019	case CX23885_BOARD_TEVII_S471:
1020		cx_clear(RDR_RDRCTL1, 1 << 8);
1021		break;
1022	}
1023
1024	return 0;
1025}
1026
/*
 * Tear down a device.  The MMIO region is released, then the remaining
 * unregistration (sub-drivers, i2c buses, iounmap) runs only when the
 * last reference is dropped.
 *
 * NOTE(review): the mem region is released before the refcount gate,
 * i.e. on every call — confirm callers pair this 1:1 with
 * get_resources().
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* Unregister i2c masters in reverse order of registration. */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
1056
/*
 * Emit RISC write instructions for one video field into the program at
 * *rp, walking the scatter-gather list so each scan line of bpl bytes is
 * written even when it straddles SG chunk boundaries.
 *
 * @rp:        current write position in the RISC program
 * @sglist:    DMA scatter-gather list for the destination buffer
 * @offset:    byte offset into the buffer where the field starts
 * @sync_line: line to resync on, or NO_SYNC_LINE to skip the sync insn
 * @bpl:       bytes per scan line
 * @padding:   bytes to skip between consecutive lines
 * @lines:     number of scan lines in the field
 * @lpi:       lines per IRQ: fire IRQ1 every lpi lines (0 = never)
 * @jump:      if true, emit a placeholder jump before the field
 *
 * Returns the new write position past the emitted instructions.
 * Instructions that carry a DMA address occupy 3 words: opcode,
 * address bits 31-0 and address bits 63-32 (always written as 0).
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
			       unsigned int offset, u32 sync_line,
			       unsigned int bpl, unsigned int padding,
			       unsigned int lines,  unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;


	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance to the SG chunk containing "offset" */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		/* every lpi-th line, also raise IRQ1 and bump the counter */
		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split across SG chunks:
			 * first write carries SOL, middle writes carry
			 * neither flag, the last write carries EOL */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					    (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						    sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}
1123
/*
 * Build an interlaced-video RISC program covering one or two fields.
 *
 * @top_offset / @bottom_offset: byte offsets of the top/bottom field in
 * the destination buffer, or UNSET to omit that field.  The bottom
 * field resyncs on line 0x200, the top field on line 0.  Allocates
 * risc->cpu from DMA-coherent memory sized by a worst-case instruction
 * estimate, and leaves risc->jmp pointing past the generated program at
 * the slot where cx23885_buf_queue() writes the chaining JUMP.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need and extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1165
/*
 * Build a RISC program for a raw (non-interlaced) data buffer, e.g. a
 * transport stream buffer: no padding, no field sync.  @lpi selects how
 * often an IRQ1 is raised (every lpi lines; 0 = only via the chaining
 * JUMP, in which case the program is prefixed with a JUMP placeholder).
 * Allocates risc->cpu coherently and leaves risc->jmp at the chaining
 * JUMP slot, as with cx23885_risc_buffer().
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct cx23885_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1198
/*
 * Build a RISC program for a VBI buffer.  Structurally identical to
 * cx23885_risc_buffer() (same estimate, same field generation); kept as
 * a separate entry point for the VBI capture path.
 *
 * Returns 0 on success, -ENOMEM if the coherent allocation fails.
 */
int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need and extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload
	 * NOTE(review): the sync_line actually passed below is 0, not 6 —
	 * confirm whether the comment or the argument is stale. */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);



	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
1245
1246
/*
 * Free the DMA-coherent RISC program attached to a buffer.  Must not be
 * called from interrupt context (enforced by the BUG_ON), since the
 * coherent free may not be IRQ-safe.
 */
void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}
1254
/*
 * Debug helper: dump the bridge-wide interrupt/DMA registers and all
 * per-port TS registers for @port at debug level 1.  Read-only except
 * for the register reads themselves.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2               0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK              0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK          0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL          0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL                 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL          0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2                    0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X)          0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X)      0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X)        0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X)        0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X)          0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X)    0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X)       0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X)  0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X)     0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X)       0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X)      0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X)     0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
1306
1307int cx23885_start_dma(struct cx23885_tsport *port,
1308			     struct cx23885_dmaqueue *q,
1309			     struct cx23885_buffer   *buf)
1310{
1311	struct cx23885_dev *dev = port->dev;
1312	u32 reg;
1313
1314	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
1315		dev->width, dev->height, dev->field);
1316
1317	/* Stop the fifo and risc engine for this port */
1318	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1319
1320	/* setup fifo + format */
1321	cx23885_sram_channel_setup(dev,
1322				   &dev->sram_channels[port->sram_chno],
1323				   port->ts_packet_size, buf->risc.dma);
1324	if (debug > 5) {
1325		cx23885_sram_channel_dump(dev,
1326			&dev->sram_channels[port->sram_chno]);
1327		cx23885_risc_disasm(port, &buf->risc);
1328	}
1329
1330	/* write TS length to chip */
1331	cx_write(port->reg_lngth, port->ts_packet_size);
1332
1333	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
1334		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
1335		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
1336			__func__,
1337			cx23885_boards[dev->board].portb,
1338			cx23885_boards[dev->board].portc);
1339		return -EINVAL;
1340	}
1341
1342	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1343		cx23885_av_clk(dev, 0);
1344
1345	udelay(100);
1346
1347	/* If the port supports SRC SELECT, configure it */
1348	if (port->reg_src_sel)
1349		cx_write(port->reg_src_sel, port->src_sel_val);
1350
1351	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
1352	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
1353	cx_write(port->reg_vld_misc, port->vld_misc_val);
1354	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
1355	udelay(100);
1356
1357	/* NOTE: this is 2 (reserved) for portb, does it matter? */
1358	/* reset counter to zero */
1359	cx_write(port->reg_gpcnt_ctl, 3);
1360	q->count = 0;
1361
1362	/* Set VIDB pins to input */
1363	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
1364		reg = cx_read(PAD_CTRL);
1365		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
1366		cx_write(PAD_CTRL, reg);
1367	}
1368
1369	/* Set VIDC pins to input */
1370	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
1371		reg = cx_read(PAD_CTRL);
1372		reg &= ~0x4; /* Clear TS2_SOP_OE */
1373		cx_write(PAD_CTRL, reg);
1374	}
1375
1376	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1377
1378		reg = cx_read(PAD_CTRL);
1379		reg = reg & ~0x1;    /* Clear TS1_OE */
1380
1381		/* FIXME, bit 2 writing here is questionable */
1382		/* set TS1_SOP_OE and TS1_OE_HI */
1383		reg = reg | 0xa;
1384		cx_write(PAD_CTRL, reg);
1385
1386		/* FIXME and these two registers should be documented. */
1387		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
1388		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
1389	}
1390
1391	switch (dev->bridge) {
1392	case CX23885_BRIDGE_885:
1393	case CX23885_BRIDGE_887:
1394	case CX23885_BRIDGE_888:
1395		/* enable irqs */
1396		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
1397		cx_set(port->reg_ts_int_msk,  port->ts_int_msk_val);
1398		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
1399		cx23885_irq_add(dev, port->pci_irqmask);
1400		cx23885_irq_enable_all(dev);
1401		break;
1402	default:
1403		BUG();
1404	}
1405
1406	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
1407
1408	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1409		cx23885_av_clk(dev, 1);
1410
1411	if (debug > 4)
1412		cx23885_tsport_reg_dump(port);
1413
1414	return 0;
1415}
1416
1417static int cx23885_stop_dma(struct cx23885_tsport *port)
1418{
1419	struct cx23885_dev *dev = port->dev;
1420	u32 reg;
1421
1422	dprintk(1, "%s()\n", __func__);
1423
1424	/* Stop interrupts and DMA */
1425	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
1426	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
1427
1428	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
1429
1430		reg = cx_read(PAD_CTRL);
1431
1432		/* Set TS1_OE */
1433		reg = reg | 0x1;
1434
1435		/* clear TS1_SOP_OE and TS1_OE_HI */
1436		reg = reg & ~0xa;
1437		cx_write(PAD_CTRL, reg);
1438		cx_write(port->reg_src_sel, 0);
1439		cx_write(port->reg_gen_ctrl, 8);
1440
1441	}
1442
1443	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
1444		cx23885_av_clk(dev, 0);
1445
1446	return 0;
1447}
1448
1449/* ------------------------------------------------------------------ */
1450
/*
 * Prepare a vb2 buffer for TS capture: validate that the plane is large
 * enough for ts_packet_size * ts_packet_count bytes, set the payload,
 * map the scatter/gather list for device-to-memory DMA and generate the
 * buffer's RISC program.
 *
 * Returns 0 on success, -EINVAL if the plane is too small, -EIO if the
 * DMA mapping maps zero entries.
 */
int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb, 0);
	int rc;

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb, 0, size);

	/* dma_map_sg() returns the number of mapped entries; 0 = failure */
	rc = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
	if (!rc)
		return -EIO;

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}
1472
1473/*
1474 * The risc program for each buffer works as follows: it starts with a simple
1475 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
1476 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
1477 * the initial JUMP).
1478 *
1479 * This is the risc program of the first buffer to be queued if the active list
1480 * is empty and it just keeps DMAing this buffer without generating any
1481 * interrupts.
1482 *
1483 * If a new buffer is added then the initial JUMP in the code for that buffer
1484 * will generate an interrupt which signals that the previous buffer has been
1485 * DMAed successfully and that it can be returned to userspace.
1486 *
1487 * It also sets the final jump of the previous buffer to the start of the new
1488 * buffer, thus chaining the new buffer into the DMA chain. This is a single
1489 * atomic u32 write, so there is no race condition.
1490 *
 * The end-result of all this is that you only get an interrupt when a
 * buffer is ready, so the control flow is very easy.
1493 */
/*
 * Queue a buffer for TS DMA (see the chaining description above this
 * function).  The buffer's program is made self-looping (both the
 * leading placeholder JUMP and the trailing JUMP target its own start
 * + 12).  If other buffers are active, the previous tail's trailing
 * JUMP is redirected at this buffer — a single 32-bit store, hence
 * race-free — and this buffer's leading JUMP gains RISC_IRQ1 so the
 * previous buffer's completion raises an interrupt.  All list
 * manipulation happens under dev->slock with IRQs disabled.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer    *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue  *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.v4l2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			 buf, buf->vb.v4l2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}
1522
1523/* ----------------------------------------------------------- */
1524
1525static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
1526{
1527	struct cx23885_dev *dev = port->dev;
1528	struct cx23885_dmaqueue *q = &port->mpegq;
1529	struct cx23885_buffer *buf;
1530	unsigned long flags;
1531
1532	spin_lock_irqsave(&port->slock, flags);
1533	while (!list_empty(&q->active)) {
1534		buf = list_entry(q->active.next, struct cx23885_buffer,
1535				 queue);
1536		list_del(&buf->queue);
1537		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
1538		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
1539			buf, buf->vb.v4l2_buf.index, reason, (unsigned long)buf->risc.dma);
1540	}
1541	spin_unlock_irqrestore(&port->slock, flags);
1542}
1543
/*
 * Stop DMA on the port and return all queued buffers to vb2 with an
 * error state.
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}
1552
/*
 * Interrupt service for the 417 encoder path.  On any of the error
 * conditions (bad packet, opcode error, sync loss, overflow — for both
 * video and VBI) the port's DMA is stopped, the SRAM channel dumped and
 * the encoder state checked; otherwise RISCI1 wakes the buffer queue.
 * Any non-zero status is acked by writing it back to the TS interrupt
 * status register.  Returns 1 if the interrupt was handled, else 0.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x  mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT)         ||
		(status & VID_B_MSK_OPC_ERR)     ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC)        ||
		(status & VID_B_MSK_VBI_SYNC)    ||
		(status & VID_B_MSK_OF)          ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, "        VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, "        VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, "        VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, "        VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, "        VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, "        VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, "        VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, "        VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack whatever was asserted */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1608
/*
 * Interrupt service for a DVB TS port.  Error conditions (opcode error,
 * bad packet, sync loss, overflow) stop the port's DMA and dump the
 * SRAM channel; RISCI1 reads the packet counter and wakes the buffer
 * queue.  Any non-zero status is acked by writing it back.
 * Returns 1 if the interrupt was handled, else 0.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC    0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF      0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1            0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		/* ack whatever was asserted */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
1659
/*
 * Top-half PCI interrupt handler.  Snapshots the bridge, video, audio
 * and both TS ports' status/mask registers, then dispatches to the CI,
 * TS (DVB or encoder), video, audio and IR handlers according to the
 * board configuration.  AV-core servicing is deferred to a workqueue
 * with PCI_MSK_AV_CORE masked until the work runs.  If anything was
 * handled, the PCI status is acked at the end.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 audint_status, audint_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	int audint_count = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	audint_status = cx_read(AUDIO_INT_INT_STAT);
	audint_mask = cx_read(AUDIO_INT_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* nothing pending for us: likely a shared-IRQ neighbour fired */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	audint_count = cx_read(AUD_INT_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x  pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
		audint_status, audint_mask, audint_count);
	dprintk(7, "ts1_status: 0x%08x  ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x  ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* verbose decode of the PCI status bits (debug only) */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD   | PCI_MSK_AL_WR   | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C   | PCI_MSK_VID_B   | PCI_MSK_VID_A   |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0   | PCI_MSK_GPIO1   |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD   0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR   0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD     0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR     0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA   0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C     0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B     0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A     0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT   0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT   0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0     0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1     0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE   0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR        0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CAM slot status / CI interrupts, per CI bridge type */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	/* defer AV core handling; mask it until the work re-enables it */
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
1816
1817static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
1818				    unsigned int notification, void *arg)
1819{
1820	struct cx23885_dev *dev;
1821
1822	if (sd == NULL)
1823		return;
1824
1825	dev = to_cx23885(sd->v4l2_dev);
1826
1827	switch (notification) {
1828	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
1829		if (sd == dev->sd_ir)
1830			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
1831		break;
1832	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
1833		if (sd == dev->sd_ir)
1834			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
1835		break;
1836	}
1837}
1838
/*
 * Initialize the deferred-work handlers used by the notify/IRQ paths
 * and install the v4l2_device notify callback.
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
1846
/* True if the board has the 417 MPEG encoder on TS port B. */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
1851
/* True if the board has the 417 MPEG encoder on TS port C. */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
1856
/* Mask represents 32 different GPIOs; GPIOs are split into multiple
 * registers depending on the board configuration (and whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
1864 *
1865 * GPIO  2 thru  0 - On the cx23885 bridge
1866 * GPIO 18 thru  3 - On the cx23417 host bus interface
1867 * GPIO 23 thru 19 - On the cx25840 a/v core
1868 */
1869void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
1870{
1871	if (mask & 0x7)
1872		cx_set(GP0_IO, mask & 0x7);
1873
1874	if (mask & 0x0007fff8) {
1875		if (encoder_on_portb(dev) || encoder_on_portc(dev))
1876			printk(KERN_ERR
1877				"%s: Setting GPIO on encoder ports\n",
1878				dev->name);
1879		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
1880	}
1881
1882	/* TODO: 23-19 */
1883	if (mask & 0x00f80000)
1884		printk(KERN_INFO "%s: Unsupported\n", dev->name);
1885}
1886
/*
 * Drive low the GPIO lines selected by @mask, routing each bit to the
 * register bank it physically lives in (bridge GPIO 2..0, cx23417 host
 * bus GPIO 18..3).  Bits 23..19 (cx25840 core) are not implemented.
 */
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		/* warn: these pins may be claimed by the 417 host bus */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
1904
/*
 * Read back the GPIO input levels selected by @mask.
 *
 * Note: only one register bank is sampled per call — if @mask includes
 * any of GPIO 2..0 the bridge register is read and returned, and the
 * cx23417 bank (GPIO 18..3) is never consulted.  Callers are expected
 * to pass masks confined to a single bank.  Bits 23..19 unsupported.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		/* warn: these pins may be claimed by the 417 host bus */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
1924
/*
 * Configure the direction of the GPIO lines in @mask: output when
 * @asoutput is non-zero, input otherwise.  Bridge pins use the upper
 * half of GP0_IO as direction bits; cx23417 pins use MC417_OEN.
 * Bits 23..19 (cx25840 core) are not implemented.
 */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		/* warn: these pins may be claimed by the 417 host bus */
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);

	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}
1948
1949static int cx23885_initdev(struct pci_dev *pci_dev,
1950			   const struct pci_device_id *pci_id)
1951{
1952	struct cx23885_dev *dev;
1953	struct v4l2_ctrl_handler *hdl;
1954	int err;
1955
1956	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1957	if (NULL == dev)
1958		return -ENOMEM;
1959
1960	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
1961	if (err < 0)
1962		goto fail_free;
1963
1964	hdl = &dev->ctrl_handler;
1965	v4l2_ctrl_handler_init(hdl, 6);
1966	if (hdl->error) {
1967		err = hdl->error;
1968		goto fail_ctrl;
1969	}
1970	dev->v4l2_dev.ctrl_handler = hdl;
1971
1972	/* Prepare to handle notifications from subdevices */
1973	cx23885_v4l2_dev_notify_init(dev);
1974
1975	/* pci init */
1976	dev->pci = pci_dev;
1977	if (pci_enable_device(pci_dev)) {
1978		err = -EIO;
1979		goto fail_ctrl;
1980	}
1981
1982	if (cx23885_dev_setup(dev) < 0) {
1983		err = -EINVAL;
1984		goto fail_ctrl;
1985	}
1986
1987	/* print pci info */
1988	dev->pci_rev = pci_dev->revision;
1989	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
1990	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
1991	       "latency: %d, mmio: 0x%llx\n", dev->name,
1992	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
1993	       dev->pci_lat,
1994		(unsigned long long)pci_resource_start(pci_dev, 0));
1995
1996	pci_set_master(pci_dev);
1997	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
1998		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
1999		err = -EIO;
2000		goto fail_irq;
2001	}
2002
2003	err = request_irq(pci_dev->irq, cx23885_irq,
2004			  IRQF_SHARED, dev->name, dev);
2005	if (err < 0) {
2006		printk(KERN_ERR "%s: can't get IRQ %d\n",
2007		       dev->name, pci_dev->irq);
2008		goto fail_irq;
2009	}
2010
2011	switch (dev->board) {
2012	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
2013		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
2014		break;
2015	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
2016		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
2017		break;
2018	}
2019
2020	/*
2021	 * The CX2388[58] IR controller can start firing interrupts when
2022	 * enabled, so these have to take place after the cx23885_irq() handler
2023	 * is hooked up by the call to request_irq() above.
2024	 */
2025	cx23885_ir_pci_int_enable(dev);
2026	cx23885_input_init(dev);
2027
2028	return 0;
2029
2030fail_irq:
2031	cx23885_dev_unregister(dev);
2032fail_ctrl:
2033	v4l2_ctrl_handler_free(hdl);
2034	v4l2_device_unregister(&dev->v4l2_dev);
2035fail_free:
2036	kfree(dev);
2037	return err;
2038}
2039
/*
 * PCI remove callback: shut down input/IR, quiesce the hardware, then
 * release the IRQ, per-port/device registrations, control handler and
 * v4l2 device, and free the device state.
 */
static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
2060
/* PCI device IDs this driver binds to (any subsystem vendor/device). */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
2079
/* PCI driver glue; power management is not implemented yet. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = cx23885_finidev,
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
2089
/* Module entry: announce the driver version and register with PCI. */
static int __init cx23885_init(void)
{
	printk(KERN_INFO "cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}
2096
/* Module exit: unregister the PCI driver (removes all bound devices). */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);
2104