/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellstrom 2004, 2005.
 * This code was written using docs obtained under NDA from VIA Inc.
 *
 * Don't run this code directly on an AGP buffer. Due to cache problems it will
 * be very slow.
 */

#include "via_3d_reg.h"
#include <drm/drmP.h>
#include <drm/via_drm.h>
#include <drm/drm_legacy.h>
#include "via_verifier.h"
#include "via_drv.h"

typedef enum {
	state_command,
	state_header2,
	state_header1,
	state_vheader5,
	state_vheader6,
	state_error
} verifier_state_t;

typedef enum {
	no_check = 0,
	check_for_header2,
	check_for_header1,
	check_for_header2_err,
	check_for_header1_err,
	check_for_fire,
	check_z_buffer_addr0,
	check_z_buffer_addr1,
	check_z_buffer_addr_mode,
	check_destination_addr0,
	check_destination_addr1,
	check_destination_addr_mode,
	check_for_dummy,
	check_for_dd,
	check_texture_addr0,
	check_texture_addr1,
	check_texture_addr2,
	check_texture_addr3,
	check_texture_addr4,
	check_texture_addr5,
	check_texture_addr6,
	check_texture_addr7,
	check_texture_addr8,
	check_texture_addr_mode,
	check_for_vertex_count,
	check_number_texunits,
	forbidden_command
} hazard_t;

/*
 * Associates each hazard above with a possible multi-command
 * sequence. For example, an address that is split over multiple
 * commands needs to be checked at the first command that does not
 * carry any part of the address.
 */

static drm_via_sequence_t seqs[] = {
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	z_address,
	z_address,
	z_address,
	dest_address,
	dest_address,
	dest_address,
	no_sequence,
	no_sequence,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	no_sequence
};

typedef struct {
	unsigned int code;
	hazard_t hz;
} hz_init_t;

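/*
 * Each init table pairs a command code (the high byte of a
 * HALCYON_HEADER2 data dword) with the hazard check it requires:
 * init_table1 covers the HC_ParaType_NotTex space, init_table2 the
 * texture spaces and init_table3 general texture state. Codes not
 * listed are marked forbidden_command by setup_hazard_table().
 */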
static hz_init_t init_table1[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xee, check_for_fire},
	{0xcc, check_for_dummy},
	{0xdd, check_for_dd},
	{0x00, no_check},
	{0x10, check_z_buffer_addr0},
	{0x11, check_z_buffer_addr1},
	{0x12, check_z_buffer_addr_mode},
	{0x13, no_check},
	{0x14, no_check},
	{0x15, no_check},
	{0x23, no_check},
	{0x24, no_check},
	{0x33, no_check},
	{0x34, no_check},
	{0x35, no_check},
	{0x36, no_check},
	{0x37, no_check},
	{0x38, no_check},
	{0x39, no_check},
	{0x3A, no_check},
	{0x3B, no_check},
	{0x3C, no_check},
	{0x3D, no_check},
	{0x3E, no_check},
	{0x40, check_destination_addr0},
	{0x41, check_destination_addr1},
	{0x42, check_destination_addr_mode},
	{0x43, no_check},
	{0x44, no_check},
	{0x50, no_check},
	{0x51, no_check},
	{0x52, no_check},
	{0x53, no_check},
	{0x54, no_check},
	{0x55, no_check},
	{0x56, no_check},
	{0x57, no_check},
	{0x58, no_check},
	{0x70, no_check},
	{0x71, no_check},
	{0x78, no_check},
	{0x79, no_check},
	{0x7A, no_check},
	{0x7B, no_check},
	{0x7C, no_check},
	{0x7D, check_for_vertex_count}
};

static hz_init_t init_table2[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xee, check_for_fire},
	{0xcc, check_for_dummy},
	{0x00, check_texture_addr0},
	{0x01, check_texture_addr0},
	{0x02, check_texture_addr0},
	{0x03, check_texture_addr0},
	{0x04, check_texture_addr0},
	{0x05, check_texture_addr0},
	{0x06, check_texture_addr0},
	{0x07, check_texture_addr0},
	{0x08, check_texture_addr0},
	{0x09, check_texture_addr0},
	{0x20, check_texture_addr1},
	{0x21, check_texture_addr1},
	{0x22, check_texture_addr1},
	{0x23, check_texture_addr4},
	{0x2B, check_texture_addr3},
	{0x2C, check_texture_addr3},
	{0x2D, check_texture_addr3},
	{0x2E, check_texture_addr3},
	{0x2F, check_texture_addr3},
	{0x30, check_texture_addr3},
	{0x31, check_texture_addr3},
	{0x32, check_texture_addr3},
	{0x33, check_texture_addr3},
	{0x34, check_texture_addr3},
	{0x4B, check_texture_addr5},
	{0x4C, check_texture_addr6},
	{0x51, check_texture_addr7},
	{0x52, check_texture_addr8},
	{0x77, check_texture_addr2},
	{0x78, no_check},
	{0x79, no_check},
	{0x7A, no_check},
	{0x7B, check_texture_addr_mode},
	{0x7C, no_check},
	{0x7D, no_check},
	{0x7E, no_check},
	{0x7F, no_check},
	{0x80, no_check},
	{0x81, no_check},
	{0x82, no_check},
	{0x83, no_check},
	{0x85, no_check},
	{0x86, no_check},
	{0x87, no_check},
	{0x88, no_check},
	{0x89, no_check},
	{0x8A, no_check},
	{0x90, no_check},
	{0x91, no_check},
	{0x92, no_check},
	{0x93, no_check}
};

static hz_init_t init_table3[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xcc, check_for_dummy},
	{0x00, check_number_texunits}
};

static hazard_t table1[256];
static hazard_t table2[256];
static hazard_t table3[256];

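/*
 * Advance *buf by num_words dwords, making sure this does not pass buf_end.
 * Returns 0 on success and 1 if the command buffer ends prematurely.
 */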
static __inline__ int
eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
{
	if ((buf_end - *buf) >= num_words) {
		*buf += num_words;
		return 0;
	}
	DRM_ERROR("Illegal termination of DMA command buffer\n");
	return 1;
}

/*
 * Partially stolen from drm_memory.h
 */

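/*
 * Find the non-restricted AGP map that fully contains
 * [offset, offset + size). The most recently used map is cached in the
 * verifier state to keep the common case cheap.
 */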
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
						    unsigned long offset,
						    unsigned long size,
						    struct drm_device *dev)
{
	struct drm_map_list *r_list;
	drm_local_map_t *map = seq->map_cache;

	if (map && map->offset <= offset
	    && (offset + size) <= (map->offset + map->size)) {
		return map;
	}

	list_for_each_entry(r_list, &dev->maplist, head) {
		map = r_list->map;
		if (!map)
			continue;
		if (map->offset <= offset
		    && (offset + size) <= (map->offset + map->size)
		    && !(map->flags & _DRM_RESTRICTED)
		    && (map->type == _DRM_AGP)) {
			seq->map_cache = map;
			return map;
		}
	}
	return NULL;
}

/*
 * Require that all AGP texture levels reside in the same AGP map which should
 * be mappable by the client. This is not a big restriction.
 * FIXME: To actually enforce this security policy strictly, drm_rmmap
 * would have to wait for dma quiescent before removing an AGP map.
 * The via_drm_lookup_agp_map call in reality seems to take
 * very little CPU time.
 */

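/*
 * Called when a multi-command address sequence (Z buffer, destination or
 * texture) is complete. For AGP textures, the address range spanned by the
 * used mipmap levels is computed and checked against the allowed AGP maps.
 */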
static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
{
	switch (cur_seq->unfinished) {
	case z_address:
		DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
		break;
	case dest_address:
		DRM_DEBUG("Destination start address is 0x%x\n",
			  cur_seq->d_addr);
		break;
	case tex_address:
		if (cur_seq->agp_texture) {
			unsigned start =
			    cur_seq->tex_level_lo[cur_seq->texture];
			unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
			unsigned long lo = ~0, hi = 0, tmp;
			uint32_t *addr, *pitch, *height, tex;
			unsigned i;
			int npot;

			if (end > 9)
				end = 9;
			if (start > 9)
				start = 9;

			addr =
			    &(cur_seq->t_addr[tex = cur_seq->texture][start]);
			pitch = &(cur_seq->pitch[tex][start]);
			height = &(cur_seq->height[tex][start]);
			npot = cur_seq->tex_npot[tex];
			for (i = start; i <= end; ++i) {
				tmp = *addr++;
				if (tmp < lo)
					lo = tmp;
				if (i == 0 && npot)
					tmp += (*height++ * *pitch++);
				else
					tmp += (*height++ << *pitch++);
				if (tmp > hi)
					hi = tmp;
			}

			if (!via_drm_lookup_agp_map
			    (cur_seq, lo, hi - lo, cur_seq->dev)) {
				DRM_ERROR
				    ("AGP texture is not in allowed map\n");
				return 2;
			}
		}
		break;
	default:
		break;
	}
	cur_seq->unfinished = no_sequence;
	return 0;
}

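/*
 * Check one command dword against its hazard type. Returns 0 if the
 * command is accepted, 1 if the dword should be re-examined as a new
 * header, and 2 on a security violation or malformed command.
 */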
static __inline__ int
investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
{
	register uint32_t tmp, *tmp_addr;

	if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
		int ret;
		if ((ret = finish_current_sequence(cur_seq)))
			return ret;
	}

	switch (hz) {
	case check_for_header2:
		if (cmd == HALCYON_HEADER2)
			return 1;
		return 0;
	case check_for_header1:
		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
			return 1;
		return 0;
	case check_for_header2_err:
		if (cmd == HALCYON_HEADER2)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
		break;
	case check_for_header1_err:
		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
		break;
	case check_for_fire:
		if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
		break;
	case check_for_dummy:
		if (HC_DUMMY == cmd)
			return 0;
		DRM_ERROR("Illegal DMA HC_DUMMY command\n");
		break;
	case check_for_dd:
		if (0xdddddddd == cmd)
			return 0;
		DRM_ERROR("Illegal DMA 0xdddddddd command\n");
		break;
	case check_z_buffer_addr0:
		cur_seq->unfinished = z_address;
		cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
		    (cmd & 0x00FFFFFF);
		return 0;
	case check_z_buffer_addr1:
		cur_seq->unfinished = z_address;
		cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
		    ((cmd & 0xFF) << 24);
		return 0;
	case check_z_buffer_addr_mode:
		cur_seq->unfinished = z_address;
		if ((cmd & 0x0000C000) == 0)
			return 0;
		DRM_ERROR("Attempt to place Z buffer in system memory\n");
		return 2;
	case check_destination_addr0:
		cur_seq->unfinished = dest_address;
		cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
		    (cmd & 0x00FFFFFF);
		return 0;
	case check_destination_addr1:
		cur_seq->unfinished = dest_address;
		cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
		    ((cmd & 0xFF) << 24);
		return 0;
	case check_destination_addr_mode:
		cur_seq->unfinished = dest_address;
		if ((cmd & 0x0000C000) == 0)
			return 0;
		DRM_ERROR
		    ("Attempt to place 3D drawing buffer in system memory\n");
		return 2;
	case check_texture_addr0:
		cur_seq->unfinished = tex_address;
		tmp = (cmd >> 24);
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
		*tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
		return 0;
	case check_texture_addr1:
		cur_seq->unfinished = tex_address;
		tmp = ((cmd >> 24) - 0x20);
		tmp += tmp << 1;
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
		tmp_addr++;
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
		tmp_addr++;
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
		return 0;
	case check_texture_addr2:
		cur_seq->unfinished = tex_address;
		cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
		cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
		return 0;
	case check_texture_addr3:
		cur_seq->unfinished = tex_address;
		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
		if (tmp == 0 &&
		    (cmd & HC_HTXnEnPit_MASK)) {
			cur_seq->pitch[cur_seq->texture][tmp] =
				(cmd & HC_HTXnLnPit_MASK);
			cur_seq->tex_npot[cur_seq->texture] = 1;
		} else {
			cur_seq->pitch[cur_seq->texture][tmp] =
				(cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
			cur_seq->tex_npot[cur_seq->texture] = 0;
			if (cmd & 0x000FFFFF) {
				DRM_ERROR
					("Unimplemented texture level 0 pitch mode.\n");
				return 2;
			}
		}
		return 0;
	case check_texture_addr4:
		cur_seq->unfinished = tex_address;
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
		return 0;
	case check_texture_addr5:
	case check_texture_addr6:
		cur_seq->unfinished = tex_address;
		/*
		 * Texture width. We don't care since we have the pitch.
		 */
		return 0;
	case check_texture_addr7:
		cur_seq->unfinished = tex_address;
		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
		tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
		tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
		tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
		tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
		tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
		tmp_addr[0] = 1 << (cmd & 0x0000000F);
		return 0;
	case check_texture_addr8:
		cur_seq->unfinished = tex_address;
		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
		tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
		tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
		tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
		tmp_addr[6] = 1 << (cmd & 0x0000000F);
		return 0;
	case check_texture_addr_mode:
		cur_seq->unfinished = tex_address;
		if (2 == (tmp = cmd & 0x00000003)) {
			DRM_ERROR
			    ("Attempt to fetch texture from system memory.\n");
			return 2;
		}
		cur_seq->agp_texture = (tmp == 3);
		cur_seq->tex_palette_size[cur_seq->texture] =
		    (cmd >> 16) & 0x000000007;
		return 0;
	case check_for_vertex_count:
		cur_seq->vertex_count = cmd & 0x0000FFFF;
		return 0;
	case check_number_texunits:
		cur_seq->multitex = (cmd >> 3) & 1;
		return 0;
	default:
		DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
		return 2;
	}
	return 2;
}

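/*
 * Verify a vertex primitive list: check the B and A command headers,
 * work out the number of dwords per vertex and require each sub-list to
 * be terminated by the expected vertex fire command. The location of
 * every fire command is recorded for use by via_parse_header2().
 */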
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
		    drm_via_state_t *cur_seq)
{
	drm_via_private_t *dev_priv =
	    (drm_via_private_t *) cur_seq->dev->dev_private;
	uint32_t a_fire, bcmd, dw_count;
	int ret = 0;
	int have_fire;
	const uint32_t *buf = *buffer;

	while (buf < buf_end) {
		have_fire = 0;
		if ((buf_end - buf) < 2) {
			DRM_ERROR
			    ("Unexpected termination of primitive list.\n");
			ret = 1;
			break;
		}
		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
			break;
		bcmd = *buf++;
		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
			DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
				  *buf);
			ret = 1;
			break;
		}
		a_fire =
		    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
		    HC_HE3Fire_MASK;

		/*
		 * How many dwords per vertex ?
		 */

		if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
			DRM_ERROR("Illegal B command vertex data for AGP.\n");
			ret = 1;
			break;
		}

		dw_count = 0;
		if (bcmd & (1 << 7))
			dw_count += (cur_seq->multitex) ? 2 : 1;
		if (bcmd & (1 << 8))
			dw_count += (cur_seq->multitex) ? 2 : 1;
		if (bcmd & (1 << 9))
			dw_count++;
		if (bcmd & (1 << 10))
			dw_count++;
		if (bcmd & (1 << 11))
			dw_count++;
		if (bcmd & (1 << 12))
			dw_count++;
		if (bcmd & (1 << 13))
			dw_count++;
		if (bcmd & (1 << 14))
			dw_count++;

		while (buf < buf_end) {
			if (*buf == a_fire) {
				if (dev_priv->num_fire_offsets >=
				    VIA_FIRE_BUF_SIZE) {
					DRM_ERROR("Fire offset buffer full.\n");
					ret = 1;
					break;
				}
				dev_priv->fire_offsets[dev_priv->
						       num_fire_offsets++] =
				    buf;
				have_fire = 1;
				buf++;
				if (buf < buf_end && *buf == a_fire)
					buf++;
				break;
			}
			if ((*buf == HALCYON_HEADER2) ||
			    ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
				DRM_ERROR("Missing Vertex Fire command, "
					  "Stray Vertex Fire command  or verifier "
					  "lost sync.\n");
				ret = 1;
				break;
			}
			if ((ret = eat_words(&buf, buf_end, dw_count)))
				break;
		}
		if (buf >= buf_end && !have_fire) {
			DRM_ERROR("Missing Vertex Fire command or verifier "
				  "lost sync.\n");
			ret = 1;
			break;
		}
		if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
			DRM_ERROR("AGP Primitive list end misaligned.\n");
			ret = 1;
			break;
		}
	}
	*buffer = buf;
	return ret;
}

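/*
 * Verify a HALCYON_HEADER2 sequence. The parameter type selects either the
 * vertex data path or one of the hazard tables above; each following dword
 * is then checked with investigate_hazard() until a new header is found.
 */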
static __inline__ verifier_state_t
via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
		  drm_via_state_t *hc_state)
{
	uint32_t cmd;
	int hz_mode;
	hazard_t hz;
	const uint32_t *buf = *buffer;
	const hazard_t *hz_table;

	if ((buf_end - buf) < 2) {
		DRM_ERROR
		    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
		return state_error;
	}
	buf++;
	cmd = (*buf++ & 0xFFFF0000) >> 16;

	switch (cmd) {
	case HC_ParaType_CmdVdata:
		if (via_check_prim_list(&buf, buf_end, hc_state))
			return state_error;
		*buffer = buf;
		return state_command;
	case HC_ParaType_NotTex:
		hz_table = table1;
		break;
	case HC_ParaType_Tex:
		hc_state->texture = 0;
		hz_table = table2;
		break;
	case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
		hc_state->texture = 1;
		hz_table = table2;
		break;
	case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
		hz_table = table3;
		break;
	case HC_ParaType_Auto:
		if (eat_words(&buf, buf_end, 2))
			return state_error;
		*buffer = buf;
		return state_command;
	case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
		if (eat_words(&buf, buf_end, 32))
			return state_error;
		*buffer = buf;
		return state_command;
	case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
	case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
		DRM_ERROR("Texture palettes are rejected because of "
			  "lack of info how to determine their size.\n");
		return state_error;
	case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
		DRM_ERROR("Fog factor palettes are rejected because of "
			  "lack of info how to determine their size.\n");
		return state_error;
	default:

		/*
		 * There are some unimplemented HC_ParaTypes here, that
		 * need to be implemented if the Mesa driver is extended.
		 */

		DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
			  "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
			  cmd, *(buf - 2));
		*buffer = buf;
		return state_error;
	}

	while (buf < buf_end) {
		cmd = *buf++;
		if ((hz = hz_table[cmd >> 24])) {
			if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
				if (hz_mode == 1) {
					buf--;
					break;
				}
				return state_error;
			}
		} else if (hc_state->unfinished &&
			   finish_current_sequence(hc_state)) {
			return state_error;
		}
	}
	if (hc_state->unfinished && finish_current_sequence(hc_state))
		return state_error;
	*buffer = buf;
	return state_command;
}

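/*
 * Emit an already verified HALCYON_HEADER2 sequence, writing the data
 * dwords into the command transfer space. Vertex data is written up to
 * each fire offset recorded by via_check_prim_list().
 */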
static __inline__ verifier_state_t
via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
		  const uint32_t *buf_end, int *fire_count)
{
	uint32_t cmd;
	const uint32_t *buf = *buffer;
	const uint32_t *next_fire;
	int burst = 0;

	next_fire = dev_priv->fire_offsets[*fire_count];
	buf++;
	cmd = (*buf & 0xFFFF0000) >> 16;
	VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
	switch (cmd) {
	case HC_ParaType_CmdVdata:
		while ((buf < buf_end) &&
		       (*fire_count < dev_priv->num_fire_offsets) &&
		       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
			while (buf <= next_fire) {
				VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
					  (burst & 63), *buf++);
				burst += 4;
			}
			if ((buf < buf_end)
			    && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
				buf++;

			if (++(*fire_count) < dev_priv->num_fire_offsets)
				next_fire = dev_priv->fire_offsets[*fire_count];
		}
		break;
	default:
		while (buf < buf_end) {

			if (*buf == HC_HEADER2 ||
			    (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				break;

			VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
				  (burst & 63), *buf++);
			burst += 4;
		}
	}
	*buffer = buf;
	return state_command;
}

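/*
 * Video DMA may only target a restricted MMIO range: reject addresses in
 * the 3D / command burst area, the PCI DMA area and the VGA registers.
 */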
static __inline__ int verify_mmio_address(uint32_t address)
{
	if ((address > 0x3FF) && (address < 0xC00)) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access 3D- or command burst area.\n");
		return 1;
	} else if ((address > 0xCFF) && (address < 0x1300)) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access PCI DMA area.\n");
		return 1;
	} else if (address > 0x13FF) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access VGA registers.\n");
		return 1;
	}
	return 0;
}

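/*
 * The padding that aligns a video command to four dwords must be zero;
 * verify that and advance past it.
 */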
static __inline__ int
verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
		  uint32_t dwords)
{
	const uint32_t *buf = *buffer;

	if (buf_end - buf < dwords) {
		DRM_ERROR("Illegal termination of video command.\n");
		return 1;
	}
	while (dwords--) {
		if (*buf++) {
			DRM_ERROR("Illegal video command tail.\n");
			return 1;
		}
	}
	*buffer = buf;
	return 0;
}

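/*
 * Verify a run of HALCYON_HEADER1 register writes, rejecting writes that
 * target the 3D / command burst area or registers beyond the allowed range.
 */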
static __inline__ verifier_state_t
via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
{
	uint32_t cmd;
	const uint32_t *buf = *buffer;
	verifier_state_t ret = state_command;

	while (buf < buf_end) {
		cmd = *buf;
		if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
		    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
				break;
			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
				  "Attempt to access 3D- or command burst area.\n");
			ret = state_error;
			break;
		} else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
				break;
			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
				  "Attempt to access VGA registers.\n");
			ret = state_error;
			break;
		} else {
			buf += 2;
		}
	}
	*buffer = buf;
	return ret;
}

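/*
 * Emit a verified HALCYON_HEADER1 run: each header encodes a register
 * offset and the following dword is written to that register.
 */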
static __inline__ verifier_state_t
via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
		  const uint32_t *buf_end)
{
	register uint32_t cmd;
	const uint32_t *buf = *buffer;

	while (buf < buf_end) {
		cmd = *buf;
		if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
			break;
		VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
		buf++;
	}
	*buffer = buf;
	return state_command;
}

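/*
 * Verify a video header5 command: a register address, a dword count, two
 * fixed header dwords, the data itself and a zero-filled alignment tail.
 */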
static __inline__ verifier_state_t
via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
{
	uint32_t data;
	const uint32_t *buf = *buffer;

	if (buf_end - buf < 4) {
		DRM_ERROR("Illegal termination of video header5 command\n");
		return state_error;
	}

	data = *buf++ & ~VIA_VIDEOMASK;
	if (verify_mmio_address(data))
		return state_error;

	data = *buf++;
	if (*buf++ != 0x00F50000) {
		DRM_ERROR("Illegal header5 header data\n");
		return state_error;
	}
	if (*buf++ != 0x00000000) {
		DRM_ERROR("Illegal header5 header data\n");
		return state_error;
	}
	if (eat_words(&buf, buf_end, data))
		return state_error;
	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
		return state_error;
	*buffer = buf;
	return state_command;

}

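/*
 * Emit a verified video header5 command: all data dwords are written to the
 * single register address carried in the header.
 */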
static __inline__ verifier_state_t
via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
		   const uint32_t *buf_end)
{
	uint32_t addr, count, i;
	const uint32_t *buf = *buffer;

	addr = *buf++ & ~VIA_VIDEOMASK;
	i = count = *buf;
	buf += 3;
	while (i--)
		VIA_WRITE(addr, *buf++);
	if (count & 3)
		buf += 4 - (count & 3);
	*buffer = buf;
	return state_command;
}

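/*
 * Verify a video header6 command: a dword count, two fixed header dwords
 * and then (address, data) pairs whose addresses must all pass
 * verify_mmio_address(), followed by a zero-filled alignment tail.
 */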
static __inline__ verifier_state_t
via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
{
	uint32_t data;
	const uint32_t *buf = *buffer;
	uint32_t i;

	if (buf_end - buf < 4) {
		DRM_ERROR("Illegal termination of video header6 command\n");
		return state_error;
	}
	buf++;
	data = *buf++;
	if (*buf++ != 0x00F60000) {
		DRM_ERROR("Illegal header6 header data\n");
		return state_error;
	}
	if (*buf++ != 0x00000000) {
		DRM_ERROR("Illegal header6 header data\n");
		return state_error;
	}
	if ((buf_end - buf) < (data << 1)) {
		DRM_ERROR("Illegal termination of video header6 command\n");
		return state_error;
	}
	for (i = 0; i < data; ++i) {
		if (verify_mmio_address(*buf++))
			return state_error;
		buf++;
	}
	data <<= 1;
	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
		return state_error;
	*buffer = buf;
	return state_command;
}

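/*
 * Emit a verified video header6 command: write each (address, data) pair
 * directly to the hardware.
 */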
static __inline__ verifier_state_t
via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
		   const uint32_t *buf_end)
{

	uint32_t addr, count, i;
	const uint32_t *buf = *buffer;

	i = count = *++buf;
	buf += 3;
	while (i--) {
		addr = *buf++;
		VIA_WRITE(addr, *buf++);
	}
	count <<= 1;
	if (count & 3)
		buf += 4 - (count & 3);
	*buffer = buf;
	return state_command;
}

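/*
 * Verify a user-supplied command stream before it is fed to the hardware.
 * The stream is walked with the state machine above; on any verification
 * failure the shared verifier state is restored and -EINVAL is returned.
 */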
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
			  struct drm_device * dev, int agp)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_state_t *hc_state = &dev_priv->hc_state;
	drm_via_state_t saved_state = *hc_state;
	uint32_t cmd;
	const uint32_t *buf_end = buf + (size >> 2);
	verifier_state_t state = state_command;
	int cme_video;
	int supported_3d;

	cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
		     dev_priv->chipset == VIA_DX9_0);

	supported_3d = dev_priv->chipset != VIA_DX9_0;

	hc_state->dev = dev;
	hc_state->unfinished = no_sequence;
	hc_state->map_cache = NULL;
	hc_state->agp = agp;
	hc_state->buf_start = buf;
	dev_priv->num_fire_offsets = 0;

	while (buf < buf_end) {

		switch (state) {
		case state_header2:
			state = via_check_header2(&buf, buf_end, hc_state);
			break;
		case state_header1:
			state = via_check_header1(&buf, buf_end);
			break;
		case state_vheader5:
			state = via_check_vheader5(&buf, buf_end);
			break;
		case state_vheader6:
			state = via_check_vheader6(&buf, buf_end);
			break;
		case state_command:
			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
			    supported_3d)
				state = state_header2;
			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
				state = state_header1;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
				state = state_vheader5;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				state = state_vheader6;
			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
				state = state_error;
			} else {
				DRM_ERROR
				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
				     cmd);
				state = state_error;
			}
			break;
		case state_error:
		default:
			*hc_state = saved_state;
			return -EINVAL;
		}
	}
	if (state == state_error) {
		*hc_state = saved_state;
		return -EINVAL;
	}
	return 0;
}

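/*
 * Emit an already verified command stream to the hardware through MMIO,
 * using the fire offsets recorded during verification to delimit vertex
 * data bursts.
 */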
int
via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
			 unsigned int size)
{

	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	uint32_t cmd;
	const uint32_t *buf_end = buf + (size >> 2);
	verifier_state_t state = state_command;
	int fire_count = 0;

	while (buf < buf_end) {

		switch (state) {
		case state_header2:
			state =
			    via_parse_header2(dev_priv, &buf, buf_end,
					      &fire_count);
			break;
		case state_header1:
			state = via_parse_header1(dev_priv, &buf, buf_end);
			break;
		case state_vheader5:
			state = via_parse_vheader5(dev_priv, &buf, buf_end);
			break;
		case state_vheader6:
			state = via_parse_vheader6(dev_priv, &buf, buf_end);
			break;
		case state_command:
			if (HALCYON_HEADER2 == (cmd = *buf))
				state = state_header2;
			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
				state = state_header1;
			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
				state = state_vheader5;
			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				state = state_vheader6;
			else {
				DRM_ERROR
				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
				     cmd);
				state = state_error;
			}
			break;
		case state_error:
		default:
			return -EINVAL;
		}
	}
	if (state == state_error)
		return -EINVAL;
	return 0;
}

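/*
 * Expand a compact init table into a full 256-entry lookup table, marking
 * every command code that is not listed as forbidden_command.
 */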
static void
setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
	int i;

	for (i = 0; i < 256; ++i)
		table[i] = forbidden_command;

	for (i = 0; i < size; ++i)
		table[init_table[i].code] = init_table[i].hz;
}

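/*
 * Build the three hazard lookup tables used by the verifier.
 */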
void via_init_command_verifier(void)
{
	setup_hazard_table(init_table1, table1,
			   sizeof(init_table1) / sizeof(hz_init_t));
	setup_hazard_table(init_table2, table2,
			   sizeof(init_table2) / sizeof(hz_init_t));
	setup_hazard_table(init_table3, table3,
			   sizeof(init_table3) / sizeof(hz_init_t));
}