
drm_vm.c revision 161c48100236916e98d33a9c8b5fc8eae6decd15
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	tmp |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and, if it's AGP memory, find the real physical page to
 * map, get the page, increment the use count and store it in \p vmf.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_fault_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
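
/*
 * Illustrative sketch (not part of the original file), assuming 4 KiB pages:
 * for an AGP map with map->offset = 0x100000, backed by a drm_agp_mem entry
 * bound at 0x100000 and covering 64 pages, a fault at vma->vm_start + 0x23456
 * gives baddr = 0x123456.  The list walk above matches that entry because
 * 0x100000 <= 0x123456 < 0x100000 + 64 * PAGE_SIZE, and the page index is
 * (baddr - agpmem->bound) >> PAGE_SHIFT = 0x23, so pages[0x23] is returned
 * through vmf->page.
 */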

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * store it in \p vmf.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}
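
/*
 * Illustrative sketch (not part of the original file): for a 16 KiB _DRM_SHM
 * map whose handle points at a vmalloc()ed buffer, a fault at
 * vma->vm_start + 0x3004 yields offset = 0x3004, and
 * vmalloc_to_page(map->handle + 0x3004) resolves to the struct page backing
 * the fourth page of the buffer, which is returned through vmf->page.
 */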

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			case _DRM_GEM:
				DRM_ERROR("tried to rmmap GEM object\n");
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure for the \p vma and add it to
 * drm_device::vmalist.
 */
void drm_vm_open_locked(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search for the \p vma entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}
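
/*
 * Illustrative sketch (not part of the original file), assuming 4 KiB pages:
 * if the driver allocated dma->page_count = 32 DMA pages, only an mmap()
 * request spanning exactly 32 << PAGE_SHIFT = 128 KiB passes the length
 * check in drm_mmap_dma() above; any other length returns -EINVAL.
 */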

resource_size_t drm_core_get_map_ofs(struct drm_local_map *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
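
/*
 * Illustrative sketch (not part of the original file): drivers typically wire
 * these helpers into their struct drm_driver, which is how
 * dev->driver->get_reg_ofs() in drm_mmap_locked() below ends up calling
 * drm_core_get_reg_ofs().  "example_driver" is hypothetical and all other
 * fields are omitted:
 *
 *	static struct drm_driver example_driver = {
 *		.get_map_ofs	= drm_core_get_map_ofs,
 *		.get_reg_ofs	= drm_core_get_reg_ofs,
 *	};
 */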

/**
 * mmap a DRM map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU can't access the bus DMA
			 * address directly, so for memory of type _DRM_AGP we
			 * sort out the real physical pages and mappings in
			 * fault().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
#else
		if (remap_pfn_range(vma, vma->vm_start,
					(map->offset + offset) >> PAGE_SHIFT,
					vma->vm_end - vma->vm_start,
					vma->vm_page_prot))
			return -EAGAIN;
#endif

		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault */
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
	vma->vm_flags |= VM_DONTEXPAND;

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
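
/*
 * Illustrative userspace sketch (not part of the original file): these
 * handlers are reached by mmap()ing the DRM device node, passing the map's
 * user token (the key stored in dev->map_hash) as the file offset.
 * "map_handle" and "map_size" are hypothetical values, typically obtained
 * through the DRM_IOCTL_ADD_MAP/DRM_IOCTL_GET_MAP ioctls or libdrm's drmMap():
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *addr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, map_handle);
 *
 * A zero offset instead selects the DMA buffer path handled by drm_mmap_dma().
 */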