/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  commctrl.c
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

/**
 *	ioctl_send_fib	-	send a FIB from userspace
 *	@dev:	adapter being processed
 *	@arg:	arguments to the ioctl call
 *
 *	This routine sends a fib to the adapter on behalf of a user level
 *	program.
 */
# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE

static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned size;
	int retval;

	if (dev->in_reset) {
		return -EBUSY;
	}
	fibptr = aac_fib_alloc(dev);
	if(fibptr == NULL) {
		return -ENOMEM;
	}

	kfib = fibptr->hw_fib_va;
	/*
	 *	First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 *	Since we copy based on the fib header size, make sure that we
	 *	will not overrun the buffer when we copy the memory. Return
	 *	an error if we would.
	 */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
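	/*
	 *	Use the larger of the request size and the caller's declared
	 *	SenderSize: the adapter's response is copied back to user
	 *	space through this same buffer at the end of this routine.
	 */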
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		if (size > 2048) {
			retval = -EINVAL;
			goto cleanup;
		}

		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
		if (!kfib) {
			retval = -ENOMEM;
			goto cleanup;
		}

		/* Highjack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}

	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}

	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to allow
		 * cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size) , FsaNormal,
				1, 1, NULL, NULL);
		if (retval) {
			goto cleanup;
		}
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 *	Make sure that the size returned by the adapter (which includes
	 *	the header) is less than or equal to the size of a fib, so we
	 *	don't corrupt application data. Then copy that size to the user
	 *	buffer. (Don't try to add the header information again, since it
	 *	was already included by the adapter.)
	 */

	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
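	/*
	 *	If the fib's buffer was replaced above to hold a large request,
	 *	release the temporary DMA buffer and restore the original
	 *	hw_fib mapping before freeing the fib.
	 */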
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}

/**
 *	open_getadapter_fib	-	open an AdapterFibContext
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine opens a new AdapterFibContext for the caller and copies
 *	its unique handle back to user space; later GET_NEXT_ADAPTER_FIB
 *	calls use that handle to retrieve queued AIFs.
 */

static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 * better guarantee of uniqueness for the locked loop below.
		 * Without the aid of a persistent history, this also helps
		 * reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		sema_init(&fibctx->wait_sem, 0);
		fibctx->wait = 0;
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		fibctx->count = 0;
		INIT_LIST_HEAD(&fibctx->fib_list);
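		/*
		 *	Stamp the context with its creation time in seconds
		 *	(jiffies/HZ); next_getadapter_fib() refreshes this stamp
		 *	each time the context is polled.
		 */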
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
						sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}

/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine will get the next Fib, if available, from the AdapterFibContext
 *	passed in from the user.
 */

static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;

	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;

	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	status = 0;
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
return_fib:
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);

		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
		status = 0;
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			ssleep(1);
			dev->aif_thread = 0;
			dev->thread = kthread_run(aac_command_thread, dev,
						  "%s", dev->name);
			ssleep(1);
		}
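		/*
		 *	Nothing queued: if the caller asked to wait, block on
		 *	the context's semaphore until the next AIF is queued for
		 *	this context (or a signal interrupts us), then retake the
		 *	lock and retry.
		 */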
		if (f.wait) {
			if(down_interruptible(&fibctx->wait_sem) < 0) {
				status = -ERESTARTSYS;
			} else {
				/* Lock again and retry */
				spin_lock_irqsave(&dev->fib_lock, flags);
				goto return_fib;
			}
		} else {
			status = -EAGAIN;
		}
	}
	fibctx->jiffies = jiffies/HZ;
	return status;
}

int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;

	/*
	 *	First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		fibctx->count--;
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 *	Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 *	Invalidate context
	 */
	fibctx->type = 0;
	/*
	 *	Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}

/**
 *	close_getadapter_fib	-	close down user fib context
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine will close down the fibctx passed in from the user.
 */

static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;

	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */

	entry = dev->fib_list.next;
	fibctx = NULL;

	while(entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */

	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
		 (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}

/**
 *	check_revision	-	report the driver version
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine returns the driver version.
 *	Under Linux, there have been no version incompatibilities, so this is
 *	simple!
 */

static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;

	response.compat = 1;
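	/*
	 *	Pack the dotted "major.minor.sub" string from aac_driver_version
	 *	into a single word: major in bits 31-24, minor in bits 23-16,
	 *	sub in the low bits.
	 */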
	version = (simple_strtol(driver_version,
				&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
	response.version = cpu_to_le32(version);
#	ifdef AAC_DRIVER_BUILD
		response.build = cpu_to_le32(AAC_DRIVER_BUILD);
#	else
		response.build = cpu_to_le32(9999);
#	endif

	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}


/**
 *	aac_send_raw_srb	-	send a user supplied raw SRB
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	Sends a SCSI request block built in user space through to the
 *	adapter, bouncing any scatter/gather data through kernel buffers.
 */

static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
{
	struct fib* srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	struct aac_srb_reply* reply;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[32];
	void *sg_list[32];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;


	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)){
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 *	Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}
	aac_fib_init(srbfib);
	/* raw_srb FIB is not FastResponseCapable */
	srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);

	srbcmd = (struct aac_srb*) fib_data(srbfib);

	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
		rcode = -EINVAL;
		goto cleanup;
	}

	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if(copy_from_user(user_srbcmd, user_srb,fibsize)){
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

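	/*
	 *	The user's reply buffer sits immediately after the SRB command
	 *	in the ioctl argument; the adapter's SRB reply is copied there
	 *	once the command completes.
	 */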
	user_reply = arg+fibsize;

	flags = user_srbcmd->flags; /* from user in cpu order */
	// Fix up srb for endian and force some values

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
	srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
	srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
	srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
	srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
	srbcmd->flags	 = cpu_to_le32(flags);
	srbcmd->retry_limit = 0; // Obsolete parameter
	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));

	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
		  le32_to_cpu(srbcmd->sg.count)));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
	  (sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%lu;%lu "
		  "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
		  "issued fibsize=%d\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	byte_count = 0;
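	/*
	 *	Build the scatter/gather list.  Adapters that advertise
	 *	AAC_OPT_SGMAP_HOST64 take 64-bit sgentry64 elements, everything
	 *	else takes 32-bit sgentry elements; in both cases each user
	 *	segment is bounced through a freshly allocated kernel buffer
	 *	that is DMA-mapped and, for writes, pre-filled from user space.
	 */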
	if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;

		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void* p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
				if(!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count,i,upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		} else {
			struct user_sgmap* usg;
			usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			memcpy (usg, upsg, actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap));
			actual_fibsize = actual_fibsize64;

			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void* p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
				if(!p) {
					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count,i,usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
						kfree (usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
			kfree (usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
	} else {
		struct user_sgmap* upsg = &user_srbcmd->sg;
		struct sgmap* psg = &srbcmd->sg;

		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				uintptr_t addr;
				void* p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
				if(!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count,i,usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void* p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;

				if (flags & SRB_DataOut) {
					if(copy_from_user(p, sg_user[i],
							upsg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
					upsg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}
	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0){
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}

	if (flags & SRB_DataIn) {
		for(i = 0 ; i <= sg_indx; i++){
			byte_count = le32_to_cpu(
			  (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
			      ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
			      : srbcmd->sg.sg[i].count);
			if(copy_to_user(sg_user[i], sg_list[i], byte_count)){
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;

			}
		}
	}

	reply = (struct aac_srb_reply *) fib_data(srbfib);
	if(copy_to_user(user_reply,reply,sizeof(struct aac_srb_reply))){
		dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}

cleanup:
	kfree(user_srbcmd);
	for(i=0; i <= sg_indx; i++){
		kfree(sg_list[i]);
	}
	if (rcode != -ERESTARTSYS) {
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}

	return rcode;
}

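/*
 *	Reply payload for the FSACTL_GET_PCI_INFO ioctl: the PCI bus number
 *	and slot the adapter sits in.
 */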
struct aac_pci_info {
	u32 bus;
	u32 slot;
};


static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);

	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}


int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;

	/*
	 *	HBA gets first crack
	 */

	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
	case FSACTL_SENDFIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev,arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev,arg);
		break;
	default:
		status = -ENOTTY;
		break;
	}
	return status;
}

890