
/*
 * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/delay.h>

#include "ipath_kernel.h"
#include "ipath_user_sdma.h"

/* minimum size of header */
#define IPATH_USER_SDMA_MIN_HEADER_LENGTH	64
/* expected size of headers (for dma_pool) */
#define IPATH_USER_SDMA_EXP_HEADER_LENGTH	64
/* length mask in PBC (lower 11 bits) */
#define IPATH_PBC_LENGTH_MASK			((1 << 11) - 1)

struct ipath_user_sdma_pkt {
	u8 naddr;		/* dimension of addr (1..3) ... */
	u32 counter;		/* sdma pkts queued counter for this entry */
	u64 added;		/* global descq number of entries */

	struct {
		u32 offset;			/* offset for kvaddr, addr */
		u32 length;			/* length in page */
		u8  put_page;			/* should we put_page? */
		u8  dma_mapped;			/* is page dma_mapped? */
		struct page *page;		/* may be NULL (coherent mem) */
		void *kvaddr;			/* FIXME: only for pio hack */
		dma_addr_t addr;
	} addr[4];   /* max pages, any more and we coalesce */
	struct list_head list;	/* list element */
};

struct ipath_user_sdma_queue {
	/*
	 * pkts sent to dma engine are queued on this
	 * list head.  the type of the elements of this
	 * list are struct ipath_user_sdma_pkt...
	 */
	struct list_head sent;

	/* headers with expected length are allocated from here... */
	char header_cache_name[64];
	struct dma_pool *header_cache;

	/* packets are allocated from the slab cache... */
	char pkt_slab_name[64];
	struct kmem_cache *pkt_slab;

	/* as packets go on the queued queue, they are counted... */
	u32 counter;
	u32 sent_counter;

	/* dma page table */
	struct rb_root dma_pages_root;

	/* protect everything above... */
	struct mutex lock;
};

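/*
 * allocate a per-port user SDMA queue: a slab cache for the packet
 * bookkeeping structures and a DMA pool for the fixed-size headers.
 */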
struct ipath_user_sdma_queue *
ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
{
	struct ipath_user_sdma_queue *pq =
		kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);

	if (!pq)
		goto done;

	pq->counter = 0;
	pq->sent_counter = 0;
	INIT_LIST_HEAD(&pq->sent);

	mutex_init(&pq->lock);

	snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
		 "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
	pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
					 sizeof(struct ipath_user_sdma_pkt),
					 0, 0, NULL);

	if (!pq->pkt_slab)
		goto err_kfree;

	snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
		 "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
	pq->header_cache = dma_pool_create(pq->header_cache_name,
					   dev,
					   IPATH_USER_SDMA_EXP_HEADER_LENGTH,
					   4, 0);
	if (!pq->header_cache)
		goto err_slab;

	pq->dma_pages_root = RB_ROOT;

	goto done;

err_slab:
	kmem_cache_destroy(pq->pkt_slab);
err_kfree:
	kfree(pq);
	pq = NULL;

done:
	return pq;
}

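/* fill in one address fragment (user page or coherent buffer) of a packet */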
static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
				      int i, size_t offset, size_t len,
				      int put_page, int dma_mapped,
				      struct page *page,
				      void *kvaddr, dma_addr_t dma_addr)
{
	pkt->addr[i].offset = offset;
	pkt->addr[i].length = len;
	pkt->addr[i].put_page = put_page;
	pkt->addr[i].dma_mapped = dma_mapped;
	pkt->addr[i].page = page;
	pkt->addr[i].kvaddr = kvaddr;
	pkt->addr[i].addr = dma_addr;
}

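/* fragment 0 always holds the PBC and the packet header */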
static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
					u32 counter, size_t offset,
					size_t len, int dma_mapped,
					struct page *page,
					void *kvaddr, dma_addr_t dma_addr)
{
	pkt->naddr = 1;
	pkt->counter = counter;
	ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
				  kvaddr, dma_addr);
}

/* too many pages in the iovec; coalesce them into a single page */
static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
				    struct ipath_user_sdma_pkt *pkt,
				    const struct iovec *iov,
				    unsigned long niov)
{
	int ret = 0;
	struct page *page = alloc_page(GFP_KERNEL);
	void *mpage_save;
	char *mpage;
	int i;
	int len = 0;
	dma_addr_t dma_addr;

	if (!page) {
		ret = -ENOMEM;
		goto done;
	}

	mpage = kmap(page);
	mpage_save = mpage;
	for (i = 0; i < niov; i++) {
		int cfur;

		cfur = copy_from_user(mpage,
				      iov[i].iov_base, iov[i].iov_len);
		if (cfur) {
			ret = -EFAULT;
			goto free_unmap;
		}

		mpage += iov[i].iov_len;
		len += iov[i].iov_len;
	}

	dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
				DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
		ret = -ENOMEM;
		goto free_unmap;
	}

	/* the header is in frag 0; the coalesced payload becomes frag 1 */
	ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
				  dma_addr);
	pkt->naddr = 2;

	goto done;

free_unmap:
	kunmap(page);
	__free_page(page);
done:
	return ret;
}

/* how many pages in this iovec element? */
static int ipath_user_sdma_num_pages(const struct iovec *iov)
{
	const unsigned long addr  = (unsigned long) iov->iov_base;
	const unsigned long  len  = iov->iov_len;
	const unsigned long spage = addr & PAGE_MASK;
	const unsigned long epage = (addr + len - 1) & PAGE_MASK;

	return 1 + ((epage - spage) >> PAGE_SHIFT);
}

/* truncate length to page boundary */
static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
{
	const unsigned long offset = addr & ~PAGE_MASK;

	return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
}

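/*
 * release one fragment: undo the DMA mapping and kmap, then free the
 * page (put_page for pinned user pages, __free_page for pages we
 * allocated) or return the coherent header buffer to the DMA pool.
 */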
static void ipath_user_sdma_free_pkt_frag(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct ipath_user_sdma_pkt *pkt,
					  int frag)
{
	const int i = frag;

	if (pkt->addr[i].page) {
		if (pkt->addr[i].dma_mapped)
			dma_unmap_page(dev,
				       pkt->addr[i].addr,
				       pkt->addr[i].length,
				       DMA_TO_DEVICE);

		if (pkt->addr[i].kvaddr)
			kunmap(pkt->addr[i].page);

		if (pkt->addr[i].put_page)
			put_page(pkt->addr[i].page);
		else
			__free_page(pkt->addr[i].page);
	} else if (pkt->addr[i].kvaddr)
		/* free coherent mem from cache... */
		dma_pool_free(pq->header_cache,
			      pkt->addr[i].kvaddr, pkt->addr[i].addr);
}

/* return number of pages pinned... */
static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
				     struct ipath_user_sdma_pkt *pkt,
				     unsigned long addr, int tlen, int npages)
{
	struct page *pages[2];
	int j;
	int ret;

	ret = get_user_pages_fast(addr, npages, 0, pages);
	if (ret != npages) {
		int i;

		/* partial pin: release whatever we did get */
		for (i = 0; i < ret; i++)
			put_page(pages[i]);

		ret = -ENOMEM;
		goto done;
	}

	for (j = 0; j < npages; j++) {
		/* map the pages... */
		const int flen =
			ipath_user_sdma_page_length(addr, tlen);
		dma_addr_t dma_addr =
			dma_map_page(&dd->pcidev->dev,
				     pages[j], 0, flen, DMA_TO_DEVICE);
		unsigned long fofs = addr & ~PAGE_MASK;

		if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
			/* unpin the pages not yet attached to the packet */
			while (j < npages)
				put_page(pages[j++]);
			ret = -ENOMEM;
			goto done;
		}

		ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
					  pages[j], kmap(pages[j]),
					  dma_addr);

		pkt->naddr++;
		addr += flen;
		tlen -= flen;
	}

done:
	return ret;
}

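/* pin and map every payload iovec element; free the packet's frags on failure */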
static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   struct ipath_user_sdma_pkt *pkt,
				   const struct iovec *iov,
				   unsigned long niov)
{
	int ret = 0;
	unsigned long idx;

	for (idx = 0; idx < niov; idx++) {
		const int npages = ipath_user_sdma_num_pages(iov + idx);
		const unsigned long addr = (unsigned long) iov[idx].iov_base;

		ret = ipath_user_sdma_pin_pages(dd, pkt,
						addr, iov[idx].iov_len,
						npages);
		if (ret < 0)
			goto free_pkt;
	}

	goto done;

free_pkt:
	for (idx = 0; idx < pkt->naddr; idx++)
		ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);

done:
	return ret;
}

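/*
 * attach the payload: pin the user pages directly if they fit in the
 * fragment array, otherwise copy the whole payload into one page.
 */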
static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
					struct ipath_user_sdma_queue *pq,
					struct ipath_user_sdma_pkt *pkt,
					const struct iovec *iov,
					unsigned long niov, int npages)
{
	int ret = 0;

	if (npages >= ARRAY_SIZE(pkt->addr))
		ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
	else
		ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);

	return ret;
}

/* free every packet (and all of its fragments) on the list */
static void ipath_user_sdma_free_pkt_list(struct device *dev,
					  struct ipath_user_sdma_queue *pq,
					  struct list_head *list)
{
	struct ipath_user_sdma_pkt *pkt, *pkt_next;

	list_for_each_entry_safe(pkt, pkt_next, list, list) {
		int i;

		for (i = 0; i < pkt->naddr; i++)
			ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);

		kmem_cache_free(pq->pkt_slab, pkt);
	}
}

/*
 * copy headers, coalesce etc -- pq->lock must be held
 *
 * we queue all the packets on list, returning the number of iovec
 * entries consumed.  list must be empty initially, since we clean it
 * up if there is an error...
 */
static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
				      struct ipath_user_sdma_queue *pq,
				      struct list_head *list,
				      const struct iovec *iov,
				      unsigned long niov,
				      int maxpkts)
{
	unsigned long idx = 0;
	int ret = 0;
	int npkts = 0;
	struct page *page = NULL;
	__le32 *pbc;
	dma_addr_t dma_addr;
	struct ipath_user_sdma_pkt *pkt = NULL;
	size_t len;
	size_t nw;
	u32 counter = pq->counter;
	int dma_mapped = 0;

	while (idx < niov && npkts < maxpkts) {
		const unsigned long addr = (unsigned long) iov[idx].iov_base;
		const unsigned long idx_save = idx;
		unsigned pktnw;
		unsigned pktnwc;
		int nfrags = 0;
		int npages = 0;
		int cfur;

		dma_mapped = 0;
		len = iov[idx].iov_len;
		nw = len >> 2;
		page = NULL;

		pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
		if (!pkt) {
			ret = -ENOMEM;
			goto free_list;
		}

		/* the first iovec of each packet is the PBC + header */
		if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
		    len > PAGE_SIZE || len & 3 || addr & 3) {
			ret = -EINVAL;
			goto free_pkt;
		}

		if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
			pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
					     &dma_addr);
		else
			pbc = NULL;

		if (!pbc) {
			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto free_pkt;
			}
			pbc = kmap(page);
		}

		cfur = copy_from_user(pbc, iov[idx].iov_base, len);
		if (cfur) {
			ret = -EFAULT;
			goto free_pbc;
		}

		/*
		 * this assignment is a bit strange.  it's because the
		 * pbc counts the number of 32 bit words in the full
		 * packet _except_ the first word of the pbc itself...
		 */
		pktnwc = nw - 1;

		/*
		 * pktnw computation yields the number of 32 bit words
		 * that the caller has indicated in the PBC.  note that
		 * this is one less than the total number of words that
		 * goes to the send DMA engine as the first 32 bit word
		 * of the PBC itself is not counted.  Armed with this count,
		 * we can verify that the packet is consistent with the
		 * iovec lengths.
		 */
		pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
		if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
			ret = -EINVAL;
			goto free_pbc;
		}

		/* walk the payload iovecs, counting fragments and pages */
		idx++;
		while (pktnwc < pktnw && idx < niov) {
			const size_t slen = iov[idx].iov_len;
			const unsigned long faddr =
				(unsigned long) iov[idx].iov_base;

			if (slen & 3 || faddr & 3 || !slen ||
			    slen > PAGE_SIZE) {
				ret = -EINVAL;
				goto free_pbc;
			}

			npages++;
			if ((faddr & PAGE_MASK) !=
			    ((faddr + slen - 1) & PAGE_MASK))
				npages++;

			pktnwc += slen >> 2;
			idx++;
			nfrags++;
		}

		if (pktnwc != pktnw) {
			ret = -EINVAL;
			goto free_pbc;
		}

		if (page) {
			dma_addr = dma_map_page(&dd->pcidev->dev,
						page, 0, len, DMA_TO_DEVICE);
			if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
				ret = -ENOMEM;
				goto free_pbc;
			}

			dma_mapped = 1;
		}

		ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
					    page, pbc, dma_addr);

		if (nfrags) {
			ret = ipath_user_sdma_init_payload(dd, pq, pkt,
							   iov + idx_save + 1,
							   nfrags, npages);
			if (ret < 0)
				goto free_pbc_dma;
		}

		counter++;
		npkts++;

		list_add_tail(&pkt->list, list);
	}

	ret = idx;
	goto done;

free_pbc_dma:
	if (dma_mapped)
		dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
free_pbc:
	if (page) {
		kunmap(page);
		__free_page(page);
	} else
		dma_pool_free(pq->header_cache, pbc, dma_addr);
free_pkt:
	kmem_cache_free(pq->pkt_slab, pkt);
free_list:
	ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
	return ret;
}

static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
						 u32 c)
{
	pq->sent_counter = c;
}

/* try to clean out queue -- needs pq->lock */
static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
				       struct ipath_user_sdma_queue *pq)
{
	struct list_head free_list;
	struct ipath_user_sdma_pkt *pkt;
	struct ipath_user_sdma_pkt *pkt_prev;
	int ret = 0;

	INIT_LIST_HEAD(&free_list);

	list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
		s64 descd = dd->ipath_sdma_descq_removed - pkt->added;

		if (descd < 0)
			break;

		list_move_tail(&pkt->list, &free_list);

		/* one more packet cleaned */
		ret++;
	}

	if (!list_empty(&free_list)) {
		u32 counter;

		pkt = list_entry(free_list.prev,
				 struct ipath_user_sdma_pkt, list);
		counter = pkt->counter;

		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		ipath_user_sdma_set_complete_counter(pq, counter);
	}

	return ret;
}

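/* free a queue created by ipath_user_sdma_queue_create() */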
void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
{
	if (!pq)
		return;

	kmem_cache_destroy(pq->pkt_slab);
	dma_pool_destroy(pq->header_cache);
	kfree(pq);
}

/* clean descriptor queue, returns > 0 if some elements cleaned */
static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
	ret = ipath_sdma_make_progress(dd);
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

/* we're in close, drain packets so that we can cleanup successfully... */
void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
				 struct ipath_user_sdma_queue *pq)
{
	int i;

	if (!pq)
		return;

	for (i = 0; i < 100; i++) {
		mutex_lock(&pq->lock);
		if (list_empty(&pq->sent)) {
			mutex_unlock(&pq->lock);
			break;
		}
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
		mutex_unlock(&pq->lock);
		msleep(10);
	}

	if (!list_empty(&pq->sent)) {
		struct list_head free_list;

		printk(KERN_INFO "drain: lists not empty: forcing!\n");
		INIT_LIST_HEAD(&free_list);
		mutex_lock(&pq->lock);
		list_splice_init(&pq->sent, &free_list);
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
		mutex_unlock(&pq->lock);
	}
}

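/*
 * build the two 64-bit words of an SDMA descriptor: desc0 carries the
 * low 32 physical address bits, generation, dword count and buffer
 * offset (plus the first/last/head flag bits), desc1 carries the
 * upper physical address bits.
 */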
static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
					   u64 addr, u64 dwlen, u64 dwoffset)
{
	return cpu_to_le64(/* SDmaPhyAddr[31:0] */
			   ((addr & 0xfffffffcULL) << 32) |
			   /* SDmaGeneration[1:0] */
			   ((dd->ipath_sdma_generation & 3ULL) << 30) |
			   /* SDmaDwordCount[10:0] */
			   ((dwlen & 0x7ffULL) << 16) |
			   /* SDmaBufOffset[12:2] */
			   (dwoffset & 0x7ffULL));
}

static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
{
	return descq | cpu_to_le64(1ULL << 12);
}

static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
{
					      /* last */  /* dma head */
	return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
}

static inline __le64 ipath_sdma_make_desc1(u64 addr)
{
	/* SDmaPhyAddr[47:32] */
	return cpu_to_le64(addr >> 32);
}

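/* write one fragment's descriptor into the descriptor queue slot at tail */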
static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
				      struct ipath_user_sdma_pkt *pkt, int idx,
				      unsigned ofs, u16 tail)
{
	const u64 addr = (u64) pkt->addr[idx].addr +
		(u64) pkt->addr[idx].offset;
	const u64 dwlen = (u64) pkt->addr[idx].length / 4;
	__le64 *descqp;
	__le64 descq0;

	descqp = &dd->ipath_sdma_descq[tail].qw[0];

	descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
	if (idx == 0)
		descq0 = ipath_sdma_make_first_desc0(descq0);
	if (idx == pkt->naddr - 1)
		descq0 = ipath_sdma_make_last_desc0(descq0);

	descqp[0] = descq0;
	descqp[1] = ipath_sdma_make_desc1(addr);
}

/* pq->lock must be held, get packets on the wire... */
static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
				     struct ipath_user_sdma_queue *pq,
				     struct list_head *pktlist)
{
	int ret = 0;
	unsigned long flags;
	u16 tail;

	if (list_empty(pktlist))
		return 0;

	if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
		return -ECOMM;

	spin_lock_irqsave(&dd->ipath_sdma_lock, flags);

	if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
		ret = -ECOMM;
		goto unlock;
	}

	tail = dd->ipath_sdma_descq_tail;
	while (!list_empty(pktlist)) {
		struct ipath_user_sdma_pkt *pkt =
			list_entry(pktlist->next, struct ipath_user_sdma_pkt,
				   list);
		int i;
		unsigned ofs = 0;
		u16 dtail = tail;

		if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
			goto unlock_check_tail;

		for (i = 0; i < pkt->naddr; i++) {
			ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
			ofs += pkt->addr[i].length >> 2;

			if (++tail == dd->ipath_sdma_descq_cnt) {
				tail = 0;
				++dd->ipath_sdma_generation;
			}
		}

		if ((ofs << 2) > dd->ipath_ibmaxlen) {
			ipath_dbg("packet size %X > ibmax %X, fail\n",
				  ofs << 2, dd->ipath_ibmaxlen);
			ret = -EMSGSIZE;
			goto unlock;
		}

		/*
		 * if the packet is >= 2KB mtu equivalent, we have to use
		 * the large buffers, and have to mark each descriptor as
		 * part of a large buffer packet.
		 */
		if (ofs >= IPATH_SMALLBUF_DWORDS) {
			for (i = 0; i < pkt->naddr; i++) {
				dd->ipath_sdma_descq[dtail].qw[0] |=
					cpu_to_le64(1ULL << 14);
				if (++dtail == dd->ipath_sdma_descq_cnt)
					dtail = 0;
			}
		}

		dd->ipath_sdma_descq_added += pkt->naddr;
		pkt->added = dd->ipath_sdma_descq_added;
		list_move_tail(&pkt->list, &pq->sent);
		ret++;
	}

unlock_check_tail:
	/* advance the tail on the chip if necessary */
	if (dd->ipath_sdma_descq_tail != tail) {
		wmb();
		ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
		dd->ipath_sdma_descq_tail = tail;
	}

unlock:
	spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);

	return ret;
}

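/*
 * hand a user writev() buffer list to the SDMA engine: parse the
 * iovecs into packets (up to 8 at a time), push them onto the hardware
 * queue, and lazily reclaim completed descriptors along the way.
 */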
int ipath_user_sdma_writev(struct ipath_devdata *dd,
			   struct ipath_user_sdma_queue *pq,
			   const struct iovec *iov,
			   unsigned long dim)
{
	int ret = 0;
	struct list_head list;
	int npkts = 0;

	INIT_LIST_HEAD(&list);

	mutex_lock(&pq->lock);

	if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
		ipath_user_sdma_hwqueue_clean(dd);
		ipath_user_sdma_queue_clean(dd, pq);
	}

	while (dim) {
		const int mxp = 8;

		ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
		if (ret <= 0)
			goto done_unlock;
		else {
			dim -= ret;
			iov += ret;
		}

		/* force packets onto the sdma hw queue... */
		if (!list_empty(&list)) {
			/*
			 * lazily clean hw queue.  the 4 is a guess of about
			 * how many sdma descriptors a packet will take (it
			 * doesn't have to be perfect).
			 */
			if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
				ipath_user_sdma_hwqueue_clean(dd);
				ipath_user_sdma_queue_clean(dd, pq);
			}

			ret = ipath_user_sdma_push_pkts(dd, pq, &list);
			if (ret < 0)
				goto done_unlock;
			else {
				npkts += ret;
				pq->counter += ret;

				if (!list_empty(&list))
					goto done_unlock;
			}
		}
	}

done_unlock:
	if (!list_empty(&list))
		ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
	mutex_unlock(&pq->lock);

	return (ret < 0) ? ret : npkts;
}

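/* reclaim completed descriptors and packets; returns the number of packets cleaned */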
int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
				  struct ipath_user_sdma_queue *pq)
{
	int ret = 0;

	mutex_lock(&pq->lock);
	ipath_user_sdma_hwqueue_clean(dd);
	ret = ipath_user_sdma_queue_clean(dd, pq);
	mutex_unlock(&pq->lock);

	return ret;
}

u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
{
	return pq->sent_counter;
}

u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
{
	return pq->counter;
}