/******************************************************************************
 *
 * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 *
 ******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
#include <drv_types.h>
#include <wifi.h>
#include <osdep_intf.h>
#include <usb_ops_linux.h>
#include <rtl8188e_hal.h>

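/* Set up the USB TX path for this adapter: bind the xmit tasklet to
 * rtl8188eu_xmit_tasklet so queued frames are pushed out in softirq context.
 */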
s32	rtl8188eu_init_xmit_priv(struct adapter *adapt)
{
	struct xmit_priv	*pxmitpriv = &adapt->xmitpriv;

	tasklet_init(&pxmitpriv->xmit_tasklet,
		     (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
		     (unsigned long)adapt);
	return _SUCCESS;
}

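/* Return 1 when (sz + TXDESC_SIZE) is an exact multiple of the USB bulk-out
 * size, 0 otherwise.  The caller keeps the reserved 8-byte packet offset in
 * the former case, presumably so the bulk transfer does not end exactly on
 * an endpoint boundary (avoiding the zero-length-packet corner case).
 */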
static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
{
	u8 set_tx_desc_offset;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);

	set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;

	return set_tx_desc_offset;
}

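/* TX descriptor checksum: clear the checksum field, XOR the descriptor's
 * first 16 16-bit words (32 bytes) and store the result in the low 16 bits
 * of txdw7.  The USB/firmware interface drops descriptors whose checksum
 * does not match.
 */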
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc	*ptxdesc)
{
	u16	*usptr = (u16 *)ptxdesc;
	u32 count = 16;		/*  (32 bytes / 2 bytes per XOR) => 16 times */
	u32 index;
	u16 checksum = 0;

	/* Clear first */
	ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);

	for (index = 0; index < count; index++)
		checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
	ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
}

/*  Description: In normal chip, we should send some packets to Hw which will be used by Fw */
/*			in FW LPS mode. This function fills the Tx descriptor of these packets, so that */
/*			Fw can tell Hw to send them directly. */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8  ispspoll, u8  is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/*  Clear all status */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */

	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */

	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /*  Buffer size + command header */

	/* offset 4 */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /*  Fixed queue of Mgnt queue */

	/* Set NAVUSEHDR to prevent the PS-Poll AID field from being changed to an error value by Hw. */
	if (ispspoll) {
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /*  Hw set sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
	}

	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /*  BT NULL */

	/* offset 16 */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */

	/*  USB interface drops the packet if the checksum of the descriptor isn't correct. */
	/*  Using this checksum lets hardware recover from bulk out errors (e.g. cancelled URB, bulk out error). */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}

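/* Program the SEC_TYPE field of txdw1 from the per-packet cipher when the
 * frame is encrypted in hardware (bswenc not set): 1 = WEP40/WEP104/TKIP,
 * 3 = AES.  The A-MPDU density in txdw2 is set to 0x7 for encrypted frames.
 * Software-encrypted and open frames are left untouched.
 */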
static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
{
	if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
		switch (pattrib->encrypt) {
		/* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
		case _WEP40_:
		case _WEP104_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _TKIP_:
		case _TKIP_WTMIC_:
			ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _AES_:
			ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
			ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
			break;
		case _NO_PRIVACY_:
		default:
			break;
		}
	}
}

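/* Set the protection bits of the given descriptor dword (txdw4 in practice)
 * from the virtual carrier sense mode: RTS_EN for RTS/CTS, CTS_2_SELF for
 * CTS-to-self.  When protection is on, HW RTS is also enabled and the RTS
 * bandwidth / sub-carrier bits are programmed for HT40 operation.
 */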
static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
{
	switch (pattrib->vcs_mode) {
	case RTS_CTS:
		*pdw |= cpu_to_le32(RTS_EN);
		break;
	case CTS_TO_SELF:
		*pdw |= cpu_to_le32(CTS_2_SELF);
		break;
	case NONE_VCS:
	default:
		break;
	}
	if (pattrib->vcs_mode) {
		*pdw |= cpu_to_le32(HW_RTS_EN);
		/*  Set RTS BW */
		if (pattrib->ht_en) {
			*pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ?	cpu_to_le32(BIT(27)) : 0;

			if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
				*pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
				*pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
			else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
				*pdw |= 0;
			else
				*pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
		}
	}
}

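/* Set the data bandwidth bit and the data sub-carrier mode of the given
 * descriptor dword for HT frames, based on 20/40 MHz operation and the
 * primary channel offset.
 */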
static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
{
	if (pattrib->ht_en) {
		*pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ?	cpu_to_le32(BIT(25)) : 0;

		if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
			*pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
			*pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
		else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
			*pdw |= 0;
		else
			*pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
	}
}

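/* Fill the 32-byte TX descriptor that precedes a frame in the xmit buffer.
 * In normal (non-MP) mode, a non-aggregated frame whose length would not
 * make the bulk transfer an exact multiple of the bulk-out size has its
 * descriptor pulled forward by PACKET_OFFSET_SZ bytes; the return value
 * (0 or 1) tells the caller whether that pull happened so it can adjust
 * the buffer address and write size.
 */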
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int	pull = 0;
	uint	qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter		*adapt = pxmitframe->padapter;
	struct pkt_attrib	*pattrib = &pxmitframe->attrib;
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct tx_desc	*ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv	*pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info	*pmlmeinfo = &(pmlmeext->mlmext_info);
	int	bmcst = IS_MCAST(pattrib->ra);

	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* 4 offset 0 */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */

	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/*  pkt_offset, unit:8 bytes padding */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* driver uses rate */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
		}

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16 , offset 20 */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */

		/* offset 20 */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Data packet that is not EAP, ARP, or DHCP */

			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/*  EAP, ARP and DHCP packets. */
			/*  Send them at the basic rate (pmlmeext->tx_rate, typically 1M) */
			/*  so the handshake is more likely to succeed. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/*  DATA_SHORT */
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8 */
		/* CCX-TXRPT ack for xmit mgmt frames. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* offset 4 */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */

		/* offset 8 */

		/* offset 12 */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20 */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/*  2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
	/*  (1) The sequence number of each non-QoS frame / broadcast / multicast / */
	/*  mgnt frame should be controlled by Hw because Fw will also send null data */
	/*  which we cannot control when Fw LPS is enabled. */
	/*  --> enable HW sequence numbering for non-QoS data by default. 2010.06.23. by tynli. */
	/*  (2) Enable HW SEQ control for beacon packets, because we use Hw beacon. */
	/*  (3) Use HW QoS SEQ to control the seq num of Ext port non-QoS packets. */
	/*  2010.06.23. Added by tynli. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /*  Hw set sequence number */
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);	/*  Hw set sequence number */
	}

	rtl88eu_dm_set_tx_ant_by_tx_info(&haldata->odmpriv, pmem,
					 pattrib->mac_id);

	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}

/* for non-aggregated data frames or management frames */
static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 ret = _SUCCESS;
	s32 inner_ret = _SUCCESS;
	int t, sz, w_sz, pull = 0;
	u8 *mem_addr;
	u32 ff_hwaddr;
	struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct security_priv *psecuritypriv = &adapt->securitypriv;

	if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
	    (pxmitframe->attrib.ether_type != 0x0806) &&
	    (pxmitframe->attrib.ether_type != 0x888e) &&
	    (pxmitframe->attrib.ether_type != 0x88b4) &&
	    (pxmitframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pxmitframe);
	mem_addr = pxmitframe->buf_addr;

	RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));

	for (t = 0; t < pattrib->nr_frags; t++) {
		if (inner_ret != _SUCCESS && ret == _SUCCESS)
			ret = _FAIL;

		if (t != (pattrib->nr_frags - 1)) {
			RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));

			sz = pxmitpriv->frag_len;
			sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
		} else {
			/* no frag */
			sz = pattrib->last_txcmdsz;
		}

		pull = update_txdesc(pxmitframe, mem_addr, sz, false);

		if (pull) {
			mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
			pxmitframe->buf_addr = mem_addr;
			w_sz = sz + TXDESC_SIZE;
		} else {
			w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
		}
		ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);

		inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);

		rtw_count_tx_stats(adapt, pxmitframe, sz);

		RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));

		mem_addr += w_sz;

		mem_addr = (u8 *) round_up((size_t)mem_addr, 4);
	}

	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	if (ret != _SUCCESS)
		rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);

	return ret;
}

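/* Estimate how many bytes the frame will occupy in the xmit buffer:
 * 802.11 header + IV + SNAP/LLC header + ethertype + payload, plus the ICV
 * when encryption is done in software and 8 extra bytes (MIC) for TKIP.
 * Fragmentation is not considered.
 */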
static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	u32 len = 0;

	/*  fragmentation is not considered */
	len = pattrib->hdrlen + pattrib->iv_len +
		SNAP_SIZE + sizeof(u16) +
		pattrib->pktlen +
		((pattrib->bswenc) ? pattrib->icv_len : 0);

	if (pattrib->encrypt == _TKIP_)
		len += 8;

	return len;
}

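/* USB TX aggregation: dequeue one frame, then keep pulling frames of the
 * same priority and destination from the station's pending queue, packing
 * each one (descriptor + payload, rounded up to 8 bytes) behind the first
 * frame in a single xmit buffer.  The whole buffer is then written to the
 * USB FIFO in one bulk transfer.  Returns true when a buffer was sent,
 * false when nothing was pending or no xmit buffer was available.
 */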
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	struct hal_data_8188e	*haldata = GET_HAL_DATA(adapt);
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;

	/*  aggregation variables */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/*  next pkt address */
	u32 pbuf_tail;	/*  last pkt tail */
	u32 len;	/*  packet length, except TXDESC_SIZE and PKT_OFFSET */

	u32 bulksize = haldata->UsbBulkOutSize;
	u8 desc_cnt;
	u32 bulkptr;

	/*  dump frame variables */
	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	/*  make sure an xmit buffer is available */
	if (pxmitbuf == NULL) {
		pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
		if (pxmitbuf == NULL)
			return false;
	}

	/* 3 1. pick up first frame */
	do {
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
		if (pxmitframe == NULL) {
			/*  no more xmit frames, release xmit buffer */
			rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
			return false;
		}

		pxmitframe->pxmitbuf = pxmitbuf;
		pxmitframe->buf_addr = pxmitbuf->pbuf;
		pxmitbuf->priv_data = pxmitframe;

		pxmitframe->agg_num = 1; /*  first frame of the aggregate: count starts at 1 */
		pxmitframe->pkt_offset = 1; /*  first frame of aggregation, reserve offset */

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		break;
	} while (1);

	/* 3 2. aggregate same priority and same DA(AP or STA) frames */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);

	/*  check pkt amount in one bulk */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize; /*  round to next bulksize */
	}

	/*  dequeue same priority packet from station tx queue */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		pxmitframe->agg_num = 0; /*  not first frame of aggregation */
		pxmitframe->pkt_offset = 0; /*  not first frame of aggregation, no need to reserve offset */

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
		/*  always return ndis_packet after rtw_xmitframe_coalesce */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/*  (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/*  don't need xmitframe any more */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  handle pointer and stop condition */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == haldata->UsbTxAggDescNum)
				break;
		} else {
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	} /* end while (aggregate same priority and same DA(AP or STA) frames) */

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);
	/* 3 3. update first frame txdesc */
	if ((pbuf_tail % bulksize) == 0) {
		/*  remove pkt_offset */
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* 3 4. write xmit buffer to USB FIFO */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);

	/* 3 5. update statistics */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}

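/* Coalesce one frame into its xmit buffer and hand it straight to
 * rtw_dump_xframe() for immediate transmission (no aggregation).
 */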
static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res = _SUCCESS;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
	if (res == _SUCCESS)
		rtw_dump_xframe(adapt, pxmitframe);
	else
		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
	return res;
}

/*
 * Return
 *	true	dump packet directly
 *	false	enqueue packet
 */
static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		goto enqueue;

	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/*  Trick, make the statistics correct */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}

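/* Management frames bypass the aggregation path and are dumped to the
 * USB FIFO directly.
 */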
s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
{
	return rtw_dump_xframe(adapt, pmgntframe);
}

/*
 * Return
 *	true	packet dumped to hardware directly
 *	false	packet enqueued; temporarily unable to transmit to hardware
 */
s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	return pre_xmitframe(adapt, pxmitframe);
}