/*
 * Marvell Wireless LAN device driver: WMM
 *
 * Copyright (C) 2011-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License").  You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED.  The License provides additional details about
 * this warranty disclaimer.
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"


/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX		512


#define WMM_QUEUED_PACKET_LOWER_LIMIT	180

#define WMM_QUEUED_PACKET_UPPER_LIMIT	200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

/* Module parameter: when true, A-MSDU aggregation on Tx is never allowed
 * (see mwifiex_wmm_init below).
 */
static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

/* WMM information IE (Microsoft OUI 00:50:f2, OUI type 2) as sent in the
 * association request; byte 0 is the element ID, byte 1 the IE length.
 */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
	0x00, 0x50, 0xf2, 0x02,
	0x00, 0x01, 0x00
};

/* Maps the 2-bit ACI value from an AC parameter record to the driver's
 * WMM queue index.
 */
static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
	WMM_AC_BK,
	WMM_AC_VI,
	WMM_AC_VO
};

/* Default mapping of the 3 high bits of the IP TOS field to a TID.
 * Rewritten at runtime by mwifiex_wmm_queue_priorities_tid() and restored
 * from ac_to_tid by mwifiex_clean_txrx().
 */
static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

/* The two TIDs carried by each WMM AC, lowest AC (BK) first */
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

/*
 * This function debug prints the priority parameters for a WMM AC.
 *
 * ACI lives in bits 5-6 of aci_aifsn_bitmap, ACM in bit 4 and AIFSN in
 * the low bits; ECW min/max occupy the low/high nibble of ecw_bitmap
 * (as implied by the masks and shifts below).
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a route address list.
 *
 * The function also initializes the list with the provided RA.
 * GFP_ATOMIC is used because callers may hold spinlocks (e.g. the
 * ra_list_spinlock held on the Tx-queue path).  Returns NULL on
 * allocation failure; the caller owns the returned node.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns random no between 16 and 32 to be used as threshold
 * for no of packets after which BA setup is initiated.
117 */ 118static u8 mwifiex_get_random_ba_threshold(void) 119{ 120 u32 sec, usec; 121 struct timeval ba_tstamp; 122 u8 ba_threshold; 123 124 /* setup ba_packet_threshold here random number between 125 * [BA_SETUP_PACKET_OFFSET, 126 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1] 127 */ 128 129 do_gettimeofday(&ba_tstamp); 130 sec = (ba_tstamp.tv_sec & 0xFFFF) + (ba_tstamp.tv_sec >> 16); 131 usec = (ba_tstamp.tv_usec & 0xFFFF) + (ba_tstamp.tv_usec >> 16); 132 ba_threshold = (((sec << 16) + usec) % BA_SETUP_MAX_PACKET_THRESHOLD) 133 + BA_SETUP_PACKET_OFFSET; 134 135 return ba_threshold; 136} 137 138/* 139 * This function allocates and adds a RA list for all TIDs 140 * with the given RA. 141 */ 142void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra) 143{ 144 int i; 145 struct mwifiex_ra_list_tbl *ra_list; 146 struct mwifiex_adapter *adapter = priv->adapter; 147 struct mwifiex_sta_node *node; 148 unsigned long flags; 149 150 spin_lock_irqsave(&priv->sta_list_spinlock, flags); 151 node = mwifiex_get_sta_entry(priv, ra); 152 spin_unlock_irqrestore(&priv->sta_list_spinlock, flags); 153 154 for (i = 0; i < MAX_NUM_TID; ++i) { 155 ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra); 156 dev_dbg(adapter->dev, "info: created ra_list %p\n", ra_list); 157 158 if (!ra_list) 159 break; 160 161 ra_list->is_11n_enabled = 0; 162 ra_list->tdls_link = false; 163 if (!mwifiex_queuing_ra_based(priv)) { 164 if (mwifiex_get_tdls_link_status(priv, ra) == 165 TDLS_SETUP_COMPLETE) { 166 ra_list->tdls_link = true; 167 ra_list->is_11n_enabled = 168 mwifiex_tdls_peer_11n_enabled(priv, ra); 169 } else { 170 ra_list->is_11n_enabled = IS_11N_ENABLED(priv); 171 } 172 } else { 173 ra_list->is_11n_enabled = 174 mwifiex_is_sta_11n_enabled(priv, node); 175 if (ra_list->is_11n_enabled) 176 ra_list->max_amsdu = node->max_amsdu; 177 } 178 179 dev_dbg(adapter->dev, "data: ralist %p: is_11n_enabled=%d\n", 180 ra_list, ra_list->is_11n_enabled); 181 182 if 
(ra_list->is_11n_enabled) { 183 ra_list->ba_pkt_count = 0; 184 ra_list->ba_packet_thr = 185 mwifiex_get_random_ba_threshold(); 186 } 187 list_add_tail(&ra_list->list, 188 &priv->wmm.tid_tbl_ptr[i].ra_list); 189 } 190} 191 192/* 193 * This function sets the WMM queue priorities to their default values. 194 */ 195static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv) 196{ 197 /* Default queue priorities: VO->VI->BE->BK */ 198 priv->wmm.queue_priority[0] = WMM_AC_VO; 199 priv->wmm.queue_priority[1] = WMM_AC_VI; 200 priv->wmm.queue_priority[2] = WMM_AC_BE; 201 priv->wmm.queue_priority[3] = WMM_AC_BK; 202} 203 204/* 205 * This function map ACs to TIDs. 206 */ 207static void 208mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv) 209{ 210 struct mwifiex_wmm_desc *wmm = &priv->wmm; 211 u8 *queue_priority = wmm->queue_priority; 212 int i; 213 214 for (i = 0; i < 4; ++i) { 215 tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1]; 216 tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0]; 217 } 218 219 for (i = 0; i < MAX_NUM_TID; ++i) 220 priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i; 221 222 atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID); 223} 224 225/* 226 * This function initializes WMM priority queues. 
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	dev_dbg(priv->adapter->dev, "info: WMM Parameter IE: version=%d, "
		"qos_info Parameter Set Count=%d, Reserved=%#x\n",
		wmm_ie->vend_hdr.version, wmm_ie->qos_info_bitmap &
		IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		wmm_ie->reserved);

	/* Compute the average back-off (half of CWmin plus AIFSN) for each
	 * of the four AC parameter records in the IE; tmp[] is indexed by
	 * AC so the sort below can reorder queue_priority by back-off.
	 */
	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		/* CWmin is 2^ECWmin - 1 */
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		dev_dbg(priv->adapter->dev,
			"info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			(1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort: order queue_priority by ascending average back-off;
	 * on ties the numerically higher AC wins the earlier slot.
	 */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}

/*
 * This function evaluates whether or not an AC is to be downgraded.
286 * 287 * In case the AC is not enabled, the highest AC is returned that is 288 * enabled and does not require admission control. 289 */ 290static enum mwifiex_wmm_ac_e 291mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv, 292 enum mwifiex_wmm_ac_e eval_ac) 293{ 294 int down_ac; 295 enum mwifiex_wmm_ac_e ret_ac; 296 struct mwifiex_wmm_ac_status *ac_status; 297 298 ac_status = &priv->wmm.ac_status[eval_ac]; 299 300 if (!ac_status->disabled) 301 /* Okay to use this AC, its enabled */ 302 return eval_ac; 303 304 /* Setup a default return value of the lowest priority */ 305 ret_ac = WMM_AC_BK; 306 307 /* 308 * Find the highest AC that is enabled and does not require 309 * admission control. The spec disallows downgrading to an AC, 310 * which is enabled due to a completed admission control. 311 * Unadmitted traffic is not to be sent on an AC with admitted 312 * traffic. 313 */ 314 for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) { 315 ac_status = &priv->wmm.ac_status[down_ac]; 316 317 if (!ac_status->disabled && !ac_status->flow_required) 318 /* AC is enabled and does not require admission 319 control */ 320 ret_ac = (enum mwifiex_wmm_ac_e) down_ac; 321 } 322 323 return ret_ac; 324} 325 326/* 327 * This function downgrades WMM priority queue. 
328 */ 329void 330mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv) 331{ 332 int ac_val; 333 334 dev_dbg(priv->adapter->dev, "info: WMM: AC Priorities:" 335 "BK(0), BE(1), VI(2), VO(3)\n"); 336 337 if (!priv->wmm_enabled) { 338 /* WMM is not enabled, default priorities */ 339 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) 340 priv->wmm.ac_down_graded_vals[ac_val] = 341 (enum mwifiex_wmm_ac_e) ac_val; 342 } else { 343 for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) { 344 priv->wmm.ac_down_graded_vals[ac_val] 345 = mwifiex_wmm_eval_downgrade_ac(priv, 346 (enum mwifiex_wmm_ac_e) ac_val); 347 dev_dbg(priv->adapter->dev, 348 "info: WMM: AC PRIO %d maps to %d\n", 349 ac_val, priv->wmm.ac_down_graded_vals[ac_val]); 350 } 351 } 352} 353 354/* 355 * This function converts the IP TOS field to an WMM AC 356 * Queue assignment. 357 */ 358static enum mwifiex_wmm_ac_e 359mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos) 360{ 361 /* Map of TOS UP values to WMM AC */ 362 const enum mwifiex_wmm_ac_e tos_to_ac[] = { WMM_AC_BE, 363 WMM_AC_BK, 364 WMM_AC_BK, 365 WMM_AC_BE, 366 WMM_AC_VI, 367 WMM_AC_VI, 368 WMM_AC_VO, 369 WMM_AC_VO 370 }; 371 372 if (tos >= ARRAY_SIZE(tos_to_ac)) 373 return WMM_AC_BE; 374 375 return tos_to_ac[tos]; 376} 377 378/* 379 * This function evaluates a given TID and downgrades it to a lower 380 * TID if the WMM Parameter IE received from the AP indicates that the 381 * AP is disabled (due to call admission control (ACM bit). Mapping 382 * of TID to AC is taken care of internally. 
383 */ 384u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid) 385{ 386 enum mwifiex_wmm_ac_e ac, ac_down; 387 u8 new_tid; 388 389 ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid); 390 ac_down = priv->wmm.ac_down_graded_vals[ac]; 391 392 /* Send the index to tid array, picking from the array will be 393 * taken care by dequeuing function 394 */ 395 new_tid = ac_to_tid[ac_down][tid % 2]; 396 397 return new_tid; 398} 399 400/* 401 * This function initializes the WMM state information and the 402 * WMM data path queues. 403 */ 404void 405mwifiex_wmm_init(struct mwifiex_adapter *adapter) 406{ 407 int i, j; 408 struct mwifiex_private *priv; 409 410 for (j = 0; j < adapter->priv_num; ++j) { 411 priv = adapter->priv[j]; 412 if (!priv) 413 continue; 414 415 for (i = 0; i < MAX_NUM_TID; ++i) { 416 if (!disable_tx_amsdu && 417 adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K) 418 priv->aggr_prio_tbl[i].amsdu = 419 priv->tos_to_tid_inv[i]; 420 else 421 priv->aggr_prio_tbl[i].amsdu = 422 BA_STREAM_NOT_ALLOWED; 423 priv->aggr_prio_tbl[i].ampdu_ap = 424 priv->tos_to_tid_inv[i]; 425 priv->aggr_prio_tbl[i].ampdu_user = 426 priv->tos_to_tid_inv[i]; 427 } 428 429 mwifiex_set_ba_params(priv); 430 mwifiex_reset_11n_rx_seq_num(priv); 431 432 atomic_set(&priv->wmm.tx_pkts_queued, 0); 433 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); 434 } 435} 436 437/* 438 * This function checks if WMM Tx queue is empty. 439 */ 440int 441mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) 442{ 443 int i; 444 struct mwifiex_private *priv; 445 446 for (i = 0; i < adapter->priv_num; ++i) { 447 priv = adapter->priv[i]; 448 if (priv && atomic_read(&priv->wmm.tx_pkts_queued)) 449 return false; 450 } 451 452 return true; 453} 454 455/* 456 * This function deletes all packets in an RA list node. 457 * 458 * The packet sent completion callback handler are called with 459 * status failure, after they are dequeued to ensure proper 460 * cleanup. 
The RA list node itself is freed at the end. 461 */ 462static void 463mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv, 464 struct mwifiex_ra_list_tbl *ra_list) 465{ 466 struct mwifiex_adapter *adapter = priv->adapter; 467 struct sk_buff *skb, *tmp; 468 469 skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) 470 mwifiex_write_data_complete(adapter, skb, 0, -1); 471} 472 473/* 474 * This function deletes all packets in an RA list. 475 * 476 * Each nodes in the RA list are freed individually first, and then 477 * the RA list itself is freed. 478 */ 479static void 480mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv, 481 struct list_head *ra_list_head) 482{ 483 struct mwifiex_ra_list_tbl *ra_list; 484 485 list_for_each_entry(ra_list, ra_list_head, list) 486 mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list); 487} 488 489/* 490 * This function deletes all packets in all RA lists. 491 */ 492static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv) 493{ 494 int i; 495 496 for (i = 0; i < MAX_NUM_TID; i++) 497 mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i]. 498 ra_list); 499 500 atomic_set(&priv->wmm.tx_pkts_queued, 0); 501 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); 502} 503 504/* 505 * This function deletes all route addresses from all RA lists. 506 */ 507static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv) 508{ 509 struct mwifiex_ra_list_tbl *ra_list, *tmp_node; 510 int i; 511 512 for (i = 0; i < MAX_NUM_TID; ++i) { 513 dev_dbg(priv->adapter->dev, 514 "info: ra_list: freeing buf for tid %d\n", i); 515 list_for_each_entry_safe(ra_list, tmp_node, 516 &priv->wmm.tid_tbl_ptr[i].ra_list, 517 list) { 518 list_del(&ra_list->list); 519 kfree(ra_list); 520 } 521 522 INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list); 523 } 524} 525 526/* 527 * This function cleans up the Tx and Rx queues. 
 *
 * Cleanup includes -
 *	- All packets in RA lists
 *	- All entries in Rx reorder table
 *	- All entries in Tx BA stream table
 *	- MPA buffer (if required)
 *	- All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	unsigned long flags;
	struct sk_buff *skb, *tmp;

	/* Reorder-table cleanup runs before taking ra_list_spinlock;
	 * everything below up to the unlock is done under that lock.
	 */
	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	/* Restore the default TOS->TID mapping (tos_to_tid may have been
	 * rewritten by mwifiex_wmm_queue_priorities_tid)
	 */
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !priv->adapter->surprise_removed)
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);

	/* Fail any packets still parked on the TDLS Tx queue */
	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 *
 * Returns NULL when no node with that RA exists for the TID.  Caller is
 * expected to hold the appropriate locking for the RA list.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
588 */ 589struct mwifiex_ra_list_tbl * 590mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, 591 const u8 *ra_addr) 592{ 593 struct mwifiex_ra_list_tbl *ra_list; 594 595 ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr); 596 if (ra_list) 597 return ra_list; 598 mwifiex_ralist_add(priv, ra_addr); 599 600 return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr); 601} 602 603/* 604 * This function checks if a particular RA list node exists in a given TID 605 * table index. 606 */ 607int 608mwifiex_is_ralist_valid(struct mwifiex_private *priv, 609 struct mwifiex_ra_list_tbl *ra_list, int ptr_index) 610{ 611 struct mwifiex_ra_list_tbl *rlist; 612 613 list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list, 614 list) { 615 if (rlist == ra_list) 616 return true; 617 } 618 619 return false; 620} 621 622/* 623 * This function adds a packet to WMM queue. 624 * 625 * In disconnected state the packet is immediately dropped and the 626 * packet send completion callback is called with status failure. 627 * 628 * Otherwise, the correct RA list node is located and the packet 629 * is queued at the list tail. 630 */ 631void 632mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv, 633 struct sk_buff *skb) 634{ 635 struct mwifiex_adapter *adapter = priv->adapter; 636 u32 tid; 637 struct mwifiex_ra_list_tbl *ra_list; 638 u8 ra[ETH_ALEN], tid_down; 639 unsigned long flags; 640 struct list_head list_head; 641 int tdls_status = TDLS_NOT_SETUP; 642 struct ethhdr *eth_hdr = (struct ethhdr *)skb->data; 643 struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb); 644 645 memcpy(ra, eth_hdr->h_dest, ETH_ALEN); 646 647 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA && 648 ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) { 649 if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS) 650 dev_dbg(adapter->dev, 651 "TDLS setup packet for %pM. 
Don't block\n", ra); 652 else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN)) 653 tdls_status = mwifiex_get_tdls_link_status(priv, ra); 654 } 655 656 if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) { 657 dev_dbg(adapter->dev, "data: drop packet in disconnect\n"); 658 mwifiex_write_data_complete(adapter, skb, 0, -1); 659 return; 660 } 661 662 tid = skb->priority; 663 664 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags); 665 666 tid_down = mwifiex_wmm_downgrade_tid(priv, tid); 667 668 /* In case of infra as we have already created the list during 669 association we just don't have to call get_queue_raptr, we will 670 have only 1 raptr for a tid in case of infra */ 671 if (!mwifiex_queuing_ra_based(priv) && 672 !mwifiex_is_skb_mgmt_frame(skb)) { 673 switch (tdls_status) { 674 case TDLS_SETUP_COMPLETE: 675 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, 676 ra); 677 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 678 break; 679 case TDLS_SETUP_INPROGRESS: 680 skb_queue_tail(&priv->tdls_txq, skb); 681 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 682 flags); 683 return; 684 default: 685 list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list; 686 if (!list_empty(&list_head)) 687 ra_list = list_first_entry( 688 &list_head, struct mwifiex_ra_list_tbl, 689 list); 690 else 691 ra_list = NULL; 692 break; 693 } 694 } else { 695 memcpy(ra, skb->data, ETH_ALEN); 696 if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb)) 697 memset(ra, 0xff, ETH_ALEN); 698 ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra); 699 } 700 701 if (!ra_list) { 702 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 703 mwifiex_write_data_complete(adapter, skb, 0, -1); 704 return; 705 } 706 707 skb_queue_tail(&ra_list->skb_head, skb); 708 709 ra_list->ba_pkt_count++; 710 ra_list->total_pkt_count++; 711 712 if (atomic_read(&priv->wmm.highest_queued_prio) < 713 priv->tos_to_tid_inv[tid_down]) 714 atomic_set(&priv->wmm.highest_queued_prio, 715 
priv->tos_to_tid_inv[tid_down]); 716 717 atomic_inc(&priv->wmm.tx_pkts_queued); 718 719 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); 720} 721 722/* 723 * This function processes the get WMM status command response from firmware. 724 * 725 * The response may contain multiple TLVs - 726 * - AC Queue status TLVs 727 * - Current WMM Parameter IE TLV 728 * - Admission Control action frame TLVs 729 * 730 * This function parses the TLVs and then calls further specific functions 731 * to process any changes in the queue prioritize or state. 732 */ 733int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv, 734 const struct host_cmd_ds_command *resp) 735{ 736 u8 *curr = (u8 *) &resp->params.get_wmm_status; 737 uint16_t resp_len = le16_to_cpu(resp->size), tlv_len; 738 bool valid = true; 739 740 struct mwifiex_ie_types_data *tlv_hdr; 741 struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus; 742 struct ieee_types_wmm_parameter *wmm_param_ie = NULL; 743 struct mwifiex_wmm_ac_status *ac_status; 744 745 dev_dbg(priv->adapter->dev, "info: WMM: WMM_GET_STATUS cmdresp received: %d\n", 746 resp_len); 747 748 while ((resp_len >= sizeof(tlv_hdr->header)) && valid) { 749 tlv_hdr = (struct mwifiex_ie_types_data *) curr; 750 tlv_len = le16_to_cpu(tlv_hdr->header.len); 751 752 if (resp_len < tlv_len + sizeof(tlv_hdr->header)) 753 break; 754 755 switch (le16_to_cpu(tlv_hdr->header.type)) { 756 case TLV_TYPE_WMMQSTATUS: 757 tlv_wmm_qstatus = 758 (struct mwifiex_ie_types_wmm_queue_status *) 759 tlv_hdr; 760 dev_dbg(priv->adapter->dev, 761 "info: CMD_RESP: WMM_GET_STATUS:" 762 " QSTATUS TLV: %d, %d, %d\n", 763 tlv_wmm_qstatus->queue_index, 764 tlv_wmm_qstatus->flow_required, 765 tlv_wmm_qstatus->disabled); 766 767 ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus-> 768 queue_index]; 769 ac_status->disabled = tlv_wmm_qstatus->disabled; 770 ac_status->flow_required = 771 tlv_wmm_qstatus->flow_required; 772 ac_status->flow_created = tlv_wmm_qstatus->flow_created; 773 break; 
774 775 case WLAN_EID_VENDOR_SPECIFIC: 776 /* 777 * Point the regular IEEE IE 2 bytes into the Marvell IE 778 * and setup the IEEE IE type and length byte fields 779 */ 780 781 wmm_param_ie = 782 (struct ieee_types_wmm_parameter *) (curr + 783 2); 784 wmm_param_ie->vend_hdr.len = (u8) tlv_len; 785 wmm_param_ie->vend_hdr.element_id = 786 WLAN_EID_VENDOR_SPECIFIC; 787 788 dev_dbg(priv->adapter->dev, 789 "info: CMD_RESP: WMM_GET_STATUS:" 790 " WMM Parameter Set Count: %d\n", 791 wmm_param_ie->qos_info_bitmap & 792 IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK); 793 794 memcpy((u8 *) &priv->curr_bss_params.bss_descriptor. 795 wmm_ie, wmm_param_ie, 796 wmm_param_ie->vend_hdr.len + 2); 797 798 break; 799 800 default: 801 valid = false; 802 break; 803 } 804 805 curr += (tlv_len + sizeof(tlv_hdr->header)); 806 resp_len -= (tlv_len + sizeof(tlv_hdr->header)); 807 } 808 809 mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie); 810 mwifiex_wmm_setup_ac_downgrade(priv); 811 812 return 0; 813} 814 815/* 816 * Callback handler from the command module to allow insertion of a WMM TLV. 817 * 818 * If the BSS we are associating to supports WMM, this function adds the 819 * required WMM Information IE to the association request command buffer in 820 * the form of a Marvell extended IEEE IE. 
821 */ 822u32 823mwifiex_wmm_process_association_req(struct mwifiex_private *priv, 824 u8 **assoc_buf, 825 struct ieee_types_wmm_parameter *wmm_ie, 826 struct ieee80211_ht_cap *ht_cap) 827{ 828 struct mwifiex_ie_types_wmm_param_set *wmm_tlv; 829 u32 ret_len = 0; 830 831 /* Null checks */ 832 if (!assoc_buf) 833 return 0; 834 if (!(*assoc_buf)) 835 return 0; 836 837 if (!wmm_ie) 838 return 0; 839 840 dev_dbg(priv->adapter->dev, 841 "info: WMM: process assoc req: bss->wmm_ie=%#x\n", 842 wmm_ie->vend_hdr.element_id); 843 844 if ((priv->wmm_required || 845 (ht_cap && (priv->adapter->config_bands & BAND_GN || 846 priv->adapter->config_bands & BAND_AN))) && 847 wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) { 848 wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf; 849 wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]); 850 wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]); 851 memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2], 852 le16_to_cpu(wmm_tlv->header.len)); 853 if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) 854 memcpy((u8 *) (wmm_tlv->wmm_ie 855 + le16_to_cpu(wmm_tlv->header.len) 856 - sizeof(priv->wmm_qosinfo)), 857 &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo)); 858 859 ret_len = sizeof(wmm_tlv->header) 860 + le16_to_cpu(wmm_tlv->header.len); 861 862 *assoc_buf += ret_len; 863 } 864 865 return ret_len; 866} 867 868/* 869 * This function computes the time delay in the driver queues for a 870 * given packet. 871 * 872 * When the packet is received at the OS/Driver interface, the current 873 * time is set in the packet structure. The difference between the present 874 * time and that received time is computed in this function and limited 875 * based on pre-compiled limits in the driver. 
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 * by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	dev_dbg(priv->adapter->dev, "data: WMM: Pkt Delay: %d ms,"
		" %d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}

/*
 * This function retrieves the highest priority RA list table pointer.
 *
 * Scans BSS priority tables from highest to lowest, then each BSS's WMM
 * queues from the highest queued priority down, and returns the first
 * RA list with a pending skb.  On success (*priv and *tid are filled in)
 * both bss_prio_lock and ra_list_spinlock have been taken and released
 * again; on failure NULL is returned with no locks held.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	unsigned long flags_bss, flags_ra;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
				  flags_bss);

		/* iterate over BSS with the equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				/* ra_list_spinlock is taken per queue
				 * iteration, nested inside bss_prio_lock
				 */
				spin_lock_irqsave(&priv_tmp->wmm.
						  ra_list_spinlock, flags_ra);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_irqrestore(&priv_tmp->wmm.
						       ra_list_spinlock,
						       flags_ra);
			}
		}

		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
				       flags_bss);
	}

	return NULL;

found:
	/* holds bss_prio_lock / ra_list_spinlock */
	/* Lower the hint so future scans start no higher than this level */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
			       flags_bss);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This functions rotates ra and bss lists so packets are picked round robin.
 *
 * After a packet is successfully transmitted, rotate the ra list, so the ra
 * next to the one transmitted, will come first in the list. This way we pick
 * the ra' in a round robin fashion. Same applies to bss nodes of equal
 * priority.
 *
 * Function also increments wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ra,
			      int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
	unsigned long flags;

	spin_lock_irqsave(&tbl[priv->bss_priority].bss_prio_lock, flags);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * curr bss node. imagine list to stay fixed while head is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_irqrestore(&tbl[priv->bss_priority].bss_prio_lock, flags);

	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	/* ra may have been freed concurrently; validate before touching it */
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}

/*
 * This function checks if 11n aggregation is possible.
 *
 * (Note: "aggragation" in the function name is a historical misspelling;
 * it is kept because callers elsewhere in this file use it.)
 * Returns true when at least MIN_NUM_AMSDU packets fit below the
 * applicable A-MSDU size limit.
 */
static int
mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	/* In AP mode, the per-peer A-MSDU limit also caps the size */
	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			   unsigned long ra_list_flags)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	/* Called with ra_list_spinlock held; it is released on every
	 * return path (hence the __releases annotation).
	 */
	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		dev_dbg(adapter->dev, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	/* Peek at the next packet so firmware can be told its length */
	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				 sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back at the head */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		/* The RA list may have been freed while the lock was
		 * dropped; if so, complete the skb with failure
		 */
		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	/* Empty RA list: nothing to inspect */
	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	/* A packet requeued after an earlier -EBUSY carries this flag */
	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 *
 * "Processed" means the head skb was already prepared by an earlier TX
 * attempt (MWIFIEX_BUF_FLAG_REQUEUED_PKT is set), so it is handed
 * straight to the interface driver's host_to_card() instead of going
 * through mwifiex_process_tx() again.
 *
 * Called with priv->wmm.ra_list_spinlock held (irqsave state in
 * 'ra_list_flags'); the lock is always released before returning, as
 * annotated by __releases().
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index,
			      unsigned long ra_list_flags)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);

	if (adapter->iface_type == MWIFIEX_USB) {
		/* USB takes no tx_param; mark the interface busy before
		 * handing the skb over.
		 */
		adapter->data_sent = true;
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
						   skb, NULL);
	} else {
		/* Advertise the size of the next transfer to the firmware */
		tx_param.next_pkt_len =
			((skb_next) ? skb_next->len +
			 sizeof(struct txpd) : 0);
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
		/* NOTE(review): on the USB path data_sent stays true here;
		 * presumably cleared elsewhere when the endpoint drains —
		 * confirm against the USB completion handling.
		 */
		spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);

		/* RA list may have vanished while the lock was dropped */
		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
					       ra_list_flags);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
				       ra_list_flags);
		break;
	case -1:
		/* Hard failure: count it and complete the skb with error */
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		dev_err(adapter->dev, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		if (adapter->iface_type != MWIFIEX_PCIE)
			adapter->data_sent = false;
		/* fallthrough */
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;
	unsigned long flags;

	/* Returns -1 when no BSS has anything queued */
	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	dev_dbg(adapter->dev, "data: tid=%d\n", tid);

	/* The lock taken here is handed to whichever helper sends the
	 * packet; each of them releases it (see __releases annotations).
	 */
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
		return -1;
	}

	/* Head packet already prepared by a previous (busy) TX attempt */
	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index, flags);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	/* Path 1: no BA setup to attempt — non-11n peer, BA stream already
	 * established, or a WPS session is in progress.  Send either an
	 * A-MSDU aggregate (when permitted) or a single packet.
	 */
	if (!ptr->is_11n_enabled ||
	    mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
	    priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
		    mwifiex_is_ba_stream_setup(priv, ptr, tid) &&
		    mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) &&
		    mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		/* Path 2: 11n peer without a BA stream.  Once enough
		 * packets have flowed (ba_pkt_count above the random
		 * threshold), try to set one up — deleting another stream
		 * first if no slot is free.
		 */
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggragation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index, flags);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}
	return 0;
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 *
 * Keeps dequeuing until the interface reports busy (data_sent or
 * tx_lock_flag), a dequeue attempt fails, or all WMM lists are empty.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		/* Check if busy */
		if (adapter->data_sent || adapter->tx_lock_flag)
			break;

		if (mwifiex_dequeue_tx_packet(adapter))
			break;
	} while (!mwifiex_wmm_lists_empty(adapter));
}