/*
 * Atheros CARL9170 driver
 *
 * mac80211 interaction code
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <net/mac80211.h>
#include <net/cfg80211.h>
#include "hw.h"
#include "carl9170.h"
#include "cmd.h"

static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");

int modparam_noht;
module_param_named(noht, modparam_noht, int, S_IRUGO);
MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");

#define RATE(_bitrate, _hw_rate, _txpidx, _flags) {		\
	.bitrate	= (_bitrate),				\
	.flags		= (_flags),				\
	.hw_value	= (_hw_rate) | (_txpidx) << 4,		\
}

struct ieee80211_rate __carl9170_ratetable[] = {
	RATE(10, 0, 0, 0),
	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0xb, 0, 0),
	RATE(90, 0xf, 0, 0),
	RATE(120, 0xa, 0, 0),
	RATE(180, 0xe, 0, 0),
	RATE(240, 0x9, 0, 0),
	RATE(360, 0xd, 1, 0),
	RATE(480, 0x8, 2, 0),
	RATE(540, 0xc, 3, 0),
};
#undef RATE

#define carl9170_g_ratetable		(__carl9170_ratetable + 0)
#define carl9170_g_ratetable_size	12
#define carl9170_a_ratetable		(__carl9170_ratetable + 4)
#define carl9170_a_ratetable_size	8

/*
 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
 * array in phy.c so that we don't have to do frequency lookups!
 */
#define CHAN(_freq, _idx) {		\
	.center_freq	= (_freq),	\
	.hw_value	= (_idx),	\
	.max_power	= 18, /* XXX */	\
}

static struct ieee80211_channel carl9170_2ghz_chantable[] = {
	CHAN(2412, 0),
	CHAN(2417, 1),
	CHAN(2422, 2),
	CHAN(2427, 3),
	CHAN(2432, 4),
	CHAN(2437, 5),
	CHAN(2442, 6),
	CHAN(2447, 7),
	CHAN(2452, 8),
	CHAN(2457, 9),
	CHAN(2462, 10),
	CHAN(2467, 11),
	CHAN(2472, 12),
	CHAN(2484, 13),
};

static struct ieee80211_channel carl9170_5ghz_chantable[] = {
	CHAN(4920, 14),
	CHAN(4940, 15),
	CHAN(4960, 16),
	CHAN(4980, 17),
	CHAN(5040, 18),
	CHAN(5060, 19),
	CHAN(5080, 20),
	CHAN(5180, 21),
	CHAN(5200, 22),
	CHAN(5220, 23),
	CHAN(5240, 24),
	CHAN(5260, 25),
	CHAN(5280, 26),
	CHAN(5300, 27),
	CHAN(5320, 28),
	CHAN(5500, 29),
	CHAN(5520, 30),
	CHAN(5540, 31),
	CHAN(5560, 32),
	CHAN(5580, 33),
	CHAN(5600, 34),
	CHAN(5620, 35),
	CHAN(5640, 36),
	CHAN(5660, 37),
	CHAN(5680, 38),
	CHAN(5700, 39),
	CHAN(5745, 40),
	CHAN(5765, 41),
	CHAN(5785, 42),
	CHAN(5805, 43),
	CHAN(5825, 44),
	CHAN(5170, 45),
	CHAN(5190, 46),
	CHAN(5210, 47),
	CHAN(5230, 48),
};
#undef CHAN

#define CARL9170_HT_CAP							\
{									\
	.ht_supported	= true,						\
	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
			  IEEE80211_HT_CAP_SGI_40 |			\
			  IEEE80211_HT_CAP_DSSSCCK40 |			\
			  IEEE80211_HT_CAP_SM_PS,			\
	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
	.mcs		= {						\
		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
		.rx_highest = cpu_to_le16(300),				\
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
	},								\
}

static struct ieee80211_supported_band carl9170_band_2GHz = {
	.channels	= carl9170_2ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
	.bitrates	= carl9170_g_ratetable,
	.n_bitrates	= carl9170_g_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static struct ieee80211_supported_band carl9170_band_5GHz = {
	.channels	= carl9170_5ghz_chantable,
	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
	.bitrates	= carl9170_a_ratetable,
	.n_bitrates	= carl9170_a_ratetable_size,
	.ht_cap		= CARL9170_HT_CAP,
};

static void carl9170_ampdu_gc(struct ar9170 *ar)
{
	struct carl9170_sta_tid *tid_info;
	LIST_HEAD(tid_gc);

	rcu_read_lock();
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		spin_lock_bh(&ar->tx_ampdu_list_lock);
		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
			tid_info->state = CARL9170_TID_STATE_KILLED;
			list_del_rcu(&tid_info->list);
			ar->tx_ampdu_list_len--;
			list_add_tail(&tid_info->tmp_list, &tid_gc);
		}
		spin_unlock_bh(&ar->tx_ampdu_list_lock);
	}
	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
	rcu_read_unlock();

	synchronize_rcu();

	while (!list_empty(&tid_gc)) {
		struct sk_buff *skb;
		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
					    tmp_list);

		while ((skb = __skb_dequeue(&tid_info->queue)))
			carl9170_tx_status(ar, skb, false);

		list_del_init(&tid_info->tmp_list);
		kfree(tid_info);
	}
}

static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
{
	if (drop_queued) {
		int i;

		/*
		 * We can only drop frames which have not been uploaded
		 * to the device yet.
		 */

		for (i = 0; i < ar->hw->queues; i++) {
			struct sk_buff *skb;

			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
				struct ieee80211_tx_info *info;

				info = IEEE80211_SKB_CB(skb);
				if (info->flags & IEEE80211_TX_CTL_AMPDU)
					atomic_dec(&ar->tx_ampdu_upload);

				carl9170_tx_status(ar, skb, false);
			}
		}
	}

	/* Wait for all other outstanding frames to timeout. */
	if (atomic_read(&ar->tx_total_queued))
		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
}

static void carl9170_flush_ba(struct ar9170 *ar)
{
	struct sk_buff_head free;
	struct carl9170_sta_tid *tid_info;
	struct sk_buff *skb;

	__skb_queue_head_init(&free);

	rcu_read_lock();
	spin_lock_bh(&ar->tx_ampdu_list_lock);
	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
			tid_info->state = CARL9170_TID_STATE_SUSPEND;

			spin_lock(&tid_info->lock);
			while ((skb = __skb_dequeue(&tid_info->queue)))
				__skb_queue_tail(&free, skb);
			spin_unlock(&tid_info->lock);
		}
	}
	spin_unlock_bh(&ar->tx_ampdu_list_lock);
	rcu_read_unlock();

	while ((skb = __skb_dequeue(&free)))
		carl9170_tx_status(ar, skb, false);
}

static void carl9170_zap_queues(struct ar9170 *ar)
{
	struct carl9170_vif_info *cvif;
	unsigned int i;

	carl9170_ampdu_gc(ar);

	carl9170_flush_ba(ar);
	carl9170_flush(ar, true);

	for (i = 0; i < ar->hw->queues; i++) {
		spin_lock_bh(&ar->tx_status[i].lock);
		while (!skb_queue_empty(&ar->tx_status[i])) {
			struct sk_buff *skb;

			skb = skb_peek(&ar->tx_status[i]);
			carl9170_tx_get_skb(skb);
			spin_unlock_bh(&ar->tx_status[i].lock);
			carl9170_tx_drop(ar, skb);
			spin_lock_bh(&ar->tx_status[i].lock);
			carl9170_tx_put_skb(skb);
		}
		spin_unlock_bh(&ar->tx_status[i].lock);
	}

	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);

	/* reinitialize queues statistics */
	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
	for (i = 0; i < ar->hw->queues; i++)
		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;

	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
		ar->mem_bitmap[i] = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(cvif->beacon);
		cvif->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);
	}
	rcu_read_unlock();

	atomic_set(&ar->tx_ampdu_upload, 0);
	atomic_set(&ar->tx_ampdu_scheduler, 0);
	atomic_set(&ar->tx_total_pending, 0);
	atomic_set(&ar->tx_total_queued, 0);
	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
}

#define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
do {									\
	queue.aifs = ai_fs;						\
	queue.cw_min = cwmin;						\
	queue.cw_max = cwmax;						\
	queue.txop = _txop;						\
} while (0)

static int carl9170_op_start(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;
	int err, i;

	mutex_lock(&ar->mutex);

	carl9170_zap_queues(ar);

	/* reset QoS defaults */
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);

	ar->current_factor = ar->current_density = -1;
	/* "The first key is unique." */
	ar->usedkeys = 1;
	ar->filter_state = 0;
	ar->ps.last_action = jiffies;
	ar->ps.last_slept = jiffies;
	ar->erp_mode = CARL9170_ERP_AUTO;

	/* Set "disable hw crypto offload" whenever the module parameter
	 * nohwcrypt is true or if the firmware does not support it.
	 */
	ar->disable_offload = modparam_nohwcrypt |
			      ar->fw.disable_offload_fw;
	ar->rx_software_decryption = ar->disable_offload;

	for (i = 0; i < ar->hw->queues; i++) {
		ar->queue_stop_timeout[i] = jiffies;
		ar->max_queue_stop_timeout[i] = 0;
	}

	atomic_set(&ar->mem_allocs, 0);

	err = carl9170_usb_open(ar);
	if (err)
		goto out;

	err = carl9170_init_mac(ar);
	if (err)
		goto out;

	err = carl9170_set_qos(ar);
	if (err)
		goto out;

	if (ar->fw.rx_filter) {
		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
		if (err)
			goto out;
	}

	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
				 AR9170_DMA_TRIGGER_RXQ);
	if (err)
		goto out;

	/* Clear key-cache */
	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  0, NULL, 0);
		if (err)
			goto out;

		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
					  1, NULL, 0);
		if (err)
			goto out;

		if (i < AR9170_CAM_MAX_USER) {
			err = carl9170_disable_key(ar, i);
			if (err)
				goto out;
		}
	}

	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));

	ieee80211_wake_queues(ar->hw);
	err = 0;

out:
	mutex_unlock(&ar->mutex);
	return err;
}

static void carl9170_cancel_worker(struct ar9170 *ar)
{
	cancel_delayed_work_sync(&ar->stat_work);
	cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_CARL9170_LEDS
	cancel_delayed_work_sync(&ar->led_work);
#endif /* CONFIG_CARL9170_LEDS */
	cancel_work_sync(&ar->ps_work);
	cancel_work_sync(&ar->ping_work);
	cancel_work_sync(&ar->ampdu_work);
}

static void carl9170_op_stop(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	ieee80211_stop_queues(ar->hw);

	mutex_lock(&ar->mutex);
	if (IS_ACCEPTING_CMD(ar)) {
		RCU_INIT_POINTER(ar->beacon_iter, NULL);

		carl9170_led_set_state(ar, 0);

		/* stop DMA */
		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
		carl9170_usb_stop(ar);
	}

	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	carl9170_cancel_worker(ar);
}

static void carl9170_restart_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 restart_work);
	int err = -EIO;

	ar->usedkeys = 0;
	ar->filter_state = 0;
	carl9170_cancel_worker(ar);

	mutex_lock(&ar->mutex);
	if (!ar->force_usb_reset) {
		err = carl9170_usb_restart(ar);
		if (net_ratelimit()) {
			if (err)
				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
			else
				dev_info(&ar->udev->dev, "device restarted successfully.\n");
		}
	}
	carl9170_zap_queues(ar);
	mutex_unlock(&ar->mutex);

	if (!err && !ar->force_usb_reset) {
		ar->restart_counter++;
		atomic_set(&ar->pending_restarts, 0);

		ieee80211_restart_hw(ar->hw);
	} else {
		/*
		 * The reset was unsuccessful and the device seems to
		 * be dead. But there's still one option: a low-level
		 * usb subsystem reset...
		 */

		carl9170_usb_reset(ar);
	}
}

void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
{
	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);

	/*
	 * Sometimes, an error can trigger several different reset events.
	 * By ignoring these *surplus* reset events, the device won't be
	 * killed again, right after it has recovered.
	 */
	if (atomic_inc_return(&ar->pending_restarts) > 1) {
		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
		return;
	}

	ieee80211_stop_queues(ar->hw);

	dev_err(&ar->udev->dev, "restart device (%d)\n", r);

	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
	    !WARN_ON(r >= __CARL9170_RR_LAST))
		ar->last_reason = r;

	if (!ar->registered)
		return;

	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
		ar->force_usb_reset = true;

	ieee80211_queue_work(ar->hw, &ar->restart_work);

	/*
	 * At this point, the device instance might have vanished or been
	 * disabled. So, don't put any code which accesses the ar9170
	 * struct here without proper protection.
	 */
}

static void carl9170_ping_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
	int err;

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	err = carl9170_echo_test(ar, 0xdeadbeef);
	if (err)
		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
	mutex_unlock(&ar->mutex);
}

static int carl9170_init_interface(struct ar9170 *ar,
				   struct ieee80211_vif *vif)
{
	struct ath_common *common = &ar->common;
	int err;

	if (!vif) {
		WARN_ON_ONCE(IS_STARTED(ar));
		return 0;
	}

	memcpy(common->macaddr, vif->addr, ETH_ALEN);

	/* We have to fall back to software crypto, whenever
	 * the user chooses to participate in an IBSS. HW
	 * offload for IBSS RSN is not supported by this driver.
	 *
	 * NOTE: If the previous main interface has already
	 * disabled hw crypto offload, we have to keep this
	 * previous disable_offload setting as it was.
	 * Although ideally, we should notify mac80211 and tell
	 * it to forget about any HW crypto offload for now.
	 */
	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
			       (vif->type != NL80211_IFTYPE_AP));

	/* While the driver supports HW offload in a single
	 * P2P client configuration, it doesn't support HW
	 * offload in the favourite, concurrent P2P GO+CLIENT
	 * configuration. Hence, HW offload will always be
	 * disabled for P2P.
	 */
	ar->disable_offload |= vif->p2p;

	ar->rx_software_decryption = ar->disable_offload;

	err = carl9170_set_operating_mode(ar);
	return err;
}

static int carl9170_op_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif, *old_main = NULL;
	struct ar9170 *ar = hw->priv;
	int vif_id = -1, err = 0;

	mutex_lock(&ar->mutex);
	rcu_read_lock();
	if (vif_priv->active) {
		/*
		 * Skip the interface structure initialization,
		 * if the vif survived the _restart call.
		 */
		vif_id = vif_priv->id;
		vif_priv->enable_beacon = false;

		spin_lock_bh(&ar->beacon_lock);
		dev_kfree_skb_any(vif_priv->beacon);
		vif_priv->beacon = NULL;
		spin_unlock_bh(&ar->beacon_lock);

		goto init;
	}

	/* Because the AR9170 HW's MAC doesn't provide full support for
	 * multiple, independent interfaces [of different operation modes],
	 * we have to select ONE main interface [main mode of HW], but we
	 * can have multiple slaves [AKA: entries in the ACK-table].
	 *
	 * The first (from HEAD/TOP) interface in the ar->vif_list is
	 * always the main intf. All following intfs in this list
	 * are considered to be slave intfs.
	 */
	main_vif = carl9170_get_main_vif(ar);

	if (main_vif) {
		switch (main_vif->type) {
		case NL80211_IFTYPE_STATION:
			if (vif->type == NL80211_IFTYPE_STATION)
				break;

			/* P2P GO [master] use-case
			 * Because the P2P GO station is selected dynamically
			 * by all participating peers of a WIFI Direct network,
			 * the driver has to be able to change the main
			 * interface operating mode on the fly.
			 */
			if (main_vif->p2p && vif->p2p &&
			    vif->type == NL80211_IFTYPE_AP) {
				old_main = main_vif;
				break;
			}

			err = -EBUSY;
			rcu_read_unlock();

			goto unlock;

		case NL80211_IFTYPE_MESH_POINT:
		case NL80211_IFTYPE_AP:
			if ((vif->type == NL80211_IFTYPE_STATION) ||
			    (vif->type == NL80211_IFTYPE_WDS) ||
			    (vif->type == NL80211_IFTYPE_AP) ||
			    (vif->type == NL80211_IFTYPE_MESH_POINT))
				break;

			err = -EBUSY;
			rcu_read_unlock();
			goto unlock;

		default:
			rcu_read_unlock();
			goto unlock;
		}
	}

	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);

	if (vif_id < 0) {
		rcu_read_unlock();

		err = -ENOSPC;
		goto unlock;
	}

	BUG_ON(ar->vif_priv[vif_id].id != vif_id);

	vif_priv->active = true;
	vif_priv->id = vif_id;
	vif_priv->enable_beacon = false;
	ar->vifs++;
	if (old_main) {
		/* We end up in here, if the main interface is being replaced.
		 * Put the new main interface at the HEAD of the list and the
		 * previous interface will automatically become second in line.
		 */
		list_add_rcu(&vif_priv->list, &ar->vif_list);
	} else {
		/* Add the new interface. If the list is empty, it will become
		 * the main interface, otherwise it will be a slave.
		 */
		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
	}
	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);

init:
	main_vif = carl9170_get_main_vif(ar);

	if (main_vif == vif) {
		rcu_assign_pointer(ar->beacon_iter, vif_priv);
		rcu_read_unlock();

		if (old_main) {
			struct carl9170_vif_info *old_main_priv =
				(void *) old_main->drv_priv;
			/* downgrade old main intf to slave intf.
			 * NOTE: We are no longer under rcu_read_lock.
			 * But we are still holding ar->mutex, so the
			 * vif data [id, addr] is safe.
			 */
			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
						       old_main->addr);
			if (err)
				goto unlock;
		}

		err = carl9170_init_interface(ar, vif);
		if (err)
			goto unlock;
	} else {
		rcu_read_unlock();
		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);

		if (err)
			goto unlock;
	}

	if (ar->fw.tx_seq_table) {
		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
					 0);
		if (err)
			goto unlock;
	}

unlock:
	if (err && (vif_id >= 0)) {
		vif_priv->active = false;
		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
		ar->vifs--;
		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
		list_del_rcu(&vif_priv->list);
		mutex_unlock(&ar->mutex);
		synchronize_rcu();
	} else {
		if (ar->vifs > 1)
			ar->ps.off_override |= PS_OFF_VIF;

		mutex_unlock(&ar->mutex);
	}

	return err;
}

static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
	struct ieee80211_vif *main_vif;
	struct ar9170 *ar = hw->priv;
	unsigned int id;

	mutex_lock(&ar->mutex);

	if (WARN_ON_ONCE(!vif_priv->active))
		goto unlock;

	ar->vifs--;

	rcu_read_lock();
	main_vif = carl9170_get_main_vif(ar);

	id = vif_priv->id;

	vif_priv->active = false;
	WARN_ON(vif_priv->enable_beacon);
	vif_priv->enable_beacon = false;
	list_del_rcu(&vif_priv->list);
	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);

	if (vif == main_vif) {
		rcu_read_unlock();

		if (ar->vifs) {
			WARN_ON(carl9170_init_interface(ar,
				carl9170_get_main_vif(ar)));
		} else {
			carl9170_set_operating_mode(ar);
		}
	} else {
		rcu_read_unlock();

		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
	}

	carl9170_update_beacon(ar, false);
	carl9170_flush_cab(ar, id);

	spin_lock_bh(&ar->beacon_lock);
	dev_kfree_skb_any(vif_priv->beacon);
	vif_priv->beacon = NULL;
	spin_unlock_bh(&ar->beacon_lock);

	bitmap_release_region(&ar->vif_bitmap, id, 0);

	carl9170_set_beacon_timers(ar);

	if (ar->vifs == 1)
		ar->ps.off_override &= ~PS_OFF_VIF;

unlock:
	mutex_unlock(&ar->mutex);

	synchronize_rcu();
}

void carl9170_ps_check(struct ar9170 *ar)
{
	ieee80211_queue_work(ar->hw, &ar->ps_work);
}

/* caller must hold ar->mutex */
static int carl9170_ps_update(struct ar9170 *ar)
{
	bool ps = false;
	int err = 0;

	if (!ar->ps.off_override)
		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);

	if (ps != ar->ps.state) {
		err = carl9170_powersave(ar, ps);
		if (err)
			return err;

		if (ar->ps.state && !ps) {
			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
				ar->ps.last_action);
		}

		if (ps)
			ar->ps.last_slept = jiffies;

		ar->ps.last_action = jiffies;
		ar->ps.state = ps;
	}

	return 0;
}

static void carl9170_ps_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ps_work);
	mutex_lock(&ar->mutex);
	if (IS_STARTED(ar))
		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
	mutex_unlock(&ar->mutex);
}

static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
{
	int err;

	if (noise) {
		err = carl9170_get_noisefloor(ar);
		if (err)
			return err;
	}

	if (ar->fw.hw_counters) {
		err = carl9170_collect_tally(ar);
		if (err)
			return err;
	}

	if (flush)
		memset(&ar->tally, 0, sizeof(ar->tally));

	return 0;
}

static void carl9170_stat_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_update_survey(ar, false, true);
	mutex_unlock(&ar->mutex);

	if (err)
		return;

	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
}

static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ar9170 *ar = hw->priv;
	int err = 0;

	mutex_lock(&ar->mutex);
	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_PS) {
		err = carl9170_ps_update(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
		/* TODO */
		err = 0;
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		enum nl80211_channel_type channel_type =
			cfg80211_get_chandef_type(&hw->conf.chandef);

		/* adjust slot time for 5 GHz */
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, true, false);
		if (err)
			goto out;

		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
					   channel_type);
		if (err)
			goto out;

		err = carl9170_update_survey(ar, false, true);
		if (err)
			goto out;

		err = carl9170_set_dyn_sifs_ack(ar);
		if (err)
			goto out;

		err = carl9170_set_rts_cts_rate(ar);
		if (err)
			goto out;
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;
}

static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
{
	struct netdev_hw_addr *ha;
	u64 mchash;

	/* always get broadcast frames */
	mchash = 1ULL << (0xff >> 2);

	netdev_hw_addr_list_for_each(ha, mc_list)
		mchash |= 1ULL << (ha->addr[5] >> 2);

	return mchash;
}

static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
					 unsigned int changed_flags,
					 unsigned int *new_flags,
					 u64 multicast)
{
	struct ar9170 *ar = hw->priv;

	/* mask supported flags */
	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;

	if (!IS_ACCEPTING_CMD(ar))
		return;

	mutex_lock(&ar->mutex);

	ar->filter_state = *new_flags;
	/*
	 * We can support more by setting the sniffer bit and
	 * then checking the error flags, later.
	 */

	if (*new_flags & FIF_ALLMULTI)
		multicast = ~0ULL;

	if (multicast != ar->cur_mc_hash)
		WARN_ON(carl9170_update_multicast(ar, multicast));

	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
		ar->sniffer_enabled = !!(*new_flags &
			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));

		WARN_ON(carl9170_set_operating_mode(ar));
	}

	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
		u32 rx_filter = 0;

		if (!ar->fw.ba_filter)
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
			rx_filter |= CARL9170_RX_FILTER_BAD;

		if (!(*new_flags & FIF_CONTROL))
			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;

		if (!(*new_flags & FIF_PSPOLL))
			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;

		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
		}

		WARN_ON(carl9170_rx_filter(ar, rx_filter));
	}

	mutex_unlock(&ar->mutex);
}

static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif,
					 struct ieee80211_bss_conf *bss_conf,
					 u32 changed)
{
	struct ar9170 *ar = hw->priv;
	struct ath_common *common = &ar->common;
	int err = 0;
	struct carl9170_vif_info *vif_priv;
	struct ieee80211_vif *main_vif;

	mutex_lock(&ar->mutex);
	vif_priv = (void *) vif->drv_priv;
	main_vif = carl9170_get_main_vif(ar);
	if (WARN_ON(!main_vif))
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		struct carl9170_vif_info *iter;
		int i = 0;

		vif_priv->enable_beacon = bss_conf->enable_beacon;
		rcu_read_lock();
		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
			if (iter->active && iter->enable_beacon)
				i++;
		}
		rcu_read_unlock();

		ar->beacon_enabled = i;
	}

	if (changed & BSS_CHANGED_BEACON) {
		err = carl9170_update_beacon(ar, false);
		if (err)
			goto out;
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
		       BSS_CHANGED_BEACON_INT)) {

		if (main_vif != vif) {
			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
		}

		/*
		 * Therefore a hard limit for the broadcast traffic should
		 * prevent false alarms.
		 */
		if (vif->type != NL80211_IFTYPE_STATION &&
		    (bss_conf->beacon_int * bss_conf->dtim_period >=
		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
			err = -EINVAL;
			goto out;
		}

		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_HT) {
		/* TODO */
		err = 0;
		if (err)
			goto out;
	}

	if (main_vif != vif)
		goto out;

	/*
	 * The following settings can only be changed by the
	 * master interface.
	 */

	if (changed & BSS_CHANGED_BSSID) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		err = carl9170_set_operating_mode(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ASSOC) {
		ar->common.curaid = bss_conf->aid;
		err = carl9170_set_beacon_timers(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		err = carl9170_set_slot_time(ar);
		if (err)
			goto out;
	}

	if (changed & BSS_CHANGED_BASIC_RATES) {
		err = carl9170_set_mac_rates(ar);
		if (err)
			goto out;
	}

out:
	WARN_ON_ONCE(err && IS_STARTED(ar));
	mutex_unlock(&ar->mutex);
}

static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_tsf_rsp tsf;
	int err;

	mutex_lock(&ar->mutex);
	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
				0, NULL, sizeof(tsf), &tsf);
	mutex_unlock(&ar->mutex);
	if (WARN_ON(err))
		return 0;

	return le64_to_cpu(tsf.tsf_64);
}

static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       struct ieee80211_key_conf *key)
{
	struct ar9170 *ar = hw->priv;
	int err = 0, i;
	u8 ktype;

	if (ar->disable_offload || !vif)
		return -EOPNOTSUPP;

	/* Fall back to software encryption whenever the driver is connected
	 * to more than one network.
	 *
	 * This is very unfortunate, because some machines cannot handle
	 * the high throughput speed in 802.11n networks.
	 */

	if (!is_main_vif(ar, vif)) {
		mutex_lock(&ar->mutex);
		goto err_softw;
	}

	/*
	 * While the hardware supports a *catch-all* key for offloading
	 * group-key en-/de-cryption, the way the hardware decides which
	 * keyId maps to which key remains a mystery...
	 */
	if ((vif->type != NL80211_IFTYPE_STATION &&
	     vif->type != NL80211_IFTYPE_ADHOC) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		ktype = AR9170_ENC_ALG_WEP64;
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		ktype = AR9170_ENC_ALG_WEP128;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		ktype = AR9170_ENC_ALG_TKIP;
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		ktype = AR9170_ENC_ALG_AESCCMP;
		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	mutex_lock(&ar->mutex);
	if (cmd == SET_KEY) {
		if (!IS_STARTED(ar)) {
			err = -EOPNOTSUPP;
			goto out;
		}

		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
			sta = NULL;

			i = 64 + key->keyidx;
		} else {
			for (i = 0; i < 64; i++)
				if (!(ar->usedkeys & BIT(i)))
					break;
			if (i == 64)
				goto err_softw;
		}

		key->hw_key_idx = i;

		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
					  ktype, 0, key->key,
					  min_t(u8, 16, key->keylen));
		if (err)
			goto out;

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			err = carl9170_upload_key(ar, i, sta ? sta->addr :
						  NULL, ktype, 1,
						  key->key + 16, 16);
			if (err)
				goto out;

			/*
			 * the hardware is not capable of generating the
			 * MMIC of fragmented frames!
			 */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
		}

		if (i < 64)
			ar->usedkeys |= BIT(i);

		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
	} else {
		if (!IS_STARTED(ar)) {
			/* The device is gone... together with the key ;-) */
			err = 0;
			goto out;
		}

		if (key->hw_key_idx < 64) {
			ar->usedkeys &= ~BIT(key->hw_key_idx);
		} else {
			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
						  AR9170_ENC_ALG_NONE, 0,
						  NULL, 0);
			if (err)
				goto out;

			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
				err = carl9170_upload_key(ar, key->hw_key_idx,
							  NULL,
							  AR9170_ENC_ALG_NONE,
							  1, NULL, 0);
				if (err)
					goto out;
			}
		}

		err = carl9170_disable_key(ar, key->hw_key_idx);
		if (err)
			goto out;
	}

out:
	mutex_unlock(&ar->mutex);
	return err;

err_softw:
	if (!ar->rx_software_decryption) {
		ar->rx_software_decryption = true;
		carl9170_set_operating_mode(ar);
	}
	mutex_unlock(&ar->mutex);
	return -ENOSPC;
}

static int carl9170_op_sta_add(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;

	atomic_set(&sta_info->pending_frames, 0);

	if (sta->ht_cap.ht_supported) {
		if (sta->ht_cap.ampdu_density > 6) {
			/*
			 * HW does support 16us AMPDU density.
			 * No HT-Xmit for station.
			 */

			return 0;
		}

		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
		sta_info->ht_sta = true;
	}

	return 0;
}

static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_sta *sta)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	unsigned int i;
	bool cleanup = false;

	if (sta->ht_cap.ht_supported) {

		sta_info->ht_sta = false;

		rcu_read_lock();
		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
			struct carl9170_sta_tid *tid_info;

			tid_info = rcu_dereference(sta_info->agg[i]);
			RCU_INIT_POINTER(sta_info->agg[i], NULL);

			if (!tid_info)
				continue;

			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
			cleanup = true;
		}
		rcu_read_unlock();

		if (cleanup)
			carl9170_ampdu_gc(ar);
	}

	return 0;
}

static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 queue,
			       const struct ieee80211_tx_queue_params *param)
{
	struct ar9170 *ar = hw->priv;
	int ret;

	mutex_lock(&ar->mutex);
	if (queue < ar->hw->queues) {
		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
		ret = carl9170_set_qos(ar);
	} else {
		ret = -EINVAL;
	}

	mutex_unlock(&ar->mutex);
	return ret;
}

static void carl9170_ampdu_work(struct work_struct *work)
{
	struct ar9170 *ar = container_of(work, struct ar9170,
					 ampdu_work);

	if (!IS_STARTED(ar))
		return;

	mutex_lock(&ar->mutex);
	carl9170_ampdu_gc(ar);

	mutex_unlock(&ar->mutex);
}

static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta,
				    u16 tid, u16 *ssn, u8 buf_size)
{
	struct ar9170 *ar = hw->priv;
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
	struct carl9170_sta_tid *tid_info;

	if (modparam_noht)
		return -EOPNOTSUPP;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		if (!sta_info->ht_sta)
			return -EOPNOTSUPP;

		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
				   GFP_ATOMIC);
		if (!tid_info)
			return -ENOMEM;

		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
		tid_info->state = CARL9170_TID_STATE_PROGRESS;
		tid_info->tid = tid;
		tid_info->max = sta_info->ampdu_max_len;
		tid_info->sta = sta;
		tid_info->vif = vif;

		INIT_LIST_HEAD(&tid_info->list);
		INIT_LIST_HEAD(&tid_info->tmp_list);
		skb_queue_head_init(&tid_info->queue);
		spin_lock_init(&tid_info->lock);

		spin_lock_bh(&ar->tx_ampdu_list_lock);
		ar->tx_ampdu_list_len++;
		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
		rcu_assign_pointer(sta_info->agg[tid], tid_info);
		spin_unlock_bh(&ar->tx_ampdu_list_lock);

		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;

	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);
		if (tid_info) {
			spin_lock_bh(&ar->tx_ampdu_list_lock);
			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
			spin_unlock_bh(&ar->tx_ampdu_list_lock);
		}

		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
		rcu_read_unlock();

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
		break;

	case IEEE80211_AMPDU_TX_OPERATIONAL:
		rcu_read_lock();
		tid_info = rcu_dereference(sta_info->agg[tid]);

		sta_info->stats[tid].clear = true;
		sta_info->stats[tid].req = false;

		if (tid_info) {
			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
			tid_info->state = CARL9170_TID_STATE_IDLE;
		}
		rcu_read_unlock();

		if (WARN_ON_ONCE(!tid_info))
			return -EFAULT;

		break;

	case IEEE80211_AMPDU_RX_START:
	case IEEE80211_AMPDU_RX_STOP:
		/* Handled by hardware */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

#ifdef CONFIG_CARL9170_WPC
static int carl9170_register_wps_button(struct ar9170 *ar)
{
	struct input_dev *input;
	int err;

	if (!(ar->features & CARL9170_WPS_BUTTON))
		return 0;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
		 wiphy_name(ar->hw->wiphy));

	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));

	input->name = ar->wps.name;
	input->phys = ar->wps.phys;
	input->id.bustype = BUS_USB;
	input->dev.parent = &ar->hw->wiphy->dev;

	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);

	err = input_register_device(input);
	if (err) {
		input_free_device(input);
		return err;
	}

	ar->wps.pbc = input;
	return 0;
}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
static int carl9170_rng_get(struct ar9170 *ar)
{

#define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
#define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)

	static const __le32 rng_load[RW] = {
		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};

	u32 buf[RW];

	unsigned int i, off = 0, transfer, count;
	int err;

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);

	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
		return -EAGAIN;

	count = ARRAY_SIZE(ar->rng.cache);
	while (count) {
		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) rng_load,
					RB, (u8 *) buf);
		if (err)
			return err;

		transfer = min_t(unsigned int, count, RW);
		for (i = 0; i < transfer; i++)
			ar->rng.cache[off + i] = buf[i];

		off += transfer;
		count -= transfer;
	}

	ar->rng.cache_idx = 0;

#undef RW
#undef RB
	return 0;
}

static int carl9170_rng_read(struct hwrng *rng, u32 *data)
{
	struct ar9170 *ar = (struct ar9170 *)rng->priv;
	int ret = -EIO;

	mutex_lock(&ar->mutex);
	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
		ret = carl9170_rng_get(ar);
		if (ret) {
			mutex_unlock(&ar->mutex);
			return ret;
		}
	}

	*data = ar->rng.cache[ar->rng.cache_idx++];
	mutex_unlock(&ar->mutex);

	return sizeof(u16);
}

static void carl9170_unregister_hwrng(struct ar9170 *ar)
{
	if (ar->rng.initialized) {
		hwrng_unregister(&ar->rng.rng);
		ar->rng.initialized = false;
	}
}

static int carl9170_register_hwrng(struct ar9170 *ar)
{
	int err;

	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
	ar->rng.rng.name = ar->rng.name;
	ar->rng.rng.data_read = carl9170_rng_read;
	ar->rng.rng.priv = (unsigned long)ar;

	if (WARN_ON(ar->rng.initialized))
		return -EALREADY;

	err = hwrng_register(&ar->rng.rng);
	if (err) {
		dev_err(&ar->udev->dev, "Failed to register the random "
			"number generator (%d)\n", err);
		return err;
	}

	ar->rng.initialized = true;

	err = carl9170_rng_get(ar);
	if (err) {
		carl9170_unregister_hwrng(ar);
		return err;
	}

	return 0;
}
#endif /* CONFIG_CARL9170_HWRNG */

static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
				  struct survey_info *survey)
{
	struct ar9170 *ar = hw->priv;
	struct ieee80211_channel *chan;
	struct ieee80211_supported_band *band;
	int err, b, i;

	chan = ar->channel;
	if (!chan)
		return -ENODEV;

	if (idx == chan->hw_value) {
		mutex_lock(&ar->mutex);
		err = carl9170_update_survey(ar, false, true);
		mutex_unlock(&ar->mutex);
		if (err)
			return err;
	}

	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
		band = ar->hw->wiphy->bands[b];

		if (!band)
			continue;

		for (i = 0; i < band->n_channels; i++) {
			if (band->channels[i].hw_value == idx) {
				chan = &band->channels[i];
				goto found;
			}
		}
	}
	return -ENOENT;

found:
	memcpy(survey, &ar->survey[idx], sizeof(*survey));

	survey->channel = chan;
	survey->filled = SURVEY_INFO_NOISE_DBM;

	if (ar->channel == chan)
		survey->filled |= SURVEY_INFO_IN_USE;

	if (ar->fw.hw_counters) {
		survey->filled |= SURVEY_INFO_CHANNEL_TIME |
				  SURVEY_INFO_CHANNEL_TIME_BUSY |
				  SURVEY_INFO_CHANNEL_TIME_TX;
	}

	return 0;
}

static void carl9170_op_flush(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      u32 queues, bool drop)
{
	struct ar9170 *ar = hw->priv;
	unsigned int vid;

	mutex_lock(&ar->mutex);
	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
		carl9170_flush_cab(ar, vid);

	carl9170_flush(ar, drop);
	mutex_unlock(&ar->mutex);
}

static int carl9170_op_get_stats(struct ieee80211_hw *hw,
				 struct ieee80211_low_level_stats *stats)
{
	struct ar9170 *ar = hw->priv;

	memset(stats, 0, sizeof(*stats));
	stats->dot11ACKFailureCount = ar->tx_ack_failures;
	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
	return 0;
}

static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   enum sta_notify_cmd cmd,
				   struct ieee80211_sta *sta)
{
	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		sta_info->sleeping = true;
		if (atomic_read(&sta_info->pending_frames))
			ieee80211_sta_block_awake(hw, sta, true);
		break;

	case STA_NOTIFY_AWAKE:
		sta_info->sleeping = false;
		break;
	}
}

static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ar9170 *ar = hw->priv;

	return !!atomic_read(&ar->tx_total_queued);
}

static const struct ieee80211_ops carl9170_ops = {
	.start			= carl9170_op_start,
	.stop			= carl9170_op_stop,
	.tx			= carl9170_op_tx,
	.flush			= carl9170_op_flush,
	.add_interface		= carl9170_op_add_interface,
	.remove_interface	= carl9170_op_remove_interface,
	.config			= carl9170_op_config,
	.prepare_multicast	= carl9170_op_prepare_multicast,
	.configure_filter	= carl9170_op_configure_filter,
	.conf_tx		= carl9170_op_conf_tx,
	.bss_info_changed	= carl9170_op_bss_info_changed,
	.get_tsf		= carl9170_op_get_tsf,
	.set_key		= carl9170_op_set_key,
	.sta_add		= carl9170_op_sta_add,
	.sta_remove		= carl9170_op_sta_remove,
	.sta_notify		= carl9170_op_sta_notify,
	.get_survey		= carl9170_op_get_survey,
	.get_stats		= carl9170_op_get_stats,
	.ampdu_action		= carl9170_op_ampdu_action,
	.tx_frames_pending	= carl9170_tx_frames_pending,
};

void *carl9170_alloc(size_t priv_size)
{
	struct ieee80211_hw *hw;
	struct ar9170 *ar;
	struct sk_buff *skb;
	int i;

	/*
	 * this buffer is used for rx stream reconstruction.
	 * Under heavy load this device (or the transport layer?)
	 * tends to split the streams into separate rx descriptors.
	 */

	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
	if (!skb)
		goto err_nomem;

	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
	if (!hw)
		goto err_nomem;

	ar = hw->priv;
	ar->hw = hw;
	ar->rx_failover = skb;

	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
	ar->rx_has_plcp = false;

	/*
	 * Here's a hidden pitfall!
	 *
	 * All 4 AC queues work perfectly well under _legacy_ operation.
	 * However as soon as aggregation is enabled, the traffic flow
	 * gets very bumpy. Therefore we have to _switch_ to a
	 * software AC with a single HW queue.
	 */
	hw->queues = __AR9170_NUM_TXQ;

	mutex_init(&ar->mutex);
	spin_lock_init(&ar->beacon_lock);
	spin_lock_init(&ar->cmd_lock);
	spin_lock_init(&ar->tx_stats_lock);
	spin_lock_init(&ar->tx_ampdu_list_lock);
	spin_lock_init(&ar->mem_lock);
	spin_lock_init(&ar->state_lock);
	atomic_set(&ar->pending_restarts, 0);
	ar->vifs = 0;
	for (i = 0; i < ar->hw->queues; i++) {
		skb_queue_head_init(&ar->tx_status[i]);
		skb_queue_head_init(&ar->tx_pending[i]);

		INIT_LIST_HEAD(&ar->bar_list[i]);
		spin_lock_init(&ar->bar_list_lock[i]);
	}
	INIT_WORK(&ar->ps_work, carl9170_ps_work);
	INIT_WORK(&ar->ping_work, carl9170_ping_work);
	INIT_WORK(&ar->restart_work, carl9170_restart_work);
	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
	INIT_LIST_HEAD(&ar->tx_ampdu_list);
	rcu_assign_pointer(ar->tx_ampdu_iter,
			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);

	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
	INIT_LIST_HEAD(&ar->vif_list);
	init_completion(&ar->tx_flush);

	/* firmware decides which modes we support */
	hw->wiphy->interface_modes = 0;

	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_MFP_CAPABLE |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
		     IEEE80211_HW_SUPPORTS_RC_TABLE |
		     IEEE80211_HW_SIGNAL_DBM |
		     IEEE80211_HW_SUPPORTS_HT_CCK_RATES;

	if (!modparam_noht) {
		/*
		 * see the comment above for why we allow the user
		 * to disable HT by a module parameter.
		 */
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
	}

	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
	hw->sta_data_size = sizeof(struct carl9170_sta_info);
	hw->vif_data_size = sizeof(struct carl9170_vif_info);

	hw->max_rates = CARL9170_TX_MAX_RATES;
	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;

	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */

	return ar;

err_nomem:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}

static int carl9170_read_eeprom(struct ar9170 *ar)
{
#define RW	8	/* number of words to read at once */
#define RB	(sizeof(u32) * RW)
	u8 *eeprom = (void *)&ar->eeprom;
	__le32 offsets[RW];
	int i, j, err;

	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);

	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
	/* don't want to handle trailing remains */
	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif

	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
		for (j = 0; j < RW; j++)
			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
						 RB * i + 4 * j);

		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
					RB, (u8 *) &offsets,
					RB, eeprom + RB * i);
		if (err)
			return err;
	}

#undef RW
#undef RB
	return 0;
}

static int carl9170_parse_eeprom(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	unsigned int rx_streams, tx_streams, tx_params = 0;
	int bands = 0;
	int chans = 0;

	if (ar->eeprom.length == cpu_to_le16(0xffff))
		return -ENODATA;

	rx_streams = hweight8(ar->eeprom.rx_mask);
	tx_streams = hweight8(ar->eeprom.tx_mask);

	if (rx_streams != tx_streams) {
		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;

		WARN_ON(!(tx_streams >= 1 && tx_streams <=
			IEEE80211_HT_MCS_TX_MAX_STREAMS));

		tx_params = (tx_streams - 1) <<
			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;

		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
	}

	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&carl9170_band_2GHz;
		chans += carl9170_band_2GHz.n_channels;
		bands++;
	}
	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&carl9170_band_5GHz;
		chans += carl9170_band_5GHz.n_channels;
		bands++;
	}

	if (!bands)
		return -EINVAL;

	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
	if (!ar->survey)
		return -ENOMEM;
	ar->num_channels = chans;

	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);

	/* second part of wiphy init */
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);

	return 0;
}

static void carl9170_reg_notifier(struct wiphy *wiphy,
				  struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ar9170 *ar = hw->priv;

	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}

int carl9170_register(struct ar9170 *ar)
{
	struct ath_regulatory *regulatory = &ar->common.regulatory;
	int err = 0, i;

	if (WARN_ON(ar->mem_bitmap))
		return -EINVAL;

	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
				 sizeof(unsigned long), GFP_KERNEL);

	if (!ar->mem_bitmap)
		return -ENOMEM;

	/* try to read EEPROM, init MAC addr */
	err = carl9170_read_eeprom(ar);
	if (err)
		return err;

	err = carl9170_parse_eeprom(ar);
	if (err)
		return err;

	err = ath_regd_init(regulatory, ar->hw->wiphy,
			    carl9170_reg_notifier);
	if (err)
		return err;

	if (modparam_noht) {
		carl9170_band_2GHz.ht_cap.ht_supported = false;
		carl9170_band_5GHz.ht_cap.ht_supported = false;
	}

	for (i = 0; i < ar->fw.vif_num; i++) {
		ar->vif_priv[i].id = i;
		ar->vif_priv[i].vif = NULL;
	}

	err = ieee80211_register_hw(ar->hw);
	if (err)
		return err;

	/* mac80211 interface is now registered */
	ar->registered = true;

	if (!ath_is_world_regd(regulatory))
		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_register(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

	err = carl9170_led_init(ar);
	if (err)
		goto err_unreg;

#ifdef CONFIG_CARL9170_LEDS
	err = carl9170_led_register(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_WPC
	err = carl9170_register_wps_button(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	err = carl9170_register_hwrng(ar);
	if (err)
		goto err_unreg;
#endif /* CONFIG_CARL9170_HWRNG */

	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
		 wiphy_name(ar->hw->wiphy));

	return 0;

err_unreg:
	carl9170_unregister(ar);
	return err;
}

void carl9170_unregister(struct ar9170 *ar)
{
	if (!ar->registered)
		return;

	ar->registered = false;

#ifdef CONFIG_CARL9170_LEDS
	carl9170_led_unregister(ar);
#endif /* CONFIG_CARL9170_LEDS */

#ifdef CONFIG_CARL9170_DEBUGFS
	carl9170_debugfs_unregister(ar);
#endif /* CONFIG_CARL9170_DEBUGFS */

#ifdef CONFIG_CARL9170_WPC
	if (ar->wps.pbc) {
		input_unregister_device(ar->wps.pbc);
		ar->wps.pbc = NULL;
	}
#endif /* CONFIG_CARL9170_WPC */

#ifdef CONFIG_CARL9170_HWRNG
	carl9170_unregister_hwrng(ar);
#endif /* CONFIG_CARL9170_HWRNG */

	carl9170_cancel_worker(ar);
	cancel_work_sync(&ar->restart_work);

	ieee80211_unregister_hw(ar->hw);
}

void carl9170_free(struct ar9170 *ar)
{
	WARN_ON(ar->registered);
	WARN_ON(IS_INITIALIZED(ar));

	kfree_skb(ar->rx_failover);
	ar->rx_failover = NULL;

	kfree(ar->mem_bitmap);
	ar->mem_bitmap = NULL;

	kfree(ar->survey);
	ar->survey = NULL;

	mutex_destroy(&ar->mutex);

	ieee80211_free_hw(ar->hw);
}