/* bnx2fc_io.c: QLogic NetXtreme II Linux FCoE offload driver.
 * IO manager and SCSI IO processing.
 *
 * Copyright (c) 2008 - 2013 Broadcom Corporation
 * Copyright (c) 2014, QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

#define RESERVE_FREE_LIST_INDEX		num_possible_cpus()

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index);
static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req);
static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req);
static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq);

void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
			  unsigned int timer_msec)
{
	struct bnx2fc_interface *interface = io_req->port->priv;

	if (queue_delayed_work(interface->timer_work_queue,
			       &io_req->timeout_work,
			       msecs_to_jiffies(timer_msec)))
		kref_get(&io_req->refcount);
}

static void bnx2fc_cmd_timeout(struct work_struct *work)
{
	struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd,
						 timeout_work.work);
	struct fc_lport *lport;
	struct fc_rport_priv *rdata;
	u8 cmd_type = io_req->cmd_type;
	struct bnx2fc_rport *tgt = io_req->tgt;
	int logo_issued;
	int rc;

	BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d, "
		      "req_flags = %lx\n", cmd_type, io_req->req_flags);

	spin_lock_bh(&tgt->tgt_lock);
	if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) {
		clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
		/*
		 * Ideally we should hold the io_req until the RRQ completes,
		 * and release io_req from timeout hold.
		 */
		spin_unlock_bh(&tgt->tgt_lock);
		bnx2fc_send_rrq(io_req);
		return;
	}
	if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n");
		goto done;
	}

	switch (cmd_type) {
	case BNX2FC_SCSI_CMD:
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags)) {
			/* Handle eh_abort timeout */
			BNX2FC_IO_DBG(io_req, "eh_abort timed out\n");
			complete(&io_req->tm_done);
		} else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS,
				    &io_req->req_flags)) {
			/* Handle internally generated ABTS timeout */
			BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n",
				      io_req->refcount.refcount.counter);
			if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					       &io_req->req_flags))) {

				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/* Handle IO timeout */
			BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n");
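			/*
			 * test_and_set_bit() closes the race with the
			 * completion path: if IO_COMPL is already set, the
			 * command finished before this timer fired and only
			 * the timer hold needs to be dropped.
			 */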
			if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL,
					     &io_req->req_flags)) {
				BNX2FC_IO_DBG(io_req, "IO completed before "
					      "timer expiry\n");
				goto done;
			}

			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &io_req->req_flags)) {
				rc = bnx2fc_initiate_abts(io_req);
				if (rc == SUCCESS)
					goto done;
				/*
				 * Explicitly logo the target if
				 * abts initiation fails
				 */
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicit "
						   "logo - tgt flags = 0x%lx\n",
						   tgt->flags);

					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			} else {
				BNX2FC_IO_DBG(io_req, "IO already in "
					      "ABTS processing\n");
			}
		}
		break;
	case BNX2FC_ELS:

		if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
			BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n");

			if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
					      &io_req->req_flags)) {
				lport = io_req->port->lport;
				rdata = io_req->tgt->rdata;
				logo_issued = test_and_set_bit(
						BNX2FC_FLAG_EXPL_LOGO,
						&tgt->flags);
				kref_put(&io_req->refcount, bnx2fc_cmd_release);
				spin_unlock_bh(&tgt->tgt_lock);

				/* Explicitly logo the target */
				if (!logo_issued) {
					BNX2FC_IO_DBG(io_req, "Explicitly logo "
						      "(els)\n");
					mutex_lock(&lport->disc.disc_mutex);
					lport->tt.rport_logoff(rdata);
					mutex_unlock(&lport->disc.disc_mutex);
				}
				return;
			}
		} else {
			/*
			 * Handle ELS timeout.
			 * tgt_lock is used to sync compl path and timeout
			 * path. If els compl path is processing this IO, we
			 * have nothing to do here, just release the timer hold
			 */
			BNX2FC_IO_DBG(io_req, "ELS timed out\n");
			if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
					     &io_req->req_flags))
				goto done;

			/* Indicate the cb_func that this ELS is timed out */
			set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags);

			if ((io_req->cb_func) && (io_req->cb_arg)) {
				io_req->cb_func(io_req->cb_arg);
				io_req->cb_arg = NULL;
			}
		}
		break;
	default:
		printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n",
			cmd_type);
		break;
	}

done:
	/* release the cmd that was held when timer was set */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	spin_unlock_bh(&tgt->tgt_lock);
}

static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code)
{
	/* Called with host lock held */
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;

	/*
	 * active_cmd_queue may have other command types as well,
	 * and during flush operation, we want to error back only
	 * scsi commands.
	 */
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		return;

	BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code);
	if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) {
		/* Do not call scsi done for this IO */
		return;
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;
	if (!sc_cmd) {
		printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. "
				    "IO(0x%x) already cleaned up\n",
		       io_req->xid);
		return;
	}
	sc_cmd->result = err_code << 16;

	BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n",
		sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries,
		sc_cmd->allowed);
	scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd));
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
}

struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
{
	struct bnx2fc_cmd_mgr *cmgr;
	struct io_bdt *bdt_info;
	struct bnx2fc_cmd *io_req;
	size_t len;
	u32 mem_size;
	u16 xid;
	int i;
	int num_ios, num_pri_ios;
	size_t bd_tbl_sz;
	int arr_sz = num_possible_cpus() + 1;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) {
		printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x "
		       "and max_xid 0x%x\n", min_xid, max_xid);
		return NULL;
	}
	BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid);

	num_ios = max_xid - min_xid + 1;
	len = (num_ios * (sizeof(struct bnx2fc_cmd *)));
	len += sizeof(struct bnx2fc_cmd_mgr);

	cmgr = kzalloc(len, GFP_KERNEL);
	if (!cmgr) {
		printk(KERN_ERR PFX "failed to alloc cmgr\n");
		return NULL;
	}

	cmgr->free_list = kzalloc(sizeof(*cmgr->free_list) *
				  arr_sz, GFP_KERNEL);
	if (!cmgr->free_list) {
		printk(KERN_ERR PFX "failed to alloc free_list\n");
		goto mem_err;
	}

	cmgr->free_list_lock = kzalloc(sizeof(*cmgr->free_list_lock) *
				       arr_sz, GFP_KERNEL);
	if (!cmgr->free_list_lock) {
		printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
		kfree(cmgr->free_list);
		cmgr->free_list = NULL;
		goto mem_err;
	}

	cmgr->hba = hba;
	cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);

	for (i = 0; i < arr_sz; i++) {
		INIT_LIST_HEAD(&cmgr->free_list[i]);
		spin_lock_init(&cmgr->free_list_lock[i]);
	}

	/*
	 * Pre-allocated pool of bnx2fc_cmds.
	 * Last entry in the free list array is the free list
	 * of slow path requests.
	 */
	xid = BNX2FC_MIN_XID;
	num_pri_ios = num_ios - hba->elstm_xids;
	for (i = 0; i < num_ios; i++) {
		io_req = kzalloc(sizeof(*io_req), GFP_KERNEL);

		if (!io_req) {
			printk(KERN_ERR PFX "failed to alloc io_req\n");
			goto mem_err;
		}

		INIT_LIST_HEAD(&io_req->link);
		INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout);

		io_req->xid = xid++;
		if (i < num_pri_ios)
			list_add_tail(&io_req->link,
				&cmgr->free_list[io_req->xid %
						 num_possible_cpus()]);
		else
			list_add_tail(&io_req->link,
				&cmgr->free_list[num_possible_cpus()]);
		io_req++;
	}

	/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
	mem_size = num_ios * sizeof(struct io_bdt *);
	cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
	if (!cmgr->io_bdt_pool) {
		printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
		goto mem_err;
	}

	mem_size = sizeof(struct io_bdt);
	for (i = 0; i < num_ios; i++) {
		cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL);
		if (!cmgr->io_bdt_pool[i]) {
			printk(KERN_ERR PFX "failed to alloc "
				"io_bdt_pool[%d]\n", i);
			goto mem_err;
		}
	}

	/* Allocate and map fcoe_bdt_ctx structures */
	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
						      bd_tbl_sz,
						      &bdt_info->bd_tbl_dma,
						      GFP_KERNEL);
		if (!bdt_info->bd_tbl) {
			printk(KERN_ERR PFX "failed to alloc "
				"bdt_tbl[%d]\n", i);
			goto mem_err;
		}
	}

	return cmgr;

mem_err:
	bnx2fc_cmd_mgr_free(cmgr);
	return NULL;
}

void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr)
{
	struct io_bdt *bdt_info;
	struct bnx2fc_hba *hba = cmgr->hba;
	size_t bd_tbl_sz;
	u16 min_xid = BNX2FC_MIN_XID;
	u16 max_xid = hba->max_xid;
	int num_ios;
	int i;

	num_ios = max_xid - min_xid + 1;

	/* Free fcoe_bdt_ctx structures */
	if (!cmgr->io_bdt_pool)
		goto free_cmd_pool;

	bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx);
	for (i = 0; i < num_ios; i++) {
		bdt_info = cmgr->io_bdt_pool[i];
		if (bdt_info->bd_tbl) {
			dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz,
					  bdt_info->bd_tbl,
					  bdt_info->bd_tbl_dma);
			bdt_info->bd_tbl = NULL;
		}
	}

	/* Destroy io_bdt pool */
	for (i = 0; i < num_ios; i++) {
		kfree(cmgr->io_bdt_pool[i]);
		cmgr->io_bdt_pool[i] = NULL;
	}

	kfree(cmgr->io_bdt_pool);
	cmgr->io_bdt_pool = NULL;

free_cmd_pool:
	kfree(cmgr->free_list_lock);

	/* Destroy cmd pool */
	if (!cmgr->free_list)
		goto free_cmgr;

	for (i = 0; i < num_possible_cpus() + 1; i++) {
		struct bnx2fc_cmd *tmp, *io_req;

		list_for_each_entry_safe(io_req, tmp,
					 &cmgr->free_list[i], link) {
			list_del(&io_req->link);
			kfree(io_req);
		}
	}
	kfree(cmgr->free_list);
free_cmgr:
	/* Free command manager itself */
	kfree(cmgr);
}

struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	int index = RESERVE_FREE_LIST_INDEX;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;

	max_sqes = tgt->max_sqes;
	switch (type) {
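	/*
	 * Task management and ELS requests are capped at their own,
	 * smaller SQE budgets instead of the per-target tgt->max_sqes
	 * limit used for regular SCSI commands.
	 */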
	case BNX2FC_TASK_MGMT_CMD:
		max_sqes = BNX2FC_TM_MAX_SQES;
		break;
	case BNX2FC_ELS:
		max_sqes = BNX2FC_ELS_MAX_SQES;
		break;
	default:
		break;
	}

	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&(cmd_mgr->free_list[index]))) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available "
			"ios(%d):sqes(%d)\n",
			tgt->num_active_ios.counter, tgt->max_sqes);
		if (list_empty(&(cmd_mgr->free_list[index])))
			printk(KERN_ERR PFX "elstm_alloc: list_empty\n");
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;
	io_req->cmd_type = type;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr;
	struct bnx2fc_cmd *io_req;
	struct list_head *listp;
	struct io_bdt *bd_tbl;
	u32 free_sqes;
	u32 max_sqes;
	u16 xid;
	int index = get_cpu();

	max_sqes = BNX2FC_SCSI_MAX_SQES;
	/*
	 * NOTE: Free list insertions and deletions are protected with
	 * cmgr lock
	 */
	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	free_sqes = atomic_read(&tgt->free_sqes);
	if ((list_empty(&cmd_mgr->free_list[index])) ||
	    (tgt->num_active_ios.counter >= max_sqes) ||
	    (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) {
		spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
		put_cpu();
		return NULL;
	}

	listp = (struct list_head *)
			cmd_mgr->free_list[index].next;
	list_del_init(listp);
	io_req = (struct bnx2fc_cmd *) listp;
	xid = io_req->xid;
	cmd_mgr->cmds[xid] = io_req;
	atomic_inc(&tgt->num_active_ios);
	atomic_dec(&tgt->free_sqes);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);
	put_cpu();

	INIT_LIST_HEAD(&io_req->link);

	io_req->port = port;
	io_req->cmd_mgr = cmd_mgr;
	io_req->req_flags = 0;

	/* Bind io_bdt for this io_req */
	/* Have a static link between io_req and io_bdt_pool */
	bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
	bd_tbl->io_req = io_req;

	/* Hold the io_req against deletion */
	kref_init(&io_req->refcount);
	return io_req;
}

void bnx2fc_cmd_release(struct kref *ref)
{
	struct bnx2fc_cmd *io_req = container_of(ref,
						 struct bnx2fc_cmd, refcount);
	struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
	int index;

	if (io_req->cmd_type == BNX2FC_SCSI_CMD)
		index = io_req->xid % num_possible_cpus();
	else
		index = RESERVE_FREE_LIST_INDEX;
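
	/*
	 * SCSI commands go back to the per-CPU free list they were
	 * carved from; slow path commands (ELS/TM/ABTS/cleanup) are
	 * returned to the reserved free list.
	 */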

	spin_lock_bh(&cmd_mgr->free_list_lock[index]);
	if (io_req->cmd_type != BNX2FC_SCSI_CMD)
		bnx2fc_free_mp_resc(io_req);
	cmd_mgr->cmds[io_req->xid] = NULL;
	/* Delete IO from retire queue */
	list_del_init(&io_req->link);
	/* Add it to the free list */
	list_add(&io_req->link,
			&cmd_mgr->free_list[index]);
	atomic_dec(&io_req->tgt->num_active_ios);
	spin_unlock_bh(&cmd_mgr->free_list_lock[index]);

}

static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	size_t sz = sizeof(struct fcoe_bd_ctx);

	/* clear tm flags */
	mp_req->tm_flags = 0;
	if (mp_req->mp_req_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_req_bd,
				  mp_req->mp_req_bd_dma);
		mp_req->mp_req_bd = NULL;
	}
	if (mp_req->mp_resp_bd) {
		dma_free_coherent(&hba->pcidev->dev, sz,
				  mp_req->mp_resp_bd,
				  mp_req->mp_resp_bd_dma);
		mp_req->mp_resp_bd = NULL;
	}
	if (mp_req->req_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->req_buf,
				  mp_req->req_buf_dma);
		mp_req->req_buf = NULL;
	}
	if (mp_req->resp_buf) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  mp_req->resp_buf,
				  mp_req->resp_buf_dma);
		mp_req->resp_buf = NULL;
	}
}

int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_mp_req *mp_req;
	struct fcoe_bd_ctx *mp_req_bd;
	struct fcoe_bd_ctx *mp_resp_bd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	dma_addr_t addr;
	size_t sz;

	mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);
	memset(mp_req, 0, sizeof(struct bnx2fc_mp_req));

	mp_req->req_len = sizeof(struct fcp_cmnd);
	io_req->data_xfer_len = mp_req->req_len;
	mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					     &mp_req->req_buf_dma,
					     GFP_ATOMIC);
	if (!mp_req->req_buf) {
		printk(KERN_ERR PFX "unable to alloc MP req buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}

	mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					      &mp_req->resp_buf_dma,
					      GFP_ATOMIC);
	if (!mp_req->resp_buf) {
		printk(KERN_ERR PFX "unable to alloc TM resp buffer\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
	memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);

	/* Allocate and map mp_req_bd and mp_resp_bd */
	sz = sizeof(struct fcoe_bd_ctx);
	mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						 &mp_req->mp_req_bd_dma,
						 GFP_ATOMIC);
	if (!mp_req->mp_req_bd) {
		printk(KERN_ERR PFX "unable to alloc MP req bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
						 &mp_req->mp_resp_bd_dma,
						 GFP_ATOMIC);
	if (!mp_req->mp_resp_bd) {
		printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
		bnx2fc_free_mp_resc(io_req);
		return FAILED;
	}
	/* Fill bd table */
	addr = mp_req->req_buf_dma;
	mp_req_bd = mp_req->mp_req_bd;
	mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_req_bd->buf_len = CNIC_PAGE_SIZE;
	mp_req_bd->flags = 0;

	/*
	 * MP buffer is either a task mgmt command or an ELS.
	 * So the assumption is that it consumes a single bd
	 * entry in the bd table
	 */
	mp_resp_bd = mp_req->mp_resp_bd;
	addr = mp_req->resp_buf_dma;
	mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
	mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
	mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
	mp_resp_bd->flags = 0;

	return SUCCESS;
}

static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
	struct fc_lport *lport;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rp;
	struct fcoe_port *port;
	struct bnx2fc_interface *interface;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_mp_req *tm_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct Scsi_Host *host = sc_cmd->device->host;
	struct fc_frame_header *fc_hdr;
	struct fcp_cmnd *fcp_cmnd;
	int task_idx, index;
	int rc = SUCCESS;
	u16 xid;
	u32 sid, did;
	unsigned long start = jiffies;

	lport = shost_priv(host);
	rport = starget_to_rport(scsi_target(sc_cmd->device));
	port = lport_priv(lport);
	interface = port->priv;

	if (rport == NULL) {
		printk(KERN_ERR PFX "device_reset: rport is NULL\n");
		rc = FAILED;
		goto tmf_err;
	}
	rp = rport->dd_data;

	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "device_reset: link is not ready\n");
		rc = FAILED;
		goto tmf_err;
	}
	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "device_reset: tgt not offloaded\n");
		rc = FAILED;
		goto tmf_err;
	}
retry_tmf:
	io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD);
	if (!io_req) {
		if (time_after(jiffies, start + HZ)) {
			printk(KERN_ERR PFX "tmf: Failed TMF\n");
			rc = FAILED;
			goto tmf_err;
		}
		msleep(20);
		goto retry_tmf;
	}
	/* Initialize rest of io_req fields */
	io_req->sc_cmd = sc_cmd;
	io_req->port = port;
	io_req->tgt = tgt;

	tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req);

	rc = bnx2fc_init_mp_req(io_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "Task mgmt MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		goto tmf_err;
	}

	/* Set TM flags */
	io_req->io_req_flags = 0;
	tm_req->tm_flags = tm_flags;

	/* Fill FCP_CMND */
	bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf);
	fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf;
	memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len);
	fcp_cmnd->fc_dl = 0;

	/* Fill FC header */
	fc_hdr = &(tm_req->req_fc_hdr);
	sid = tgt->sid;
	did = rport->port_id;
	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid,
			   FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);
	/* Obtain exchange id */
	xid = io_req->xid;

	BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid);
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(io_req, task);
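
	/* Stash the io_req in the scsi_cmnd so the EH and TM completion
	 * paths can look it up later via SCp.ptr.
	 */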
	sc_cmd->SCp.ptr = (char *)io_req;

	/* Obtain free SQ entry */
	spin_lock_bh(&tgt->tgt_lock);
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_tm_queue */
	io_req->on_tmf_queue = 1;
	list_add_tail(&io_req->link, &tgt->active_tm_queue);

	init_completion(&io_req->tm_done);
	io_req->wait_for_comp = 1;

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

	rc = wait_for_completion_timeout(&io_req->tm_done,
					 BNX2FC_TM_TIMEOUT * HZ);
	spin_lock_bh(&tgt->tgt_lock);

	io_req->wait_for_comp = 0;
	if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
		set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
		if (io_req->on_tmf_queue) {
			list_del_init(&io_req->link);
			io_req->on_tmf_queue = 0;
		}
		io_req->wait_for_comp = 1;
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = wait_for_completion_timeout(&io_req->tm_done,
						 BNX2FC_FW_TIMEOUT);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		if (!rc)
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}

	spin_unlock_bh(&tgt->tgt_lock);

	if (!rc) {
		BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n");
		rc = FAILED;
	} else {
		BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n");
		rc = SUCCESS;
	}
tmf_err:
	return rc;
}

int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport *rport = tgt->rport;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *abts_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct fc_frame_header *fc_hdr;
	struct bnx2fc_mp_req *abts_req;
	int task_idx, index;
	u32 sid, did;
	u16 xid;
	int rc = SUCCESS;
	u32 r_a_tov = rdata->r_a_tov;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n");
		rc = FAILED;
		goto abts_err;
	}

	if (rport == NULL) {
		printk(KERN_ERR PFX "initiate_abts: rport is NULL\n");
		rc = FAILED;
		goto abts_err;
	}

	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "initiate_abts: link is not ready\n");
		rc = FAILED;
		goto abts_err;
	}

	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
	if (!abts_io_req) {
		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
		rc = FAILED;
		goto abts_err;
	}

	/* Initialize rest of io_req fields */
	abts_io_req->sc_cmd = NULL;
	abts_io_req->port = port;
	abts_io_req->tgt = tgt;
	abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */

	abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req);
	memset(abts_req, 0, sizeof(struct bnx2fc_mp_req));

	/* Fill FC header */
	fc_hdr = &(abts_req->req_fc_hdr);

	/* Obtain oxid and rxid for the original exchange to be aborted */
	fc_hdr->fh_ox_id = htons(io_req->xid);
	fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id);

	sid = tgt->sid;
	did = rport->port_id;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid,
			   FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ |
			   FC_FC_SEQ_INIT, 0);

	xid = abts_io_req->xid;
	BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n");
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(abts_io_req, task);

	/*
	 * ABTS task is a temporary task that will be cleaned up
	 * irrespective of ABTS response. We need to start the timer
	 * for the original exchange, as the CQE is posted for the original
	 * IO request.
	 *
	 * Timer for ABTS is started only when it is originated by a
	 * TM request. For the ABTS issued as part of ULP timeout,
	 * scsi-ml maintains the timers.
	 */

	/* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/
	bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

abts_err:
	return rc;
}

int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
				enum fc_rctl r_ctl)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *seq_clnp_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	int task_idx, index;
	u16 xid;
	int rc = 0;

	BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n",
		   orig_io_req->xid);
	kref_get(&orig_io_req->refcount);

	port = orig_io_req->port;
	interface = port->priv;
	lport = port->lport;

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n");
		rc = -ENOMEM;
		goto cleanup_err;
	}

	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
	if (!seq_clnp_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -ENOMEM;
		kfree(cb_arg);
		goto cleanup_err;
	}
	/* Initialize rest of io_req fields */
	seq_clnp_req->sc_cmd = NULL;
	seq_clnp_req->port = port;
	seq_clnp_req->tgt = tgt;
	seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = seq_clnp_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
		     interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	cb_arg->aborted_io_req = orig_io_req;
	cb_arg->io_req = seq_clnp_req;
	cb_arg->r_ctl = r_ctl;
	cb_arg->offset = offset;
	seq_clnp_req->cb_arg = cb_arg;

	printk(KERN_ERR PFX "call init_seq_cleanup_task\n");
	bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
cleanup_err:
	return rc;
}

int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
{
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_interface *interface;
	struct fcoe_port *port;
	struct bnx2fc_cmd *cleanup_io_req;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int task_idx, index;
	u16 xid, orig_xid;
	int rc = 0;

	/* ASSUMPTION: called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n");

	port = io_req->port;
	interface = port->priv;
	lport = port->lport;

	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
	if (!cleanup_io_req) {
		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
		rc = -1;
		goto cleanup_err;
	}

	/* Initialize rest of io_req fields */
	cleanup_io_req->sc_cmd = NULL;
	cleanup_io_req->port = port;
	cleanup_io_req->tgt = tgt;
	cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */

	xid = cleanup_io_req->xid;

	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	orig_xid = io_req->xid;

	BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid);

	bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid);

	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);

cleanup_err:
	return rc;
}

/**
 * bnx2fc_eh_target_reset - Reset a target
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET);
}

/**
 * bnx2fc_eh_device_reset - Reset a single LUN
 *
 * @sc_cmd:	SCSI command
 *
 * Set from SCSI host template to send task mgmt command to the target
 * and wait for the response
 */
int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
}

int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct fc_rport_priv *rdata = tgt->rdata;
	int logo_issued;
	int rc = SUCCESS;
	int wait_cnt = 0;

	BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
		      tgt->flags);
	logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
				       &tgt->flags);
	io_req->wait_for_comp = 1;
	bnx2fc_initiate_cleanup(io_req);

	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	io_req->wait_for_comp = 0;
	/*
	 * release the reference taken in eh_abort to allow the
	 * target to re-login after flushing IOs
	 */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);

	if (!logo_issued) {
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		mutex_lock(&lport->disc.disc_mutex);
		lport->tt.rport_logoff(rdata);
		mutex_unlock(&lport->disc.disc_mutex);
		do {
			msleep(BNX2FC_RELOGIN_WAIT_TIME);
			if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
				rc = FAILED;
				break;
			}
		} while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
	}
	spin_lock_bh(&tgt->tgt_lock);
	return rc;
}

/**
 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
 *			SCSI command
 *
 * @sc_cmd:	SCSI_ML command pointer
 *
 * SCSI abort request handler
 */
int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_cmd *io_req;
	struct fc_lport *lport;
	struct bnx2fc_rport *tgt;
	int rc = FAILED;


	rc = fc_block_scsi_eh(sc_cmd);
	if (rc)
		return rc;

	lport = shost_priv(sc_cmd->device->host);
	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		printk(KERN_ERR PFX "eh_abort: link not ready\n");
		return rc;
	}

	tgt = (struct bnx2fc_rport *)&rp[1];

	BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n");

	spin_lock_bh(&tgt->tgt_lock);
	io_req = (struct bnx2fc_cmd *)sc_cmd->SCp.ptr;
	if (!io_req) {
		/* Command might have just completed */
		printk(KERN_ERR PFX "eh_abort: io_req is NULL\n");
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}
	BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n",
		      io_req->refcount.refcount.counter);

	/* Hold IO request across abort processing */
	kref_get(&io_req->refcount);

	BUG_ON(tgt != io_req->tgt);

	/* Remove the io_req from the active_q. */
	/*
	 * Task Mgmt functions (LUN RESET & TGT RESET) will not
	 * issue an ABTS on this particular IO req, as the
	 * io_req is no longer in the active_q.
	 */
	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
			"flush in progress\n", io_req->xid);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	if (io_req->on_active_queue == 0) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"not on active_q\n", io_req->xid);
		/*
		 * This condition can happen only due to a FW bug,
		 * where we do not receive cleanup response from
		 * the FW. Handle this case gracefully by erroring
		 * back the IO request to SCSI-ml
		 */
		bnx2fc_scsi_done(io_req, DID_ABORT);

		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return SUCCESS;
	}

	/*
	 * Only eh_abort processing will remove the IO from
	 * active_cmd_q before processing the request. This is
	 * done to avoid race conditions between IOs aborted
	 * as part of task management completion and eh_abort
	 * processing
	 */
	list_del_init(&io_req->link);
	io_req->on_active_queue = 0;
	/* Move IO req to retire queue */
	list_add_tail(&io_req->link, &tgt->io_retire_queue);

	init_completion(&io_req->tm_done);

	if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
				"already in abts processing\n", io_req->xid);
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		rc = bnx2fc_expl_logo(lport, io_req);
		/* This only occurs when a task abort was requested while ABTS
		   is in progress. Setting the IO_CLEANUP flag will skip the
		   RRQ process in the case when the fw generated SCSI_CMD cmpl
		   was a result from the ABTS request rather than the CLEANUP
		   request */
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
		goto out;
	}

	/* Cancel the current timer running on this io_req */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */
	set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
	io_req->wait_for_comp = 1;
	rc = bnx2fc_initiate_abts(io_req);
	if (rc == FAILED) {
		bnx2fc_initiate_cleanup(io_req);
		spin_unlock_bh(&tgt->tgt_lock);
		wait_for_completion(&io_req->tm_done);
		spin_lock_bh(&tgt->tgt_lock);
		io_req->wait_for_comp = 0;
		goto done;
	}
	spin_unlock_bh(&tgt->tgt_lock);

	wait_for_completion(&io_req->tm_done);

	spin_lock_bh(&tgt->tgt_lock);
	io_req->wait_for_comp = 0;
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "IO completed in a different context\n");
		rc = SUCCESS;
	} else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
				      &io_req->req_flags))) {
		/* Let the scsi-ml try to recover this command */
		printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
		       io_req->xid);
		rc = bnx2fc_expl_logo(lport, io_req);
		goto out;
	} else {
		/*
		 * We come here even when there was a race condition
		 * between timeout and abts completion, and abts
		 * completion happens just in time.
		 */
		BNX2FC_IO_DBG(io_req, "abort succeeded\n");
		rc = SUCCESS;
		bnx2fc_scsi_done(io_req, DID_ABORT);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
done:
	/* release the reference taken in eh_abort */
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
out:
	spin_unlock_bh(&tgt->tgt_lock);
	return rc;
}

void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req,
				      struct fcoe_task_ctx_entry *task,
				      u8 rx_state)
{
	struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg;
	struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req;
	u32 offset = cb_arg->offset;
	enum fc_rctl r_ctl = cb_arg->r_ctl;
	int rc = 0;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;

	BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x, "
			      "cmd_type = %d\n",
		   seq_clnp_req->xid, seq_clnp_req->cmd_type);

	if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) {
		printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n",
			seq_clnp_req->xid);
		goto free_cb_arg;
	}

	spin_unlock_bh(&tgt->tgt_lock);
	rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
	spin_lock_bh(&tgt->tgt_lock);

	if (rc)
		printk(KERN_ERR PFX "clnup_compl: Unable to send SRR"
			" IO will abort\n");
	seq_clnp_req->cb_arg = NULL;
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
free_cb_arg:
	kfree(cb_arg);
	return;
}

void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
				  struct fcoe_task_ctx_entry *task,
				  u8 num_rq)
{
	BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl "
			      "refcnt = %d, cmd_type = %d\n",
		   io_req->refcount.refcount.counter, io_req->cmd_type);
	bnx2fc_scsi_done(io_req, DID_ERROR);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp)
		complete(&io_req->tm_done);
}

void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
			       struct fcoe_task_ctx_entry *task,
			       u8 num_rq)
{
	u32 r_ctl;
	u32 r_a_tov = FC_DEF_R_A_TOV;
	u8 issue_rrq = 0;
	struct bnx2fc_rport *tgt = io_req->tgt;

	BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x, "
			      "refcnt = %d, cmd_type = %d\n",
		   io_req->xid,
		   io_req->refcount.refcount.counter, io_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE,
			     &io_req->req_flags)) {
		BNX2FC_IO_DBG(io_req, "Timer context finished processing"
				" this io\n");
		return;
	}

	/* Do not issue RRQ as this IO is already cleaned up */
	if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP,
				&io_req->req_flags))
		goto io_compl;

	/*
	 * For ABTS issued due to SCSI eh_abort_handler, timeout
	 * values are maintained by scsi-ml itself. Cancel timeout
	 * in case ABTS issued as part of task management function
	 * or due to FW error.
	 */
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))
		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

	r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl;

	switch (r_ctl) {
	case FC_RCTL_BA_ACC:
		/*
		 * Don't release this cmd yet. It will be released
		 * after we get the RRQ response.
		 */
		BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n");
		issue_rrq = 1;
		break;

	case FC_RCTL_BA_RJT:
		BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n");
		break;
	default:
		printk(KERN_ERR PFX "Unknown ABTS response\n");
		break;
	}

	if (issue_rrq) {
		BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n");
		set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}
	set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags);
	bnx2fc_cmd_timer_set(io_req, r_a_tov);

io_compl:
	if (io_req->wait_for_comp) {
		if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
				       &io_req->req_flags))
			complete(&io_req->tm_done);
	} else {
		/*
		 * We end up here when ABTS is issued in an
		 * asynchronous context, i.e. as part of task
		 * management completion, when a FW error is
		 * received, or when the ABTS is issued because
		 * the IO timed out.
		 */

		if (io_req->on_active_queue) {
			list_del_init(&io_req->link);
			io_req->on_active_queue = 0;
			/* Move IO req to retire queue */
			list_add_tail(&io_req->link, &tgt->io_retire_queue);
		}
		bnx2fc_scsi_done(io_req, DID_ERROR);
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
	}
}

static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	u64 tm_lun = sc_cmd->device->lun;
	u64 lun;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT the IO
	 * that matches with the LUN that was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
		lun = cmd->sc_cmd->device->lun;
		if (lun == tm_lun) {
			/* Initiate ABTS on this cmd */
			if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
					      &cmd->req_flags)) {
				/* cancel the IO timeout */
				if (cancel_delayed_work(&io_req->timeout_work))
					kref_put(&io_req->refcount,
						 bnx2fc_cmd_release);
							/* timer hold */
				rc = bnx2fc_initiate_abts(cmd);
				/* abts shouldn't fail in this context */
				WARN_ON(rc != SUCCESS);
			} else
				printk(KERN_ERR PFX "lun_rst: abts already in"
					" progress for this IO 0x%x\n",
					cmd->xid);
		}
	}
}

static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct bnx2fc_cmd *cmd, *tmp;
	int rc = 0;

	/* called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n");
	/*
	 * Walk through the active_ios queue and ABORT every IO,
	 * as the whole target was reset
	 */
	list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
		BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
		/* Initiate ABTS */
		if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
				      &cmd->req_flags)) {
			/* cancel the IO timeout */
			if (cancel_delayed_work(&io_req->timeout_work))
				kref_put(&io_req->refcount,
					 bnx2fc_cmd_release); /* timer hold */
			rc = bnx2fc_initiate_abts(cmd);
			/* abts shouldn't fail in this context */
			WARN_ON(rc != SUCCESS);

		} else
			printk(KERN_ERR PFX "tgt_rst: abts already in progress"
				" for this IO 0x%x\n", cmd->xid);
	}
}

void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
			     struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *tm_req;
	struct fc_frame_header *fc_hdr;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	u64 *hdr;
	u64 *temp_hdr;
	void *rsp_buf;

	/* Called with tgt_lock held */
	BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n");

	if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags)))
		set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags);
	else {
		/* TM has already timed out and we got
		 * delayed completion. Ignore completion
		 * processing.
		 */
		return;
	}

	tm_req = &(io_req->mp_req);
	fc_hdr = &(tm_req->resp_fc_hdr);
	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	tm_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	rsp_buf = tm_req->resp_buf;

	if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
		bnx2fc_parse_fcp_rsp(io_req,
				     (struct fcoe_fcp_rsp_payload *)
				     rsp_buf, num_rq);
		if (io_req->fcp_rsp_code == 0) {
			/* TM successful */
			if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
				bnx2fc_lun_reset_cmpl(io_req);
			else if (tm_req->tm_flags & FCP_TMF_TGT_RESET)
				bnx2fc_tgt_reset_cmpl(io_req);
		}
	} else {
		printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n",
			fc_hdr->fh_r_ctl);
	}
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "tm_compl: SCp.ptr is NULL\n");
		return;
	}
	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;

	default:
		BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n",
			   io_req->fcp_status);
		break;
	}

	sc_cmd = io_req->sc_cmd;
	io_req->sc_cmd = NULL;

	/* check if the io_req exists in tgt's tmf_q */
	if (io_req->on_tmf_queue) {

		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
	} else {

		printk(KERN_ERR PFX "Command not on active_cmd_queue!\n");
		return;
	}

	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);

	kref_put(&io_req->refcount, bnx2fc_cmd_release);
	if (io_req->wait_for_comp) {
		BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n");
		complete(&io_req->tm_done);
	}
}

static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len,
			   int bd_index)
{
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int frag_size, sg_frags;

	sg_frags = 0;
	while (sg_len) {
		if (sg_len >= BNX2FC_BD_SPLIT_SZ)
			frag_size = BNX2FC_BD_SPLIT_SZ;
		else
			frag_size = sg_len;
		bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff;
		bd[bd_index + sg_frags].buf_addr_hi = addr >> 32;
		bd[bd_index + sg_frags].buf_len = (u16)frag_size;
		bd[bd_index + sg_frags].flags = 0;

		addr += (u64) frag_size;
		sg_frags++;
		sg_len -= frag_size;
	}
	return sg_frags;

}

static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req)
{
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int sg_count = 0;
	int bd_count = 0;
	int sg_frags;
	unsigned int sg_len;
	u64 addr;
	int i;

	/*
	 * Use dma_map_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);
	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = sg_dma_address(sg);
		if (sg_len > BNX2FC_MAX_BD_LEN) {
			sg_frags = bnx2fc_split_bd(io_req, addr, sg_len,
						   bd_count);
		} else {

			sg_frags = 1;
			bd[bd_count].buf_addr_lo = addr & 0xffffffff;
			bd[bd_count].buf_addr_hi = addr >> 32;
			bd[bd_count].buf_len = (u16)sg_len;
			bd[bd_count].flags = 0;
		}
		bd_count += sg_frags;
		byte_count += sg_len;
	}
	if (byte_count != scsi_bufflen(sc))
		printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, "
			"task_id = 0x%x\n", byte_count, scsi_bufflen(sc),
			io_req->xid);
	return bd_count;
}

static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl;
	int bd_count;

	if (scsi_sg_count(sc)) {
		bd_count = bnx2fc_map_sg(io_req);
		if (bd_count == 0)
			return -ENOMEM;
	} else {
		bd_count = 0;
		bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0;
		bd[0].buf_len = bd[0].flags = 0;
	}
	io_req->bd_tbl->bd_valid = bd_count;

	return 0;
}

static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
{
	struct scsi_cmnd *sc = io_req->sc_cmd;
	struct bnx2fc_interface *interface = io_req->port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	/*
	 * Use dma_unmap_sg directly to ensure we're using the correct
	 * dev struct off of pcidev.
	 */
	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
			     scsi_sg_count(sc), sc->sc_data_direction);
		io_req->bd_tbl->bd_valid = 0;
	}
}

void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
			   struct fcp_cmnd *fcp_cmnd)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	char tag[2];

	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));

	int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun);

	fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
	memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len);

	fcp_cmnd->fc_cmdref = 0;
	fcp_cmnd->fc_pri_ta = 0;
	fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
	fcp_cmnd->fc_flags = io_req->io_req_flags;

	if (scsi_populate_tag_msg(sc_cmd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_HEADQ;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fc_pri_ta = FCP_PTA_ORDERED;
			break;
		default:
			fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
			break;
		}
	} else {
		fcp_cmnd->fc_pri_ta = 0;
	}
}

static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
				 struct fcoe_fcp_rsp_payload *fcp_rsp,
				 u8 num_rq)
{
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct bnx2fc_rport *tgt = io_req->tgt;
	u8 rsp_flags = fcp_rsp->fcp_flags.flags;
	u32 rq_buff_len = 0;
	int i;
	unsigned char *rq_data;
	unsigned char *dummy;
	int fcp_sns_len = 0;
	int fcp_rsp_len = 0;

	io_req->fcp_status = FC_GOOD;
	io_req->fcp_resid = fcp_rsp->fcp_resid;

	io_req->scsi_comp_flags = rsp_flags;
	CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
				  fcp_rsp->scsi_status_code;

	/* Fetch fcp_rsp_info and fcp_sns_info if available */
	if (num_rq) {

		/*
		 * We do not anticipate num_rq > 1, as the Linux-defined
		 * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO;
		 * a single 256-byte RQ buffer is good enough to hold this.
		 */

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) {
			fcp_rsp_len = rq_buff_len
					= fcp_rsp->fcp_rsp_len;
		}

		if (rsp_flags &
		    FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) {
			fcp_sns_len = fcp_rsp->fcp_sns_len;
			rq_buff_len += fcp_rsp->fcp_sns_len;
		}

		io_req->fcp_rsp_len = fcp_rsp_len;
		io_req->fcp_sns_len = fcp_sns_len;

		if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) {
			/* Invalid sense length. */
			printk(KERN_ERR PFX "invalid sns length %d\n",
				rq_buff_len);
			/* reset rq_buff_len */
			rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
		}

		rq_data = bnx2fc_get_next_rqe(tgt, 1);

		if (num_rq > 1) {
			/* We do not need extra sense data */
			for (i = 1; i < num_rq; i++)
				dummy = bnx2fc_get_next_rqe(tgt, 1);
		}

		/* fetch fcp_rsp_code */
		if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
			/* Only for task management function */
			io_req->fcp_rsp_code = rq_data[3];
			printk(KERN_ERR PFX "fcp_rsp_code = %d\n",
				io_req->fcp_rsp_code);
		}

		/* fetch sense data */
		rq_data += fcp_rsp_len;

		if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) {
			printk(KERN_ERR PFX "Truncating sense buffer\n");
			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
		}

		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (fcp_sns_len)
			memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);

		/* return RQ entries */
		for (i = 0; i < num_rq; i++)
			bnx2fc_return_rqe(tgt, 1);
	}
}

/**
 * bnx2fc_queuecommand - Queuecommand function of the scsi template
 *
 * @host:	The Scsi_Host the command was issued to
 * @sc_cmd:	struct scsi_cmnd to be executed
 *
 * This is the IO strategy routine, called by SCSI-ML
 **/
int bnx2fc_queuecommand(struct Scsi_Host *host,
			struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport = shost_priv(host);
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_cmd *io_req;
	int rc = 0;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		sc_cmd->scsi_done(sc_cmd);
		return 0;
	}

	if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd;
	}

	/* rport and tgt are allocated together, so tgt should be non-NULL */
	tgt = (struct bnx2fc_rport *)&rp[1];

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		/*
		 * Session is not offloaded yet. Let SCSI-ml retry
		 * the command.
		 */
		rc = SCSI_MLQUEUE_TARGET_BUSY;
		goto exit_qcmd;
	}
	if (tgt->retry_delay_timestamp) {
		if (time_after(jiffies, tgt->retry_delay_timestamp)) {
			tgt->retry_delay_timestamp = 0;
		} else {
			/* If retry_delay timer is active, flow off the ML */
			rc = SCSI_MLQUEUE_TARGET_BUSY;
			goto exit_qcmd;
		}
	}

	spin_lock_bh(&tgt->tgt_lock);

	io_req = bnx2fc_cmd_alloc(tgt);
	if (!io_req) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}
	io_req->sc_cmd = sc_cmd;

	if (bnx2fc_post_io_req(tgt, io_req)) {
		printk(KERN_ERR PFX "Unable to post io_req\n");
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto exit_qcmd_tgtlock;
	}

exit_qcmd_tgtlock:
	spin_unlock_bh(&tgt->tgt_lock);
exit_qcmd:
	return rc;
}

void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
				   struct fcoe_task_ctx_entry *task,
				   u8 num_rq)
{
	struct fcoe_fcp_rsp_payload *fcp_rsp;
	struct bnx2fc_rport *tgt = io_req->tgt;
	struct scsi_cmnd *sc_cmd;
	struct Scsi_Host *host;


	/* scsi_cmd_cmpl is called with tgt lock held */

	if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) {
		/* we will not receive ABTS response for this IO */
		BNX2FC_IO_DBG(io_req, "Timer context finished processing "
			   "this scsi cmd\n");
	}

	/* Cancel the timeout_work, as we received IO completion */
	if (cancel_delayed_work(&io_req->timeout_work))
		kref_put(&io_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	sc_cmd = io_req->sc_cmd;
	if (sc_cmd == NULL) {
		printk(KERN_ERR PFX "scsi_cmd_compl - sc_cmd is NULL\n");
		return;
	}

	/* Fetch fcp_rsp from task context and perform cmd completion */
	fcp_rsp = (struct fcoe_fcp_rsp_payload *)
		   &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);

	/* parse fcp_rsp and obtain sense data from RQ if available */
	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);

	host = sc_cmd->device->host;
	if (!sc_cmd->SCp.ptr) {
		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
		return;
	}

	if (io_req->on_active_queue) {
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		/* Move IO req to retire queue */
		list_add_tail(&io_req->link, &tgt->io_retire_queue);
	} else {
		/* This should not happen, but could have been pulled
		 * by bnx2fc_flush_active_ios(), or during a race
		 * between command abort and (late) completion.
		 */
		BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n");
		if (io_req->wait_for_comp)
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags))
				complete(&io_req->tm_done);
	}

	bnx2fc_unmap_sg_list(io_req);
	io_req->sc_cmd = NULL;

	switch (io_req->fcp_status) {
	case FC_GOOD:
		if (io_req->cdb_status == 0) {
			/* Good IO completion */
			sc_cmd->result = DID_OK << 16;
		} else {
			/* Transport status is good, SCSI status not good */
			BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d"
				 " fcp_resid = 0x%x\n",
				io_req->cdb_status, io_req->fcp_resid);
			sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;

			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
			    io_req->cdb_status == SAM_STAT_BUSY) {
				/* Set the jiffies + retry_delay_timer * 100ms
				   for the rport/tgt */
				tgt->retry_delay_timestamp = jiffies +
					fcp_rsp->retry_delay_timer * HZ / 10;
			}

		}
		if (io_req->fcp_resid)
			scsi_set_resid(sc_cmd, io_req->fcp_resid);
		break;
	default:
		printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n",
			io_req->fcp_status);
		break;
	}
	sc_cmd->SCp.ptr = NULL;
	sc_cmd->scsi_done(sc_cmd);
	kref_put(&io_req->refcount, bnx2fc_cmd_release);
}

int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
		       struct bnx2fc_cmd *io_req)
{
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_lport *lport = port->lport;
	struct fc_stats *stats;
	int task_idx, index;
	u16 xid;

	/* bnx2fc_post_io_req() is called with the tgt_lock held */

	/* Initialize rest of io_req fields */
	io_req->cmd_type = BNX2FC_SCSI_CMD;
	io_req->port = port;
	io_req->tgt = tgt;
	io_req->data_xfer_len = scsi_bufflen(sc_cmd);
	sc_cmd->SCp.ptr = (char *)io_req;

	stats = per_cpu_ptr(lport->stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		io_req->io_req_flags = BNX2FC_READ;
		stats->InputRequests++;
		stats->InputBytes += io_req->data_xfer_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		io_req->io_req_flags = BNX2FC_WRITE;
		stats->OutputRequests++;
		stats->OutputBytes += io_req->data_xfer_len;
	} else {
		io_req->io_req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	xid = io_req->xid;

	/* Build buffer descriptor list for firmware from sg list */
	if (bnx2fc_build_bd_list_from_sg(io_req)) {
		printk(KERN_ERR PFX "BD list creation failed\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	task_idx = xid / BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_task(io_req, task);

	if (tgt->flush_in_prog) {
		printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "Session not ready...post_io\n");
		kref_put(&io_req->refcount, bnx2fc_cmd_release);
		return -EAGAIN;
	}

	/* Time IO req */
	if (tgt->io_timeout)
		bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT);
	/* Obtain free SQ entry */
	bnx2fc_add_2_sq(tgt, xid);

	/* Enqueue the io_req to active_cmd_queue */

	io_req->on_active_queue = 1;
	/* move io_req from pending_queue to active_queue */
	list_add_tail(&io_req->link, &tgt->active_cmd_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	return 0;
}