/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};

struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;            /* Reserved on Tavor */
	u8     sq_size_stride;            /* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;   /* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;            /* Next send WQE on Tavor */
	__be32 snd_db_index;              /* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;            /* Next recv WQE on Tavor */
	__be32 rcv_db_index;              /* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;            /* reserved on Tavor */
	__be16 sq_wqe_counter;            /* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));

enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};

static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}

static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}

static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}

static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}

static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}

int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state                = to_ib_qp_state(mthca_state);
	qp_attr->qp_state        = qp->state;
	qp_attr->path_mtu        = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state  =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey            = be32_to_cpu(context->qkey);
	qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout     = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt   = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry   = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap         = qp_attr->cap;
	qp_init_attr->sq_sig_type = qp->sq_policy;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len-1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}

static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |=
			get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}

int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}

static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}

static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;

	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}

static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
					      qp->rq.wqe_shift) | 1);
		}

	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}

static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}

int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}

static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}

void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}

/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}

static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}

static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}

}

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);

}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}

int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, wr);
			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0 = mthca_opcode[wr->opcode];
			f0 = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}

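/*
 * Post receive work requests on an Arbel (mem-free) mode QP.  No MMIO
 * doorbell is needed here: new receive WQEs are advertised only
 * through the doorbell record (*qp->rq.db), which is updated once
 * after the whole chain has been written, with a wmb() in between so
 * the descriptor writes are visible before the record update.
 */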
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}

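/*
 * Used when flushing a WQE that completed in error: report through
 * *dbd whether the WQE's next segment has the DBD bit set, and build
 * in *new_wqe the address/size word of the following WQE in the chain
 * (or 0 if the chain ends here).  Receive WQEs on an SRQ always
 * generate a CQE, so for those we are always at the end of the chain.
 */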
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}

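/*
 * Set up the driver's QP table: reserve the QPN range for the special
 * (SMI/GSI) QPs just above the firmware-reserved QPs, rounded up so
 * the port 1 special QP number is even, then initialize the QPN
 * allocator and QP array and configure the special QPs in firmware
 * via CONF_SPECIAL_QP.
 */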
int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}

void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}