/drivers/infiniband/hw/cxgb3/
iwch_qp.c
     42: static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr, (argument)
     51: wqe->send.rdmaop = T3_SEND_WITH_SE;
     53: wqe->send.rdmaop = T3_SEND;
     54: wqe->send.rem_stag = 0;
     58: wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
     60: wqe->send.rdmaop = T3_SEND_WITH_INV;
     61: wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
     68: wqe->send.reserved[0] = 0;
     69: wqe->send.reserved[1] = 0;
     70: wqe …
     87: build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) (argument)
    128: build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) (argument)
    149: build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) (argument)
    191: build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt) (argument)
    247: build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) (argument)
    287: build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe, struct ib_recv_wr *wr) (argument)
    359: union t3_wr *wqe; (local)
    471: union t3_wr *wqe; (local)
    535: union t3_wr *wqe; (local)
    744: union t3_wr *wqe; (local)
    776: union t3_wr *wqe; (local)
    884: union t3_wr *wqe = qhp->wq.queue; (local)
    … (more matches)
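The build_rdma_send() matches above show the usual opcode selection for a T3 send: a solicited send picks the *_WITH_SE variant, and a send-with-invalidate also carries the rkey to invalidate, byte-swapped into wire order. A minimal userspace sketch of that branch structure follows; the enum values, field names, and htonl() as a stand-in for cpu_to_be32() are assumptions for illustration, not the cxgb3 ABI.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl() standing in for cpu_to_be32() */

enum { T3_SEND, T3_SEND_WITH_SE, T3_SEND_WITH_INV, T3_SEND_WITH_SE_INV };

struct send_wqe {
        uint8_t  rdmaop;        /* stand-in for wqe->send.rdmaop */
        uint32_t rem_stag;      /* stand-in for wqe->send.rem_stag (big-endian) */
};

static void build_send(struct send_wqe *wqe, int solicited, int with_inv,
                       uint32_t invalidate_rkey)
{
        if (!with_inv) {
                wqe->rdmaop = solicited ? T3_SEND_WITH_SE : T3_SEND;
                wqe->rem_stag = 0;
        } else {
                wqe->rdmaop = solicited ? T3_SEND_WITH_SE_INV : T3_SEND_WITH_INV;
                wqe->rem_stag = htonl(invalidate_rkey);
        }
}

int main(void)
{
        struct send_wqe wqe;

        build_send(&wqe, 1, 1, 0x1234);
        printf("rdmaop=%d rem_stag=0x%08x\n", wqe.rdmaop, (unsigned)wqe.rem_stag);
        return 0;
}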
cxio_hal.c
    140: struct t3_modify_qp_wr *wqe; (local)
    141: struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
    146: wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
    147: memset(wqe, 0, sizeof(*wqe));
    148: build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
    151: wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
    153: wqe->sge_cmd = cpu_to_be64(sge_cmd);
    519: struct t3_modify_qp_wr *wqe; (local)
    604: __be64 *wqe; (local)
    836: struct t3_rdma_init_wr *wqe; (local)
    … (more matches)
cxio_dbg.c
    111: void cxio_dump_wqe(union t3_wr *wqe) (argument)
    113: __be64 *data = (__be64 *)wqe;
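cxio_dump_wqe() walks the WQE as raw big-endian 64-bit words, which is handy when a queue entry looks corrupt. A rough userspace equivalent, assuming a caller-supplied word count and the GCC/Clang __builtin_bswap64():

#include <stdint.h>
#include <stdio.h>

static void dump_wqe(const uint64_t *data, int nwords)
{
        int i;

        for (i = 0; i < nwords; i++)
                printf("wqe[%d]: 0x%016llx\n", i,
                       (unsigned long long)__builtin_bswap64(data[i]));
}

int main(void)
{
        uint64_t wqe[4] = { 0x0102030405060708ULL, 0xdeadbeefULL, 0, 0 };

        dump_wqe(wqe, 4);
        return 0;
}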

/drivers/infiniband/hw/ipath/
ipath_ruc.c
    123: int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe, (argument)
    130: for (i = j = 0; i < wqe->num_sge; i++) {
    131: if (wqe->sg_list[i].length == 0)
    135: &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
    137: *lengthp += wqe->sg_list[i].length;
    146: wc.wr_id = wqe->wr_id;
    172: struct ipath_rwqe *wqe; (local)
    205: wqe = get_rwqe_ptr(rq, tail);
    211: } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
    212: qp->r_wr_id = wqe …
    264: struct ipath_swqe *wqe; (local)
    699: ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe, enum ib_wc_status status) (argument)
    … (more matches)
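ipath_init_sge() validates a receive WQE's scatter list before use: zero-length SGEs are skipped, every remaining entry must pass an lkey check, and the lengths are summed so the caller knows how much payload fits. A userspace model of that loop; check_lkey() is a made-up stand-in for ipath_lkey_ok():

#include <stdint.h>
#include <stdio.h>

struct sge {
        uint32_t lkey;
        uint32_t length;
};

/* stand-in for ipath_lkey_ok(): pretend lkey 0 is invalid */
static int check_lkey(const struct sge *sge)
{
        return sge->lkey != 0;
}

static int init_sge(const struct sge *sg_list, int num_sge, uint32_t *lengthp)
{
        int i;

        *lengthp = 0;
        for (i = 0; i < num_sge; i++) {
                if (sg_list[i].length == 0)
                        continue;       /* zero-length SGEs are legal; skip */
                if (!check_lkey(&sg_list[i]))
                        return 0;       /* bad lkey: caller flushes with a WC error */
                *lengthp += sg_list[i].length;
        }
        return 1;
}

int main(void)
{
        struct sge sg[3] = { { 1, 100 }, { 2, 0 }, { 3, 50 } };
        uint32_t len;

        printf("ok=%d len=%u\n", init_sge(sg, 3, &len), (unsigned)len);
        return 0;
}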
ipath_rc.c
     42: static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe, (argument)
     47: len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
     48: ss->sge = wqe->sg_list[0];
     49: ss->sg_list = wqe->sg_list + 1;
     50: ss->num_sge = wqe->wr.num_sge;
     52: return wqe->length - len;
     58: * @wqe: the work queue to initialize the QP's SGE from
     62: static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe) (argument)
     66: qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
    218: struct ipath_swqe *wqe; (local)
    727: struct ipath_swqe *wqe = get_swqe_ptr(qp, n); (local)
    807: struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last); (local)
    863: struct ipath_swqe *wqe; (local)
   1125: struct ipath_swqe *wqe; (local)
    … (more matches)
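restart_sge() converts a retransmit PSN back into a byte offset: the 24-bit masked PSN delta times the path MTU gives the bytes already sent, and the WQE length minus that is what still needs to go. A worked example with invented numbers, including a PSN wrap:

#include <stdint.h>
#include <stdio.h>

#define PSN_MASK 0xFFFFFF       /* PSNs are 24-bit and wrap */

int main(void)
{
        uint32_t wqe_psn = 0xFFFFFE;    /* first PSN of the WQE */
        uint32_t psn     = 0x000002;    /* PSN to restart from (wrapped) */
        uint32_t pmtu    = 2048;        /* bytes per packet */
        uint32_t wqe_len = 65536;       /* total WQE length */

        /* (0x2 - 0xFFFFFE) & 0xFFFFFF == 4 packets == 8192 bytes */
        uint32_t done = ((psn - wqe_psn) & PSN_MASK) * pmtu;

        printf("bytes done=%u, remaining=%u\n",
               (unsigned)done, (unsigned)(wqe_len - done));
        return 0;
}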
ipath_uc.c
     49: struct ipath_swqe *wqe; (local)
     70: wqe = get_swqe_ptr(qp, qp->s_last);
     71: ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
     84: wqe = get_swqe_ptr(qp, qp->s_cur);
     97: qp->s_psn = wqe->psn = qp->s_next_psn;
     98: qp->s_sge.sge = wqe->sg_list[0];
     99: qp->s_sge.sg_list = wqe->sg_list + 1;
    100: qp->s_sge.num_sge = wqe->wr.num_sge;
    101: qp->s_len = len = wqe->length;
    102: switch (wqe…
    … (more matches)
ipath_ud.c
     61: struct ipath_rwqe *wqe; (local)
    133: wqe = get_rwqe_ptr(rq, tail);
    135: if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
    149: wc.wr_id = wqe->wr_id;
    246: struct ipath_swqe *wqe; (local)
    269: wqe = get_swqe_ptr(qp, qp->s_last);
    270: ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    277: wqe = get_swqe_ptr(qp, qp->s_cur);
    283: ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
    306: ipath_ud_loopback(qp, wqe);
    … (more matches)
ipath_srq.c
     57: struct ipath_rwqe *wqe; (local)
     79: wqe = get_rwqe_ptr(&srq->rq, wq->head);
     80: wqe->wr_id = wr->wr_id;
     81: wqe->num_sge = wr->num_sge;
     83: wqe->sg_list[i] = wr->sg_list[i];
    286: struct ipath_rwqe *wqe; (local)
    289: wqe = get_rwqe_ptr(&srq->rq, tail);
    290: p->wr_id = wqe->wr_id;
    291: p->num_sge = wqe->num_sge;
    292: for (i = 0; i < wqe…
    … (more matches)
ipath_verbs.c
    338: struct ipath_swqe *wqe; (local)
    397: wqe = get_swqe_ptr(qp, qp->s_head);
    398: wqe->wr = *wr;
    399: wqe->length = 0;
    409: ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
    413: wqe->length += length;
    416: wqe->wr.num_sge = j;
    420: if (wqe->length > 0x80000000U)
    422: } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
    424: wqe …
    490: struct ipath_rwqe *wqe; (local)
    … (more matches)

/drivers/infiniband/hw/cxgb4/
qp.c
     434: static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe, (argument)
     446: wqe->send.sendop_pkd = cpu_to_be32(
     449: wqe->send.sendop_pkd = cpu_to_be32(
     451: wqe->send.stag_inv = 0;
     455: wqe->send.sendop_pkd = cpu_to_be32(
     458: wqe->send.sendop_pkd = cpu_to_be32(
     460: wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
     466: wqe->send.r3 = 0;
     467: wqe->send.r4 = 0;
     472: ret = build_immd(sq, wqe…
     501: build_rdma_write(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) (argument)
     544: build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) (argument)
     573: build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, struct ib_recv_wr *wr, u8 *len16) (argument)
     588: build_fastreg(struct t4_sq *sq, union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16, u8 t5dev) (argument)
     660: build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) (argument)
     732: union t4_wr *wqe = NULL; (local)
     858: union t4_recv_wr *wqe = NULL; (local)
    1071: struct fw_ri_wr *wqe; (local)
    1192: struct fw_ri_wr *wqe; (local)
    1253: struct fw_ri_wr *wqe; (local)
    … (more matches)
t4.h
    108: static inline void init_wr_hdr(union t4_wr *wqe, u16 wrid, (argument)
    111: wqe->send.opcode = (u8)opcode;
    112: wqe->send.flags = flags;
    113: wqe->send.wrid = wrid;
    114: wqe->send.r1[0] = 0;
    115: wqe->send.r1[1] = 0;
    116: wqe->send.r1[2] = 0;
    117: wqe->send.len16 = len16;
    454: t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, union t4_wr *wqe) (argument)
    455: union t4_wr *wqe)
    461: if (inc == 1 && wqe) {
    478: t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, union t4_recv_wr *wqe) (argument)
    … (more matches)
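init_wr_hdr() fills the common T4 work-request header; the hardware consumes WRs in 16-byte units, so the header carries a len16 count rather than a byte length. A compact sketch of the pattern; the field layout and the bytes_to_len16() helper are invented for the example and do not match the real fw_ri structures:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct wr_hdr {
        uint8_t  opcode;
        uint8_t  flags;
        uint16_t wrid;
        uint8_t  r1[3];         /* reserved, must be zero */
        uint8_t  len16;         /* WR size in 16-byte units */
};

static inline uint8_t bytes_to_len16(size_t bytes)
{
        return (uint8_t)((bytes + 15) / 16);    /* round up to 16B units */
}

static void init_wr_hdr(struct wr_hdr *h, uint16_t wrid, uint8_t opcode,
                        uint8_t flags, uint8_t len16)
{
        h->opcode = opcode;
        h->flags = flags;
        h->wrid = wrid;
        h->r1[0] = h->r1[1] = h->r1[2] = 0;
        h->len16 = len16;
}

int main(void)
{
        struct wr_hdr h;

        init_wr_hdr(&h, 7, 0x04, 0, bytes_to_len16(40));
        printf("wrid=%u len16=%u\n", (unsigned)h.wrid, (unsigned)h.len16);
        return 0;
}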

/drivers/infiniband/hw/qib/
qib_rc.c
      43: static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, (argument)
      48: len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
      49: ss->sge = wqe->sg_list[0];
      50: ss->sg_list = wqe->sg_list + 1;
      51: ss->num_sge = wqe->wr.num_sge;
      52: ss->total_len = wqe->length;
      54: return wqe->length - len;
     236: struct qib_swqe *wqe; (local)
     273: wqe = get_swqe_ptr(qp, qp->s_last);
     274: qib_send_complete(qp, wqe, q…
     786: struct qib_swqe *wqe = get_swqe_ptr(qp, n); (local)
     870: struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); (local)
     946: struct qib_swqe *wqe; (local)
     972: struct qib_swqe *wqe; (local)
    1054: do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp) (argument)
    1132: struct qib_swqe *wqe; (local)
    1353: struct qib_swqe *wqe; (local)
    1405: struct qib_swqe *wqe; (local)
    … (more matches)
qib_ruc.c
     81: static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe) (argument)
     94: for (i = j = 0; i < wqe->num_sge; i++) {
     95: if (wqe->sg_list[i].length == 0)
     99: &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
    101: qp->r_len += wqe->sg_list[i].length;
    117: wc.wr_id = wqe->wr_id;
    144: struct qib_rwqe *wqe; (local)
    176: wqe = get_rwqe_ptr(rq, tail);
    185: if (!wr_id_only && !qib_init_sge(qp, wqe)) {
    189: qp->r_wr_id = wqe…
    359: struct qib_swqe *wqe; (local)
    770: qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, enum ib_wc_status status) (argument)
    … (more matches)
qib_uc.c
     49: struct qib_swqe *wqe; (local)
     70: wqe = get_swqe_ptr(qp, qp->s_last);
     71: qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
     84: wqe = get_swqe_ptr(qp, qp->s_cur);
     97: wqe->psn = qp->s_next_psn;
     99: qp->s_sge.sge = wqe->sg_list[0];
    100: qp->s_sge.sg_list = wqe->sg_list + 1;
    101: qp->s_sge.num_sge = wqe->wr.num_sge;
    102: qp->s_sge.total_len = wqe->length;
    103: len = wqe…
    … (more matches)
qib_ud.c
    241: struct qib_swqe *wqe; (local)
    264: wqe = get_swqe_ptr(qp, qp->s_last);
    265: qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
    272: wqe = get_swqe_ptr(qp, qp->s_cur);
    280: ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
    303: qib_ud_loopback(qp, wqe);
    305: qib_send_complete(qp, wqe, IB_WC_SUCCESS);
    311: extra_bytes = -wqe->length & 3;
    312: nwords = (wqe->length + extra_bytes) >> 2;
    316: qp->s_cur_size = wqe…
    … (more matches)
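Lines 311-312 use a compact idiom for padding to a 4-byte boundary: for an unsigned length, -len & 3 is exactly the number of pad bytes needed. A quick check over a few lengths:

#include <stdio.h>

int main(void)
{
        unsigned len;

        for (len = 0; len < 8; len++) {
                unsigned extra = -len & 3;              /* pad bytes to a multiple of 4 */
                unsigned nwords = (len + extra) >> 2;   /* payload in 32-bit words */

                printf("len=%u pad=%u words=%u\n", len, extra, nwords);
        }
        return 0;
}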
qib_srq.c
     57: struct qib_rwqe *wqe; (local)
     79: wqe = get_rwqe_ptr(&srq->rq, wq->head);
     80: wqe->wr_id = wr->wr_id;
     81: wqe->num_sge = wr->num_sge;
     83: wqe->sg_list[i] = wr->sg_list[i];
    282: struct qib_rwqe *wqe; (local)
    285: wqe = get_rwqe_ptr(&srq->rq, tail);
    286: p->wr_id = wqe->wr_id;
    287: p->num_sge = wqe->num_sge;
    288: for (i = 0; i < wqe…
    … (more matches)

/drivers/infiniband/hw/mthca/
mthca_srq.c
     90: static inline int *wqe_to_link(void *wqe) (argument)
     92: return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
    151: void *wqe; (local)
    178: next = wqe = get_wqe(srq, i);
    181: *wqe_to_link(wqe) = i + 1;
    184: *wqe_to_link(wqe) = -1;
    188: for (scatter = wqe + sizeof (struct mthca_next_seg);
    189: (void *) scatter < wqe + (1 << srq->wqe_shift);
    487: void *wqe; (local)
    496: wqe…
    586: void *wqe; (local)
    … (more matches)
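wqe_to_link() is a free-list trick: while an SRQ WQE is idle its immediate-data field is dead storage, so the driver overlays the index of the next free entry there and needs no separate allocation for the free list. A userspace model of the same overlay; the struct layout is illustrative, not the real mthca_next_seg:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct next_seg {
        uint32_t nda_op;
        uint32_t ee_nds;
        uint32_t flags;
        uint32_t imm;           /* dead while the WQE sits on the free list */
};

static inline int *wqe_to_link(void *wqe)
{
        return (int *)((char *)wqe + offsetof(struct next_seg, imm));
}

int main(void)
{
        struct next_seg srq[4];
        int i;

        for (i = 0; i < 3; i++)
                *wqe_to_link(&srq[i]) = i + 1;  /* chain i -> i+1 */
        *wqe_to_link(&srq[3]) = -1;             /* end of free list */

        for (i = *wqe_to_link(&srq[0]); i != -1; i = *wqe_to_link(&srq[i]))
                printf("next free wqe: %d\n", i);
        return 0;
}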
mthca_qp.c
    1607: void *wqe; (local)
    1643: wqe = get_send_wqe(qp, ind);
    1645: qp->sq.last = wqe;
    1647: ((struct mthca_next_seg *) wqe)->nda_op = 0;
    1648: ((struct mthca_next_seg *) wqe)->ee_nds = 0;
    1649: ((struct mthca_next_seg *) wqe)->flags = …
    1657: ((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;
    1659: wqe += sizeof (struct mthca_next_seg);
    1667: set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
    1669: wqe…
    1824: void *wqe; (local)
    1922: void *wqe; (local)
    2166: void *wqe; (local)
    … (more matches)
mthca_cq.c
    126: __be32 wqe; (member of struct mthca_cqe)
    140: __be32 wqe; (member of struct mthca_err_cqe)
    312: mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
    388: be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
    477: cqe->wqe = new_wqe;
    511: be32_to_cpu(cqe->wqe));
    540: wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
    546: u32 wqe = be32_to_cpu(cqe->wqe); (local)
    548: wqe_index = wqe >> sr…
    552: s32 wqe; (local)
    … (more matches)
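On this hardware the CQE reports the completed WQE as a byte offset into the queue buffer (cqe->wqe), so the completion path at line 540 recovers the index by subtracting the send-queue base offset and shifting by the per-entry stride. A worked example with invented numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t send_wqe_offset = 0x4000;      /* SQ starts after the RQ */
        uint32_t wqe_shift = 6;                 /* 64-byte WQE stride */
        uint32_t cqe_wqe = 0x4140;              /* byte offset from the CQE */

        uint32_t wqe_index = (cqe_wqe - send_wqe_offset) >> wqe_shift;

        /* (0x140 >> 6) == 5: the sixth send WQE completed */
        printf("completed send WQE index: %u\n", (unsigned)wqe_index);
        return 0;
}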

/drivers/infiniband/hw/mlx4/
qp.c
     214: __be32 *wqe; (local)
     229: wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
     230: *wqe = stamp;
     236: wqe = buf + i;
     237: *wqe = cpu_to_be32(0xffffffff);
     246: void *wqe; (local)
     249: ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
     253: struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
     262: inl = wqe + s;
    1981: build_sriov_qp0_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) (argument)
    1983: void *wqe, unsigne…
    2104: build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) (argument)
    2462: build_tunnel_header(struct ib_send_wr *wr, void *wqe, unsigned *mlx_seg_len) (argument)
    2545: build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr, struct mlx4_ib_qp *qp, unsigned *lso_seg_len, __be32 *lso_hdr_sz, __be32 *blh) (argument)
    2581: add_zero_len_inline(void *wqe) (argument)
    2592: void *wqe; (local)
    … (more matches)
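The loop at lines 229-237 stamps freed send WQEs with 0xffffffff so stale memory can never look like a valid posted request to the hardware. A simplified model of the stamping walk; the stride values are assumptions, and the real mlx4 code also juggles ownership bits and a second stamping variant:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>          /* htonl() standing in for cpu_to_be32() */

#define WQE_SHIFT    6          /* 64-byte send WQE stride (assumption) */
#define STAMP_STRIDE 64         /* stamp one 32-bit word per 64 bytes */

/* Write the invalid pattern across one WQE worth of queue memory. */
static void stamp_send_wqe(uint8_t *buf, unsigned offset)
{
        unsigned i;

        for (i = offset; i < offset + (1u << WQE_SHIFT); i += STAMP_STRIDE) {
                uint32_t stamp = htonl(0xffffffff);

                memcpy(buf + i, &stamp, sizeof(stamp));
        }
}

int main(void)
{
        uint8_t sq[256] = { 0 };
        uint32_t word;

        stamp_send_wqe(sq, 64);         /* invalidate the second WQE slot */
        memcpy(&word, sq + 64, sizeof(word));
        printf("first stamped word: 0x%08x\n", (unsigned)word);
        return 0;
}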

/drivers/infiniband/hw/nes/
nes_verbs.c
     219: struct nes_hw_qp_wqe *wqe; (local)
     239: wqe = &nesqp->hwqp.sq_vbase[head];
     240: /* nes_debug(NES_DBG_MR, "processing sq wqe at %p, head = %u.\n", wqe, head); */
     241: nes_fill_init_qp_wqe(wqe, nesqp, head);
     243: set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX, u64temp);
     256: set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc);
     257: set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, …
     259: set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);
     260: set_wqe_32bit_value(wqe…
    3233: fill_wqe_sg_send(struct nes_hw_qp_wqe *wqe, struct ib_send_wr *ib_wr, u32 uselkey) (argument)
    3267: struct nes_hw_qp_wqe *wqe; (local)
    3547: struct nes_hw_qp_wqe *wqe; (local)
    … (more matches)

/drivers/scsi/lpfc/
lpfc_sli.c
       86: * @wqe: The work Queue Entry to put on the Work queue.
       88: * This routine will copy the contents of @wqe to the next available entry on
       96: lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe) (argument)
      106: temp_wqe = q->qe[q->host_index].wqe;
      117: bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
      119: bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
      120: lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
      176: * @wqe: The Mailbox Queue Entry to put on the Work queue.
      446: * @wqe: The Receive Queue Entry to put on the Receive queue.
      448: * This routine will copy the contents of @wqe t…
     8141: lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, union lpfc_wqe *wqe) (argument)
     8627: union lpfc_wqe wqe; (local)
    16953: union lpfc_wqe wqe; (local)
    … (more matches)
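lpfc_sli4_wq_put() is the classic producer side of a host-owned ring: refuse if advancing host_index would catch the hardware's consumer index, copy the WQE into the current slot, then publish the new index. A generic userspace model (not the lpfc layout; the real routine also sets the WQEC/WQID bits seen at lines 117-119 and rings a doorbell):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ENTRY_COUNT 8
#define ENTRY_SIZE  64

struct wq {
        uint8_t  qe[ENTRY_COUNT][ENTRY_SIZE];
        unsigned host_index;    /* next slot the host fills */
        unsigned hba_index;     /* next slot the hardware consumes */
};

static int wq_put(struct wq *q, const void *wqe)
{
        unsigned next = (q->host_index + 1) % ENTRY_COUNT;

        if (next == q->hba_index)
                return -1;      /* full: producer would overrun the consumer */
        memcpy(q->qe[q->host_index], wqe, ENTRY_SIZE);
        q->host_index = next;   /* publish; the real code rings a doorbell here */
        return 0;
}

int main(void)
{
        struct wq q = { .host_index = 0, .hba_index = 0 };
        uint8_t wqe[ENTRY_SIZE] = { 0xab };

        printf("put=%d host_index=%u\n", wq_put(&q, wqe), q.host_index);
        return 0;
}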

/drivers/scsi/bfa/
bfa_svc.c
     671: struct bfa_fcxp_wqe_s *wqe; (local)
     674: bfa_q_deq(&mod->req_wait_q, &wqe);
     676: bfa_q_deq(&mod->rsp_wait_q, &wqe);
     678: if (wqe) {
     681: bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
     682: wqe->nrsp_sgles, wqe->req_sga_cbfn,
     683: wqe…
    1112: bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg, void *caller, int nreq_sgles, int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn, bfa_fcxp_get_sglen_t req_sglen_cbfn, bfa_fcxp_get_sgaddr_t rsp_sga_cbfn, bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req) (argument)
    1145: bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe) (argument)
    5390: struct bfa_sgpg_wqe_s *wqe; (local)
    5419: bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg) (argument)
    5445: bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe) (argument)
    5458: bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg), void *cbarg) (argument)
    … (more matches)
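In bfa_svc.c the wqe is not a hardware queue entry at all but a wait-queue element: when an fcxp or scatter/gather pages run out, the caller parks a wqe carrying a callback, and the free path dequeues one and invokes it. A minimal model of that handoff; the real code uses FIFO queues and passes the freed resource explicitly:

#include <stdio.h>

struct wqe {
        void (*cbfn)(void *cbarg);      /* run when the resource frees up */
        void *cbarg;
        struct wqe *next;
};

static struct wqe *wait_q;

static void wqe_wait(struct wqe *w, void (*cbfn)(void *), void *arg)
{
        w->cbfn = cbfn;
        w->cbarg = arg;
        w->next = wait_q;               /* LIFO keeps the sketch short */
        wait_q = w;
}

static void resource_freed(void)
{
        if (wait_q) {
                struct wqe *w = wait_q;

                wait_q = w->next;
                w->cbfn(w->cbarg);      /* hand the freed resource to the waiter */
        }
}

static void waiter(void *arg)
{
        printf("resource granted to %s\n", (const char *)arg);
}

int main(void)
{
        struct wqe w;

        wqe_wait(&w, waiter, "caller-1");
        resource_freed();
        return 0;
}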

/drivers/scsi/bnx2fc/
57xx_hsi_bnx2fc.h
    639: __le16 wqe; (member of struct fcoe_sqe)
    782: __le16 wqe; (member of struct fcoe_xfrqe)
    834: __le16 wqe; (member of struct fcoe_cqe)
    898: __le32 wqe; (member of struct fcoe_lcqe)
    913: __le16 wqe; (member of struct fcoe_pend_wq_cqe)
    992: __le16 wqe; (member of struct fcoe_unsolicited_cqe)

/drivers/infiniband/hw/ehca/
ehca_reqs.c
     84: /* clear wqe header until sglist */
    102: ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
    174: /* clear wqe header until sglist */
    309: ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
    678: * ignore this to avoid double cqes of bad wqe
    803: struct ehca_wqe *wqe; (local)
    821: wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
    822: if (!wqe) {
    823: ehca_err(cq->device, "Invalid wqe offset=%#llx on " …
    828: wc->wr_id = replace_wr_id(wqe…
    … (more matches)