/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>

#define mlx5_ib_dbg(dev, format, arg...)				\
pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	 __LINE__, current->pid, ##arg)

#define mlx5_ib_err(dev, format, arg...)				\
pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(dev, format, arg...)				\
pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__,	\
	__LINE__, current->pid, ##arg)
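
/*
 * These wrappers just prepend "<ibdev name>:<function>:<line>:(pid <pid>)"
 * to the usual pr_*() output, so a message can be tied to a specific
 * device and calling context, e.g. (illustrative only):
 *
 *	mlx5_ib_dbg(dev, "qpn 0x%x, wqe_cnt %d\n", qp->mqp.qpn, qp->sq.wqe_cnt);
 */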

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE		= 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES	= 1, /* always last */
};
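
/*
 * The mmap page offset multiplexes several "commands". A sketch of the
 * encoding implied by the constants above (decoding is presumably done in
 * the mmap handler in main.c; the helper names here are illustrative):
 *
 *	static int get_command(unsigned long pgoff)
 *	{
 *		return (pgoff >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
 *	}
 *
 *	static int get_arg(unsigned long pgoff)
 *	{
 *		return pgoff & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
 *	}
 *
 * i.e. the low 8 bits of the page offset carry a per-command argument
 * (such as a UAR index) and the next 8 bits select the command.
 */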

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_latency_class {
	MLX5_IB_LATENCY_CLASS_LOW,
	MLX5_IB_LATENCY_CLASS_MEDIUM,
	MLX5_IB_LATENCY_CLASS_HIGH,
	MLX5_IB_LATENCY_CLASS_FAST_PATH
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_uuar_info	uuari;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}

struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u32			pa_lkey;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for the low-level driver
 */

#define MLX5_IB_SEND_UMR_UNREG	IB_SEND_RESERVED_START
#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

struct wr_list {
	u16	opcode;
	u16	next;
};

struct mlx5_ib_wq {
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
	int			wqe_cnt;
	int			max_post;
	int			max_gs;
	int			offset;
	int			wqe_shift;
	unsigned		head;
	unsigned		tail;
	u16			cur_post;
	u16			last_poll;
	void		       *qend;
};
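
/*
 * The work queue is the usual producer/consumer ring: wqe_cnt is a power
 * of two, so slot indices are taken modulo wqe_cnt, head/tail advance as
 * WQEs are posted and completed, and wrid[] remembers the caller's wr_id
 * per slot so it can be handed back at poll time. A sketch of the index
 * arithmetic used by the post-send path (illustrative, not the exact code):
 *
 *	idx = wq->cur_post & (wq->wqe_cnt - 1);
 *	wq->wrid[idx] = wr->wr_id;
 */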

enum {
	MLX5_QP_USER,
	MLX5_QP_KERNEL,
	MLX5_QP_EMPTY
};

struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_buf		buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	u32			doorbell_qpn;
	u8			sq_signal_bits;
	u8			fm_cache;
	int			sq_max_wqes_per_wr;
	int			sq_spare_wqes;
	struct mlx5_ib_wq	sq;

	struct ib_umem	       *umem;
	int			buf_size;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	u16			xrcdn;
	u32			flags;
	u8			port;
	u8			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
	u8			state;
	int			mlx_type;
	int			wq_sig;
	int			scat_cqe;
	int			max_inline_data;
	struct mlx5_bf	       *bf;
	int			has_rq;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			uuarn;

	int			create_type;
	u32			pa_lkey;

	/* Store signature errors */
	bool			signature_en;
};

struct mlx5_ib_cq_buf {
	struct mlx5_buf		buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

enum mlx5_ib_qp_flags {
	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK     = 1 << 0,
	MLX5_IB_QP_SIGNATURE_HANDLING           = 1 << 1,
};

struct mlx5_shared_mr_info {
	int mr_id;
	struct ib_umem		*umem;
};

struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
};

struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_buf		buf;
	struct mlx5_db		db;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

struct mlx5_ib_mr {
	struct ib_mr		ibmr;
	struct mlx5_core_mr	mmr;
	struct ib_umem	       *umem;
	struct mlx5_shared_mr_info	*smr_info;
	struct list_head	list;
	int			order;
	int			umred;
	__be64			*pas;
	dma_addr_t		dma;
	int			npages;
	struct mlx5_ib_dev     *dev;
	struct mlx5_create_mkey_mbox_out out;
	struct mlx5_core_sig_ctx    *sig;
};

struct mlx5_ib_fast_reg_page_list {
	struct ib_fast_reg_page_list	ibfrpl;
	__be64			       *mapped_page_list;
	dma_addr_t			map;
};

struct mlx5_ib_umr_context {
	enum ib_wc_status	status;
	struct completion	done;
};

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	/* -1 is not a valid ib_wc_status; it marks "no completion seen yet"
	 * and is overwritten with the real status when the UMR WR completes.
	 */
	context->status = -1;
	init_completion(&context->done);
}
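
/*
 * A sketch of how this context is typically used by the UMR paths in
 * mr.c (exact variable and field names there may differ): the caller
 * stashes a pointer to a stack-allocated context in the work request's
 * wr_id, posts to the UMR QP, and sleeps on the completion fired from
 * mlx5_umr_cq_handler():
 *
 *	struct mlx5_ib_umr_context umr_context;
 *
 *	mlx5_ib_init_umr_context(&umr_context);
 *	wr.wr_id = (u64)(unsigned long)&umr_context;
 *	down(&dev->umrc.sem);		// throttle users of the UMR QP
 *	err = ib_post_send(dev->umrc.qp, &wr, &bad);
 *	if (!err) {
 *		wait_for_completion(&umr_context.done);
 *		if (umr_context.status != IB_WC_SUCCESS)
 *			err = -EFAULT;
 *	}
 *	up(&dev->umrc.sem);
 */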

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	struct ib_mr	*mr;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};

enum {
	MLX5_FMR_INVALID,
	MLX5_FMR_VALID,
	MLX5_FMR_BUSY,
};

struct mlx5_ib_fmr {
	struct ib_fmr			ibfmr;
	struct mlx5_core_mr		mr;
	int				access_flags;
	int				state;
	/* protect fmr state
	 */
	spinlock_t			lock;
	u64				wrid;
	struct ib_send_wr		wr[2];
	u8				page_shift;
	struct ib_fast_reg_page_list	page_list;
};

struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	struct dentry	       *dir;
	char			name[4];
	u32			order;
	u32			size;
	u32			cur;
	u32			miss;
	u32			limit;

	struct dentry          *fsize;
	struct dentry          *fcur;
	struct dentry          *fmiss;
	struct dentry          *flimit;

	struct mlx5_ib_dev     *dev;
	struct work_struct	work;
	struct delayed_work	dwork;
	int			pending;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	int			stopped;
	struct dentry		*root;
	unsigned long		last_add;
};
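
/*
 * The MR cache keeps lists of pre-created MKeys, one entry per translation
 * size ("order"), so that memory registration can usually take a ready
 * MKey off a list instead of issuing a firmware command; the workqueue
 * refills entries in the background (see mlx5_mr_cache_init() and
 * mlx5_mr_cache_cleanup() below). The dentry fields back debugfs files
 * exposing the per-entry size/cur/miss/limit counters.
 */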

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	struct ib_xrcd	*x0;
	struct ib_xrcd	*x1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
};

struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
	struct list_head		eqs_list;
	int				num_ports;
	int				num_comp_vectors;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	bool				ib_active;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	spinlock_t			mr_lock;
	struct mlx5_ib_resources	devr;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	int				fill_delay;
};
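
/*
 * The to_m*() helpers below recover the driver-private wrapper from the
 * embedded core object. This is the standard container_of() idiom: each
 * mlx5_ib_* struct embeds its ib_* (or mlx5_core_*) counterpart as a
 * member, so subtracting the member's offset yields the wrapper, e.g.
 * to_mcq(ibcq) is valid for any ib_cq this driver created.
 */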

static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
	return container_of(ibfmr, struct mlx5_ib_fmr, ibfmr);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp, mqp);
}

static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
{
	return container_of(mmr, struct mlx5_ib_mr, mmr);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
{
	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
}

struct mlx5_ib_ah {
	struct ib_ah		ibah;
	struct mlx5_av		av;
};

static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
{
	return container_of(ibah, struct mlx5_ib_ah, ibah);
}

int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
		 void *in_mad, void *response_mad);
struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
			   struct mlx5_ib_ah *ah);
struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr);
int mlx5_ib_destroy_ah(struct ib_ah *ah);
struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
				  struct ib_srq_init_attr *init_attr,
				  struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			  struct ib_recv_wr **bad_wr);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp);
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr);
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
				int vector, struct ib_ucontext *context,
				struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr);
int mlx5_ib_destroy_mr(struct ib_mr *ibmr);
struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
				struct ib_mr_init_attr *mr_init_attr);
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len);
struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len);
void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
struct ib_fmr *mlx5_ib_fmr_alloc(struct ib_pd *pd, int acc,
				 struct ib_fmr_attr *fmr_attr);
int mlx5_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
			 int npages, u64 iova);
int mlx5_ib_unmap_fmr(struct list_head *fmr_list);
int mlx5_ib_fmr_dealloc(struct ib_fmr *ibfmr);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			struct ib_wc *in_wc, struct ib_grh *in_grh,
			struct ib_mad *in_mad, struct ib_mad *out_mad);
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd);
int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn);
int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port);
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props);
int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
			int *ncont, int *order);
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int page_shift, __be64 *pas, int umr);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);

static inline void init_query_mad(struct ib_smp *mad)
{
	mad->base_version  = 1;
	mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
	mad->class_version = 1;
	mad->method	   = IB_MGMT_METHOD_GET;
}
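
/*
 * This fills in the common header of a LID-routed SubnGet SMP, presumably
 * for the query paths that go through mlx5_MAD_IFC(); the caller then only
 * sets attr_id/attr_mod for the attribute it wants, e.g. (illustrative):
 *
 *	init_query_mad(in_mad);
 *	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 *	in_mad->attr_mod = cpu_to_be32(port);
 */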

static inline u8 convert_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
	       MLX5_PERM_LOCAL_READ;
}
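
/*
 * Note that MLX5_PERM_LOCAL_READ is always set, so e.g.
 * convert_access(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE) yields
 * MLX5_PERM_LOCAL_READ | MLX5_PERM_LOCAL_WRITE | MLX5_PERM_REMOTE_WRITE.
 */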

#endif /* MLX5_IB_H */