
/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) adapters.                   *
 * Copyright (C) 2008-2014 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#include <rdma/ib_addr.h>
#include "ocrdma_stats.h"

static struct dentry *ocrdma_dbgfs_dir;

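/*
 * Format one "name: value" line into a scratch buffer and append it at
 * @pcur, provided it still fits within the OCRDMA_MAX_DBGFS_MEM buffer
 * that starts at @start.  Returns the number of bytes appended, or 0 if
 * the stats buffer is full.
 */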
static int ocrdma_add_stat(char *start, char *pcur,
				char *name, u64 count)
{
	char buff[128] = {0};
	int cpy_len = 0;

	snprintf(buff, sizeof(buff), "%s: %llu\n", name, count);
	cpy_len = strlen(buff);

	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
		pr_err("%s: No space in stats buff\n", __func__);
		return 0;
	}

	memcpy(pcur, buff, cpy_len);
	return cpy_len;
}

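/*
 * Allocate the two buffers used by the stats code: a DMA-coherent buffer
 * large enough for either the stats mailbox request or response, and a
 * kernel buffer used to build the text exposed through debugfs.
 */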
static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	/* Alloc mbox command mem */
	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
			sizeof(struct ocrdma_rdma_stats_resp));

	mem->va   = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
					 &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		pr_err("%s: stats mbox allocation failed\n", __func__);
		return false;
	}

	memset(mem->va, 0, mem->size);

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem) {
		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
		return false;
	}

	return true;
}

static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	kfree(mem->debugfs_mem);
}

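/*
 * The ocrdma_*_stats() helpers below convert one group of counters from
 * the last stats mailbox response into "name: value" text in the debugfs
 * buffer and return a pointer to that buffer.
 */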
static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
			(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
				(u64)rsrc_stats->mw);

	/* Print the threshold stats */
	rsrc_stats = &rdma_stats->th_rsrc_stats;

	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
				(u64)rsrc_stats->mw);
	return stats;
}

static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat
		(stats, pcur, "roce_frame_bytes",
		 convert_to_64bit(rx_stats->roce_frame_bytes_lo,
		 rx_stats->roce_frame_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
				(u64)rx_stats->roce_frame_icrc_drops);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
				(u64)rx_stats->roce_frame_payload_len_drops);
	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
				(u64)rx_stats->ud_drops);
	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
				(u64)rx_stats->qp1_drops);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
				(u64)rx_stats->psn_error_request_packets);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
				(u64)rx_stats->psn_error_resp_packets);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
				(u64)rx_stats->rnr_nak_timeouts);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
				(u64)rx_stats->rnr_nak_receives);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
				(u64)rx_stats->roce_frame_rxmt_drops);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
				(u64)rx_stats->nak_count_psn_sequence_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
				(u64)rx_stats->rc_drop_count_lookup_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
				(u64)rx_stats->rq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
				(u64)rx_stats->srq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
				convert_to_64bit(rx_stats->roce_frames_lo,
						 rx_stats->roce_frames_hi));

	return stats;
}

static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
				convert_to_64bit(tx_stats->send_pkts_lo,
						 tx_stats->send_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
				convert_to_64bit(tx_stats->write_pkts_lo,
						 tx_stats->write_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
				convert_to_64bit(tx_stats->read_pkts_lo,
						 tx_stats->read_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
						 tx_stats->read_rsp_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
				convert_to_64bit(tx_stats->ack_pkts_lo,
						 tx_stats->ack_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
				convert_to_64bit(tx_stats->send_bytes_lo,
						 tx_stats->send_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
				convert_to_64bit(tx_stats->write_bytes_lo,
						 tx_stats->write_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
				convert_to_64bit(tx_stats->read_req_bytes_lo,
						 tx_stats->read_req_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
						 tx_stats->read_rsp_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
				(u64)tx_stats->ack_timeouts);

	return stats;
}

static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
		convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
				 wqe_stats->large_send_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
		convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
				 wqe_stats->large_write_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
				convert_to_64bit(wqe_stats->read_wqes_lo,
						 wqe_stats->read_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
				convert_to_64bit(wqe_stats->frmr_wqes_lo,
						 wqe_stats->frmr_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
						 wqe_stats->mw_bind_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
		convert_to_64bit(wqe_stats->invalidate_wqes_lo,
				 wqe_stats->invalidate_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
				(u64)wqe_stats->dpp_wqe_drops);
	return stats;
}

static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
				(u64)db_err_stats->sq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
				(u64)db_err_stats->cq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
				(u64)db_err_stats->rq_srq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
				(u64)db_err_stats->cq_overflow_errors);
	return stats;
}

static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
		 &rdma_stats->rx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
			(u64)rx_qp_err_stats->nak_invalid_requst_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
			(u64)rx_qp_err_stats->nak_remote_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
			(u64)rx_qp_err_stats->nak_count_remote_access_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)rx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)rx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)rx_qp_err_stats->local_qp_operation_errors);
	return stats;
}

static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
		&rdma_stats->tx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
			(u64)tx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
			(u64)tx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
			(u64)tx_qp_err_stats->local_qp_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->retry_count_exceeded_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
			(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);
	return stats;
}

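/*
 * The TX/RX debug stats are opaque firmware dwords, so they are dumped
 * as indexed hex words rather than named counters.
 */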
static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
		&rdma_stats->tx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 100; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				 tx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
		&rdma_stats->rx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 200; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				 rx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}

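/*
 * Refresh the cached counters from firmware, but issue the stats mailbox
 * command at most once per second.
 */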
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
	ulong now = jiffies, secs;
	int status = 0;

	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
	if (secs) {
		/* update */
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (status)
			pr_err("%s: stats mbox failed with status = %d\n",
			       __func__, status);
		dev->last_stats_time = jiffies;
	}
}

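/*
 * debugfs read handler shared by all stats files.  It refreshes the
 * counters under stats_lock, formats the group selected by the file's
 * ocrdma_stats type and copies the resulting text to user space.
 */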
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
					size_t usr_buf_len, loff_t *ppos)
{
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;
	ssize_t status = 0;
	char *data = NULL;

	/* No partial reads */
	if (*ppos != 0)
		return 0;

	mutex_lock(&dev->stats_lock);

	ocrdma_update_stats(dev);

	switch (pstats->type) {
	case OCRDMA_RSRC_STATS:
		data = ocrdma_resource_stats(dev);
		break;
	case OCRDMA_RXSTATS:
		data = ocrdma_rx_stats(dev);
		break;
	case OCRDMA_WQESTATS:
		data = ocrdma_wqe_stats(dev);
		break;
	case OCRDMA_TXSTATS:
		data = ocrdma_tx_stats(dev);
		break;
	case OCRDMA_DB_ERRSTATS:
		data = ocrdma_db_errstats(dev);
		break;
	case OCRDMA_RXQP_ERRSTATS:
		data = ocrdma_rxqp_errstats(dev);
		break;
	case OCRDMA_TXQP_ERRSTATS:
		data = ocrdma_txqp_errstats(dev);
		break;
	case OCRDMA_TX_DBG_STATS:
		data = ocrdma_tx_dbg_stats(dev);
		break;
	case OCRDMA_RX_DBG_STATS:
		data = ocrdma_rx_dbg_stats(dev);
		break;
	default:
		status = -EFAULT;
		goto exit;
	}

	if (usr_buf_len < strlen(data)) {
		status = -ENOSPC;
		goto exit;
	}

	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
					 strlen(data));
exit:
	mutex_unlock(&dev->stats_lock);
	return status;
}

static const struct file_operations ocrdma_dbg_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ocrdma_dbgfs_ops_read,
};

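/*
 * Create the per-device debugfs directory and one file per stats group,
 * then allocate the stats buffers.  On any failure, everything created
 * so far is torn down and the device is left without stats files.
 */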
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
	if (!ocrdma_dbgfs_dir)
		return;

	/* Create port stats base dir */
	dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
	if (!dev->dir)
		goto err;

	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
	dev->rsrc_stats.dev = dev;
	if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
				 &dev->rsrc_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_stats.type = OCRDMA_RXSTATS;
	dev->rx_stats.dev = dev;
	if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
				 &dev->rx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->wqe_stats.type = OCRDMA_WQESTATS;
	dev->wqe_stats.dev = dev;
	if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
				 &dev->wqe_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_stats.type = OCRDMA_TXSTATS;
	dev->tx_stats.dev = dev;
	if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
				 &dev->tx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
	dev->db_err_stats.dev = dev;
	if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
				 &dev->db_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
	dev->tx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
	dev->rx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
	dev->tx_dbg_stats.dev = dev;
	if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->tx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
	dev->rx_dbg_stats.dev = dev;
	if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	/* Now create dma_mem for stats mbx command */
	if (!ocrdma_alloc_stats_mem(dev))
		goto err;

	mutex_init(&dev->stats_lock);

	return;
err:
	ocrdma_release_stats_mem(dev);
	debugfs_remove_recursive(dev->dir);
	dev->dir = NULL;
}

void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
	if (!dev->dir)
		return;
	mutex_destroy(&dev->stats_lock);
	ocrdma_release_stats_mem(dev);
	/* The per-port dir still holds the stats files; remove recursively */
	debugfs_remove_recursive(dev->dir);
}

void ocrdma_init_debugfs(void)
{
	/* Create base dir in debugfs root dir */
	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
}

void ocrdma_rem_debugfs(void)
{
	debugfs_remove_recursive(ocrdma_dbgfs_dir);
}