/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-eq.c
 *
 * Library level Event queue management routines
 */

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

/**
 * Create an event queue that has room for \a count number of events.
 *
 * The event queue is circular and older events will be overwritten by new
 * ones if they are not removed in time by the user using the functions
 * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
 * determine the appropriate size of the event queue to prevent this loss
 * of events. Note that when an EQ handler is specified in \a callback, no
 * event loss can happen, since the handler is run for each event deposited
 * into the EQ.
 *
 * \param count The number of events to be stored in the event queue. It
 * will be rounded up to the next power of two.
 * \param callback A handler function that runs when an event is deposited
 * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
 * indicate that no event handler is desired.
 * \param handle On successful return, this location will hold a handle for
 * the newly created EQ.
 *
 * \retval 0       On success.
 * \retval -EINVAL If any parameter is not valid.
 * \retval -ENOMEM If memory for the EQ can't be allocated.
 *
 * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
 */
int
LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
	    lnet_handle_eq_t *handle)
{
	lnet_eq_t     *eq;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	/* We need count to be a power of 2 so that when eq_{enq,deq}_seq
	 * overflow, they don't skip entries, and the queue has the same
	 * apparent capacity at all times */

	count = cfs_power2_roundup(count);

	if (callback != LNET_EQ_HANDLER_NONE && count != 0) {
		CWARN("EQ callback is guaranteed to get every event; do you still want to set eqcount %d for polled events, which adds locking overhead? Please contact the developers to confirm\n", count);
	}

	/* count can be 0 if the caller only needs the callback; this
	 * eliminates the overhead of enqueueing events */
	if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
		return -EINVAL;

	eq = lnet_eq_alloc();
	if (eq == NULL)
		return -ENOMEM;

	if (count != 0) {
		LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
		if (eq->eq_events == NULL)
			goto failed;
		/* NB the allocator has set all event sequence numbers to 0,
		 * so they should all be earlier than eq_deq_seq */
	}

	eq->eq_deq_seq = 1;
	eq->eq_enq_seq = 1;
	eq->eq_size = count;
	eq->eq_callback = callback;

	eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
				       sizeof(*eq->eq_refs[0]));
	if (eq->eq_refs == NULL)
		goto failed;

	/* MUST hold the exclusive lnet_res_lock */
	lnet_res_lock(LNET_LOCK_EX);
	/* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
	 * both EQ lookup and event polling with only lnet_eq_wait_lock */
	lnet_eq_wait_lock();

	lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
	list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);

	lnet_eq_wait_unlock();
	lnet_res_unlock(LNET_LOCK_EX);

	lnet_eq2handle(handle, eq);
	return 0;

failed:
	if (eq->eq_events != NULL)
		LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));

	if (eq->eq_refs != NULL)
		cfs_percpt_free(eq->eq_refs);

	lnet_eq_free(eq);
	return -ENOMEM;
}
EXPORT_SYMBOL(LNetEQAlloc);
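
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants to poll for events might create a small EQ like this; "my_eqh" is
 * a hypothetical handle owned by that caller.
 *
 *	lnet_handle_eq_t my_eqh;
 *	int rc;
 *
 *	rc = LNetEQAlloc(64, LNET_EQ_HANDLER_NONE, &my_eqh);
 *	if (rc != 0)
 *		return rc;
 *
 * Passing a callback instead (typically with count == 0) avoids the event
 * buffer and its locking entirely, since the handler is run for every
 * deposited event.
 */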

/**
 * Release the resources associated with an event queue if it's idle;
 * otherwise do nothing and it's up to the user to try again.
 *
 * \param eqh A handle for the event queue to be released.
 *
 * \retval 0 If the EQ is not in use and freed.
 * \retval -ENOENT If \a eqh does not point to a valid EQ.
 * \retval -EBUSY  If the EQ is still in use by some MDs.
 */
int
LNetEQFree(lnet_handle_eq_t eqh)
{
	struct lnet_eq	*eq;
	lnet_event_t	*events = NULL;
	int		**refs = NULL;
	int		*ref;
	int		rc = 0;
	int		size = 0;
	int		i;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	lnet_res_lock(LNET_LOCK_EX);
	/* NB: hold lnet_eq_wait_lock for EQ link/unlink, so we can do
	 * both EQ lookup and event polling with only lnet_eq_wait_lock */
	lnet_eq_wait_lock();

	eq = lnet_handle2eq(&eqh);
	if (eq == NULL) {
		rc = -ENOENT;
		goto out;
	}

	cfs_percpt_for_each(ref, i, eq->eq_refs) {
		LASSERT(*ref >= 0);
		if (*ref == 0)
			continue;

		CDEBUG(D_NET, "Event queue (%d: %d) busy on destroy.\n",
		       i, *ref);
		rc = -EBUSY;
		goto out;
	}

	/* stash everything for freeing after the lock is dropped */
	events	= eq->eq_events;
	size	= eq->eq_size;
	refs	= eq->eq_refs;

	lnet_res_lh_invalidate(&eq->eq_lh);
	list_del(&eq->eq_list);
	lnet_eq_free_locked(eq);
 out:
	lnet_eq_wait_unlock();
	lnet_res_unlock(LNET_LOCK_EX);

	if (events != NULL)
		LIBCFS_FREE(events, size * sizeof(lnet_event_t));
	if (refs != NULL)
		cfs_percpt_free(refs);

	return rc;
}
EXPORT_SYMBOL(LNetEQFree);
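
/*
 * Teardown sketch (illustrative only): LNetEQFree() returns -EBUSY while
 * any MD still references the EQ, so a caller normally unlinks its MDs
 * first and retries; "my_eqh" is the same hypothetical handle as above.
 *
 *	rc = LNetEQFree(my_eqh);
 *	if (rc == -EBUSY)
 *		the EQ is still referenced: unlink the remaining MDs with
 *		LNetMDUnlink(), let their unlink events drain, then retry.
 */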

void
lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
	/* MUST be called with the resource lock held, but without
	 * lnet_eq_wait_lock */
	int index;

	if (eq->eq_size == 0) {
		LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
		eq->eq_callback(ev);
		return;
	}

	lnet_eq_wait_lock();
	ev->sequence = eq->eq_enq_seq++;

	LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
	index = ev->sequence & (eq->eq_size - 1);

	eq->eq_events[index] = *ev;

	if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
		eq->eq_callback(ev);

	/* Wake anyone waiting in LNetEQPoll() */
	if (waitqueue_active(&the_lnet.ln_eq_waitq))
		wake_up_all(&the_lnet.ln_eq_waitq);
	lnet_eq_wait_unlock();
}

static int
lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
	int		new_index = eq->eq_deq_seq & (eq->eq_size - 1);
	lnet_event_t	*new_event = &eq->eq_events[new_index];
	int		rc;

	/* must be called with lnet_eq_wait_lock held */
	if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
		return 0;

	/* We've got a new event... */
	*ev = *new_event;

	CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
	       new_event, eq->eq_deq_seq, eq->eq_size);

	/* ...but did it overwrite an event we've not seen yet? */
	if (eq->eq_deq_seq == new_event->sequence) {
		rc = 1;
	} else {
		/* don't complain with CERROR: some EQs are sized small
		 * anyway; if it's important, the caller should complain */
		CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
		       eq->eq_deq_seq, new_event->sequence);
		rc = -EOVERFLOW;
	}

	eq->eq_deq_seq = new_event->sequence + 1;
	return rc;
}
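
/*
 * Worked example of the sequence bookkeeping above (numbers purely
 * illustrative): with eq_size == 4 and eq_deq_seq == 6, the consumer looks
 * at slot 6 & 3 == 2. If that slot still holds an older event (sequence 2),
 * LNET_SEQ_GT() is true and 0 is returned: nothing new. If it holds
 * sequence 6, that is the expected event: 1 is returned and eq_deq_seq
 * becomes 7. If the producer has lapped the consumer and the slot now holds
 * sequence 10, the events with sequences 6..9 are lost to the consumer:
 * -EOVERFLOW is returned along with event 10, and eq_deq_seq jumps to 11.
 */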

/**
 * A nonblocking function that can be used to get the next event in an EQ.
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully. The event is removed from the queue.
 *
 * \param eventq A handle for the event queue.
 * \param event On successful return (1 or -EOVERFLOW), this location will
 * hold the next event in the EQ.
 *
 * \retval 0	  No pending event in the EQ.
 * \retval 1	  Indicates success.
 * \retval -ENOENT    If \a eventq does not point to a valid EQ.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ has been dropped due to limited space in the EQ.
 */
int
LNetEQGet(lnet_handle_eq_t eventq, lnet_event_t *event)
{
	int which;

	return LNetEQPoll(&eventq, 1, 0,
			 event, &which);
}
EXPORT_SYMBOL(LNetEQGet);

/**
 * Block the calling process until there is an event in the EQ.
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully. This function returns the next event
 * in the EQ and removes it from the EQ.
 *
 * \param eventq A handle for the event queue.
 * \param event On successful return (1 or -EOVERFLOW), this location will
 * hold the next event in the EQ.
 *
 * \retval 1	  Indicates success.
 * \retval -ENOENT    If \a eventq does not point to a valid EQ.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ has been dropped due to limited space in the EQ.
 */
int
LNetEQWait(lnet_handle_eq_t eventq, lnet_event_t *event)
{
	int which;

	return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
			 event, &which);
}
EXPORT_SYMBOL(LNetEQWait);

static int
lnet_eq_wait_locked(int *timeout_ms)
__must_hold(&the_lnet.ln_eq_wait_lock)
{
	int		tms = *timeout_ms;
	int		wait;
	wait_queue_t	wl;
	unsigned long	now;

	if (tms == 0)
		return -1; /* don't want to wait and no new event */

	init_waitqueue_entry(&wl, current);
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&the_lnet.ln_eq_waitq, &wl);

	lnet_eq_wait_unlock();

	if (tms < 0) {
		schedule();

	} else {
		struct timeval tv;

		now = cfs_time_current();
		schedule_timeout(cfs_time_seconds(tms) / 1000);
		cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
		tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
		if (tms < 0) /* no more wait but may have new event */
			tms = 0;
	}

	wait = tms != 0; /* might need to call here again */
	*timeout_ms = tms;

	lnet_eq_wait_lock();
	remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);

	return wait;
}

/**
 * Block the calling process until there's an event from a set of EQs or
 * a timeout occurs.
 *
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully, in which case the corresponding event
 * is consumed.
 *
 * LNetEQPoll() provides a timeout to allow applications to poll, block for a
 * fixed period, or block indefinitely.
 *
 * \param eventqs,neq An array of EQ handles, and the size of the array.
 * \param timeout_ms Time in milliseconds to wait for an event to occur on
 * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
 * infinite timeout.
 * \param event,which On successful return (1 or -EOVERFLOW), \a event will
 * hold the next event in the EQs, and \a which will contain the index of the
 * EQ from which the event was taken.
 *
 * \retval 0	  No pending event in the EQs after the timeout expired.
 * \retval 1	  Indicates success.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ indicated by \a which has been dropped due to limited space in the EQ.
 * \retval -ENOENT    If there's an invalid handle in \a eventqs.
 */
int
LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
	   lnet_event_t *event, int *which)
{
	int	wait = 1;
	int	rc;
	int	i;

	LASSERT(the_lnet.ln_init);
	LASSERT(the_lnet.ln_refcount > 0);

	if (neq < 1)
		return -ENOENT;

	lnet_eq_wait_lock();

	for (;;) {
		for (i = 0; i < neq; i++) {
			lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);

			if (eq == NULL) {
				lnet_eq_wait_unlock();
				return -ENOENT;
			}

			rc = lnet_eq_dequeue_event(eq, event);
			if (rc != 0) {
				lnet_eq_wait_unlock();
				*which = i;
				return rc;
			}
		}

		if (wait == 0)
			break;

		/*
		 * Return value of lnet_eq_wait_locked():
		 * -1 : did not sleep, so there is certainly no new event
		 *  1 : slept waiting for a new event (time still remains)
		 *  0 : timeout expired; a new event may still have arrived,
		 *      so dequeue once more before giving up
		 */
		wait = lnet_eq_wait_locked(&timeout_ms);
		if (wait < 0) /* no new event */
			break;
	}

	lnet_eq_wait_unlock();
	return 0;
}
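
/*
 * Polling sketch (illustrative only): draining a single EQ with a bounded
 * wait; "my_eqh", "my_handle_event" and the 100 ms timeout are hypothetical.
 *
 *	lnet_event_t ev;
 *	int which;
 *	int rc;
 *
 *	do {
 *		rc = LNetEQPoll(&my_eqh, 1, 100, &ev, &which);
 *		if (rc == 1 || rc == -EOVERFLOW)
 *			my_handle_event(&ev);
 *	} while (rc != 0 && rc != -ENOENT);
 *
 * Note that -EOVERFLOW still delivers a valid event; it only signals that
 * earlier events were dropped because the EQ was too small.
 */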