
/*
 * drivers/base/power/runtime.c - Helper functions for device runtime PM
 *
 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
 *
 * This file is released under the GPLv2.
 */

#include <linux/sched.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <trace/events/rpm.h>
#include "power.h"

#define RPM_GET_CALLBACK(dev, cb)				\
({								\
	int (*__rpm_cb)(struct device *__d);			\
								\
	if (dev->pm_domain)					\
		__rpm_cb = dev->pm_domain->ops.cb;		\
	else if (dev->type && dev->type->pm)			\
		__rpm_cb = dev->type->pm->cb;			\
	else if (dev->class && dev->class->pm)			\
		__rpm_cb = dev->class->pm->cb;			\
	else if (dev->bus && dev->bus->pm)			\
		__rpm_cb = dev->bus->pm->cb;			\
	else							\
		__rpm_cb = NULL;				\
								\
	if (!__rpm_cb && dev->driver && dev->driver->pm)	\
		__rpm_cb = dev->driver->pm->cb;			\
								\
	__rpm_cb;						\
})

static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_suspend);
}

static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_resume);
}
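
/*
 * Illustrative sketch (not part of this file; the foo_* names are
 * hypothetical): a driver typically supplies the callbacks looked up
 * above through its struct dev_pm_ops, e.g.:
 *
 *	static int foo_runtime_suspend(struct device *dev)
 *	{
 *		... put the hardware into a low-power state ...
 *		return 0;
 *	}
 *
 *	static int foo_runtime_resume(struct device *dev)
 *	{
 *		... bring the hardware back to full power ...
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */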
46
#ifdef CONFIG_PM_RUNTIME
static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
{
	return RPM_GET_CALLBACK(dev, runtime_idle);
}

static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);

/**
 * update_pm_runtime_accounting - Update the time accounting of power states
 * @dev: Device to update the accounting for
 *
 * In order to be able to have time accounting of the various power states
 * (as used by programs such as PowerTOP to show the effectiveness of runtime
 * PM), we need to track the time spent in each state.
 * update_pm_runtime_accounting must be called each time before the
 * runtime_status field is updated, to account the time in the old state
 * correctly.
 */
void update_pm_runtime_accounting(struct device *dev)
{
	unsigned long now = jiffies;
	unsigned long delta;

	delta = now - dev->power.accounting_timestamp;

	dev->power.accounting_timestamp = now;

	if (dev->power.disable_depth > 0)
		return;

	if (dev->power.runtime_status == RPM_SUSPENDED)
		dev->power.suspended_jiffies += delta;
	else
		dev->power.active_jiffies += delta;
}

static void __update_runtime_status(struct device *dev, enum rpm_status status)
{
	update_pm_runtime_accounting(dev);
	dev->power.runtime_status = status;
}

/**
 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
 * @dev: Device to handle.
 */
static void pm_runtime_deactivate_timer(struct device *dev)
{
	if (dev->power.timer_expires > 0) {
		del_timer(&dev->power.suspend_timer);
		dev->power.timer_expires = 0;
	}
}

/**
 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
 * @dev: Device to handle.
 */
static void pm_runtime_cancel_pending(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);
	/*
	 * In case there's a request pending, make sure its work function will
	 * return without doing anything.
	 */
	dev->power.request = RPM_REQ_NONE;
}

/**
 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
 * @dev: Device to handle.
 *
 * Compute the autosuspend-delay expiration time based on the device's
 * power.last_busy time.  If the delay has already expired or is disabled
 * (negative) or the power.use_autosuspend flag isn't set, return 0.
 * Otherwise return the expiration time in jiffies (adjusted to be nonzero).
 *
 * This function may be called either with or without dev->power.lock held.
 * Either way it can be racy, since power.last_busy may be updated at any time.
 */
unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
{
	int autosuspend_delay;
	long elapsed;
	unsigned long last_busy;
	unsigned long expires = 0;

	if (!dev->power.use_autosuspend)
		goto out;

	autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay);
	if (autosuspend_delay < 0)
		goto out;

	last_busy = ACCESS_ONCE(dev->power.last_busy);
	elapsed = jiffies - last_busy;
	if (elapsed < 0)
		goto out;	/* jiffies has wrapped around. */

	/*
	 * If the autosuspend_delay is >= 1 second, align the timer by rounding
	 * up to the nearest second.
	 */
	expires = last_busy + msecs_to_jiffies(autosuspend_delay);
	if (autosuspend_delay >= 1000)
		expires = round_jiffies(expires);
	expires += !expires;
	if (elapsed >= expires - last_busy)
		expires = 0;	/* Already expired. */

 out:
	return expires;
}
EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
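
/*
 * Illustrative note (hypothetical driver code): the expiration time computed
 * above advances whenever pm_runtime_mark_last_busy() is called.  A driver
 * using autosuspend therefore typically ends an I/O path with:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 *
 * so that the actual suspend happens autosuspend_delay milliseconds after
 * the last recorded activity.
 */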
163
static int dev_memalloc_noio(struct device *dev, void *data)
{
	return dev->power.memalloc_noio;
}

/**
 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
 * @dev: Device to handle.
 * @enable: True for setting the flag and False for clearing the flag.
 *
 * Set the flag for all devices in the path from the device to the
 * root device in the device tree if @enable is true, otherwise clear
 * the flag for devices in the path whose siblings don't set the flag.
 *
 * This function should only be called by block device or network
 * device drivers, to avoid a deadlock during runtime resume/suspend:
 *
 *     If a memory allocation with GFP_KERNEL is made inside the runtime
 *     resume/suspend callback of any of the device's ancestors (or the
 *     block device itself), a deadlock may be triggered inside the
 *     allocation, since it might not complete until the block device
 *     becomes active and the involved page I/O finishes.  This situation
 *     was first pointed out by Alan Stern.  Network devices can be
 *     involved in the same way in iSCSI-style setups.
 *
 * dev_hotplug_mutex is held in this function to handle the hotplug race,
 * because pm_runtime_set_memalloc_noio() may be called from an async
 * probe().
 *
 * The function should be called between device_add() and device_del()
 * on the affected (block/network) device.
 */
void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
{
	static DEFINE_MUTEX(dev_hotplug_mutex);

	mutex_lock(&dev_hotplug_mutex);
	for (;;) {
		bool enabled;

		/* Hold the power lock, since the bitfield is not SMP-safe. */
		spin_lock_irq(&dev->power.lock);
		enabled = dev->power.memalloc_noio;
		dev->power.memalloc_noio = enable;
		spin_unlock_irq(&dev->power.lock);

		/*
		 * No need to update the ancestors if the flag was already
		 * set on this device.
		 */
		if (enabled && enable)
			break;

		dev = dev->parent;

		/*
		 * Clear the parent's flag only if none of its children
		 * have the flag set, because an ancestor's flag may have
		 * been set by any one of its descendants.
		 */
		if (!dev || (!enable &&
			     device_for_each_child(dev, NULL,
						   dev_memalloc_noio)))
			break;
	}
	mutex_unlock(&dev_hotplug_mutex);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
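
/*
 * Illustrative sketch (hypothetical): a block or network device driver would
 * bracket the flag around the device's registered lifetime, for example:
 *
 *	ret = device_add(dev);
 *	if (ret)
 *		return ret;
 *	pm_runtime_set_memalloc_noio(dev, true);
 *	...
 *	pm_runtime_set_memalloc_noio(dev, false);
 *	device_del(dev);
 */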
233
/**
 * rpm_check_suspend_allowed - Test whether a device may be suspended.
 * @dev: Device to test.
 */
static int rpm_check_suspend_allowed(struct device *dev)
{
	int retval = 0;

	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	else if (atomic_read(&dev->power.usage_count) > 0)
		retval = -EAGAIN;
	else if (!pm_children_suspended(dev))
		retval = -EBUSY;

	/* Pending resume requests take precedence over suspends. */
	else if ((dev->power.deferred_resume
			&& dev->power.runtime_status == RPM_SUSPENDING)
	    || (dev->power.request_pending
			&& dev->power.request == RPM_REQ_RESUME))
		retval = -EAGAIN;
	else if (__dev_pm_qos_read_value(dev) < 0)
		retval = -EPERM;
	else if (dev->power.runtime_status == RPM_SUSPENDED)
		retval = 1;

	return retval;
}

/**
 * __rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int retval;

	if (dev->power.irq_safe)
		spin_unlock(&dev->power.lock);
	else
		spin_unlock_irq(&dev->power.lock);

	retval = cb(dev);

	if (dev->power.irq_safe)
		spin_lock(&dev->power.lock);
	else
		spin_lock_irq(&dev->power.lock);

	return retval;
}

/**
 * rpm_idle - Notify device bus type if the device can be suspended.
 * @dev: Device to notify the bus type about.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.  If
 * another idle notification has been started earlier, return immediately.  If
 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
 * run the ->runtime_idle() callback directly.  If the ->runtime_idle callback
 * doesn't exist or if it returns 0, call rpm_suspend() with the RPM_AUTO flag.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_idle(struct device *dev, int rpmflags)
{
	int (*callback)(struct device *);
	int retval;

	trace_rpm_idle(dev, rpmflags);
	retval = rpm_check_suspend_allowed(dev);
	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Idle notifications are allowed only in the RPM_ACTIVE state. */
	else if (dev->power.runtime_status != RPM_ACTIVE)
		retval = -EAGAIN;

	/*
	 * Any pending request other than an idle notification takes
	 * precedence over us, except that the timer may be running.
	 */
	else if (dev->power.request_pending &&
	    dev->power.request > RPM_REQ_IDLE)
		retval = -EAGAIN;

	/* Act as though RPM_NOWAIT is always set. */
	else if (dev->power.idle_notification)
		retval = -EINPROGRESS;
	if (retval)
		goto out;

	/* Pending requests need to be canceled. */
	dev->power.request = RPM_REQ_NONE;

	if (dev->power.no_callbacks)
		goto out;

	/* Carry out an asynchronous or a synchronous idle notification. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_IDLE;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		trace_rpm_return_int(dev, _THIS_IP_, 0);
		return 0;
	}

	dev->power.idle_notification = true;

	callback = rpm_get_idle_cb(dev);

	if (callback)
		retval = __rpm_callback(callback, dev);

	dev->power.idle_notification = false;
	wake_up_all(&dev->power.wait_queue);

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);
	return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}

/**
 * rpm_callback - Run a given runtime PM callback for a given device.
 * @cb: Runtime PM callback to run.
 * @dev: Device to run the callback for.
 */
static int rpm_callback(int (*cb)(struct device *), struct device *dev)
{
	int retval;

	if (!cb)
		return -ENOSYS;

	if (dev->power.memalloc_noio) {
		unsigned int noio_flag;

		/*
		 * A deadlock might occur if a GFP_KERNEL allocation happens
		 * inside the runtime_suspend or runtime_resume callback of a
		 * block device's ancestor or of the block device itself.  A
		 * network device may effectively be part of an iSCSI block
		 * device, so network devices and their ancestors should be
		 * marked as memalloc_noio too.
		 */
		noio_flag = memalloc_noio_save();
		retval = __rpm_callback(cb, dev);
		memalloc_noio_restore(noio_flag);
	} else {
		retval = __rpm_callback(cb, dev);
	}

	dev->power.runtime_error = retval;
	return retval != -EACCES ? retval : -EIO;
}
397
/**
 * rpm_suspend - Carry out runtime suspend of given device.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be suspended.
 * Cancel a pending idle notification, autosuspend or suspend.  If
 * another suspend has been started earlier, either return immediately
 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
 * otherwise run the ->runtime_suspend() callback directly.  If
 * ->runtime_suspend() succeeds and a deferred resume was requested while
 * the callback was running, carry out the resume; otherwise send an idle
 * notification to the device's parent (unless the parent's
 * power.ignore_children flag or the device's power.irq_safe flag is set).
 * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO
 * flag is set and the next autosuspend-delay expiration time is in the
 * future, schedule another autosuspend attempt.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_suspend(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval;

	trace_rpm_suspend(dev, rpmflags);

 repeat:
	retval = rpm_check_suspend_allowed(dev);

	if (retval < 0)
		;	/* Conditions are wrong. */

	/* Synchronous suspends are not allowed in the RPM_RESUMING state. */
	else if (dev->power.runtime_status == RPM_RESUMING &&
	    !(rpmflags & RPM_ASYNC))
		retval = -EAGAIN;
	if (retval)
		goto out;

	/* If the autosuspend_delay time hasn't expired yet, reschedule. */
	if ((rpmflags & RPM_AUTO)
	    && dev->power.runtime_status != RPM_SUSPENDING) {
		unsigned long expires = pm_runtime_autosuspend_expiration(dev);

		if (expires != 0) {
			/* Pending requests need to be canceled. */
			dev->power.request = RPM_REQ_NONE;

			/*
			 * Optimization: If the timer is already running and is
			 * set to expire at or before the autosuspend delay,
			 * avoid the overhead of resetting it.  Just let it
			 * expire; pm_suspend_timer_fn() will take care of the
			 * rest.
			 */
			if (!(dev->power.timer_expires && time_before_eq(
			    dev->power.timer_expires, expires))) {
				dev->power.timer_expires = expires;
				mod_timer(&dev->power.suspend_timer, expires);
			}
			dev->power.timer_autosuspends = 1;
			goto out;
		}
	}

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	if (dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the other suspend running in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	/* Carry out an asynchronous or a synchronous suspend. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = (rpmflags & RPM_AUTO) ?
		    RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		goto out;
	}

	__update_runtime_status(dev, RPM_SUSPENDING);

	callback = rpm_get_suspend_cb(dev);

	retval = rpm_callback(callback, dev);
	if (retval)
		goto fail;

 no_callback:
	__update_runtime_status(dev, RPM_SUSPENDED);
	pm_runtime_deactivate_timer(dev);

	if (dev->parent) {
		parent = dev->parent;
		atomic_add_unless(&parent->power.child_count, -1, 0);
	}
	wake_up_all(&dev->power.wait_queue);

	if (dev->power.deferred_resume) {
		dev->power.deferred_resume = false;
		rpm_resume(dev, 0);
		retval = -EAGAIN;
		goto out;
	}

	/* Maybe the parent is now able to suspend. */
	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
		spin_unlock(&dev->power.lock);

		spin_lock(&parent->power.lock);
		rpm_idle(parent, RPM_ASYNC);
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
	}

 out:
	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;

 fail:
	__update_runtime_status(dev, RPM_ACTIVE);
	dev->power.deferred_resume = false;
	wake_up_all(&dev->power.wait_queue);

	if (retval == -EAGAIN || retval == -EBUSY) {
		dev->power.runtime_error = 0;

		/*
		 * If the callback routine failed an autosuspend, and
		 * if the last_busy time has been updated so that there
		 * is a new autosuspend expiration time, automatically
		 * reschedule another autosuspend.
		 */
		if ((rpmflags & RPM_AUTO) &&
		    pm_runtime_autosuspend_expiration(dev) != 0)
			goto repeat;
	} else {
		pm_runtime_cancel_pending(dev);
	}
	goto out;
}
581
/**
 * rpm_resume - Carry out runtime resume of given device.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * Check if the device's runtime PM status allows it to be resumed.  Cancel
 * any scheduled or pending requests.  If another resume has been started
 * earlier, either return immediately or wait for it to finish, depending on the
 * RPM_NOWAIT and RPM_ASYNC flags.  Similarly, if there's a suspend running in
 * parallel with this function, either tell the other process to resume after
 * suspending (deferred_resume) or wait for it to finish.  If the RPM_ASYNC
 * flag is set then queue a resume request; otherwise run the
 * ->runtime_resume() callback directly.  Queue an idle notification for the
 * device if the resume succeeded.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static int rpm_resume(struct device *dev, int rpmflags)
	__releases(&dev->power.lock) __acquires(&dev->power.lock)
{
	int (*callback)(struct device *);
	struct device *parent = NULL;
	int retval = 0;

	trace_rpm_resume(dev, rpmflags);

 repeat:
	if (dev->power.runtime_error)
		retval = -EINVAL;
	else if (dev->power.disable_depth == 1 && dev->power.is_suspended
	    && dev->power.runtime_status == RPM_ACTIVE)
		retval = 1;
	else if (dev->power.disable_depth > 0)
		retval = -EACCES;
	if (retval)
		goto out;

	/*
	 * Other scheduled or pending requests need to be canceled.  Small
	 * optimization: If an autosuspend timer is running, leave it running
	 * rather than cancelling it now only to restart it again in the near
	 * future.
	 */
	dev->power.request = RPM_REQ_NONE;
	if (!dev->power.timer_autosuspends)
		pm_runtime_deactivate_timer(dev);

	if (dev->power.runtime_status == RPM_ACTIVE) {
		retval = 1;
		goto out;
	}

	if (dev->power.runtime_status == RPM_RESUMING
	    || dev->power.runtime_status == RPM_SUSPENDING) {
		DEFINE_WAIT(wait);

		if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
			if (dev->power.runtime_status == RPM_SUSPENDING)
				dev->power.deferred_resume = true;
			else
				retval = -EINPROGRESS;
			goto out;
		}

		if (dev->power.irq_safe) {
			spin_unlock(&dev->power.lock);

			cpu_relax();

			spin_lock(&dev->power.lock);
			goto repeat;
		}

		/* Wait for the operation carried out in parallel with us. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_RESUMING
			    && dev->power.runtime_status != RPM_SUSPENDING)
				break;

			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
		goto repeat;
	}

	/*
	 * See if we can skip waking up the parent.  This is safe only if
	 * power.no_callbacks is set, because otherwise we don't know whether
	 * the resume will actually succeed.
	 */
	if (dev->power.no_callbacks && !parent && dev->parent) {
		spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
		if (dev->parent->power.disable_depth > 0
		    || dev->parent->power.ignore_children
		    || dev->parent->power.runtime_status == RPM_ACTIVE) {
			atomic_inc(&dev->parent->power.child_count);
			spin_unlock(&dev->parent->power.lock);
			retval = 1;
			goto no_callback;	/* Assume success. */
		}
		spin_unlock(&dev->parent->power.lock);
	}

	/* Carry out an asynchronous or a synchronous resume. */
	if (rpmflags & RPM_ASYNC) {
		dev->power.request = RPM_REQ_RESUME;
		if (!dev->power.request_pending) {
			dev->power.request_pending = true;
			queue_work(pm_wq, &dev->power.work);
		}
		retval = 0;
		goto out;
	}

	if (!parent && dev->parent) {
		/*
		 * Increment the parent's usage counter and resume it if
		 * necessary.  Not needed if dev is irq-safe; then the
		 * parent is permanently resumed.
		 */
		parent = dev->parent;
		if (dev->power.irq_safe)
			goto skip_parent;
		spin_unlock(&dev->power.lock);

		pm_runtime_get_noresume(parent);

		spin_lock(&parent->power.lock);
		/*
		 * We can resume if the parent's runtime PM is disabled or it
		 * is set to ignore children.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children) {
			rpm_resume(parent, 0);
			if (parent->power.runtime_status != RPM_ACTIVE)
				retval = -EBUSY;
		}
		spin_unlock(&parent->power.lock);

		spin_lock(&dev->power.lock);
		if (retval)
			goto out;
		goto repeat;
	}
 skip_parent:

	if (dev->power.no_callbacks)
		goto no_callback;	/* Assume success. */

	__update_runtime_status(dev, RPM_RESUMING);

	callback = rpm_get_resume_cb(dev);

	retval = rpm_callback(callback, dev);
	if (retval) {
		__update_runtime_status(dev, RPM_SUSPENDED);
		pm_runtime_cancel_pending(dev);
	} else {
 no_callback:
		__update_runtime_status(dev, RPM_ACTIVE);
		if (parent)
			atomic_inc(&parent->power.child_count);
	}
	wake_up_all(&dev->power.wait_queue);

	if (retval >= 0)
		rpm_idle(dev, RPM_ASYNC);

 out:
	if (parent && !dev->power.irq_safe) {
		spin_unlock_irq(&dev->power.lock);

		pm_runtime_put(parent);

		spin_lock_irq(&dev->power.lock);
	}

	trace_rpm_return_int(dev, _THIS_IP_, retval);

	return retval;
}
770
/**
 * pm_runtime_work - Universal runtime PM work function.
 * @work: Work structure used for scheduling the execution of this function.
 *
 * Use @work to get the device object the work is to be done for, determine
 * what is to be done and execute the appropriate runtime PM function.
 */
static void pm_runtime_work(struct work_struct *work)
{
	struct device *dev = container_of(work, struct device, power.work);
	enum rpm_request req;

	spin_lock_irq(&dev->power.lock);

	if (!dev->power.request_pending)
		goto out;

	req = dev->power.request;
	dev->power.request = RPM_REQ_NONE;
	dev->power.request_pending = false;

	switch (req) {
	case RPM_REQ_NONE:
		break;
	case RPM_REQ_IDLE:
		rpm_idle(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_SUSPEND:
		rpm_suspend(dev, RPM_NOWAIT);
		break;
	case RPM_REQ_AUTOSUSPEND:
		rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
		break;
	case RPM_REQ_RESUME:
		rpm_resume(dev, RPM_NOWAIT);
		break;
	}

 out:
	spin_unlock_irq(&dev->power.lock);
}

/**
 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
 * @data: Device pointer passed by pm_schedule_suspend().
 *
 * Check if the time is right and queue a suspend request.
 */
static void pm_suspend_timer_fn(unsigned long data)
{
	struct device *dev = (struct device *)data;
	unsigned long flags;
	unsigned long expires;

	spin_lock_irqsave(&dev->power.lock, flags);

	expires = dev->power.timer_expires;
	/* If 'expires' is after 'jiffies' we've been called too early. */
	if (expires > 0 && !time_after(expires, jiffies)) {
		dev->power.timer_expires = 0;
		rpm_suspend(dev, dev->power.timer_autosuspends ?
		    (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);
}

/**
 * pm_schedule_suspend - Set up a timer to submit a suspend request in the future.
 * @dev: Device to suspend.
 * @delay: Time to wait before submitting a suspend request, in milliseconds.
 */
int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!delay) {
		retval = rpm_suspend(dev, RPM_ASYNC);
		goto out;
	}

	retval = rpm_check_suspend_allowed(dev);
	if (retval)
		goto out;

	/* Other scheduled or pending requests need to be canceled. */
	pm_runtime_cancel_pending(dev);

	dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
	dev->power.timer_expires += !dev->power.timer_expires;
	dev->power.timer_autosuspends = 0;
	mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
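
/*
 * Illustrative note (hypothetical): a driver that expects its device to stay
 * idle may request a delayed suspend instead of suspending synchronously:
 *
 *	pm_schedule_suspend(dev, 500);	(suspend in ~500 ms unless resumed)
 *
 * The queued suspend is dropped if the device is resumed or becomes busy
 * before the timer fires.
 */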
873
/**
 * __pm_runtime_idle - Entry point for runtime idle operations.
 * @dev: Device to send idle notification for.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out an idle
 * notification, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_idle(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_idle);

/**
 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
 * @dev: Device to suspend.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
 * return immediately if it is larger than zero.  Then carry out a suspend,
 * either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT) {
		if (!atomic_dec_and_test(&dev->power.usage_count))
			return 0;
	}

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_suspend(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);

/**
 * __pm_runtime_resume - Entry point for runtime resume operations.
 * @dev: Device to resume.
 * @rpmflags: Flag bits.
 *
 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
 * carry out a resume, either synchronous or asynchronous.
 *
 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
 * or if pm_runtime_irq_safe() has been called.
 */
int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	unsigned long flags;
	int retval;

	might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);

	if (rpmflags & RPM_GET_PUT)
		atomic_inc(&dev->power.usage_count);

	spin_lock_irqsave(&dev->power.lock, flags);
	retval = rpm_resume(dev, rpmflags);
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return retval;
}
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
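
/*
 * Illustrative sketch (hypothetical): the usual driver-facing pattern built
 * on the three entry points above is a get/put pair around hardware access:
 *
 *	ret = pm_runtime_get_sync(dev);
 *	if (ret < 0) {
 *		pm_runtime_put_noidle(dev);
 *		return ret;
 *	}
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 *
 * pm_runtime_get_sync() resumes the device if necessary and counts the
 * caller as a user; pm_runtime_put() drops the reference and queues an
 * asynchronous idle notification when the count reaches zero.
 */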
966
/**
 * __pm_runtime_set_status - Set runtime PM status of a device.
 * @dev: Device to handle.
 * @status: New runtime PM status of the device.
 *
 * If runtime PM of the device is disabled or its power.runtime_error field is
 * nonzero, the status may be changed either to RPM_ACTIVE or to
 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
 * However, if the device has a parent and the parent is not active, and the
 * parent's power.ignore_children flag is unset, the device's status cannot be
 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
 *
 * If successful, __pm_runtime_set_status() clears the power.runtime_error
 * field and the device parent's counter of unsuspended children is modified
 * to reflect the new status.  If the new status is RPM_SUSPENDED, an idle
 * notification request for the parent is submitted.
 */
int __pm_runtime_set_status(struct device *dev, unsigned int status)
{
	struct device *parent = dev->parent;
	unsigned long flags;
	bool notify_parent = false;
	int error = 0;

	if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (!dev->power.runtime_error && !dev->power.disable_depth) {
		error = -EAGAIN;
		goto out;
	}

	if (dev->power.runtime_status == status)
		goto out_set;

	if (status == RPM_SUSPENDED) {
		/* It is always possible to set the status to 'suspended'. */
		if (parent) {
			atomic_add_unless(&parent->power.child_count, -1, 0);
			notify_parent = !parent->power.ignore_children;
		}
		goto out_set;
	}

	if (parent) {
		spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);

		/*
		 * It is invalid to put an active child under a parent that is
		 * not active, has runtime PM enabled and the
		 * 'power.ignore_children' flag unset.
		 */
		if (!parent->power.disable_depth
		    && !parent->power.ignore_children
		    && parent->power.runtime_status != RPM_ACTIVE)
			error = -EBUSY;
		else if (dev->power.runtime_status == RPM_SUSPENDED)
			atomic_inc(&parent->power.child_count);

		spin_unlock(&parent->power.lock);

		if (error)
			goto out;
	}

 out_set:
	__update_runtime_status(dev, status);
	dev->power.runtime_error = 0;
 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (notify_parent)
		pm_request_idle(parent);

	return error;
}
EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
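
/*
 * Illustrative note (hypothetical probe code): a driver whose hardware is
 * already powered up at probe time tells the core so before enabling
 * runtime PM:
 *
 *	pm_runtime_set_active(dev);	(wraps __pm_runtime_set_status())
 *	pm_runtime_enable(dev);
 */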
1046
/**
 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Flush all pending requests for the device from pm_wq and wait for all
 * runtime PM operations involving the device in progress to complete.
 *
 * Should be called under dev->power.lock with interrupts disabled.
 */
static void __pm_runtime_barrier(struct device *dev)
{
	pm_runtime_deactivate_timer(dev);

	if (dev->power.request_pending) {
		dev->power.request = RPM_REQ_NONE;
		spin_unlock_irq(&dev->power.lock);

		cancel_work_sync(&dev->power.work);

		spin_lock_irq(&dev->power.lock);
		dev->power.request_pending = false;
	}

	if (dev->power.runtime_status == RPM_SUSPENDING
	    || dev->power.runtime_status == RPM_RESUMING
	    || dev->power.idle_notification) {
		DEFINE_WAIT(wait);

		/* Suspend, wake-up or idle notification in progress. */
		for (;;) {
			prepare_to_wait(&dev->power.wait_queue, &wait,
					TASK_UNINTERRUPTIBLE);
			if (dev->power.runtime_status != RPM_SUSPENDING
			    && dev->power.runtime_status != RPM_RESUMING
			    && !dev->power.idle_notification)
				break;
			spin_unlock_irq(&dev->power.lock);

			schedule();

			spin_lock_irq(&dev->power.lock);
		}
		finish_wait(&dev->power.wait_queue, &wait);
	}
}

/**
 * pm_runtime_barrier - Flush pending requests and wait for completions.
 * @dev: Device to handle.
 *
 * Prevent the device from being suspended by incrementing its usage counter
 * and, if there's a pending resume request for the device, wake the device
 * up.  Next, make sure that all pending requests for the device have been
 * flushed from pm_wq and wait for all runtime PM operations involving the
 * device in progress to complete.
 *
 * Return value:
 * 1, if there was a resume request pending and the device had to be woken up,
 * 0, otherwise
 */
int pm_runtime_barrier(struct device *dev)
{
	int retval = 0;

	pm_runtime_get_noresume(dev);
	spin_lock_irq(&dev->power.lock);

	if (dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		rpm_resume(dev, 0);
		retval = 1;
	}

	__pm_runtime_barrier(dev);

	spin_unlock_irq(&dev->power.lock);
	pm_runtime_put_noidle(dev);

	return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1128
/**
 * __pm_runtime_disable - Disable runtime PM of a device.
 * @dev: Device to handle.
 * @check_resume: If set, check if there's a resume request for the device.
 *
 * Increment power.disable_depth for the device and, if it was zero
 * previously, cancel all pending runtime PM requests for the device and wait
 * for all operations in progress to complete.  The device can be either
 * active or suspended after its runtime PM has been disabled.
 *
 * If @check_resume is set and there's a resume request pending when
 * __pm_runtime_disable() is called and power.disable_depth is zero, the
 * function will wake up the device before disabling its runtime PM.
 */
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
	spin_lock_irq(&dev->power.lock);

	if (dev->power.disable_depth > 0) {
		dev->power.disable_depth++;
		goto out;
	}

	/*
	 * Wake up the device if there's a resume request pending, because that
	 * means there probably is some I/O to process and disabling runtime PM
	 * shouldn't prevent the device from processing the I/O.
	 */
	if (check_resume && dev->power.request_pending
	    && dev->power.request == RPM_REQ_RESUME) {
		/*
		 * Prevent suspends and idle notifications from being carried
		 * out after we have woken up the device.
		 */
		pm_runtime_get_noresume(dev);

		rpm_resume(dev, 0);

		pm_runtime_put_noidle(dev);
	}

	if (!dev->power.disable_depth++)
		__pm_runtime_barrier(dev);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_disable);

/**
 * pm_runtime_enable - Enable runtime PM of a device.
 * @dev: Device to handle.
 */
void pm_runtime_enable(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (dev->power.disable_depth > 0)
		dev->power.disable_depth--;
	else
		dev_warn(dev, "Unbalanced %s!\n", __func__);

	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_runtime_enable);

/**
 * pm_runtime_forbid - Block runtime PM of a device.
 * @dev: Device to handle.
 *
 * Increase the device's usage count and clear its power.runtime_auto flag,
 * so that it cannot be suspended at run time until pm_runtime_allow() is
 * called for it.
 */
void pm_runtime_forbid(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (!dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = false;
	atomic_inc(&dev->power.usage_count);
	rpm_resume(dev, 0);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_forbid);

/**
 * pm_runtime_allow - Unblock runtime PM of a device.
 * @dev: Device to handle.
 *
 * Decrease the device's usage count and set its power.runtime_auto flag.
 */
void pm_runtime_allow(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.runtime_auto)
		goto out;

	dev->power.runtime_auto = true;
	if (atomic_dec_and_test(&dev->power.usage_count))
		rpm_idle(dev, RPM_AUTO);

 out:
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_allow);
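
/*
 * Illustrative note: pm_runtime_forbid() and pm_runtime_allow() are normally
 * driven by the device's power/control sysfs attribute ("on" forbids, "auto"
 * allows).  A driver may also call pm_runtime_forbid() itself, e.g. in probe,
 * to make runtime suspend opt-in from user space (hypothetical):
 *
 *	pm_runtime_forbid(&pdev->dev);
 */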
1240
/**
 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
 * @dev: Device to handle.
 *
 * Set the power.no_callbacks flag, which tells the PM core that this
 * device is power-managed through its parent and has no runtime PM
 * callbacks of its own.  The runtime sysfs attributes will be removed.
 */
void pm_runtime_no_callbacks(struct device *dev)
{
	spin_lock_irq(&dev->power.lock);
	dev->power.no_callbacks = 1;
	spin_unlock_irq(&dev->power.lock);
	if (device_is_registered(dev))
		rpm_sysfs_remove(dev);
}
EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);

/**
 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
 * @dev: Device to handle
 *
 * Set the power.irq_safe flag, which tells the PM core that the
 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
 * always be invoked with the spinlock held and interrupts disabled.  It also
 * causes the parent's usage counter to be permanently incremented, preventing
 * the parent from runtime suspending -- otherwise an irq-safe child might have
 * to wait for a non-irq-safe parent.
 */
void pm_runtime_irq_safe(struct device *dev)
{
	if (dev->parent)
		pm_runtime_get_sync(dev->parent);
	spin_lock_irq(&dev->power.lock);
	dev->power.irq_safe = 1;
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
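
/*
 * Illustrative sketch (hypothetical): once pm_runtime_irq_safe() has been
 * called, the synchronous helpers may be used in atomic context, e.g.:
 *
 *	pm_runtime_irq_safe(dev);	(once, typically in probe)
 *	...
 *	spin_lock_irqsave(&foo->lock, flags);
 *	pm_runtime_get_sync(dev);	(legal in atomic context here)
 *	...
 *	spin_unlock_irqrestore(&foo->lock, flags);
 */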
1279
/**
 * update_autosuspend - Handle a change to a device's autosuspend settings.
 * @dev: Device to handle.
 * @old_delay: The former autosuspend_delay value.
 * @old_use: The former use_autosuspend value.
 *
 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
 *
 * This function must be called under dev->power.lock with interrupts disabled.
 */
static void update_autosuspend(struct device *dev, int old_delay, int old_use)
{
	int delay = dev->power.autosuspend_delay;

	/* Should runtime suspend be prevented now? */
	if (dev->power.use_autosuspend && delay < 0) {

		/* If it used to be allowed then prevent it. */
		if (!old_use || old_delay >= 0) {
			atomic_inc(&dev->power.usage_count);
			rpm_resume(dev, 0);
		}
	}

	/* Runtime suspend should be allowed now. */
	else {

		/* If it used to be prevented then allow it. */
		if (old_use && old_delay < 0)
			atomic_dec(&dev->power.usage_count);

		/* Maybe we can autosuspend now. */
		rpm_idle(dev, RPM_AUTO);
	}
}

/**
 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
 * @dev: Device to handle.
 * @delay: Value of the new delay in milliseconds.
 *
 * Set the device's power.autosuspend_delay value.  If it changes to negative
 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
 * changes the other way, allow runtime suspends.
 */
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.autosuspend_delay = delay;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);

/**
 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
 * @dev: Device to handle.
 * @use: New value for use_autosuspend.
 *
 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
 * suspends as needed.
 */
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
{
	int old_delay, old_use;

	spin_lock_irq(&dev->power.lock);
	old_delay = dev->power.autosuspend_delay;
	old_use = dev->power.use_autosuspend;
	dev->power.use_autosuspend = use;
	update_autosuspend(dev, old_delay, old_use);
	spin_unlock_irq(&dev->power.lock);
}
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
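
/*
 * Illustrative sketch (hypothetical probe code): the two setters above are
 * normally used together when enabling autosuspend for a device:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 2000);	(2 s of idleness)
 *	pm_runtime_use_autosuspend(dev);
 *	pm_runtime_enable(dev);
 */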
1359
/**
 * pm_runtime_init - Initialize runtime PM fields in given device object.
 * @dev: Device object to initialize.
 */
void pm_runtime_init(struct device *dev)
{
	dev->power.runtime_status = RPM_SUSPENDED;
	dev->power.idle_notification = false;

	dev->power.disable_depth = 1;
	atomic_set(&dev->power.usage_count, 0);

	dev->power.runtime_error = 0;

	atomic_set(&dev->power.child_count, 0);
	pm_suspend_ignore_children(dev, false);
	dev->power.runtime_auto = true;

	dev->power.request_pending = false;
	dev->power.request = RPM_REQ_NONE;
	dev->power.deferred_resume = false;
	dev->power.accounting_timestamp = jiffies;
	INIT_WORK(&dev->power.work, pm_runtime_work);

	dev->power.timer_expires = 0;
	setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
			(unsigned long)dev);

	init_waitqueue_head(&dev->power.wait_queue);
}

/**
 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
 * @dev: Device object being removed from device hierarchy.
 */
void pm_runtime_remove(struct device *dev)
{
	__pm_runtime_disable(dev, false);

	/* Change the status back to 'suspended' to match the initial status. */
	if (dev->power.runtime_status == RPM_ACTIVE)
		pm_runtime_set_suspended(dev);
	if (dev->power.irq_safe && dev->parent)
		pm_runtime_put(dev->parent);
}
#endif
1406
/**
 * pm_runtime_force_suspend - Force a device into suspend state if needed.
 * @dev: Device to suspend.
 *
 * Disable runtime PM so we can safely check the device's runtime PM status
 * and, if it is active, invoke its ->runtime_suspend callback to bring it
 * into suspend state.  Keep runtime PM disabled to preserve the state unless
 * we encounter errors.
 *
 * Typically this function may be invoked from a system suspend callback to
 * make sure the device is put into low power state.
 */
int pm_runtime_force_suspend(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	pm_runtime_disable(dev);

	/*
	 * Note that pm_runtime_status_suspended() returns false while
	 * !CONFIG_PM_RUNTIME, which means the device will be put into low
	 * power state.
	 */
	if (pm_runtime_status_suspended(dev))
		return 0;

	callback = rpm_get_suspend_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto err;
	}

	ret = callback(dev);
	if (ret)
		goto err;

	pm_runtime_set_suspended(dev);
	return 0;
err:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);

/**
 * pm_runtime_force_resume - Force a device into resume state.
 * @dev: Device to resume.
 *
 * Prior to invoking this function we expect the user to have brought the
 * device into low power state by a call to pm_runtime_force_suspend().  Here
 * we reverse those actions and bring the device back to full power.  We also
 * update the runtime PM status and re-enable runtime PM.
 *
 * Typically this function may be invoked from a system resume callback to
 * make sure the device is put into full power state.
 */
int pm_runtime_force_resume(struct device *dev)
{
	int (*callback)(struct device *);
	int ret = 0;

	callback = rpm_get_resume_cb(dev);

	if (!callback) {
		ret = -ENOSYS;
		goto out;
	}

	ret = callback(dev);
	if (ret)
		goto out;

	pm_runtime_set_active(dev);
	pm_runtime_mark_last_busy(dev);
out:
	pm_runtime_enable(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
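
/*
 * Illustrative sketch (hypothetical): drivers whose runtime PM callbacks
 * already do everything needed for system sleep can point their system
 * sleep operations at the two helpers above:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 *					pm_runtime_force_resume)
 *		SET_RUNTIME_PM_OPS(foo_runtime_suspend,
 *				   foo_runtime_resume, NULL)
 *	};
 */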
1488