/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/resume-trace.h>
#include <linux/rwsem.h>

#include "../base.h"
#include "power.h"

/*
 * The entries in the dpm_list list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device semaphore held,
 * we must never try to acquire a device semaphore while holding
 * dpm_list_mtx.
 */

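/*
 * All of the list walkers below (dpm_prepare(), dpm_suspend(), dpm_resume()
 * and dpm_complete()) honor that rule with the same discipline: pin the
 * device, drop dpm_list_mtx before calling into it (which may take
 * dev->sem), then re-acquire the mutex and drop the reference.  A minimal
 * sketch of the pattern:
 *
 *      get_device(dev);
 *      mutex_unlock(&dpm_list_mtx);
 *
 *      ... run the device's callback, possibly under dev->sem ...
 *
 *      mutex_lock(&dpm_list_mtx);
 *      put_device(dev);
 */
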
LIST_HEAD(dpm_list);

static DEFINE_MUTEX(dpm_list_mtx);

/*
 * Set once the preparation of devices for a PM transition has started, reset
 * before starting to resume devices.  Protected by dpm_list_mtx.
 */
static bool transition_started;

/**
 *      device_pm_lock - lock the list of active devices used by the PM core
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 *      device_pm_unlock - unlock the list of active devices used by the PM core
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 *      device_pm_add - add a device to the list of active devices
 *      @dev:   Device to be added to the list
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent) {
                if (dev->parent->power.status >= DPM_SUSPENDING) {
                        dev_warn(dev, "parent %s is sleeping, will not add\n",
                                dev->parent->bus_id);
                        WARN_ON(true);
                }
        } else if (transition_started) {
                /*
                 * We refuse to register parentless devices while a PM
                 * transition is in progress in order to avoid leaving them
                 * unhandled down the road.
                 */
                WARN_ON(true);
        }

        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 *      device_pm_remove - remove a device from the list of active devices
 *      @dev:   Device to be removed from the list
 *
 *      This function also removes the device's PM-related sysfs attributes.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus",
                 kobject_name(&dev->kobj));
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
}

/**
 *      pm_op - execute the PM operation appropriate for the given PM event
 *      @dev:   Device.
 *      @ops:   PM operations to choose from.
 *      @state: PM transition of the system being carried out.
 */
static int pm_op(struct device *dev, struct pm_ops *ops, pm_message_t state)
{
        int error = 0;

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend) {
                        error = ops->suspend(dev);
                        suspend_report_result(ops->suspend, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume) {
                        error = ops->resume(dev);
                        suspend_report_result(ops->resume, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze) {
                        error = ops->freeze(dev);
                        suspend_report_result(ops->freeze, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff) {
                        error = ops->poweroff(dev);
                        suspend_report_result(ops->poweroff, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw) {
                        error = ops->thaw(dev);
                        suspend_report_result(ops->thaw, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore) {
                        error = ops->restore(dev);
                        suspend_report_result(ops->restore, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }
        return error;
}

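/*
 * pm_op() only dispatches; the callbacks themselves come from the struct
 * pm_ops hung off the device's bus (via dev->bus->pm->base), device type
 * or class.  Purely as an illustration (the foo_* names are invented), a
 * device type could route the sleep and wake events through two handlers:
 *
 *      static int foo_suspend(struct device *dev) { ... }
 *      static int foo_resume(struct device *dev) { ... }
 *
 *      static struct pm_ops foo_pm_ops = {
 *              .suspend        = foo_suspend,
 *              .freeze         = foo_suspend,
 *              .poweroff       = foo_suspend,
 *              .resume         = foo_resume,
 *              .thaw           = foo_resume,
 *              .restore        = foo_resume,
 *      };
 *
 * Any callback left NULL is simply skipped by the dispatcher above.
 */
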
/**
 *      pm_noirq_op - execute the PM operation appropriate for the given PM event
 *      @dev:   Device.
 *      @ops:   PM operations to choose from.
 *      @state: PM transition of the system being carried out.
 *
 *      The operation is executed with interrupts disabled by the only remaining
 *      functional CPU in the system.
 */
static int pm_noirq_op(struct device *dev, struct pm_ext_ops *ops,
                        pm_message_t state)
{
        int error = 0;

        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                if (ops->suspend_noirq) {
                        error = ops->suspend_noirq(dev);
                        suspend_report_result(ops->suspend_noirq, error);
                }
                break;
        case PM_EVENT_RESUME:
                if (ops->resume_noirq) {
                        error = ops->resume_noirq(dev);
                        suspend_report_result(ops->resume_noirq, error);
                }
                break;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATION
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                if (ops->freeze_noirq) {
                        error = ops->freeze_noirq(dev);
                        suspend_report_result(ops->freeze_noirq, error);
                }
                break;
        case PM_EVENT_HIBERNATE:
                if (ops->poweroff_noirq) {
                        error = ops->poweroff_noirq(dev);
                        suspend_report_result(ops->poweroff_noirq, error);
                }
                break;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                if (ops->thaw_noirq) {
                        error = ops->thaw_noirq(dev);
                        suspend_report_result(ops->thaw_noirq, error);
                }
                break;
        case PM_EVENT_RESTORE:
                if (ops->restore_noirq) {
                        error = ops->restore_noirq(dev);
                        suspend_report_result(ops->restore_noirq, error);
                }
                break;
#endif /* CONFIG_HIBERNATION */
        default:
                error = -EINVAL;
        }
        return error;
}

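/*
 * Bus types use the extended structure: dev->bus->pm is a struct pm_ext_ops
 * whose ->base member holds the ordinary callbacks consumed by pm_op(),
 * while the *_noirq members are invoked here with interrupts disabled.
 * A sketch, again with invented foo_* names:
 *
 *      static struct pm_ext_ops foo_bus_pm_ops = {
 *              .base = {
 *                      .suspend        = foo_bus_suspend,
 *                      .resume         = foo_bus_resume,
 *              },
 *              .suspend_noirq  = foo_bus_suspend_noirq,
 *              .resume_noirq   = foo_bus_resume_noirq,
 *      };
 */
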
static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                kobject_name(&dev->kobj), pm_verb(state.event), info, error);
}

/*------------------------- Resume routines -------------------------*/

/**
 *      resume_device_noirq - Power on one device (early resume).
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 *
 *      Must be called with interrupts disabled.
 */
static int resume_device_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (!dev->bus)
                goto End;

        if (dev->bus->pm) {
                pm_dev_dbg(dev, state, "EARLY ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        } else if (dev->bus->resume_early) {
                pm_dev_dbg(dev, state, "legacy EARLY ");
                error = dev->bus->resume_early(dev);
        }
 End:
        TRACE_RESUME(error);
        return error;
}

/**
 *      dpm_power_up - Power on all regular (non-sysdev) devices.
 *      @state: PM transition of the system being carried out.
 *
 *      Execute the appropriate "noirq resume" callback for all devices marked
 *      as DPM_OFF_IRQ.
 *
 *      Must be called with interrupts disabled and only one CPU running.
 */
static void dpm_power_up(pm_message_t state)
{
        struct device *dev;

        list_for_each_entry(dev, &dpm_list, power.entry)
                if (dev->power.status > DPM_OFF) {
                        int error;

                        dev->power.status = DPM_OFF;
                        error = resume_device_noirq(dev, state);
                        if (error)
                                pm_dev_err(dev, state, " early", error);
                }
}

/**
 *      device_power_up - Turn on all devices that need special attention.
 *      @state: PM transition of the system being carried out.
 *
 *      Power on system devices, then the devices that had to be shut down
 *      with interrupts disabled.
 *
 *      Must be called with interrupts disabled.
 */
void device_power_up(pm_message_t state)
{
        sysdev_resume();
        dpm_power_up(state);
}
EXPORT_SYMBOL_GPL(device_power_up);

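/*
 * Note that device_power_up() only undoes device_power_down(): it runs the
 * "noirq" callbacks for devices that were left in the DPM_OFF_IRQ state,
 * while interrupts are still disabled.  The full ->resume() callbacks are
 * run later, with interrupts enabled, by device_resume() below.
 */
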
/**
 *      resume_device - Restore state for one device.
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 */
static int resume_device(struct device *dev, pm_message_t state)
{
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        down(&dev->sem);

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, &dev->bus->pm->base, state);
                } else if (dev->bus->resume) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = dev->bus->resume(dev);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                } else if (dev->type->resume) {
                        pm_dev_dbg(dev, state, "legacy type ");
                        error = dev->type->resume(dev);
                }
                if (error)
                        goto End;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->resume) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = dev->class->resume(dev);
                }
        }
 End:
        up(&dev->sem);

        TRACE_RESUME(error);
        return error;
}

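/*
 * Note the ordering in resume_device(): the bus callback runs first, then
 * the device type's, then the class's.  suspend_device() below invokes
 * them in the opposite order (class, type, bus), so each layer is resumed
 * in the reverse of the order in which it was suspended.
 */
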
/**
 *      dpm_resume - Resume every device.
 *      @state: PM transition of the system being carried out.
 *
 *      Execute the appropriate "resume" callback for all devices whose status
 *      indicates that they are inactive.
 */
static void dpm_resume(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = false;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                if (dev->power.status >= DPM_OFF) {
                        int error;

                        dev->power.status = DPM_RESUMING;
                        mutex_unlock(&dpm_list_mtx);

                        error = resume_device(dev, state);

                        mutex_lock(&dpm_list_mtx);
                        if (error)
                                pm_dev_err(dev, state, "", error);
                } else if (dev->power.status == DPM_SUSPENDING) {
                        /* Allow new children of the device to be registered */
                        dev->power.status = DPM_RESUMING;
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

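/*
 * dpm_resume() above (like dpm_complete(), dpm_suspend() and dpm_prepare()
 * below) cannot simply iterate over dpm_list, because dpm_list_mtx is
 * dropped around every callback and devices may be registered or removed
 * in the meantime.  Instead, each device is moved onto a private list once
 * it has been handled (unless it was unregistered, in which case its entry
 * is already empty), and the private list is spliced back into dpm_list
 * when the main list has been drained, preserving the overall ordering.
 */
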
/**
 *      complete_device - Complete a PM transition for given device
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 */
static void complete_device(struct device *dev, pm_message_t state)
{
        down(&dev->sem);

        if (dev->class && dev->class->pm && dev->class->pm->complete) {
                pm_dev_dbg(dev, state, "completing class ");
                dev->class->pm->complete(dev);
        }

        if (dev->type && dev->type->pm && dev->type->pm->complete) {
                pm_dev_dbg(dev, state, "completing type ");
                dev->type->pm->complete(dev);
        }

        if (dev->bus && dev->bus->pm && dev->bus->pm->base.complete) {
                pm_dev_dbg(dev, state, "completing ");
                dev->bus->pm->base.complete(dev);
        }

        up(&dev->sem);
}

/**
 *      dpm_complete - Complete a PM transition for all devices.
 *      @state: PM transition of the system being carried out.
 *
 *      Execute the ->complete() callbacks for all devices that are not marked
 *      as DPM_ON.
 */
static void dpm_complete(pm_message_t state)
{
        struct list_head list;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                if (dev->power.status > DPM_ON) {
                        dev->power.status = DPM_ON;
                        mutex_unlock(&dpm_list_mtx);

                        complete_device(dev, state);

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 *      device_resume - Restore state of each device in system.
 *      @state: PM transition of the system being carried out.
 *
 *      Resume all the devices, unlock them all, and allow new
 *      devices to be registered once again.
 */
void device_resume(pm_message_t state)
{
        might_sleep();
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(device_resume);


/*------------------------- Suspend routines -------------------------*/

/**
 *      resume_event - return a PM message representing the resume event
 *                     corresponding to given sleep state.
 *      @sleep_state: PM message representing a sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 *      suspend_device_noirq - Shut down one device (late suspend).
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 *
 *      This is called with interrupts off and only a single CPU running.
 */
static int suspend_device_noirq(struct device *dev, pm_message_t state)
{
        int error = 0;

        if (!dev->bus)
                return 0;

        if (dev->bus->pm) {
                pm_dev_dbg(dev, state, "LATE ");
                error = pm_noirq_op(dev, dev->bus->pm, state);
        } else if (dev->bus->suspend_late) {
                pm_dev_dbg(dev, state, "legacy LATE ");
                error = dev->bus->suspend_late(dev, state);
                suspend_report_result(dev->bus->suspend_late, error);
        }
        return error;
}

/**
 *      device_power_down - Shut down special devices.
 *      @state: PM transition of the system being carried out.
 *
 *      Power down devices that require interrupts to be disabled.
 *      Then power down system devices.
 *
 *      Must be called with interrupts disabled and only one CPU running.
 */
int device_power_down(pm_message_t state)
{
        struct device *dev;
        int error = 0;

        list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
                error = suspend_device_noirq(dev, state);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        break;
                }
                dev->power.status = DPM_OFF_IRQ;
        }
        if (!error)
                error = sysdev_suspend(state);
        if (error)
                dpm_power_up(resume_event(state));
        return error;
}
EXPORT_SYMBOL_GPL(device_power_down);

/**
 *      suspend_device - Save state of one device.
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 */
static int suspend_device(struct device *dev, pm_message_t state)
{
        int error = 0;

        down(&dev->sem);

        if (dev->class) {
                if (dev->class->pm) {
                        pm_dev_dbg(dev, state, "class ");
                        error = pm_op(dev, dev->class->pm, state);
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = dev->class->suspend(dev, state);
                        suspend_report_result(dev->class->suspend, error);
                }
                if (error)
                        goto End;
        }

        if (dev->type) {
                if (dev->type->pm) {
                        pm_dev_dbg(dev, state, "type ");
                        error = pm_op(dev, dev->type->pm, state);
                } else if (dev->type->suspend) {
                        pm_dev_dbg(dev, state, "legacy type ");
                        error = dev->type->suspend(dev, state);
                        suspend_report_result(dev->type->suspend, error);
                }
                if (error)
                        goto End;
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        pm_dev_dbg(dev, state, "");
                        error = pm_op(dev, &dev->bus->pm->base, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy ");
                        error = dev->bus->suspend(dev, state);
                        suspend_report_result(dev->bus->suspend, error);
                }
        }
 End:
        up(&dev->sem);

        return error;
}

/**
 *      dpm_suspend - Suspend every device.
 *      @state: PM transition of the system being carried out.
 *
 *      Execute the appropriate "suspend" callbacks for all devices.
 */
static int dpm_suspend(pm_message_t state)
{
        struct list_head list;
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = suspend_device(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        put_device(dev);
                        break;
                }
                dev->power.status = DPM_OFF;
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, dpm_list.prev);
        mutex_unlock(&dpm_list_mtx);
        return error;
}

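/*
 * dpm_suspend() (like dpm_complete() above) walks dpm_list from the tail
 * (dpm_list.prev), i.e. in reverse registration order.  Since the list is
 * kept in depth-first order (children after their parents, see the comment
 * at the top of this file), every child is handled before its parent.  In
 * dpm_suspend() the devices suspended so far are spliced back at the tail
 * of dpm_list, behind any device that has not been suspended yet.
 */
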
/**
 *      prepare_device - Execute the ->prepare() callback(s) for given device.
 *      @dev:   Device.
 *      @state: PM transition of the system being carried out.
 */
static int prepare_device(struct device *dev, pm_message_t state)
{
        int error = 0;

        down(&dev->sem);

        if (dev->bus && dev->bus->pm && dev->bus->pm->base.prepare) {
                pm_dev_dbg(dev, state, "preparing ");
                error = dev->bus->pm->base.prepare(dev);
                suspend_report_result(dev->bus->pm->base.prepare, error);
                if (error)
                        goto End;
        }

        if (dev->type && dev->type->pm && dev->type->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing type ");
                error = dev->type->pm->prepare(dev);
                suspend_report_result(dev->type->pm->prepare, error);
                if (error)
                        goto End;
        }

        if (dev->class && dev->class->pm && dev->class->pm->prepare) {
                pm_dev_dbg(dev, state, "preparing class ");
                error = dev->class->pm->prepare(dev);
                suspend_report_result(dev->class->pm->prepare, error);
        }
 End:
        up(&dev->sem);

        return error;
}

/**
 *      dpm_prepare - Prepare all devices for a PM transition.
 *      @state: PM transition of the system being carried out.
 *
 *      Execute the ->prepare() callback for all devices.
 */
static int dpm_prepare(pm_message_t state)
{
        struct list_head list;
        int error = 0;

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        transition_started = true;
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                dev->power.status = DPM_PREPARING;
                mutex_unlock(&dpm_list_mtx);

                error = prepare_device(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        dev->power.status = DPM_ON;
                        if (error == -EAGAIN) {
                                put_device(dev);
                                continue;
                        }
                        printk(KERN_ERR "PM: Failed to prepare device %s "
                                "for power transition: error %d\n",
                                kobject_name(&dev->kobj), error);
                        put_device(dev);
                        break;
                }
                dev->power.status = DPM_SUSPENDING;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &list);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 *      device_suspend - Save state and stop all devices in system.
 *      @state: PM transition of the system being carried out.
 *
 *      Prepare and suspend all devices.
 */
int device_suspend(pm_message_t state)
{
        int error;

        might_sleep();
        error = dpm_prepare(state);
        if (!error)
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(device_suspend);

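/*
 * suspend_report_result(), used throughout this file, is expected to be a
 * wrapper macro (defined in include/linux/pm.h in this kernel generation)
 * that supplies the caller's __func__ to the helper below, roughly:
 *
 *      #define suspend_report_result(fn, ret) \
 *              __suspend_report_result(__func__, fn, ret)
 */
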
void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret) {
                printk(KERN_ERR "%s(): ", function);
                print_fn_descriptor_symbol("%s returns ", fn);
                printk("%d\n", ret);
        }
}
EXPORT_SYMBOL_GPL(__suspend_report_result);