1 #ifndef _LINUX_SUSPEND_H
2 #define _LINUX_SUSPEND_H
4 #if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
5 #include <asm/suspend.h>
7 #include <linux/swap.h>
8 #include <linux/notifier.h>
9 #include <linux/init.h>
12 #include <asm/errno.h>
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
/* Console switching around a sleep transition (real implementations). */
extern int pm_prepare_console(void);
extern void pm_restore_console(void);
#else
/* No-op fallbacks when VT console handling is not configured; prepare
 * reports success so the suspend path proceeds unchanged. */
static inline int pm_prepare_console(void) { return 0; }
static inline void pm_restore_console(void) {}
#endif
22 typedef int __bitwise suspend_state_t;
24 #define PM_SUSPEND_ON ((__force suspend_state_t) 0)
25 #define PM_SUSPEND_STANDBY ((__force suspend_state_t) 1)
26 #define PM_SUSPEND_MEM ((__force suspend_state_t) 3)
27 #define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
30 * struct platform_suspend_ops - Callbacks for managing platform dependent
31 * system sleep states.
33 * @valid: Callback to determine if given system sleep state is supported by
35 * Valid (ie. supported) states are advertised in /sys/power/state. Note
36 * that it still may be impossible to enter given system sleep state if the
37 * conditions aren't right.
38 * There is the %suspend_valid_only_mem function available that can be
39 * assigned to this if the platform only supports mem sleep.
41 * @begin: Initialise a transition to given system sleep state.
42 * @begin() is executed right prior to suspending devices. The information
43 * conveyed to the platform code by @begin() should be disregarded by it as
44 * soon as @end() is executed. If @begin() fails (ie. returns nonzero),
45 * @prepare(), @enter() and @finish() will not be called by the PM core.
46 * This callback is optional. However, if it is implemented, the argument
47 * passed to @enter() is redundant and should be ignored.
49 * @prepare: Prepare the platform for entering the system sleep state indicated
51 * @prepare() is called right after devices have been suspended (ie. the
52 * appropriate .suspend() method has been executed for each device) and
53 * before the nonboot CPUs are disabled (it is executed with IRQs enabled).
54 * This callback is optional. It returns 0 on success or a negative
55 * error code otherwise, in which case the system cannot enter the desired
56 * sleep state (@enter() and @finish() will not be called in that case).
58 * @enter: Enter the system sleep state indicated by @begin() or represented by
59 * the argument if @begin() is not implemented.
60 * This callback is mandatory. It returns 0 on success or a negative
61 * error code otherwise, in which case the system cannot enter the desired
64 * @finish: Called when the system has just left a sleep state, right after
65 * the nonboot CPUs have been enabled and before devices are resumed (it is
66 * executed with IRQs enabled).
67 * This callback is optional, but should be implemented by the platforms
68 * that implement @prepare(). If implemented, it is always called after
69 * @enter() (even if @enter() fails).
71 * @end: Called by the PM core right after resuming devices, to indicate to
72 * the platform that the system has returned to the working state or
73 * the transition to the sleep state has been aborted.
74 * This callback is optional, but should be implemented by the platforms
75 * that implement @begin(), but platforms implementing @begin() should
76 * also provide a @end() which cleans up transitions aborted before
79 struct platform_suspend_ops {
80 int (*valid)(suspend_state_t state);
81 int (*begin)(suspend_state_t state);
83 int (*enter)(suspend_state_t state);
90 * suspend_set_ops - set platform dependent suspend operations
91 * @ops: The new suspend operations to set.
93 extern void suspend_set_ops(struct platform_suspend_ops *ops);
94 extern int suspend_valid_only_mem(suspend_state_t state);
/**
 * arch_suspend_disable_irqs - disable IRQs for suspend
 *
 * Disables IRQs (in the default case). This is a weak symbol in the common
 * code and thus allows architectures to override it if more needs to be
 * done. Not called for suspend to disk.
 */
extern void arch_suspend_disable_irqs(void);
106 * arch_suspend_enable_irqs - enable IRQs after suspend
108 * Enables IRQs (in the default case). This is a weak symbol in the common
109 * code and thus allows architectures to override it if more needs to be
110 * done. Not called for suspend to disk.
112 extern void arch_suspend_enable_irqs(void);
114 extern int pm_suspend(suspend_state_t state);
115 #else /* !CONFIG_SUSPEND */
116 #define suspend_valid_only_mem NULL
118 static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
119 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
120 #endif /* !CONFIG_SUSPEND */
/* struct pbe is used for creating lists of pages that should be restored
 * atomically during the resume from disk, because the page frames they have
 * occupied before the suspend are in use.
 */
struct pbe {
	void *address;		/* address of the copy */
	void *orig_address;	/* original address of a page */
	struct pbe *next;	/* singly-linked list of entries */
};
/* Implemented in mm/page_alloc.c. */
extern void drain_local_pages(void);
extern void mark_free_pages(struct zone *zone);
/**
 * struct platform_hibernation_ops - hibernation platform support
 *
 * The methods in this structure allow a platform to carry out special
 * operations required by it during a hibernation transition.
 *
 * All the methods below must be implemented.
 *
 * @begin: Tell the platform driver that we're starting hibernation.
 *	Called right after shrinking memory and before freezing devices.
 *
 * @end: Called by the PM core right after resuming devices, to indicate to
 *	the platform that the system has returned to the working state.
 *
 * @pre_snapshot: Prepare the platform for creating the hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @finish: Restore the previous state of the platform after the hibernation
 *	image has been created *or* put the platform into the normal operation
 *	mode after the hibernation (the same method is executed in both cases).
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 *
 * @prepare: Prepare the platform for entering the low power state.
 *	Called right after the hibernation image has been saved and before
 *	devices are prepared for entering the low power state.
 *
 * @enter: Put the system into the low power state after the hibernation image
 *	has been saved to disk.
 *	Called after the nonboot CPUs have been disabled and all of the low
 *	level devices have been shut down (runs with IRQs off).
 *
 * @leave: Perform the first stage of the cleanup after the system sleep state
 *	indicated by @set_target() has been left.
 *	Called right after the control has been passed from the boot kernel to
 *	the image kernel, before the nonboot CPUs are enabled and before devices
 *	are resumed. Executed with interrupts disabled.
 *
 * @pre_restore: Prepare system for the restoration from a hibernation image.
 *	Called right after devices have been frozen and before the nonboot
 *	CPUs are disabled (runs with IRQs on).
 *
 * @restore_cleanup: Clean up after a failing image restoration.
 *	Called right after the nonboot CPUs have been enabled and before
 *	thawing devices (runs with IRQs on).
 */
struct platform_hibernation_ops {
	int (*begin)(void);
	void (*end)(void);
	int (*pre_snapshot)(void);
	void (*finish)(void);
	int (*prepare)(void);
	int (*enter)(void);
	void (*leave)(void);
	int (*pre_restore)(void);
	void (*restore_cleanup)(void);
};
195 #ifdef CONFIG_HIBERNATION
196 /* kernel/power/snapshot.c */
197 extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
198 static inline void register_nosave_region(unsigned long b, unsigned long e)
200 __register_nosave_region(b, e, 0);
202 static inline void register_nosave_region_late(unsigned long b, unsigned long e)
204 __register_nosave_region(b, e, 1);
206 extern int swsusp_page_is_forbidden(struct page *);
207 extern void swsusp_set_page_free(struct page *);
208 extern void swsusp_unset_page_free(struct page *);
209 extern unsigned long get_safe_page(gfp_t gfp_mask);
211 extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
212 extern int hibernate(void);
213 #else /* CONFIG_HIBERNATION */
/* No-op stubs when hibernation support is compiled out; hibernate()
 * reports ENOSYS. */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}

static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
220 #endif /* CONFIG_HIBERNATION */
222 #ifdef CONFIG_PM_SLEEP
223 void save_processor_state(void);
224 void restore_processor_state(void);
226 /* kernel/power/main.c */
227 extern int register_pm_notifier(struct notifier_block *nb);
228 extern int unregister_pm_notifier(struct notifier_block *nb);
230 #define pm_notifier(fn, pri) { \
231 static struct notifier_block fn##_nb = \
232 { .notifier_call = fn, .priority = pri }; \
233 register_pm_notifier(&fn##_nb); \
235 #else /* !CONFIG_PM_SLEEP */
/* Stubs when PM sleep support is compiled out; registration always
 * succeeds and pm_notifier() only evaluates @fn to avoid unused warnings. */
static inline int register_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_pm_notifier(struct notifier_block *nb)
{
	return 0;
}

#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
248 #endif /* !CONFIG_PM_SLEEP */
#ifndef CONFIG_HIBERNATION
/* Without hibernation support, nosave regions need not be tracked. */
static inline void register_nosave_region(unsigned long b, unsigned long e)
{
}
static inline void register_nosave_region_late(unsigned long b, unsigned long e)
{
}
#endif
259 #endif /* _LINUX_SUSPEND_H */