/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2008, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include <acpi/amlcode.h>
#include <acpi/acdispat.h>
#include <acpi/acinterp.h>
#include <acpi/acnamesp.h>
#include <acpi/acdisasm.h>

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  Status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the disassembler is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		status = acpi_gbl_exception_handler(status,
						    walk_state->method_node ?
						    walk_state->method_node->
						    name.integer : 0,
						    walk_state->opcode,
						    walk_state->aml_offset,
						    NULL);
		acpi_ex_enter_interpreter();
	}
#ifdef ACPI_DISASSEMBLER
	if (ACPI_FAILURE(status)) {

		/* Display method locals/args if disassembler is present */

		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
	}
#endif

	return (status);
}
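
/*
 * Illustrative sketch (added for documentation, not part of this file):
 * the global handler invoked above, acpi_gbl_exception_handler, is the one
 * registered through the public ACPICA interface
 * acpi_install_exception_handler(). A host driver could hook AML run-time
 * errors roughly as shown below; the example_* names are hypothetical and
 * the fragment is compiled out.
 */
#if 0
static acpi_status
example_aml_exception_handler(acpi_status aml_status,
			      acpi_name name,
			      u16 opcode, u32 aml_offset, void *context)
{
	/* Log the failing method name (a 4-character ACPI name) and offset */

	ACPI_ERROR((AE_INFO, "AML fault %s in [%4.4s] at AML offset 0x%X",
		    acpi_format_exception(aml_status),
		    ACPI_CAST_PTR(char, &name), aml_offset));

	/* Returning AE_OK here would tell the interpreter not to abort */

	return (aml_status);
}

static void example_install_handler(void)
{
	(void)acpi_install_exception_handler(example_aml_exception_handler);
}
#endif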

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/
static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {

		/* Delete the half-created mutex object to avoid a leak */

		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}
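
/*
 * For illustration (added for documentation, not part of this file): the
 * mutex created above backs methods declared Serialized in ASL, and
 * sync_level comes from the optional SyncLevel argument of the Method
 * declaration. A hypothetical example:
 *
 *     Method (MTH0, 1, Serialized, 4)     // NumArgs=1, SyncLevel=4
 *     {
 *         Store (Arg0, Local0)
 *     }
 *
 * A thread may only enter MTH0 while its current sync level is <= 4; that
 * is the deadlock-prevention check made in acpi_ds_begin_method_execution()
 * below before the method mutex is acquired.
 */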

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits on the method mutex
 *              (for serialized methods) for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	return_ACPI_STATUS(status);

      cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
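
/*
 * Illustrative sketch (added for documentation, not part of this file):
 * callers such as acpi_ds_call_control_method() below bracket method
 * execution with the begin/terminate pair. The hypothetical helper is
 * compiled out; walk-state creation and AML execution are elided.
 */
#if 0
static acpi_status
example_execute_method(struct acpi_namespace_node *method_node,
		       union acpi_operand_object *obj_desc,
		       struct acpi_walk_state *walk_state)
{
	acpi_status status;

	/* Serialize (if required) and bump the method thread count */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						walk_state);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* ... create a walk state and execute the method AML ... */

	/*
	 * A successful begin must be balanced by a terminate, which releases
	 * the method mutex when its acquisition depth drops to zero,
	 * decrements the thread count, and frees the owner_id once the last
	 * thread exits
	 */
	acpi_ds_terminate_control_method(obj_desc, walk_state);
	return (AE_OK);
}
#endif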

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  Thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              Op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {

		/* Undo the method begin and walk state via common cleanup */

		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY) {
		status = obj_desc->method.implementation(next_walk_state);
	}

	return_ACPI_STATUS(status);

      cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	if (next_walk_state) {
		acpi_ds_delete_walk_state(next_walk_state);
	}

	return_ACPI_STATUS(status);
}
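
/*
 * Note (added for documentation, not part of this file): on success,
 * execution continues in the new walk state created above and the caller's
 * walk state is preempted. The caller is resumed through
 * acpi_ds_restart_control_method() below once the called method completes,
 * at which point any return value is handled there.
 */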

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation.  Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
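
/*
 * Illustrative ASL (hypothetical, added for documentation, not part of this
 * file) for the "implicit return" handling above: some AML expects the last
 * value computed by a method to be its return value even without a Return
 * statement.
 *
 *     Method (MTH1, 0, NotSerialized)
 *     {
 *         Add (One, One, Local0)          // Last computed value is 2
 *     }                                   // No explicit Return
 *
 *     Store (MTH1 (), Local1)             // Caller relies on implicit return
 *
 * With implicit-return support enabled, Local1 receives 2; otherwise the
 * method officially returns nothing, since the ASL language does not define
 * this behavior.
 */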

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method.  Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = NULL;
			}
		}

		/*
		 * Delete any namespace objects created anywhere within
		 * the namespace by the execution of this method
		 */
		acpi_ns_delete_namespace_by_owner(method_desc->method.owner_id);
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %d threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception
		 *
		 * This code is here because we must wait until the last thread exits
		 * before creating the synchronization mutex.
		 */
		if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
		    && (!method_desc->method.mutex)) {
			(void)acpi_ds_create_method_mutex(method_desc);
		}

		/* No more threads, we can free the owner_id */

		acpi_ut_release_owner_id(&method_desc->method.owner_id);
	}

	return_VOID;
}
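
/*
 * Illustrative ASL (hypothetical, added for documentation, not part of this
 * file) for the automatic NotSerialized -> Serialized conversion above: a
 * method that creates a named object and then blocks can collide with a
 * second thread, which fails with AE_ALREADY_EXISTS when it tries to create
 * the same name.
 *
 *     Method (MTH2, 0, NotSerialized)     // Should have been Serialized
 *     {
 *         Name (TMP0, 0)                  // Created in the namespace
 *         Sleep (100)                     // Blocks; a second thread may now
 *     }                                   // hit AE_ALREADY_EXISTS on TMP0
 *
 * Once the last thread exits, the code above creates the method mutex so
 * that subsequent invocations of MTH2 are serialized.
 */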