Pull bugzilla-8385 into release branch
Commit 3dd6786f55
16 changed files with 181 additions and 126 deletions
@@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
      * Obtain the method mutex if necessary. Do not acquire mutex for a
      * recursive call.
      */
-    if (acpi_os_get_thread_id() !=
-        obj_desc->method.mutex->mutex.owner_thread_id) {
+    if (!walk_state ||
+        !obj_desc->method.mutex->mutex.owner_thread ||
+        (walk_state->thread !=
+         obj_desc->method.mutex->mutex.owner_thread)) {
         /*
          * Acquire the method mutex. This releases the interpreter if we
          * block (and reacquires it before it returns)

@@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
         }
 
         /* Update the mutex and walk info and save the original sync_level */
-        obj_desc->method.mutex->mutex.owner_thread_id =
-            acpi_os_get_thread_id();
 
         if (walk_state) {
             obj_desc->method.mutex->mutex.
                 original_sync_level =
                 walk_state->thread->current_sync_level;
 
+            obj_desc->method.mutex->mutex.owner_thread =
+                walk_state->thread;
             walk_state->thread->current_sync_level =
                 obj_desc->method.sync_level;
         } else {
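The two dsmethod.c hunks above change method-mutex ownership tracking from a raw OS thread id (acpi_os_get_thread_id()) to the acpi_thread_state pointer carried by the walk state, so recursion is detected per walk thread. A minimal standalone model of that ownership test; the types and names here are illustrative, not the ACPICA definitions:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the ACPICA structures. */
struct thread_state { unsigned long thread_id; };

struct method_mutex {
    struct thread_state *owner_thread;   /* NULL while unowned */
    unsigned char original_sync_level;
};

/* Mirrors the new test: acquire unless this walk's thread already owns it. */
static bool must_acquire_method_mutex(const struct method_mutex *m,
                                      const struct thread_state *walk_thread)
{
    return !walk_thread ||                  /* no walk state: always acquire */
           !m->owner_thread ||              /* mutex currently unowned       */
           walk_thread != m->owner_thread;  /* owned by a different thread   */
}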
@@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 
             acpi_os_release_mutex(method_desc->method.mutex->mutex.
                                   os_mutex);
-            method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+            method_desc->method.mutex->mutex.owner_thread = NULL;
         }
     }
 

@@ -196,15 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
         notify_info->notify.value = (u16) notify_value;
         notify_info->notify.handler_obj = handler_obj;
 
-        acpi_ex_exit_interpreter();
-
-        acpi_ev_notify_dispatch(notify_info);
-
-        status = acpi_ex_enter_interpreter();
+        status =
+            acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
+                            notify_info);
         if (ACPI_FAILURE(status)) {
-            return_ACPI_STATUS(status);
+            acpi_ut_delete_generic_state(notify_info);
         }
     }
 
     if (!handler_obj) {
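The evmisc.c hunk stops running the notify handler synchronously under the interpreter lock and instead queues it through acpi_os_execute(), deleting the notify state object only if queueing fails. A rough standalone sketch of that queue-or-clean-up pattern; execute_deferred() and the types below are placeholders, not the ACPICA or OS interfaces:

#include <stdio.h>
#include <stdlib.h>

struct notify_info { int value; };

/* Placeholder for a deferred-execution service such as acpi_os_execute();
 * returns 0 on success, -1 if the work could not be queued. */
static int execute_deferred(void (*fn)(void *), void *ctx)
{
    fn(ctx);                     /* in this sketch the work simply runs inline */
    return 0;
}

static void notify_dispatch(void *ctx)
{
    struct notify_info *info = ctx;
    printf("notify value 0x%x\n", info->value);
    free(info);                  /* on success the handler owns the state */
}

static int queue_notify(int value)
{
    struct notify_info *info = malloc(sizeof(*info));
    if (!info)
        return -1;
    info->value = value;
    if (execute_deferred(notify_dispatch, info) != 0) {
        free(info);              /* mirrors acpi_ut_delete_generic_state() */
        return -1;
    }
    return 0;
}

int main(void) { return queue_notify(0x80); }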
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                                u32 bit_width, acpi_integer * value)
 {
     acpi_status status;
-    acpi_status status2;
     acpi_adr_space_handler handler;
     acpi_adr_space_setup region_setup;
     union acpi_operand_object *handler_desc;

@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
          * setup will potentially execute control methods
          * (e.g., _REG method for this region)
          */
-        acpi_ex_exit_interpreter();
+        acpi_ex_relinquish_interpreter();
 
         status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
                               handler_desc->address_space.context,

@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
         /* Re-enter the interpreter */
 
-        status2 = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status2)) {
-            return_ACPI_STATUS(status2);
-        }
+        acpi_ex_reacquire_interpreter();
 
         /* Check for failure of the Region Setup */
 

@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
          * exit the interpreter because the handler *might* block -- we don't
          * know what it will do, so we can't hold the lock on the intepreter.
          */
-        acpi_ex_exit_interpreter();
+        acpi_ex_relinquish_interpreter();
     }
 
     /* Call the handler */

@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
          * We just returned from a non-default handler, we must re-enter the
          * interpreter
          */
-        status2 = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status2)) {
-            return_ACPI_STATUS(status2);
-        }
+        acpi_ex_reacquire_interpreter();
     }
 
     return_ACPI_STATUS(status);

@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
         return (AE_BAD_PARAMETER);
     }
 
-    status = acpi_ex_enter_interpreter();
-    if (ACPI_FAILURE(status)) {
-        return (status);
-    }
+    /* Must lock interpreter to prevent race conditions */
 
+    acpi_ex_enter_interpreter();
     status = acpi_ev_acquire_global_lock(timeout);
     acpi_ex_exit_interpreter();
 
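In the last hunk above, acpi_acquire_global_lock() now simply brackets the event-layer call with a void enter/exit of the interpreter, since entering can no longer fail. A small model of that bracket, with a plain pthread mutex standing in for the interpreter lock (not the ACPICA implementation):

#include <pthread.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;

static void enter_interpreter(void) { pthread_mutex_lock(&interpreter_lock); }
static void exit_interpreter(void)  { pthread_mutex_unlock(&interpreter_lock); }

/* Stand-in for acpi_ev_acquire_global_lock(); always "succeeds" here. */
static int ev_acquire_global_lock(unsigned short timeout)
{
    (void)timeout;
    return 0;
}

/* Lock the interpreter around the acquisition to prevent races, as the
 * comment in the hunk says; only the inner call can fail. */
int acquire_global_lock(unsigned short timeout)
{
    int status;

    enter_interpreter();
    status = ev_acquire_global_lock(timeout);
    exit_interpreter();
    return status;
}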
@@ -583,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
      * Get the sync_level. If method is serialized, a mutex will be
      * created for this method when it is parsed.
      */
-    if (acpi_gbl_all_methods_serialized) {
-        obj_desc->method.sync_level = 0;
-        obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
-    } else if (method_flags & AML_METHOD_SERIALIZED) {
+    if (method_flags & AML_METHOD_SERIALIZED) {
         /*
          * ACPI 1.0: sync_level = 0
          * ACPI 2.0: sync_level = sync_level in method declaration

@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
 static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
     {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
     {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
-    {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"},
+    {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
     {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
                       "Acquire Depth"},
     {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}

@@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
  *
  ******************************************************************************/
 
-void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
-                          struct acpi_thread_state *thread)
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
 {
+    struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
+
     if (!thread) {
         return;
     }

@@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 
     /* Support for multiple acquires by the owning thread */
 
-    if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
-        /*
-         * The mutex is already owned by this thread, just increment the
-         * acquisition depth
-         */
-        obj_desc->mutex.acquisition_depth++;
-        return_ACPI_STATUS(AE_OK);
+    if (obj_desc->mutex.owner_thread) {
+        if (obj_desc->mutex.owner_thread->thread_id ==
+            walk_state->thread->thread_id) {
+            /*
+             * The mutex is already owned by this thread, just increment the
+             * acquisition depth
+             */
+            obj_desc->mutex.acquisition_depth++;
+            return_ACPI_STATUS(AE_OK);
+        }
     }
 
     /* Acquire the mutex, wait if necessary. Special case for Global Lock */
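The exmutex.c hunk above keys mutex ownership on the walk's thread-state pointer and treats a second Acquire by the owning thread as a pure depth increment. A standalone model of that recursive-acquire path; the types are illustrative, not the ACPICA object layout:

#include <stdio.h>

struct thread_state { unsigned long thread_id; };

struct aml_mutex {
    struct thread_state *owner_thread;   /* NULL == not acquired */
    unsigned short acquisition_depth;
};

/* Returns 1 when the acquire was absorbed as a recursion by the owner,
 * 0 when the caller must go on to take the underlying OS mutex. */
static int try_recursive_acquire(struct aml_mutex *m, struct thread_state *cur)
{
    if (m->owner_thread &&
        m->owner_thread->thread_id == cur->thread_id) {
        m->acquisition_depth++;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct thread_state t = { .thread_id = 42 };
    struct aml_mutex m = { .owner_thread = &t, .acquisition_depth = 1 };

    try_recursive_acquire(&m, &t);
    printf("depth = %u\n", m.acquisition_depth);   /* prints 2 */
    return 0;
}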
@@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
 
     /* Have the mutex: update mutex and walk info and save the sync_level */
 
-    obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
+    obj_desc->mutex.owner_thread = walk_state->thread;
     obj_desc->mutex.acquisition_depth = 1;
     obj_desc->mutex.original_sync_level =
         walk_state->thread->current_sync_level;

@@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
     /* The mutex must have been previously acquired in order to release it */
 
-    if (!obj_desc->mutex.owner_thread_id) {
+    if (!obj_desc->mutex.owner_thread) {
         ACPI_ERROR((AE_INFO,
                     "Cannot release Mutex [%4.4s], not acquired",
                     acpi_ut_get_node_name(obj_desc->mutex.node)));

@@ -262,14 +266,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
      * The Mutex is owned, but this thread must be the owner.
      * Special case for Global Lock, any thread can release
      */
-    if ((obj_desc->mutex.owner_thread_id !=
+    if ((obj_desc->mutex.owner_thread->thread_id !=
          walk_state->thread->thread_id)
         && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
         ACPI_ERROR((AE_INFO,
                     "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
                     (unsigned long)walk_state->thread->thread_id,
                     acpi_ut_get_node_name(obj_desc->mutex.node),
-                    (unsigned long)obj_desc->mutex.owner_thread_id));
+                    (unsigned long)obj_desc->mutex.owner_thread->thread_id));
         return_ACPI_STATUS(AE_AML_NOT_OWNER);
     }
 

@@ -296,7 +300,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
     /* Unlink the mutex from the owner's list */
 
-    acpi_ex_unlink_mutex(obj_desc, walk_state->thread);
+    acpi_ex_unlink_mutex(obj_desc);
 
     /* Release the mutex, special case for Global Lock */
 

@@ -308,7 +312,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
     /* Update the mutex and restore sync_level */
 
-    obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+    obj_desc->mutex.owner_thread = NULL;
     walk_state->thread->current_sync_level =
         obj_desc->mutex.original_sync_level;
 

@@ -363,7 +367,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
 
         /* Mark mutex unowned */
 
-        obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
+        obj_desc->mutex.owner_thread = NULL;
 
         /* Update Thread sync_level (Last mutex is the important one) */
 

@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
 acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 {
     acpi_status status;
-    acpi_status status2;
 
     ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
 

@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
         /* We must wait, so unlock the interpreter */
 
-        acpi_ex_exit_interpreter();
+        acpi_ex_relinquish_interpreter();
 
         status = acpi_os_wait_semaphore(semaphore, 1, timeout);
 

@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
         /* Reacquire the interpreter */
 
-        status2 = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status2)) {
-
-            /* Report fatal error, could not acquire interpreter */
-
-            return_ACPI_STATUS(status2);
-        }
+        acpi_ex_reacquire_interpreter();
     }
 
     return_ACPI_STATUS(status);

@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 {
     acpi_status status;
-    acpi_status status2;
 
     ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
 

@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
         /* We must wait, so unlock the interpreter */
 
-        acpi_ex_exit_interpreter();
+        acpi_ex_relinquish_interpreter();
 
         status = acpi_os_acquire_mutex(mutex, timeout);
 

@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
         /* Reacquire the interpreter */
 
-        status2 = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status2)) {
-
-            /* Report fatal error, could not acquire interpreter */
-
-            return_ACPI_STATUS(status2);
-        }
+        acpi_ex_reacquire_interpreter();
     }
 
     return_ACPI_STATUS(status);

@@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
 
 acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
 {
-    acpi_status status;
-
     ACPI_FUNCTION_ENTRY();
 
     /* Since this thread will sleep, we must release the interpreter */
 
-    acpi_ex_exit_interpreter();
+    acpi_ex_relinquish_interpreter();
 
     acpi_os_sleep(how_long);
 
     /* And now we must get the interpreter again */
 
-    status = acpi_ex_enter_interpreter();
-    return (status);
+    acpi_ex_reacquire_interpreter();
+    return (AE_OK);
 }
 
 /*******************************************************************************
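The exsystem.c hunks above collapse the Wait, Acquire and Sleep paths to a fixed relinquish/block/reacquire sequence, since reacquiring the interpreter can no longer fail. A model of that pattern with a pthread mutex standing in for the interpreter lock (illustrative only, not ACPICA code):

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;

static void relinquish_interpreter(void) { pthread_mutex_unlock(&interpreter_lock); }
static void reacquire_interpreter(void)  { pthread_mutex_lock(&interpreter_lock); }

/* Caller holds the interpreter lock on entry and again on return,
 * but not while the thread is blocked. */
static void do_suspend(unsigned int milliseconds)
{
    relinquish_interpreter();        /* drop the lock before blocking   */
    usleep(milliseconds * 1000);     /* sleep without holding the lock  */
    reacquire_interpreter();         /* void: nothing left to check     */
}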
@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
 *
 * PARAMETERS:  None
 *
-* RETURN:      Status
+* RETURN:      None
 *
 * DESCRIPTION: Enter the interpreter execution region. Failure to enter
-*              the interpreter region is a fatal system error
+*              the interpreter region is a fatal system error. Used in
+*              conjunction with exit_interpreter.
 *
 ******************************************************************************/
 
-acpi_status acpi_ex_enter_interpreter(void)
+void acpi_ex_enter_interpreter(void)
 {
     acpi_status status;
 

@@ -91,10 +92,42 @@ acpi_status acpi_ex_enter_interpreter(void)
 
     status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
     if (ACPI_FAILURE(status)) {
-        ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
+        ACPI_ERROR((AE_INFO,
+                    "Could not acquire AML Interpreter mutex"));
     }
 
-    return_ACPI_STATUS(status);
+    return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_reacquire_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Reacquire the interpreter execution region from within the
+ *              interpreter code. Failure to enter the interpreter region is a
+ *              fatal system error. Used in conjuction with
+ *              relinquish_interpreter
+ *
+ ******************************************************************************/
+
+void acpi_ex_reacquire_interpreter(void)
+{
+    ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
+
+    /*
+     * If the global serialized flag is set, do not release the interpreter,
+     * since it was not actually released by acpi_ex_relinquish_interpreter.
+     * This forces the interpreter to be single threaded.
+     */
+    if (!acpi_gbl_all_methods_serialized) {
+        acpi_ex_enter_interpreter();
+    }
+
+    return_VOID;
 }
 
 /*******************************************************************************

@@ -105,17 +138,9 @@ acpi_status acpi_ex_enter_interpreter(void)
 *
 * RETURN:      None
 *
-* DESCRIPTION: Exit the interpreter execution region
-*
-* Cases where the interpreter is unlocked:
-*      1) Completion of the execution of a control method
-*      2) Method blocked on a Sleep() AML opcode
-*      3) Method blocked on an Acquire() AML opcode
-*      4) Method blocked on a Wait() AML opcode
-*      5) Method blocked to acquire the global lock
-*      6) Method blocked to execute a serialized control method that is
-*         already executing
-*      7) About to invoke a user-installed opregion handler
+* DESCRIPTION: Exit the interpreter execution region. This is the top level
+*              routine used to exit the interpreter when all processing has
+*              been completed.
 *
 ******************************************************************************/
 

@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
 
     status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
     if (ACPI_FAILURE(status)) {
-        ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
+        ACPI_ERROR((AE_INFO,
+                    "Could not release AML Interpreter mutex"));
+    }
+
+    return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_relinquish_interpreter
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Exit the interpreter execution region, from within the
+ *              interpreter - before attempting an operation that will possibly
+ *              block the running thread.
+ *
+ * Cases where the interpreter is unlocked internally
+ *      1) Method to be blocked on a Sleep() AML opcode
+ *      2) Method to be blocked on an Acquire() AML opcode
+ *      3) Method to be blocked on a Wait() AML opcode
+ *      4) Method to be blocked to acquire the global lock
+ *      5) Method to be blocked waiting to execute a serialized control method
+ *         that is currently executing
+ *      6) About to invoke a user-installed opregion handler
+ *
+ ******************************************************************************/
+
+void acpi_ex_relinquish_interpreter(void)
+{
+    ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
+
+    /*
+     * If the global serialized flag is set, do not release the interpreter.
+     * This forces the interpreter to be single threaded.
+     */
+    if (!acpi_gbl_all_methods_serialized) {
+        acpi_ex_exit_interpreter();
     }
 
     return_VOID;
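The new acpi_ex_reacquire_interpreter()/acpi_ex_relinquish_interpreter() pair added above only touches the mutex when acpi_gbl_all_methods_serialized is clear, so serialized mode keeps the lock held across blocking points and the interpreter stays single threaded. A compact model of that guard, with stand-in names rather than the ACPICA globals:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t interpreter_lock = PTHREAD_MUTEX_INITIALIZER;
static bool all_methods_serialized;    /* decided once at initialization */

static void enter_interpreter(void) { pthread_mutex_lock(&interpreter_lock); }
static void exit_interpreter(void)  { pthread_mutex_unlock(&interpreter_lock); }

/* Only release around a blocking point when methods may run concurrently. */
static void relinquish_interpreter(void)
{
    if (!all_methods_serialized)
        exit_interpreter();
}

/* Matching reacquire: a no-op in serialized mode because the lock was kept. */
static void reacquire_interpreter(void)
{
    if (!all_methods_serialized)
        enter_interpreter();
}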
@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
 
 *
 * RETURN:      none
 *
-* DESCRIPTION: Truncate a number to 32-bits if the currently executing method
-*              belongs to a 32-bit ACPI table.
+* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
+*              32-bit, as determined by the revision of the DSDT.
 *
 ******************************************************************************/
 

@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
          * Execute the method via the interpreter. The interpreter is locked
          * here before calling into the AML parser
          */
-        status = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status)) {
-            return_ACPI_STATUS(status);
-        }
-
+        acpi_ex_enter_interpreter();
         status = acpi_ps_execute_method(info);
         acpi_ex_exit_interpreter();
     } else {

@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
          * resolution, we must lock it because we could access an opregion.
          * The opregion access code assumes that the interpreter is locked.
          */
-        status = acpi_ex_enter_interpreter();
-        if (ACPI_FAILURE(status)) {
-            return_ACPI_STATUS(status);
-        }
+        acpi_ex_enter_interpreter();
 
         /* Function has a strange interface */
 

@@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
                         u32 level, void *context, void **return_value)
 {
     acpi_object_type type;
-    acpi_status status;
+    acpi_status status = AE_OK;
     struct acpi_init_walk_info *info =
         (struct acpi_init_walk_info *)context;
     struct acpi_namespace_node *node =

@@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
     /*
      * Must lock the interpreter before executing AML code
      */
-    status = acpi_ex_enter_interpreter();
-    if (ACPI_FAILURE(status)) {
-        return (status);
-    }
+    acpi_ex_enter_interpreter();
 
     /*
      * Each of these types can contain executable AML code within the

@@ -170,7 +170,6 @@ acpi_evaluate_object(acpi_handle handle,
                      struct acpi_buffer *return_buffer)
 {
     acpi_status status;
-    acpi_status status2;
     struct acpi_evaluate_info *info;
     acpi_size buffer_space_needed;
     u32 i;

@@ -329,14 +328,12 @@ acpi_evaluate_object(acpi_handle handle,
          * Delete the internal return object. NOTE: Interpreter must be
          * locked to avoid race condition.
          */
-        status2 = acpi_ex_enter_interpreter();
-        if (ACPI_SUCCESS(status2)) {
+        acpi_ex_enter_interpreter();
 
         /* Remove one reference on the return object (should delete it) */
 
         acpi_ut_remove_reference(info->return_object);
         acpi_ex_exit_interpreter();
-        }
     }
 
 cleanup:

@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
+static struct workqueue_struct *kacpi_notify_wq;
 
 static void __init acpi_request_region (struct acpi_generic_address *addr,
                                         unsigned int length, char *desc)

@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
         return AE_NULL_ENTRY;
     }
     kacpid_wq = create_singlethread_workqueue("kacpid");
+    kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
     BUG_ON(!kacpid_wq);
+    BUG_ON(!kacpi_notify_wq);
     return AE_OK;
 }
 

@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
     }
 
     destroy_workqueue(kacpid_wq);
+    destroy_workqueue(kacpi_notify_wq);
 
     return AE_OK;
 }

@@ -601,6 +604,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
 }
 
 static void acpi_os_execute_deferred(struct work_struct *work)
+{
+    struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
+    if (!dpc) {
+        printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
+        return;
+    }
+
+    dpc->function(dpc->context);
+    kfree(dpc);
+
+    /* Yield cpu to notify thread */
+    cond_resched();
+
+    return;
+}
+
+static void acpi_os_execute_notify(struct work_struct *work)
 {
     struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
 
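The osl.c hunks above add a dedicated single-threaded workqueue so queued notify handlers are not stuck behind other deferred ACPI work. A sketch of that arrangement using the same era Linux workqueue calls that appear in the hunk (create_singlethread_workqueue, INIT_WORK, queue_work); the surrounding structure and names here are illustrative, not the driver's own:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct deferred_call {
    struct work_struct work;
    void (*function)(void *);
    void *context;
};

static struct workqueue_struct *notify_wq;   /* e.g. "kacpi_notify" */

static void deferred_call_fn(struct work_struct *work)
{
    struct deferred_call *dc = container_of(work, struct deferred_call, work);

    dc->function(dc->context);   /* run the notify handler on its own queue */
    kfree(dc);
}

static int queue_notify_call(void (*fn)(void *), void *ctx)
{
    struct deferred_call *dc = kmalloc(sizeof(*dc), GFP_ATOMIC);

    if (!dc)
        return -ENOMEM;
    dc->function = fn;
    dc->context = ctx;
    INIT_WORK(&dc->work, deferred_call_fn);
    if (!queue_work(notify_wq, &dc->work)) {
        kfree(dc);               /* work was not queued; drop our copy */
        return -EBUSY;
    }
    return 0;
}

static int notify_queue_init(void)
{
    notify_wq = create_singlethread_workqueue("kacpi_notify");
    return notify_wq ? 0 : -ENOMEM;
}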
@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
     acpi_status status = AE_OK;
     struct acpi_os_dpc *dpc;
 
-    ACPI_FUNCTION_TRACE("os_queue_for_execution");
-
     ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                       "Scheduling function [%p(%p)] for deferred execution.\n",
                       function, context));
 
     if (!function)
-        return_ACPI_STATUS(AE_BAD_PARAMETER);
+        return AE_BAD_PARAMETER;
 
     /*
      * Allocate/initialize DPC structure. Note that this memory will be

@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
     dpc->function = function;
     dpc->context = context;
 
-    INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-    if (!queue_work(kacpid_wq, &dpc->work)) {
-        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+    if (type == OSL_NOTIFY_HANDLER) {
+        INIT_WORK(&dpc->work, acpi_os_execute_notify);
+        if (!queue_work(kacpi_notify_wq, &dpc->work)) {
+            status = AE_ERROR;
+            kfree(dpc);
+        }
+    } else {
+        INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+        if (!queue_work(kacpid_wq, &dpc->work)) {
+            ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                               "Call to queue_work() failed.\n"));
-        kfree(dpc);
-        status = AE_ERROR;
+            status = AE_ERROR;
+            kfree(dpc);
+        }
     }
-
     return_ACPI_STATUS(status);
 }
 
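The acpi_os_execute() hunk above routes work to one of two queues based on the execution type and frees the dpc when queueing fails. A trivial standalone model of that type-based routing; the queue representation is illustrative only:

#include <stdio.h>

enum exec_type { EXEC_GENERIC, EXEC_NOTIFY };

struct work_queue { const char *name; };

static struct work_queue generic_queue = { "kacpid" };
static struct work_queue notify_queue  = { "kacpi_notify" };

/* Same decision the hunk makes between kacpid_wq and kacpi_notify_wq. */
static struct work_queue *pick_queue(enum exec_type type)
{
    return (type == EXEC_NOTIFY) ? &notify_queue : &generic_queue;
}

int main(void)
{
    printf("%s\n", pick_queue(EXEC_NOTIFY)->name);    /* kacpi_notify */
    printf("%s\n", pick_queue(EXEC_GENERIC)->name);   /* kacpid */
    return 0;
}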
@@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
             acpi_os_delete_mutex(object->mutex.os_mutex);
             acpi_gbl_global_lock_mutex = NULL;
         } else {
+            acpi_ex_unlink_mutex(object);
             acpi_os_delete_mutex(object->mutex.os_mutex);
         }
         break;

@@ -253,8 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
 
 void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
 
-void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
-                          struct acpi_thread_state *thread);
+void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc);
 
 /*
  * exprep - ACPI AML execution - prep utilities

@@ -446,10 +445,14 @@ acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc,
 /*
  * exutils - interpreter/scanner utilities
  */
-acpi_status acpi_ex_enter_interpreter(void);
+void acpi_ex_enter_interpreter(void);
 
 void acpi_ex_exit_interpreter(void);
 
+void acpi_ex_reacquire_interpreter(void);
+
+void acpi_ex_relinquish_interpreter(void);
+
 void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
 
 u8 acpi_ex_acquire_global_lock(u32 rule);

@@ -155,7 +155,7 @@ struct acpi_object_event {
 struct acpi_object_mutex {
     ACPI_OBJECT_COMMON_HEADER u8 sync_level;    /* 0-15, specified in Mutex() call */
     u16 acquisition_depth;                      /* Allow multiple Acquires, same thread */
-    acpi_thread_id owner_thread_id;             /* Current owner of the mutex */
+    struct acpi_thread_state *owner_thread;     /* Current owner of the mutex */
     acpi_mutex os_mutex;                        /* Actual OS synchronization object */
     union acpi_operand_object *prev;            /* Link for list of acquired mutexes */
     union acpi_operand_object *next;            /* Link for list of acquired mutexes */
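The acobject.h hunk replaces the bare acpi_thread_id in the mutex object with a pointer to the owning thread state, which is what lets exmutex.c reach both the owner's id and its sync level. A standalone model of the reshaped field; the declarations are illustrative, not the real headers:

#include <stdio.h>
#include <stddef.h>

struct thread_state {
    unsigned long thread_id;
    unsigned char current_sync_level;
};

struct object_mutex {
    unsigned char sync_level;            /* 0-15, from the Mutex() term     */
    unsigned short acquisition_depth;    /* recursive Acquires by the owner */
    struct thread_state *owner_thread;   /* NULL while the mutex is unowned */
};

int main(void)
{
    struct thread_state t = { .thread_id = 7, .current_sync_level = 3 };
    struct object_mutex m = { .sync_level = 5, .owner_thread = &t };

    /* Both the owner's id and its sync level are now reachable. */
    printf("owner %lu, owner sync level %u\n",
           m.owner_thread->thread_id,
           (unsigned)m.owner_thread->current_sync_level);
    return 0;
}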