Merge "Merge android-4.4@ceee5bd (v4.4.95) into msm-4.4"
This commit is contained in:
commit
aaacae8143
61 changed files with 917 additions and 510 deletions
2
Makefile
2
Makefile
|
@ -1,6 +1,6 @@
|
|||
VERSION = 4
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 94
|
||||
SUBLEVEL = 95
|
||||
EXTRAVERSION =
|
||||
NAME = Blurry Fish Butt
|
||||
|
||||
|
|
|
@@ -22,8 +22,6 @@
 #define AARCH32_KERN_SIGRET_CODE_OFFSET	0x500

 extern const compat_ulong_t aarch32_sigret_code[6];

 int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
			struct pt_regs *regs);
 int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,

@@ -479,11 +479,6 @@ lws_start:
	comiclr,>>	__NR_lws_entries, %r20, %r0
	b,n	lws_exit_nosys

-	/* WARNING: Trashing sr2 and sr3 */
-	mfsp	%sr7,%r1			/* get userspace into sr3 */
-	mtsp	%r1,%sr3
-	mtsp	%r0,%sr2			/* get kernel space into sr2 */
-
	/* Load table start */
	ldil	L%lws_table, %r1
	ldo	R%lws_table(%r1), %r28	/* Scratch use of r28 */
@@ -632,9 +627,9 @@ cas_action:
	stw	%r1, 4(%sr2,%r20)
 #endif
	/* The load and store could fail */
-1:	ldw,ma	0(%sr3,%r26), %r28
+1:	ldw,ma	0(%r26), %r28
	sub,<>	%r28, %r25, %r0
-2:	stw,ma	%r24, 0(%sr3,%r26)
+2:	stw,ma	%r24, 0(%r26)
	/* Free lock */
	stw,ma	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
@@ -711,9 +706,9 @@ lws_compare_and_swap_2:
	nop

	/* 8bit load */
-4:	ldb	0(%sr3,%r25), %r25
+4:	ldb	0(%r25), %r25
	b	cas2_lock_start
-5:	ldb	0(%sr3,%r24), %r24
+5:	ldb	0(%r24), %r24
	nop
	nop
	nop
@@ -721,9 +716,9 @@ lws_compare_and_swap_2:
	nop

	/* 16bit load */
-6:	ldh	0(%sr3,%r25), %r25
+6:	ldh	0(%r25), %r25
	b	cas2_lock_start
-7:	ldh	0(%sr3,%r24), %r24
+7:	ldh	0(%r24), %r24
	nop
	nop
	nop
@@ -731,9 +726,9 @@ lws_compare_and_swap_2:
	nop

	/* 32bit load */
-8:	ldw	0(%sr3,%r25), %r25
+8:	ldw	0(%r25), %r25
	b	cas2_lock_start
-9:	ldw	0(%sr3,%r24), %r24
+9:	ldw	0(%r24), %r24
	nop
	nop
	nop
@@ -742,14 +737,14 @@ lws_compare_and_swap_2:

	/* 64bit load */
 #ifdef CONFIG_64BIT
-10:	ldd	0(%sr3,%r25), %r25
-11:	ldd	0(%sr3,%r24), %r24
+10:	ldd	0(%r25), %r25
+11:	ldd	0(%r24), %r24
 #else
-	/* Load new value into r22/r23 - high/low */
-10:	ldw	0(%sr3,%r25), %r22
-11:	ldw	4(%sr3,%r25), %r23
+	/* Load old value into r22/r23 - high/low */
+10:	ldw	0(%r25), %r22
+11:	ldw	4(%r25), %r23
	/* Load new value into fr4 for atomic store later */
-12:	flddx	0(%sr3,%r24), %fr4
+12:	flddx	0(%r24), %fr4
 #endif

 cas2_lock_start:
@@ -799,30 +794,30 @@ cas2_action:
	ldo	1(%r0),%r28

	/* 8bit CAS */
-13:	ldb,ma	0(%sr3,%r26), %r29
+13:	ldb,ma	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	cas2_end
-14:	stb,ma	%r24, 0(%sr3,%r26)
+14:	stb,ma	%r24, 0(%r26)
	b	cas2_end
	copy	%r0, %r28
	nop
	nop

	/* 16bit CAS */
-15:	ldh,ma	0(%sr3,%r26), %r29
+15:	ldh,ma	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	cas2_end
-16:	sth,ma	%r24, 0(%sr3,%r26)
+16:	sth,ma	%r24, 0(%r26)
	b	cas2_end
	copy	%r0, %r28
	nop
	nop

	/* 32bit CAS */
-17:	ldw,ma	0(%sr3,%r26), %r29
+17:	ldw,ma	0(%r26), %r29
	sub,=	%r29, %r25, %r0
	b,n	cas2_end
-18:	stw,ma	%r24, 0(%sr3,%r26)
+18:	stw,ma	%r24, 0(%r26)
	b	cas2_end
	copy	%r0, %r28
	nop
@@ -830,22 +825,22 @@ cas2_action:

	/* 64bit CAS */
 #ifdef CONFIG_64BIT
-19:	ldd,ma	0(%sr3,%r26), %r29
+19:	ldd,ma	0(%r26), %r29
	sub,*=	%r29, %r25, %r0
	b,n	cas2_end
-20:	std,ma	%r24, 0(%sr3,%r26)
+20:	std,ma	%r24, 0(%r26)
	copy	%r0, %r28
 #else
	/* Compare first word */
-19:	ldw,ma	0(%sr3,%r26), %r29
+19:	ldw	0(%r26), %r29
	sub,=	%r29, %r22, %r0
	b,n	cas2_end
	/* Compare second word */
-20:	ldw,ma	4(%sr3,%r26), %r29
+20:	ldw	4(%r26), %r29
	sub,=	%r29, %r23, %r0
	b,n	cas2_end
	/* Perform the store */
-21:	fstdx	%fr4, 0(%sr3,%r26)
+21:	fstdx	%fr4, 0(%r26)
	copy	%r0, %r28
 #endif

@@ -90,6 +90,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg)
	bool want;

	sinfo = msg->signed_infos;
+	if (!sinfo)
+		goto inconsistent;
+
	if (sinfo->authattrs) {
		want = true;
		msg->have_authattrs = true;
@@ -601,6 +601,8 @@ enum {
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
+ * @process_todo:         whether work in @todo should be processed
+ *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
@@ -627,6 +629,7 @@ struct binder_thread {
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
+	bool process_todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
@@ -814,6 +817,16 @@ static bool binder_worklist_empty(struct binder_proc *proc,
	return ret;
 }

+/**
+ * binder_enqueue_work_ilocked() - Add an item to the work list
+ * @work:         struct binder_work to add to list
+ * @target_list:  list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
 static void
 binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
@@ -824,22 +837,56 @@ binder_enqueue_work_ilocked(struct binder_work *work,
 }

 /**
- * binder_enqueue_work() - Add an item to the work list
- * @proc:         binder_proc associated with list
+ * binder_enqueue_thread_work_ilocked_nowake() - Add thread work
+ * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
- * @target_list:  list to add work to
 *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
+ * Adds the work to the todo list of the thread. Doesn't set the process_todo
+ * flag, which means that (if it wasn't already set) the thread will go to
+ * sleep without handling this work when it calls read.
+ *
+ * Requires the proc->inner_lock to be held.
 */
 static void
-binder_enqueue_work(struct binder_proc *proc,
-		    struct binder_work *work,
-		    struct list_head *target_list)
+binder_enqueue_thread_work_ilocked_nowake(struct binder_thread *thread,
					  struct binder_work *work)
 {
-	binder_inner_proc_lock(proc);
-	binder_enqueue_work_ilocked(work, target_list);
-	binder_inner_proc_unlock(proc);
+	binder_enqueue_work_ilocked(work, &thread->todo);
 }

+/**
+ * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ *
+ * Requires the proc->inner_lock to be held.
+ */
+static void
+binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
+{
+	binder_enqueue_work_ilocked(work, &thread->todo);
+	thread->process_todo = true;
+}
+
+/**
+ * binder_enqueue_thread_work() - Add an item to the thread work list
+ * @thread:       thread to queue work to
+ * @work:         struct binder_work to add to list
+ *
+ * Adds the work to the todo list of the thread, and enables processing
+ * of the todo queue.
+ */
+static void
+binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
+{
+	binder_inner_proc_lock(thread->proc);
+	binder_enqueue_thread_work_ilocked(thread, work);
+	binder_inner_proc_unlock(thread->proc);
+}
+
 static void
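
A minimal usage sketch for the helpers introduced in the hunk above, assuming a caller that follows the binder locking rules shown there; the caller_queue_one() name and its already_locked flag are hypothetical, for illustration only:

	/* Illustrative only: queue one work item on a binder thread.
	 * With proc->inner_lock already held, the _ilocked variant must be
	 * used; otherwise the wrapper takes and drops the lock itself.
	 */
	static void caller_queue_one(struct binder_thread *thread,
				     struct binder_work *work,
				     bool already_locked)
	{
		if (already_locked)
			binder_enqueue_thread_work_ilocked(thread, work);
		else
			binder_enqueue_thread_work(thread, work);
	}

Both paths also set thread->process_todo, so a subsequent read on the thread will process the queued work; the _nowake variant is the one to use when the caller knows follow-up work will start queue processing anyway.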
@@ -954,7 +1001,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
 static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
 {
-	return !binder_worklist_empty_ilocked(&thread->todo) ||
+	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
@@ -1371,6 +1418,17 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
+			/*
+			 * Note: this function is the only place where we queue
+			 * directly to a thread->todo without using the
+			 * corresponding binder_enqueue_thread_work() helper
+			 * functions; in this case it's ok to not set the
+			 * process_todo flag, since we know this node work will
+			 * always be followed by other work that starts queue
+			 * processing: in case of synchronous transactions, a
+			 * BR_REPLY or BR_ERROR; in case of oneway
+			 * transactions, a BR_TRANSACTION_COMPLETE.
+			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
@@ -1382,6 +1440,9 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
					node->debug_id);
				return -EINVAL;
			}
+			/*
+			 * See comment above
+			 */
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
@@ -2071,9 +2132,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
-				binder_enqueue_work_ilocked(
-					&target_thread->reply_error.work,
-					&target_thread->todo);
+				binder_enqueue_thread_work_ilocked(
+					target_thread,
+					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				WARN(1, "Unexpected reply error: %u\n",
@@ -2712,11 +2773,10 @@ static bool binder_proc_transaction(struct binder_transaction *t,
				    struct binder_proc *proc,
				    struct binder_thread *thread)
 {
-	struct list_head *target_list = NULL;
	struct binder_node *node = t->buffer->target_node;
	struct binder_priority node_prio;
	bool oneway = !!(t->flags & TF_ONE_WAY);
-	bool wakeup = true;
+	bool pending_async = false;

	BUG_ON(!node);
	binder_node_lock(node);
@@ -2726,8 +2786,7 @@ static bool binder_proc_transaction(struct binder_transaction *t,
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction) {
-			target_list = &node->async_todo;
-			wakeup = false;
+			pending_async = true;
		} else {
			node->has_async_transaction = 1;
		}
@@ -2741,22 +2800,20 @@ static bool binder_proc_transaction(struct binder_transaction *t,
		return false;
	}

-	if (!thread && !target_list)
+	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
-		target_list = &thread->todo;
		binder_transaction_priority(thread->task, t, node_prio,
					    node->inherit_rt);
-	} else if (!target_list) {
-		target_list = &proc->todo;
+		binder_enqueue_thread_work_ilocked(thread, &t->work);
+	} else if (!pending_async) {
+		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
-		BUG_ON(target_list != &node->async_todo);
+		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

-	binder_enqueue_work_ilocked(&t->work, target_list);
-
-	if (wakeup)
+	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	binder_inner_proc_unlock(proc);
@@ -3258,10 +3315,10 @@ static void binder_transaction(struct binder_proc *proc,
		}
	}
	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-	binder_enqueue_work(proc, tcomplete, &thread->todo);
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
+		binder_enqueue_thread_work(thread, tcomplete);
		binder_inner_proc_lock(target_proc);
		if (target_thread->is_dead) {
			binder_inner_proc_unlock(target_proc);
@@ -3269,7 +3326,7 @@ static void binder_transaction(struct binder_proc *proc,
		}
		BUG_ON(t->buffer->async_transaction != 0);
		binder_pop_transaction_ilocked(target_thread, in_reply_to);
-		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
		binder_inner_proc_unlock(target_proc);
		wake_up_interruptible_sync(&target_thread->wait);
		binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3277,6 +3334,7 @@ static void binder_transaction(struct binder_proc *proc,
	} else if (!(t->flags & TF_ONE_WAY)) {
		BUG_ON(t->buffer->async_transaction != 0);
		binder_inner_proc_lock(proc);
+		binder_enqueue_thread_work_ilocked_nowake(thread, tcomplete);
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
@@ -3290,6 +3348,7 @@ static void binder_transaction(struct binder_proc *proc,
	} else {
		BUG_ON(target_node == NULL);
		BUG_ON(t->buffer->async_transaction != 1);
+		binder_enqueue_thread_work(thread, tcomplete);
		if (!binder_proc_transaction(t, target_proc, NULL))
			goto err_dead_proc_or_thread;
	}
@@ -3369,15 +3428,11 @@ err_invalid_target_handle:
	if (in_reply_to) {
		binder_restore_priority(current, in_reply_to->saved_priority);
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
-		binder_enqueue_work(thread->proc,
-				    &thread->return_error.work,
-				    &thread->todo);
+		binder_enqueue_thread_work(thread, &thread->return_error.work);
	}
 }

@@ -3681,10 +3736,9 @@ static int binder_thread_write(struct binder_proc *proc,
				WARN_ON(thread->return_error.cmd !=
					BR_OK);
				thread->return_error.cmd = BR_ERROR;
-				binder_enqueue_work(
-					thread->proc,
-					&thread->return_error.work,
-					&thread->todo);
+				binder_enqueue_thread_work(
+					thread,
+					&thread->return_error.work);
				binder_debug(
					BINDER_DEBUG_FAILED_TRANSACTION,
					"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3764,9 +3818,9 @@ static int binder_thread_write(struct binder_proc *proc,
			if (thread->looper &
			    (BINDER_LOOPER_STATE_REGISTERED |
			     BINDER_LOOPER_STATE_ENTERED))
-				binder_enqueue_work_ilocked(
-					&death->work,
-					&thread->todo);
+				binder_enqueue_thread_work_ilocked(
					thread,
					&death->work);
			else {
				binder_enqueue_work_ilocked(
					&death->work,
@@ -3821,8 +3875,8 @@ static int binder_thread_write(struct binder_proc *proc,
				if (thread->looper &
				    (BINDER_LOOPER_STATE_REGISTERED |
				     BINDER_LOOPER_STATE_ENTERED))
-					binder_enqueue_work_ilocked(
-						&death->work, &thread->todo);
+					binder_enqueue_thread_work_ilocked(
						thread, &death->work);
				else {
					binder_enqueue_work_ilocked(
						&death->work,
@@ -3996,6 +4050,8 @@ retry:
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
+		if (binder_worklist_empty_ilocked(&thread->todo))
+			thread->process_todo = false;

		switch (w->type) {
		case BINDER_WORK_TRANSACTION: {
@@ -282,6 +282,9 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
			goto err_vm_insert_page_failed;
		}

+		if (index + 1 > alloc->pages_high)
+			alloc->pages_high = index + 1;
+
		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
@@ -561,7 +564,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc,
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
-				   prev->data, next->data);
+				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
@@ -855,6 +858,7 @@ void binder_alloc_print_pages(struct seq_file *m,
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
+	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }

 /**
@@ -986,7 +990,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	return ret;
 }

-struct shrinker binder_shrinker = {
+static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
@@ -92,6 +92,7 @@ struct binder_lru_page {
 * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
+ * @pages_high:         high watermark of offset in @pages
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. It is normally initialized during binder_init() and binder_mmap()
@@ -112,6 +113,7 @@ struct binder_alloc {
	size_t buffer_size;
	uint32_t buffer_free;
	int pid;
+	size_t pages_high;
 };

 #ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
			if (mbus->hw_io_coherency)
				w->mbus_attr |= ATTR_HW_COHERENCY;
			w->base = base & DDR_BASE_CS_LOW_MASK;
-			w->size = (size | ~DDR_SIZE_MASK) + 1;
+			w->size = (u64)(size | ~DDR_SIZE_MASK) + 1;
		}
	}
	mvebu_mbus_dram_info.num_cs = cs;
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)
	/* Turn off the clock (and clear the event) */
	disable_timer(cs5535_event_clock);

-	if (clockevent_state_shutdown(&cs5535_clockevent))
+	if (clockevent_state_detached(&cs5535_clockevent) ||
+	    clockevent_state_shutdown(&cs5535_clockevent))
		return IRQ_HANDLED;

	/* Clear the counter */
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
				   -fPIC -fno-strict-aliasing -mno-red-zone \
				   -mno-mmx -mno-sse

-cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
+cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS))
 cflags-$(CONFIG_ARM)		:= $(subst -pg,,$(KBUILD_CFLAGS)) \
				   -fno-builtin -fpic -mno-single-pic-base

@@ -40,5 +40,5 @@ int
 g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine)
 {
	return nvkm_xtensa_new_(&g84_bsp, device, index,
-				true, 0x103000, pengine);
+				device->chipset != 0x92, 0x103000, pengine);
 }
@@ -240,6 +240,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde)
			mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem);
		}

+		mmu->func->flush(vm);
+
		nvkm_memory_del(&pgt);
	}
 }
@@ -338,12 +338,15 @@ static int ismt_process_desc(const struct ismt_desc *desc,
		data->word = dma_buffer[0] | (dma_buffer[1] << 8);
		break;
	case I2C_SMBUS_BLOCK_DATA:
-	case I2C_SMBUS_I2C_BLOCK_DATA:
		if (desc->rxbytes != dma_buffer[0] + 1)
			return -EMSGSIZE;

		memcpy(data->block, dma_buffer, desc->rxbytes);
		break;
+	case I2C_SMBUS_I2C_BLOCK_DATA:
+		memcpy(&data->block[1], dma_buffer, desc->rxbytes);
+		data->block[0] = desc->rxbytes;
+		break;
	}
	return 0;
 }
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
	}

	cf->can_id = id & ESD_IDMASK;
-	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc);
+	cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR);

	if (id & ESD_EXTID)
		cf->can_id |= CAN_EFF_FLAG;
@@ -356,6 +356,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)

		gs_free_tx_context(txc);

+		atomic_dec(&dev->active_tx_urbs);
+
		netif_wake_queue(netdev);
	}

@@ -444,14 +446,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
			  urb->transfer_buffer_length,
			  urb->transfer_buffer,
			  urb->transfer_dma);

-	atomic_dec(&dev->active_tx_urbs);
-
-	if (!netif_device_present(netdev))
-		return;
-
-	if (netif_queue_stopped(netdev))
-		netif_wake_queue(netdev);
 }

 static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi)
 }

 static void
-wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys,
-		       u8 len)
+wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events,
		       const u8 *dlys, u8 len)
 {
	u32 t1_offset, t2_offset;
	u8 ctr;
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi)
 static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
 {
	u16 currband;
-	s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
-	s8 *lna1_gain_db = NULL;
-	s8 *lna1_gain_db_2 = NULL;
-	s8 *lna2_gain_db = NULL;
-	s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
-	s8 *tia_gain_db;
-	s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
-	s8 *tia_gainbits;
-	u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
-	u16 *rfseq_init_gain;
+	static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 };
+	const s8 *lna1_gain_db = NULL;
+	const s8 *lna1_gain_db_2 = NULL;
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 };
+	const s8 *tia_gain_db;
+	static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 };
+	const s8 *tia_gainbits;
+	static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f };
+	const u16 *rfseq_init_gain;
	u16 init_gaincode;
	u16 clip1hi_gaincode;
	u16 clip1md_gaincode = 0;
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)

		if ((freq <= 5080) || (freq == 5825)) {

-			s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				11, 17, 22, 25};
-			s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+			static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25};
+			static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };

			crsminu_th = 0x3e;
			lna1_gain_db = lna1A_gain_db_rev7;
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
			lna2_gain_db = lna2A_gain_db_rev7;
		} else if ((freq >= 5500) && (freq <= 5700)) {

-			s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				12, 18, 22, 26};
-			s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };
+			static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+			static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 };

			crsminu_th = 0x45;
			clip1md_gaincode_B = 0x14;
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi)
			lna2_gain_db = lna2A_gain_db_rev7;
		} else {

-			s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
-			s8 lna1A_gain_db_2_rev7[] = {
-				12, 18, 22, 26};
-			s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };
+			static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 };
+			static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26};
+			static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 };

			crsminu_th = 0x41;
			lna1_gain_db = lna1A_gain_db_rev7;
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
		NPHY_RFSEQ_CMD_CLR_HIQ_DIS,
		NPHY_RFSEQ_CMD_SET_HPF_BW
	};
-	u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
-	s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
-	s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
-	s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
-	s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
-	s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
-	s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
-	s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
-	s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
-	s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
-	s8 *lna1_gain_db = NULL;
-	s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
-	s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
-	s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
-	s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
-	s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
-	s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
-	s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
-	s8 *lna2_gain_db = NULL;
-	s8 tiaG_gain_db[] = {
+	static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 };
+	static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 };
+	static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 };
+	static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 };
+	static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 };
+	static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 };
+	static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 };
+	static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 };
+	static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 };
+	static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 };
+	const s8 *lna1_gain_db = NULL;
+	static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 };
+	static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 };
+	static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 };
+	static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 };
+	static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 };
+	static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 };
+	const s8 *lna2_gain_db = NULL;
+	static const s8 tiaG_gain_db[] = {
		0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A };
-	s8 tiaA_gain_db[] = {
+	static const s8 tiaA_gain_db[] = {
		0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 };
-	s8 tiaA_gain_db_rev4[] = {
+	static const s8 tiaA_gain_db_rev4[] = {
		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev5[] = {
+	static const s8 tiaA_gain_db_rev5[] = {
		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 tiaA_gain_db_rev6[] = {
+	static const s8 tiaA_gain_db_rev6[] = {
		0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d };
-	s8 *tia_gain_db;
-	s8 tiaG_gainbits[] = {
+	const s8 *tia_gain_db;
+	static const s8 tiaG_gainbits[] = {
		0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 };
-	s8 tiaA_gainbits[] = {
+	static const s8 tiaA_gainbits[] = {
		0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 };
-	s8 tiaA_gainbits_rev4[] = {
+	static const s8 tiaA_gainbits_rev4[] = {
		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev5[] = {
+	static const s8 tiaA_gainbits_rev5[] = {
		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 tiaA_gainbits_rev6[] = {
+	static const s8 tiaA_gainbits_rev6[] = {
		0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 };
-	s8 *tia_gainbits;
-	s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
-	s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
-	u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
-	u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev5_elna[] = {
+	const s8 *tia_gainbits;
+	static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 };
+	static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 };
+	static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f };
+	static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev5_elna[] = {
		0x013f, 0x013f, 0x013f, 0x013f };
-	u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
-	u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
-	u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
-	u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
-	u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
-	u16 rfseqA_init_gain_rev4_elna[] = {
+	static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f };
+	static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f };
+	static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f };
+	static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f };
+	static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f };
+	static const u16 rfseqA_init_gain_rev4_elna[] = {
		0x314f, 0x314f, 0x314f, 0x314f };
-	u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
-	u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
-	u16 *rfseq_init_gain;
+	static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f };
+	static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f };
+	const u16 *rfseq_init_gain;
	u16 initG_gaincode = 0x627e;
	u16 initG_gaincode_rev4 = 0x527e;
	u16 initG_gaincode_rev5 = 0x427e;
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)
	u16 clip1mdA_gaincode_rev6 = 0x2084;
	u16 clip1md_gaincode = 0;
	u16 clip1loG_gaincode = 0x0074;
-	u16 clip1loG_gaincode_rev5[] = {
+	static const u16 clip1loG_gaincode_rev5[] = {
		0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c
	};
-	u16 clip1loG_gaincode_rev6[] = {
+	static const u16 clip1loG_gaincode_rev6[] = {
		0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e
	};
	u16 clip1loG_gaincode_rev6_224B0 = 0x1074;
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi)

 static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
 {
-	u8 rfseq_rx2tx_events[] = {
+	static const u8 rfseq_rx2tx_events[] = {
		NPHY_RFSEQ_CMD_NOP,
		NPHY_RFSEQ_CMD_RXG_FBW,
		NPHY_RFSEQ_CMD_TR_SWITCH,
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
		NPHY_RFSEQ_CMD_EXT_PA
	};
	u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 };
-	u8 rfseq_tx2rx_events[] = {
+	static const u8 rfseq_tx2rx_events[] = {
		NPHY_RFSEQ_CMD_NOP,
		NPHY_RFSEQ_CMD_EXT_PA,
		NPHY_RFSEQ_CMD_TX_GAIN,
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
		NPHY_RFSEQ_CMD_RXG_FBW,
		NPHY_RFSEQ_CMD_CLR_HIQ_DIS
	};
-	u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
-	u8 rfseq_tx2rx_events_rev3[] = {
+	static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_events_rev3[] = {
		NPHY_REV3_RFSEQ_CMD_EXT_PA,
		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
		NPHY_REV3_RFSEQ_CMD_TX_GAIN,
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
		NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS,
		NPHY_REV3_RFSEQ_CMD_END
	};
-	u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+	static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 };
	u8 rfseq_rx2tx_events_rev3[] = {
		NPHY_REV3_RFSEQ_CMD_NOP,
		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
	};
	u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };

-	u8 rfseq_rx2tx_events_rev3_ipa[] = {
+	static const u8 rfseq_rx2tx_events_rev3_ipa[] = {
		NPHY_REV3_RFSEQ_CMD_NOP,
		NPHY_REV3_RFSEQ_CMD_RXG_FBW,
		NPHY_REV3_RFSEQ_CMD_TR_SWITCH,
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
		NPHY_REV3_RFSEQ_CMD_INT_PA_PU,
		NPHY_REV3_RFSEQ_CMD_END
	};
-	u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
-	u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };
+	static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+	static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f };

	s16 alpha0, alpha1, alpha2;
	s16 beta0, beta1, beta2;
	u32 leg_data_weights, ht_data_weights, nss1_data_weights,
	    stbc_data_weights;
	u8 chan_freq_range = 0;
-	u16 dac_control = 0x0002;
+	static const u16 dac_control = 0x0002;
	u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 };
	u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 };
	u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 };
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
	u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 };
	u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 };
	u16 *aux_adc_gain;
-	u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
-	u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
+	static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 };
+	static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 };
	s32 min_nvar_val = 0x18d;
	s32 min_nvar_offset_6mbps = 20;
	u8 pdetrange;
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
	u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77;
	u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77;
	u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77;
-	u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
-	u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
-	u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 };
+	static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
+	static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 };
	u16 ipalvlshift_3p3_war_en = 0;
	u16 rccal_bcap_val, rccal_scap_val;
	u16 rccal_tx20_11b_bcap = 0;
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core)
	u16 bbmult;
	u16 tblentry;

-	struct nphy_txiqcal_ladder ladder_lo[] = {
+	static const struct nphy_txiqcal_ladder ladder_lo[] = {
		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
		{25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5},
		{25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7}
	};

-	struct nphy_txiqcal_ladder ladder_iq[] = {
+	static const struct nphy_txiqcal_ladder ladder_iq[] = {
		{3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0},
		{25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1},
		{100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7}
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain,
	u16 cal_gain[2];
	struct nphy_iqcal_params cal_params[2];
	u32 tbl_len;
-	void *tbl_ptr;
+	const void *tbl_ptr;
	bool ladder_updated[2];
	u8 mphase_cal_lastphase = 0;
	int bcmerror = 0;
	bool phyhang_avoid_state = false;

-	u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
		0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901,
		0x1902,
		0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607,
		0x6407
	};

-	u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
		0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400,
		0x3200,
		0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406,
		0x6407
	};

-	u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
		0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201,
		0x1202,
		0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207,
		0x4707
	};

-	u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+	static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
		0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900,
		0x2300,
		0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706,
		0x4707
	};

-	u16 tbl_tx_iqlo_cal_startcoefs[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs[] = {
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000
	};

-	u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
		0x8123, 0x8264, 0x8086, 0x8245, 0x8056,
		0x9123, 0x9264, 0x9086, 0x9245, 0x9056
	};

-	u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
		0x8101, 0x8253, 0x8053, 0x8234, 0x8034,
		0x9101, 0x9253, 0x9053, 0x9234, 0x9034
	};

-	u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = {
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
		0x0000
	};

-	u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234,
		0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234
	};

-	u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+	static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
		0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223,
		0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223
	};
@@ -1127,7 +1127,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr)
	}
	if (0 == tmp) {
		read_addr = REG_DBI_RDATA + addr % 4;
-		ret = rtl_read_byte(rtlpriv, read_addr);
+		ret = rtl_read_word(rtlpriv, read_addr);
	}
	return ret;
 }
@@ -1762,6 +1762,9 @@ static const struct usb_device_id acm_ids[] = {
	{ USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
	.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
	},
+	{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
+	.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
+	},

	{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
	.driver_info = CLEAR_HALT_CONDITIONS,
@@ -926,10 +926,12 @@ int usb_get_bos_descriptor(struct usb_device *dev)
	for (i = 0; i < num; i++) {
		buffer += length;
		cap = (struct usb_dev_cap_header *)buffer;
-		length = cap->bLength;

-		if (total_len < length)
+		if (total_len < sizeof(*cap) || total_len < cap->bLength) {
+			dev->bos->desc->bNumDeviceCaps = i;
			break;
+		}
+		length = cap->bLength;
		total_len -= length;

		if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) {
@@ -1417,11 +1417,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
			totlen += isopkt[u].length;
		}
		u *= sizeof(struct usb_iso_packet_descriptor);
-		if (totlen <= uurb->buffer_length)
-			uurb->buffer_length = totlen;
-		else
-			WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
-				  totlen, uurb->buffer_length);
+		uurb->buffer_length = totlen;
		break;

	default:
@@ -2667,13 +2667,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
	if (!(portstatus & USB_PORT_STAT_CONNECTION))
		return -ENOTCONN;

-	/* bomb out completely if the connection bounced.  A USB 3.0
-	 * connection may bounce if multiple warm resets were issued,
+	/* Retry if connect change is set but status is still connected.
+	 * A USB 3.0 connection may bounce if multiple warm resets were issued,
	 * but the device may have successfully re-connected. Ignore it.
	 */
	if (!hub_is_superspeed(hub->hdev) &&
-	    (portchange & USB_PORT_STAT_C_CONNECTION))
-		return -ENOTCONN;
+	    (portchange & USB_PORT_STAT_C_CONNECTION)) {
+		usb_clear_port_feature(hub->hdev, port1,
+				       USB_PORT_FEAT_C_CONNECTION);
+		return -EAGAIN;
+	}

	if (!(portstatus & USB_PORT_STAT_ENABLE))
		return -EBUSY;
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = {
	/* Corsair Strafe RGB */
	{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },

+	/* MIDI keyboard WORLDE MINI */
+	{ USB_DEVICE(0x1c75, 0x0204), .driver_info =
+			USB_QUIRK_CONFIG_INTF_STRINGS },
+
	/* Acer C120 LED Projector */
	{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },

@@ -877,7 +877,7 @@ b_host:
	 */
	if (int_usb & MUSB_INTR_RESET) {
		handled = IRQ_HANDLED;
-		if (devctl & MUSB_DEVCTL_HM) {
+		if (is_host_active(musb)) {
			/*
			 * When BABBLE happens what we can depends on which
			 * platform MUSB is running, because some platforms
@@ -887,9 +887,7 @@ b_host:
			 * drop the session.
			 */
			dev_err(musb->controller, "Babble\n");
-
-			if (is_host_active(musb))
-				musb_recover_from_babble(musb);
+			musb_recover_from_babble(musb);
		} else {
			dev_dbg(musb->controller, "BUS RESET as %s\n",
				usb_otg_state_string(musb->xceiv->otg->state));
@@ -320,6 +320,8 @@ static int sunxi_musb_exit(struct musb *musb)
	if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags))
		sunxi_sram_release(musb->controller->parent);

+	devm_usb_put_phy(glue->dev, glue->xceiv);
+
	return 0;
 }

@@ -45,6 +45,7 @@ struct metrousb_private {
 static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
	{ USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) },	/* MS7820 */
	{ }, /* Terminating entry. */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
@@ -313,6 +313,12 @@ retry:
	}
	down_read(&keyring_key->sem);
	ukp = user_key_payload(keyring_key);
+	if (!ukp) {
+		/* key was revoked before we acquired its semaphore */
+		res = -EKEYREVOKED;
+		up_read(&keyring_key->sem);
+		goto out;
+	}
	if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
		res = -EINVAL;
		up_read(&keyring_key->sem);
@@ -330,6 +330,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data)
	rcu_read_lock();

	confkey = user_key_payload(key);
+	if (!confkey) {
+		/* key was revoked */
+		rcu_read_unlock();
+		key_put(key);
+		goto no_config;
+	}
+
	buf = confkey->data;

	for (len = confkey->datalen - 1; len >= 0; len--) {
@@ -126,6 +126,11 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
	return (unsigned long) key_ref & 1UL;
 }

+enum key_state {
+	KEY_IS_UNINSTANTIATED,
+	KEY_IS_POSITIVE,		/* Positively instantiated */
+};
+
 /*****************************************************************************/
 /*
 * authentication token / access credential / keyring
@@ -157,6 +162,7 @@ struct key {
	 *   - may not match RCU dereferenced payload
	 *   - payload should contain own length
	 */
+	short			state;		/* Key state (+) or rejection error (-) */

 #ifdef KEY_DEBUGGING
	unsigned		magic;
@@ -165,19 +171,17 @@ struct key {
 #endif

	unsigned long		flags;		/* status flags (change with bitops) */
-#define KEY_FLAG_INSTANTIATED	0	/* set if key has been instantiated */
-#define KEY_FLAG_DEAD		1	/* set if key type has been deleted */
-#define KEY_FLAG_REVOKED	2	/* set if key had been revoked */
-#define KEY_FLAG_IN_QUOTA	3	/* set if key consumes quota */
-#define KEY_FLAG_USER_CONSTRUCT	4	/* set if key is being constructed in userspace */
-#define KEY_FLAG_NEGATIVE	5	/* set if key is negative */
-#define KEY_FLAG_ROOT_CAN_CLEAR	6	/* set if key can be cleared by root without permission */
-#define KEY_FLAG_INVALIDATED	7	/* set if key has been invalidated */
-#define KEY_FLAG_TRUSTED	8	/* set if key is trusted */
-#define KEY_FLAG_TRUSTED_ONLY	9	/* set if keyring only accepts links to trusted keys */
-#define KEY_FLAG_BUILTIN	10	/* set if key is builtin */
-#define KEY_FLAG_ROOT_CAN_INVAL	11	/* set if key can be invalidated by root without permission */
-#define KEY_FLAG_UID_KEYRING	12	/* set if key is a user or user session keyring */
+#define KEY_FLAG_DEAD		0	/* set if key type has been deleted */
+#define KEY_FLAG_REVOKED	1	/* set if key had been revoked */
+#define KEY_FLAG_IN_QUOTA	2	/* set if key consumes quota */
+#define KEY_FLAG_USER_CONSTRUCT	3	/* set if key is being constructed in userspace */
+#define KEY_FLAG_ROOT_CAN_CLEAR	4	/* set if key can be cleared by root without permission */
+#define KEY_FLAG_INVALIDATED	5	/* set if key has been invalidated */
+#define KEY_FLAG_TRUSTED	6	/* set if key is trusted */
+#define KEY_FLAG_TRUSTED_ONLY	7	/* set if keyring only accepts links to trusted keys */
+#define KEY_FLAG_BUILTIN	8	/* set if key is builtin */
+#define KEY_FLAG_ROOT_CAN_INVAL	9	/* set if key can be invalidated by root without permission */
+#define KEY_FLAG_UID_KEYRING	10	/* set if key is a user or user session keyring */

	/* the key type and key description string
	 * - the desc is used to match a key against search criteria
@@ -203,7 +207,6 @@ struct key {
			struct list_head name_link;
			struct assoc_array keys;
		};
-		int reject_error;
	};
 };

@@ -319,17 +322,27 @@ extern void key_set_timeout(struct key *, unsigned);
 #define	KEY_NEED_SETATTR 0x20	/* Require permission to change attributes */
 #define	KEY_NEED_ALL	0x3f	/* All the above permissions */

+static inline short key_read_state(const struct key *key)
+{
+	/* Barrier versus mark_key_instantiated(). */
+	return smp_load_acquire(&key->state);
+}
+
 /**
- * key_is_instantiated - Determine if a key has been positively instantiated
+ * key_is_positive - Determine if a key has been positively instantiated
 * @key: The key to check.
 *
 * Return true if the specified key has been positively instantiated, false
 * otherwise.
 */
-static inline bool key_is_instantiated(const struct key *key)
+static inline bool key_is_positive(const struct key *key)
 {
-	return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
-		!test_bit(KEY_FLAG_NEGATIVE, &key->flags);
+	return key_read_state(key) == KEY_IS_POSITIVE;
 }

+static inline bool key_is_negative(const struct key *key)
+{
+	return key_read_state(key) < 0;
+}
+
 #define rcu_dereference_key(KEY) \
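
A short usage sketch of the key-state accessors added in the hunk above, assuming the caller already holds a valid reference to the key; the example_key_state() name is hypothetical and only meant to show how the three helpers relate:

	/* Illustrative only: classify a key via the new state accessors. */
	static int example_key_state(const struct key *key)
	{
		if (key_is_positive(key))	/* state == KEY_IS_POSITIVE */
			return 1;
		if (key_is_negative(key))	/* state < 0 */
			return key_read_state(key);	/* rejection error code */
		return 0;			/* KEY_IS_UNINSTANTIATED */
	}

The point of the change is that instantiation state now lives in a single smp_load_acquire()-read field instead of two separate flag bits, which removes the KEY_FLAG_INSTANTIATED/KEY_FLAG_NEGATIVE race.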
@@ -29,8 +29,8 @@ struct mbus_dram_target_info
	struct mbus_dram_window {
		u8	cs_index;
		u8	mbus_attr;
-		u32	base;
-		u32	size;
+		u64	base;
+		u64	size;
	} cs[4];
 };

@@ -227,9 +227,10 @@ extern void proc_sched_set_task(struct task_struct *p);
 #define TASK_WAKING		256
 #define TASK_PARKED		512
 #define TASK_NOLOAD		1024
-#define TASK_STATE_MAX		2048
+#define TASK_NEW		2048
+#define TASK_STATE_MAX		4096

-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

 extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -1061,12 +1062,13 @@ struct wake_q_node {
 struct wake_q_head {
	struct wake_q_node *first;
	struct wake_q_node **lastp;
+	int count;
 };

 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)

 #define WAKE_Q(name)					\
-	struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+	struct wake_q_head name = { WAKE_Q_TAIL, &name.first, 0 }

 extern void wake_q_add(struct wake_q_head *head,
		       struct task_struct *task);
@@ -1656,6 +1658,7 @@ struct task_struct {
	struct related_thread_group *grp;
	struct list_head grp_list;
	u64 cpu_cycles;
+	u64 last_sleep_ts;
 #endif
 #ifdef CONFIG_CGROUP_SCHED
	struct task_group *sched_task_group;
@@ -1462,7 +1462,7 @@ TRACE_EVENT(sched_contrib_scale_f,
 extern unsigned int sysctl_sched_use_walt_cpu_util;
 extern unsigned int sysctl_sched_use_walt_task_util;
 extern unsigned int walt_ravg_window;
-extern unsigned int walt_disabled;
+extern bool walt_disabled;
 #endif

 /*
@@ -111,14 +111,11 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
 {
	if (tg != &root_task_group)
		return false;

	/*
-	 * We can only assume the task group can't go away on us if
-	 * autogroup_move_group() can see us on ->thread_group list.
+	 * If we race with autogroup_move_group() the caller can use the old
+	 * value of signal->autogroup but in this case sched_move_task() will
+	 * be called again before autogroup_kref_put().
	 */
-	if (p->flags & PF_EXITING)
-		return false;
-
	return true;
 }

@@ -138,13 +135,17 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
	}

	p->signal->autogroup = autogroup_kref_get(ag);
-
-	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
+	/*
+	 * We can't avoid sched_move_task() after we changed signal->autogroup,
+	 * this process can already run with task_group() == prev->tg or we can
+	 * race with cgroup code which can read autogroup = prev under rq->lock.
+	 * In the latter case for_each_thread() can not miss a migrating thread,
+	 * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+	 * can't be removed from thread list, we hold ->siglock.
+	 */
	for_each_thread(p, t)
		sched_move_task(t);
-out:
+
	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
 }
@@ -99,6 +99,10 @@

 ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);

+#ifdef CONFIG_SMP
+static bool have_sched_energy_data(void);
+#endif
+
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

@@ -201,6 +205,11 @@ static int sched_feat_set(char *cmp)
			sysctl_sched_features &= ~(1UL << i);
			sched_feat_disable(i);
		} else {
+#ifdef CONFIG_SMP
+			if (i == __SCHED_FEAT_ENERGY_AWARE)
+				WARN(!have_sched_energy_data(),
+				     "Missing sched energy data\n");
+#endif
			sysctl_sched_features |= (1UL << i);
			sched_feat_enable(i);
		}
@@ -554,6 +563,8 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
	if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
		return;

+	head->count++;
+
	get_task_struct(task);

	/*
@@ -563,6 +574,10 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
	head->lastp = &node->next;
 }

+static int
+try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
+	       int sibling_count_hint);
+
 void wake_up_q(struct wake_q_head *head)
 {
	struct wake_q_node *node = head->first;
@@ -577,10 +592,10 @@ void wake_up_q(struct wake_q_head *head)
		task->wake_q.next = NULL;

		/*
-		 * wake_up_process() implies a wmb() to pair with the queueing
+		 * try_to_wake_up() implies a wmb() to pair with the queueing
		 * in wake_q_add() so as not to miss wakeups.
		 */
-		wake_up_process(task);
+		try_to_wake_up(task, TASK_NORMAL, 0, head->count);
		put_task_struct(task);
	}
 }
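
A rough sketch of the flow introduced above, under the assumption that callers use the usual wake_q pattern; the example_wake_two() function is hypothetical and only illustrates how the new count reaches try_to_wake_up():

	/* Illustrative only: wake_q_add() bumps head->count, and wake_up_q()
	 * passes that count to try_to_wake_up() as sibling_count_hint so the
	 * scheduler knows how many tasks are being woken together.
	 */
	static void example_wake_two(struct task_struct *a, struct task_struct *b)
	{
		WAKE_Q(wq);		/* count starts at 0 */

		wake_q_add(&wq, a);	/* count = 1 */
		wake_q_add(&wq, b);	/* count = 2 */
		wake_up_q(&wq);		/* each wakeup gets hint = 2 */
	}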
@ -1702,14 +1717,16 @@ out:
|
|||
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
|
||||
*/
|
||||
static inline
|
||||
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
|
||||
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags,
|
||||
int sibling_count_hint)
|
||||
{
|
||||
bool allow_isolated = (p->flags & PF_KTHREAD);
|
||||
|
||||
lockdep_assert_held(&p->pi_lock);
|
||||
|
||||
if (p->nr_cpus_allowed > 1)
|
||||
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags,
|
||||
sibling_count_hint);
|
||||
|
||||
/*
|
||||
* In order not to call set_task_cpu() on a blocking task we need
|
||||
|
@ -2007,6 +2024,8 @@ static void ttwu_queue(struct task_struct *p, int cpu)
|
|||
* @p: the thread to be awakened
|
||||
* @state: the mask of task states that can be woken
|
||||
* @wake_flags: wake modifier flags (WF_*)
|
||||
* @sibling_count_hint: A hint at the number of threads that are being woken up
|
||||
* in this event.
|
||||
*
|
||||
* Put it on the run-queue if it's not already there. The "current"
|
||||
* thread is always on the run-queue (except when the actual
|
||||
|
@@ -2018,7 +2037,8 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 * or @state didn't match @p's state.
 */
static int
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
	       int sibling_count_hint)
{
	unsigned long flags;
	int cpu, src_cpu, success = 0;
@@ -2134,7 +2154,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
	if (p->sched_class->task_waking)
		p->sched_class->task_waking(p);

	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags,
			     sibling_count_hint);

	/* Refresh src_cpu as it could have changed since we last read it */
	src_cpu = task_cpu(p);
@@ -2236,7 +2257,7 @@ out:
 */
int wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_NORMAL, 0);
	return try_to_wake_up(p, TASK_NORMAL, 0, 1);
}
EXPORT_SYMBOL(wake_up_process);
@@ -2256,13 +2277,13 @@ EXPORT_SYMBOL(wake_up_process);
int wake_up_process_no_notif(struct task_struct *p)
{
	WARN_ON(task_is_stopped_or_traced(p));
	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER, 1);
}
EXPORT_SYMBOL(wake_up_process_no_notif);

int wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
	return try_to_wake_up(p, state, 0, 1);
}

/*
@ -2337,9 +2358,16 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
|
|||
p->se.prev_sum_exec_runtime = 0;
|
||||
p->se.nr_migrations = 0;
|
||||
p->se.vruntime = 0;
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
p->last_sleep_ts = 0;
|
||||
#endif
|
||||
|
||||
INIT_LIST_HEAD(&p->se.group_node);
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
p->se.cfs_rq = NULL;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
|
||||
#endif
|
||||
|
@@ -2429,11 +2457,11 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)

	__sched_fork(clone_flags, p);
	/*
	 * We mark the process as running here. This guarantees that
	 * We mark the process as NEW here. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;
	p->state = TASK_NEW;

	/*
	 * Make sure we do not leak PI boosting priority to the child.
@ -2470,8 +2498,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
|
|||
p->sched_class = &fair_sched_class;
|
||||
}
|
||||
|
||||
if (p->sched_class->task_fork)
|
||||
p->sched_class->task_fork(p);
|
||||
init_entity_runnable_average(&p->se);
|
||||
|
||||
/*
|
||||
* The child is not yet in the pid-hash so no cgroup attach races,
|
||||
|
@ -2481,7 +2508,13 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
|
|||
* Silence PROVE_RCU.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
set_task_cpu(p, cpu);
|
||||
/*
|
||||
* We're setting the cpu for the first time, we don't migrate,
|
||||
* so use __set_task_cpu().
|
||||
*/
|
||||
__set_task_cpu(p, cpu);
|
||||
if (p->sched_class->task_fork)
|
||||
p->sched_class->task_fork(p);
|
||||
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
|
||||
|
||||
#ifdef CONFIG_SCHED_INFO
|
||||
|
@ -2614,6 +2647,8 @@ void wake_up_new_task(struct task_struct *p)
|
|||
|
||||
add_new_task_to_grp(p);
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
p->state = TASK_RUNNING;
|
||||
|
||||
/* Initialize new task's runnable average */
|
||||
init_entity_runnable_average(&p->se);
|
||||
#ifdef CONFIG_SMP
|
||||
|
@ -2621,11 +2656,15 @@ void wake_up_new_task(struct task_struct *p)
|
|||
* Fork balancing, do it here and not earlier because:
|
||||
* - cpus_allowed can change in the fork path
|
||||
* - any previously selected cpu might disappear through hotplug
|
||||
*
|
||||
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
|
||||
* as we're not fully set-up yet.
|
||||
*/
|
||||
set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
|
||||
__set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0, 1));
|
||||
#endif
|
||||
rq = __task_rq_lock(p);
|
||||
mark_task_starting(p);
|
||||
update_rq_clock(rq);
|
||||
post_init_entity_util_avg(&p->se);
|
||||
activate_task(rq, p, ENQUEUE_WAKEUP_NEW);
|
||||
p->on_rq = TASK_ON_RQ_QUEUED;
|
||||
|
@ -3071,7 +3110,7 @@ void sched_exec(void)
|
|||
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
curr_cpu = task_cpu(p);
|
||||
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
|
||||
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0, 1);
|
||||
if (dest_cpu == smp_processor_id())
|
||||
goto unlock;
|
||||
|
||||
|
@ -3171,7 +3210,9 @@ static void sched_freq_tick_pelt(int cpu)
|
|||
* utilization and to harm its performance the least, request
|
||||
* a jump to a higher OPP as soon as the margin of free capacity
|
||||
* is impacted (specified by capacity_margin).
|
||||
* Remember CPU utilization in sched_capacity_reqs should be normalised.
|
||||
*/
|
||||
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
|
||||
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
|
||||
}
|
||||
|
||||
|
@ -3198,7 +3239,9 @@ static void sched_freq_tick_walt(int cpu)
|
|||
* It is likely that the load is growing so we
|
||||
* keep the added margin in our request as an
|
||||
* extra boost.
|
||||
* Remember CPU utilization in sched_capacity_reqs should be normalised.
|
||||
*/
|
||||
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
|
||||
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
|
||||
|
||||
}
|
||||
|
@ -3579,6 +3622,10 @@ static void __sched notrace __schedule(bool preempt)
|
|||
if (!is_idle_task(prev) && !prev->on_rq)
|
||||
update_avg_burst(prev);
|
||||
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
if (!prev->on_rq)
|
||||
prev->last_sleep_ts = wallclock;
|
||||
#endif
|
||||
rq->nr_switches++;
|
||||
rq->curr = next;
|
||||
++*switch_count;
|
||||
|
@ -3755,7 +3802,7 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
|
|||
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
|
||||
void *key)
|
||||
{
|
||||
return try_to_wake_up(curr->private, mode, wake_flags);
|
||||
return try_to_wake_up(curr->private, mode, wake_flags, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(default_wake_function);
|
||||
|
||||
|
@ -3781,6 +3828,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
|
|||
BUG_ON(prio > MAX_PRIO);
|
||||
|
||||
rq = __task_rq_lock(p);
|
||||
update_rq_clock(rq);
|
||||
|
||||
/*
|
||||
* Idle task boosting is a nono in general. There is one
|
||||
|
@ -3876,6 +3924,8 @@ void set_user_nice(struct task_struct *p, long nice)
|
|||
* the task might be in the middle of scheduling on another CPU.
|
||||
*/
|
||||
rq = task_rq_lock(p, &flags);
|
||||
update_rq_clock(rq);
|
||||
|
||||
/*
|
||||
* The RT priorities are set via sched_setscheduler(), but we still
|
||||
* allow the 'normal' nice value to be set - but as expected
|
||||
|
@ -4303,6 +4353,7 @@ recheck:
|
|||
* runqueue lock must be held.
|
||||
*/
|
||||
rq = task_rq_lock(p, &flags);
|
||||
update_rq_clock(rq);
|
||||
|
||||
/*
|
||||
* Changing the policy of the stop threads its a very bad idea
|
||||
|
@ -7151,6 +7202,19 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
|
|||
atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
|
||||
}
|
||||
|
||||
static bool have_sched_energy_data(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (!rcu_dereference(per_cpu(sd_scs, cpu)) ||
|
||||
!rcu_dereference(per_cpu(sd_ea, cpu)))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that the per-cpu provided sd energy data is consistent for all cpus
|
||||
* within the mask.
|
||||
|
@ -7967,6 +8031,9 @@ static int build_sched_domains(const struct cpumask *cpu_map,
|
|||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
WARN(sched_feat(ENERGY_AWARE) && !have_sched_energy_data(),
|
||||
"Missing data for energy aware scheduling\n");
|
||||
|
||||
ret = 0;
|
||||
error:
|
||||
__free_domain_allocs(&d, alloc_state, cpu_map);
|
||||
|
@ -8784,14 +8851,37 @@ void sched_offline_group(struct task_group *tg)
|
|||
spin_unlock_irqrestore(&task_group_lock, flags);
|
||||
}
|
||||
|
||||
/* change task's runqueue when it moves between groups.
|
||||
* The caller of this function should have put the task in its new group
|
||||
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
|
||||
* reflect its new group.
|
||||
static void sched_change_group(struct task_struct *tsk, int type)
|
||||
{
|
||||
struct task_group *tg;
|
||||
|
||||
/*
|
||||
* All callers are synchronized by task_rq_lock(); we do not use RCU
|
||||
* which is pointless here. Thus, we pass "true" to task_css_check()
|
||||
* to prevent lockdep warnings.
|
||||
*/
|
||||
tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
|
||||
struct task_group, css);
|
||||
tg = autogroup_task_group(tsk, tg);
|
||||
tsk->sched_task_group = tg;
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
if (tsk->sched_class->task_change_group)
|
||||
tsk->sched_class->task_change_group(tsk, type);
|
||||
else
|
||||
#endif
|
||||
set_task_rq(tsk, task_cpu(tsk));
|
||||
}
|
||||
|
||||
/*
|
||||
* Change task's runqueue when it moves between groups.
|
||||
*
|
||||
* The caller of this function should have put the task in its new group by
|
||||
* now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
|
||||
* its new group.
|
||||
*/
|
||||
void sched_move_task(struct task_struct *tsk)
|
||||
{
|
||||
struct task_group *tg;
|
||||
int queued, running;
|
||||
unsigned long flags;
|
||||
struct rq *rq;
|
||||
|
@ -8806,22 +8896,7 @@ void sched_move_task(struct task_struct *tsk)
|
|||
if (unlikely(running))
|
||||
put_prev_task(rq, tsk);
|
||||
|
||||
/*
|
||||
* All callers are synchronized by task_rq_lock(); we do not use RCU
|
||||
* which is pointless here. Thus, we pass "true" to task_css_check()
|
||||
* to prevent lockdep warnings.
|
||||
*/
|
||||
tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
|
||||
struct task_group, css);
|
||||
tg = autogroup_task_group(tsk, tg);
|
||||
tsk->sched_task_group = tg;
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
if (tsk->sched_class->task_move_group)
|
||||
tsk->sched_class->task_move_group(tsk);
|
||||
else
|
||||
#endif
|
||||
set_task_rq(tsk, task_cpu(tsk));
|
||||
sched_change_group(tsk, TASK_MOVE_GROUP);
|
||||
|
||||
if (unlikely(running))
|
||||
tsk->sched_class->set_curr_task(rq);
|
||||
|
@ -9258,15 +9333,28 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
|
|||
sched_free_group(tg);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called before wake_up_new_task(), therefore we really only
|
||||
* have to set its group bits, all the other stuff does not apply.
|
||||
*/
|
||||
static void cpu_cgroup_fork(struct task_struct *task, void *private)
|
||||
{
|
||||
sched_move_task(task);
|
||||
unsigned long flags;
|
||||
struct rq *rq;
|
||||
|
||||
rq = task_rq_lock(task, &flags);
|
||||
|
||||
update_rq_clock(rq);
|
||||
sched_change_group(task, TASK_SET_GROUP);
|
||||
|
||||
task_rq_unlock(rq, task, &flags);
|
||||
}
|
||||
|
||||
static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
|
||||
{
|
||||
struct task_struct *task;
|
||||
struct cgroup_subsys_state *css;
|
||||
int ret = 0;
|
||||
|
||||
cgroup_taskset_for_each(task, css, tset) {
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
|
@ -9277,8 +9365,24 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
|
|||
if (task->sched_class != &fair_sched_class)
|
||||
return -EINVAL;
|
||||
#endif
|
||||
/*
|
||||
* Serialize against wake_up_new_task() such that if its
|
||||
* running, we're sure to observe its full state.
|
||||
*/
|
||||
raw_spin_lock_irq(&task->pi_lock);
|
||||
/*
|
||||
* Avoid calling sched_move_task() before wake_up_new_task()
|
||||
* has happened. This would lead to problems with PELT, due to
|
||||
* move wanting to detach+attach while we're not attached yet.
|
||||
*/
|
||||
if (task->state == TASK_NEW)
|
||||
ret = -EINVAL;
|
||||
raw_spin_unlock_irq(&task->pi_lock);
|
||||
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cpu_cgroup_attach(struct cgroup_taskset *tset)
|
||||
|
|
|
@@ -202,7 +202,7 @@ static void update_fdomain_capacity_request(int cpu)
	}

	/* Convert the new maximum capacity request into a cpu frequency */
	freq_new = capacity * policy->max >> SCHED_CAPACITY_SHIFT;
	freq_new = capacity * policy->cpuinfo.max_freq >> SCHED_CAPACITY_SHIFT;
	if (cpufreq_frequency_table_target(policy, policy->freq_table,
					   freq_new, CPUFREQ_RELATION_L,
					   &index_new))
@@ -216,8 +216,9 @@ static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)

	*util = boosted_cpu_util(cpu);
	if (likely(use_pelt()))
		*util = min((*util + rt), max_cap);
		*util = *util + rt;

	*util = min(*util, max_cap);
	*max = max_cap;
}
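
The hunk above reorders the schedutil utilization path: the RT contribution is always added, and the sum is always clamped to the CPU's maximum capacity, instead of clamping only in the PELT case. A minimal standalone sketch of that ordering, with a made-up helper name:

#include <stdio.h>

/* Hypothetical helper; mirrors the new order: add first, then clamp. */
static unsigned long sugov_clamp_util(unsigned long cfs_util,
				      unsigned long rt_util,
				      unsigned long max_cap)
{
	unsigned long util = cfs_util + rt_util;

	return util < max_cap ? util : max_cap;
}

int main(void)
{
	/* Example numbers only. */
	printf("%lu\n", sugov_clamp_util(700, 500, 1024));	/* prints 1024 */
	printf("%lu\n", sugov_clamp_util(300, 100, 1024));	/* prints 400 */
	return 0;
}
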
@@ -1107,7 +1107,8 @@ static void yield_task_dl(struct rq *rq)
static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags,
		  int sibling_count_hint)
{
	struct task_struct *curr;
	struct rq *rq;
@ -762,7 +762,9 @@ void init_entity_runnable_average(struct sched_entity *se)
|
|||
}
|
||||
|
||||
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
|
||||
static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
|
||||
static void attach_entity_cfs_rq(struct sched_entity *se);
|
||||
static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
|
||||
|
||||
/*
|
||||
* With new tasks being created, their initial util_avgs are extrapolated
|
||||
|
@ -833,7 +835,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
|
|||
attach_entity_cfs_rq(se);
|
||||
}
|
||||
|
||||
#else
|
||||
#else /* !CONFIG_SMP */
|
||||
void init_entity_runnable_average(struct sched_entity *se)
|
||||
{
|
||||
}
|
||||
|
@ -4412,11 +4414,14 @@ void remove_entity_load_avg(struct sched_entity *se)
|
|||
struct cfs_rq *cfs_rq = cfs_rq_of(se);
|
||||
|
||||
/*
|
||||
* Newly created task or never used group entity should not be removed
|
||||
* from its (source) cfs_rq
|
||||
* tasks cannot exit without having gone through wake_up_new_task() ->
|
||||
* post_init_entity_util_avg() which will have added things to the
|
||||
* cfs_rq, so we can remove unconditionally.
|
||||
*
|
||||
* Similarly for groups, they will have passed through
|
||||
* post_init_entity_util_avg() before unregister_sched_fair_group()
|
||||
* calls this.
|
||||
*/
|
||||
if (se->avg.last_update_time == 0)
|
||||
return;
|
||||
|
||||
sync_entity_load_avg(se);
|
||||
atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
|
||||
|
@ -5824,7 +5829,7 @@ static void update_capacity_of(int cpu)
|
|||
if (!sched_freq())
|
||||
return;
|
||||
|
||||
/* Convert scale-invariant capacity to cpu. */
|
||||
/* Normalize scale-invariant capacity to cpu. */
|
||||
req_cap = boosted_cpu_util(cpu);
|
||||
req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
|
||||
set_cfs_cpu_capacity(cpu, true, req_cap);
|
||||
|
@ -5867,7 +5872,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
|
|||
*
|
||||
* note: in the case of encountering a throttled cfs_rq we will
|
||||
* post the final h_nr_running increment below.
|
||||
*/
|
||||
*/
|
||||
if (cfs_rq_throttled(cfs_rq))
|
||||
break;
|
||||
cfs_rq->h_nr_running++;
|
||||
|
@ -6023,7 +6028,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
|
|||
if (rq->cfs.nr_running)
|
||||
update_capacity_of(cpu_of(rq));
|
||||
else if (sched_freq())
|
||||
set_cfs_cpu_capacity(cpu_of(rq), false, 0);
|
||||
set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6446,6 +6451,7 @@ struct energy_env {
|
|||
int util_delta;
|
||||
int src_cpu;
|
||||
int dst_cpu;
|
||||
int trg_cpu;
|
||||
int energy;
|
||||
int payoff;
|
||||
struct task_struct *task;
|
||||
|
@ -6462,11 +6468,14 @@ struct energy_env {
|
|||
} cap;
|
||||
};
|
||||
|
||||
static int cpu_util_wake(int cpu, struct task_struct *p);
|
||||
|
||||
/*
|
||||
* __cpu_norm_util() returns the cpu util relative to a specific capacity,
|
||||
* i.e. it's busy ratio, in the range [0..SCHED_LOAD_SCALE] which is useful for
|
||||
* energy calculations. Using the scale-invariant util returned by
|
||||
* cpu_util() and approximating scale-invariant util by:
|
||||
* i.e. it's busy ratio, in the range [0..SCHED_LOAD_SCALE], which is useful for
|
||||
* energy calculations.
|
||||
*
|
||||
* Since util is a scale-invariant utilization defined as:
|
||||
*
|
||||
* util ~ (curr_freq/max_freq)*1024 * capacity_orig/1024 * running_time/time
|
||||
*
|
||||
|
@ -6476,34 +6485,32 @@ struct energy_env {
|
|||
*
|
||||
* norm_util = running_time/time ~ util/capacity
|
||||
*/
|
||||
static unsigned long __cpu_norm_util(int cpu, unsigned long capacity, int delta)
|
||||
static unsigned long __cpu_norm_util(unsigned long util, unsigned long capacity)
|
||||
{
|
||||
int util = __cpu_util(cpu, delta);
|
||||
|
||||
if (util >= capacity)
|
||||
return SCHED_CAPACITY_SCALE;
|
||||
|
||||
return (util << SCHED_CAPACITY_SHIFT)/capacity;
|
||||
}
|
||||
|
||||
static int calc_util_delta(struct energy_env *eenv, int cpu)
|
||||
static unsigned long group_max_util(struct energy_env *eenv)
|
||||
{
|
||||
if (cpu == eenv->src_cpu)
|
||||
return -eenv->util_delta;
|
||||
if (cpu == eenv->dst_cpu)
|
||||
return eenv->util_delta;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static
|
||||
unsigned long group_max_util(struct energy_env *eenv)
|
||||
{
|
||||
int i, delta;
|
||||
unsigned long max_util = 0;
|
||||
unsigned long util;
|
||||
int cpu;
|
||||
|
||||
for_each_cpu(i, sched_group_cpus(eenv->sg_cap)) {
|
||||
delta = calc_util_delta(eenv, i);
|
||||
max_util = max(max_util, __cpu_util(i, delta));
|
||||
for_each_cpu(cpu, sched_group_cpus(eenv->sg_cap)) {
|
||||
util = cpu_util_wake(cpu, eenv->task);
|
||||
|
||||
/*
|
||||
* If we are looking at the target CPU specified by the eenv,
|
||||
* then we should add the (estimated) utilization of the task
|
||||
* assuming we will wake it up on that CPU.
|
||||
*/
|
||||
if (unlikely(cpu == eenv->trg_cpu))
|
||||
util += eenv->util_delta;
|
||||
|
||||
max_util = max(max_util, util);
|
||||
}
|
||||
|
||||
return max_util;
|
||||
|
@ -6511,44 +6518,56 @@ unsigned long group_max_util(struct energy_env *eenv)
|
|||
|
||||
/*
|
||||
* group_norm_util() returns the approximated group util relative to it's
|
||||
* current capacity (busy ratio) in the range [0..SCHED_LOAD_SCALE] for use in
|
||||
* energy calculations. Since task executions may or may not overlap in time in
|
||||
* the group the true normalized util is between max(cpu_norm_util(i)) and
|
||||
* sum(cpu_norm_util(i)) when iterating over all cpus in the group, i. The
|
||||
* latter is used as the estimate as it leads to a more pessimistic energy
|
||||
* current capacity (busy ratio), in the range [0..SCHED_LOAD_SCALE], for use
|
||||
* in energy calculations.
|
||||
*
|
||||
* Since task executions may or may not overlap in time in the group the true
|
||||
* normalized util is between MAX(cpu_norm_util(i)) and SUM(cpu_norm_util(i))
|
||||
* when iterating over all CPUs in the group.
|
||||
* The latter estimate is used as it leads to a more pessimistic energy
|
||||
* estimate (more busy).
|
||||
*/
|
||||
static unsigned
|
||||
long group_norm_util(struct energy_env *eenv, struct sched_group *sg)
|
||||
{
|
||||
int i, delta;
|
||||
unsigned long util_sum = 0;
|
||||
unsigned long capacity = sg->sge->cap_states[eenv->cap_idx].cap;
|
||||
unsigned long util, util_sum = 0;
|
||||
int cpu;
|
||||
|
||||
for_each_cpu(i, sched_group_cpus(sg)) {
|
||||
delta = calc_util_delta(eenv, i);
|
||||
util_sum += __cpu_norm_util(i, capacity, delta);
|
||||
for_each_cpu(cpu, sched_group_cpus(sg)) {
|
||||
util = cpu_util_wake(cpu, eenv->task);
|
||||
|
||||
/*
|
||||
* If we are looking at the target CPU specified by the eenv,
|
||||
* then we should add the (estimated) utilization of the task
|
||||
* assuming we will wake it up on that CPU.
|
||||
*/
|
||||
if (unlikely(cpu == eenv->trg_cpu))
|
||||
util += eenv->util_delta;
|
||||
|
||||
util_sum += __cpu_norm_util(util, capacity);
|
||||
}
|
||||
|
||||
if (util_sum > SCHED_CAPACITY_SCALE)
|
||||
return SCHED_CAPACITY_SCALE;
|
||||
return util_sum;
|
||||
return min_t(unsigned long, util_sum, SCHED_CAPACITY_SCALE);
|
||||
}
|
||||
|
||||
static int find_new_capacity(struct energy_env *eenv,
|
||||
const struct sched_group_energy * const sge)
|
||||
{
|
||||
int idx;
|
||||
int idx, max_idx = sge->nr_cap_states - 1;
|
||||
unsigned long util = group_max_util(eenv);
|
||||
|
||||
/* default is max_cap if we don't find a match */
|
||||
eenv->cap_idx = max_idx;
|
||||
|
||||
for (idx = 0; idx < sge->nr_cap_states; idx++) {
|
||||
if (sge->cap_states[idx].cap >= util)
|
||||
if (sge->cap_states[idx].cap >= util) {
|
||||
eenv->cap_idx = idx;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
eenv->cap_idx = idx;
|
||||
|
||||
return idx;
|
||||
return eenv->cap_idx;
|
||||
}
|
||||
|
||||
static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
|
||||
|
@ -6721,6 +6740,8 @@ static inline bool cpu_in_sg(struct sched_group *sg, int cpu)
|
|||
return cpu != -1 && cpumask_test_cpu(cpu, sched_group_cpus(sg));
|
||||
}
|
||||
|
||||
static inline unsigned long task_util(struct task_struct *p);
|
||||
|
||||
/*
|
||||
* energy_diff(): Estimate the energy impact of changing the utilization
|
||||
* distribution. eenv specifies the change: utilisation amount, source, and
|
||||
|
@ -6736,11 +6757,13 @@ static inline int __energy_diff(struct energy_env *eenv)
|
|||
int diff, margin;
|
||||
|
||||
struct energy_env eenv_before = {
|
||||
.util_delta = 0,
|
||||
.util_delta = task_util(eenv->task),
|
||||
.src_cpu = eenv->src_cpu,
|
||||
.dst_cpu = eenv->dst_cpu,
|
||||
.trg_cpu = eenv->src_cpu,
|
||||
.nrg = { 0, 0, 0, 0},
|
||||
.cap = { 0, 0, 0 },
|
||||
.task = eenv->task,
|
||||
};
|
||||
|
||||
if (eenv->src_cpu == eenv->dst_cpu)
|
||||
|
@ -6799,7 +6822,11 @@ static inline int __energy_diff(struct energy_env *eenv)
|
|||
#ifdef CONFIG_SCHED_TUNE
|
||||
|
||||
struct target_nrg schedtune_target_nrg;
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHEDTUNE
|
||||
extern bool schedtune_initialized;
|
||||
#endif /* CONFIG_CGROUP_SCHEDTUNE */
|
||||
|
||||
/*
|
||||
* System energy normalization
|
||||
* Returns the normalized value, in the range [0..SCHED_CAPACITY_SCALE],
|
||||
|
@ -6810,9 +6837,11 @@ normalize_energy(int energy_diff)
|
|||
{
|
||||
u32 normalized_nrg;
|
||||
|
||||
#ifdef CONFIG_CGROUP_SCHEDTUNE
|
||||
/* during early setup, we don't know the extents */
|
||||
if (unlikely(!schedtune_initialized))
|
||||
return energy_diff < 0 ? -1 : 1 ;
|
||||
#endif /* CONFIG_CGROUP_SCHEDTUNE */
|
||||
|
||||
#ifdef CONFIG_SCHED_DEBUG
|
||||
{
|
||||
|
@ -6848,8 +6877,14 @@ energy_diff(struct energy_env *eenv)
|
|||
__energy_diff(eenv);
|
||||
|
||||
/* Return energy diff when boost margin is 0 */
|
||||
if (boost == 0)
|
||||
if (boost == 0) {
|
||||
trace_sched_energy_diff(eenv->task,
|
||||
eenv->src_cpu, eenv->dst_cpu, eenv->util_delta,
|
||||
eenv->nrg.before, eenv->nrg.after, eenv->nrg.diff,
|
||||
eenv->cap.before, eenv->cap.after, eenv->cap.delta,
|
||||
0, -eenv->nrg.diff);
|
||||
return eenv->nrg.diff;
|
||||
}
|
||||
|
||||
/* Compute normalized energy diff */
|
||||
nrg_delta = normalize_energy(eenv->nrg.diff);
|
||||
|
@@ -6892,15 +6927,18 @@ energy_diff(struct energy_env *eenv)
 * being client/server, worker/dispatcher, interrupt source or whatever is
 * irrelevant, spread criteria is apparent partner count exceeds socket size.
 */
static int wake_wide(struct task_struct *p)
static int wake_wide(struct task_struct *p, int sibling_count_hint)
{
	unsigned int master = current->wakee_flips;
	unsigned int slave = p->wakee_flips;
	int factor = this_cpu_read(sd_llc_size);
	int llc_size = this_cpu_read(sd_llc_size);

	if (sibling_count_hint >= llc_size)
		return 1;

	if (master < slave)
		swap(master, slave);
	if (slave < factor || master < slave * factor)
	if (slave < llc_size || master < slave * llc_size)
		return 0;
	return 1;
}
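
As a standalone approximation of the updated wake_wide() heuristic shown above: a wakeup fanning out to at least an LLC's worth of siblings is spread immediately, otherwise the existing wakee-flip test decides. The function and parameter names below are illustrative, not the kernel's.

#include <stdio.h>

/* Returns 1 when the wakeup should be spread beyond the waker's LLC. */
static int wake_wide_sketch(unsigned int master_flips, unsigned int slave_flips,
			    unsigned int llc_size, int sibling_count_hint)
{
	if (sibling_count_hint >= (int)llc_size)
		return 1;

	if (master_flips < slave_flips) {
		unsigned int tmp = master_flips;

		master_flips = slave_flips;
		slave_flips = tmp;
	}
	if (slave_flips < llc_size || master_flips < slave_flips * llc_size)
		return 0;
	return 1;
}

int main(void)
{
	/* With a 4-CPU LLC, a 4-thread wakeup spreads regardless of flips. */
	printf("%d\n", wake_wide_sketch(1, 1, 4, 4));	/* prints 1 */
	printf("%d\n", wake_wide_sketch(1, 1, 4, 1));	/* prints 0 */
	return 0;
}
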
@ -7106,8 +7144,6 @@ boosted_task_util(struct task_struct *task)
|
|||
return util + margin;
|
||||
}
|
||||
|
||||
static int cpu_util_wake(int cpu, struct task_struct *p);
|
||||
|
||||
static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
|
||||
{
|
||||
return capacity_orig_of(cpu) - cpu_util_wake(cpu, p);
|
||||
|
@ -7116,6 +7152,8 @@ static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
|
|||
/*
|
||||
* find_idlest_group finds and returns the least busy CPU group within the
|
||||
* domain.
|
||||
*
|
||||
* Assumes p is allowed on at least one CPU in sd.
|
||||
*/
|
||||
static struct sched_group *
|
||||
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
|
||||
|
@ -7123,7 +7161,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
|
|||
{
|
||||
struct sched_group *idlest = NULL, *group = sd->groups;
|
||||
struct sched_group *most_spare_sg = NULL;
|
||||
unsigned long min_load = ULONG_MAX, this_load = 0;
|
||||
unsigned long min_load = ULONG_MAX, this_load = ULONG_MAX;
|
||||
unsigned long most_spare = 0, this_spare = 0;
|
||||
int load_idx = sd->forkexec_idx;
|
||||
int imbalance = 100 + (sd->imbalance_pct-100)/2;
|
||||
|
@ -7191,23 +7229,31 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
|
|||
* utilized systems if we require spare_capacity > task_util(p),
|
||||
* so we allow for some task stuffing by using
|
||||
* spare_capacity > task_util(p)/2.
|
||||
*
|
||||
* Spare capacity can't be used for fork because the utilization has
|
||||
* not been set yet, we must first select a rq to compute the initial
|
||||
* utilization.
|
||||
*/
|
||||
if (sd_flag & SD_BALANCE_FORK)
|
||||
goto skip_spare;
|
||||
|
||||
if (this_spare > task_util(p) / 2 &&
|
||||
imbalance*this_spare > 100*most_spare)
|
||||
return NULL;
|
||||
else if (most_spare > task_util(p) / 2)
|
||||
return most_spare_sg;
|
||||
|
||||
skip_spare:
|
||||
if (!idlest || 100*this_load < imbalance*min_load)
|
||||
return NULL;
|
||||
return idlest;
|
||||
}
|
||||
|
||||
/*
|
||||
* find_idlest_cpu - find the idlest cpu among the cpus in group.
|
||||
* find_idlest_group_cpu - find the idlest cpu among the cpus in group.
|
||||
*/
|
||||
static int
|
||||
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
|
||||
find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
|
||||
{
|
||||
unsigned long load, min_load = ULONG_MAX;
|
||||
unsigned int min_exit_latency = UINT_MAX;
|
||||
|
@ -7254,6 +7300,68 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
|
|||
}
|
||||
|
||||
return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
|
||||
}
|
||||
|
||||
static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p,
|
||||
int cpu, int prev_cpu, int sd_flag)
|
||||
{
|
||||
int new_cpu = cpu;
|
||||
int wu = sd_flag & SD_BALANCE_WAKE;
|
||||
int cas_cpu = -1;
|
||||
|
||||
if (wu) {
|
||||
schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
|
||||
schedstat_inc(this_rq(), eas_stats.cas_attempts);
|
||||
}
|
||||
|
||||
if (!cpumask_intersects(sched_domain_span(sd), &p->cpus_allowed))
|
||||
return prev_cpu;
|
||||
|
||||
while (sd) {
|
||||
struct sched_group *group;
|
||||
struct sched_domain *tmp;
|
||||
int weight;
|
||||
|
||||
if (wu)
|
||||
schedstat_inc(sd, eas_stats.cas_attempts);
|
||||
|
||||
if (!(sd->flags & sd_flag)) {
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
group = find_idlest_group(sd, p, cpu, sd_flag);
|
||||
if (!group) {
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
new_cpu = find_idlest_group_cpu(group, p, cpu);
|
||||
if (new_cpu == cpu) {
|
||||
/* Now try balancing at a lower domain level of cpu */
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Now try balancing at a lower domain level of new_cpu */
|
||||
cpu = cas_cpu = new_cpu;
|
||||
weight = sd->span_weight;
|
||||
sd = NULL;
|
||||
for_each_domain(cpu, tmp) {
|
||||
if (weight <= tmp->span_weight)
|
||||
break;
|
||||
if (tmp->flags & sd_flag)
|
||||
sd = tmp;
|
||||
}
|
||||
/* while loop will break here if sd == NULL */
|
||||
}
|
||||
|
||||
if (wu && (cas_cpu >= 0)) {
|
||||
schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
|
||||
schedstat_inc(this_rq(), eas_stats.cas_count);
|
||||
}
|
||||
|
||||
return new_cpu;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -7397,7 +7505,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
|
|||
unsigned long target_capacity = ULONG_MAX;
|
||||
unsigned long min_wake_util = ULONG_MAX;
|
||||
unsigned long target_max_spare_cap = 0;
|
||||
unsigned long target_util = ULONG_MAX;
|
||||
unsigned long best_active_util = ULONG_MAX;
|
||||
int best_idle_cstate = INT_MAX;
|
||||
struct sched_domain *sd;
|
||||
|
@ -7535,6 +7642,19 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
|
|||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enforce EAS mode
|
||||
*
|
||||
* For non latency sensitive tasks, skip CPUs that
|
||||
* will be overutilized by moving the task there.
|
||||
*
|
||||
* The goal here is to remain in EAS mode as long as
|
||||
* possible at least for !prefer_idle tasks.
|
||||
*/
|
||||
if ((new_util * capacity_margin) >
|
||||
(capacity_orig * SCHED_CAPACITY_SCALE))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* Case B) Non latency sensitive tasks on IDLE CPUs.
|
||||
*
|
||||
|
@ -7613,7 +7733,6 @@ static inline int find_best_target(struct task_struct *p, int *backup_cpu,
|
|||
|
||||
target_max_spare_cap = capacity_orig - new_util;
|
||||
target_capacity = capacity_orig;
|
||||
target_util = new_util;
|
||||
target_cpu = i;
|
||||
}
|
||||
|
||||
|
@ -7734,6 +7853,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
|
|||
.src_cpu = prev_cpu,
|
||||
.dst_cpu = target_cpu,
|
||||
.task = p,
|
||||
.trg_cpu = target_cpu,
|
||||
};
|
||||
|
||||
|
||||
|
@ -7752,7 +7872,9 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
|
|||
/* No energy saving for target_cpu, try backup */
|
||||
target_cpu = tmp_backup;
|
||||
eenv.dst_cpu = target_cpu;
|
||||
if (tmp_backup < 0 || energy_diff(&eenv) >= 0) {
|
||||
if (tmp_backup < 0 ||
|
||||
tmp_backup == prev_cpu ||
|
||||
energy_diff(&eenv) >= 0) {
|
||||
schedstat_inc(p, se.statistics.nr_wakeups_secb_no_nrg_sav);
|
||||
schedstat_inc(this_rq(), eas_stats.secb_no_nrg_sav);
|
||||
target_cpu = prev_cpu;
|
||||
|
@ -7787,7 +7909,8 @@ unlock:
|
|||
* preempt must be disabled.
|
||||
*/
|
||||
static int
|
||||
select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
|
||||
select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags,
|
||||
int sibling_count_hint)
|
||||
{
|
||||
struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
|
||||
int cpu = smp_processor_id();
|
||||
|
@ -7799,9 +7922,12 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
|
|||
return select_best_cpu(p, prev_cpu, 0, sync);
|
||||
#endif
|
||||
|
||||
if (sd_flag & SD_BALANCE_WAKE)
|
||||
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
|
||||
&& cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
|
||||
if (sd_flag & SD_BALANCE_WAKE) {
|
||||
record_wakee(p);
|
||||
want_affine = !wake_wide(p, sibling_count_hint) &&
|
||||
!wake_cap(p, cpu, prev_cpu) &&
|
||||
cpumask_test_cpu(cpu, &p->cpus_allowed);
|
||||
}
|
||||
|
||||
if (energy_aware() && !(cpu_rq(prev_cpu)->rd->overutilized))
|
||||
return select_energy_cpu_brute(p, prev_cpu, sync);
|
||||
|
@ -7833,61 +7959,21 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
|
|||
new_cpu = cpu;
|
||||
}
|
||||
|
||||
if (sd && !(sd_flag & SD_BALANCE_FORK)) {
|
||||
/*
|
||||
* We're going to need the task's util for capacity_spare_wake
|
||||
* in find_idlest_group. Sync it up to prev_cpu's
|
||||
* last_update_time.
|
||||
*/
|
||||
sync_entity_load_avg(&p->se);
|
||||
}
|
||||
|
||||
if (!sd) {
|
||||
if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
|
||||
new_cpu = select_idle_sibling(p, prev_cpu, new_cpu);
|
||||
|
||||
} else {
|
||||
int wu = sd_flag & SD_BALANCE_WAKE;
|
||||
int cas_cpu = -1;
|
||||
|
||||
if (wu) {
|
||||
schedstat_inc(p, se.statistics.nr_wakeups_cas_attempts);
|
||||
schedstat_inc(this_rq(), eas_stats.cas_attempts);
|
||||
}
|
||||
|
||||
while (sd) {
|
||||
struct sched_group *group;
|
||||
int weight;
|
||||
|
||||
if (wu)
|
||||
schedstat_inc(sd, eas_stats.cas_attempts);
|
||||
|
||||
if (!(sd->flags & sd_flag)) {
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
group = find_idlest_group(sd, p, cpu, sd_flag);
|
||||
if (!group) {
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
new_cpu = find_idlest_cpu(group, p, cpu);
|
||||
if (new_cpu == -1 || new_cpu == cpu) {
|
||||
/* Now try balancing at a lower domain level of cpu */
|
||||
sd = sd->child;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Now try balancing at a lower domain level of new_cpu */
|
||||
cpu = cas_cpu = new_cpu;
|
||||
weight = sd->span_weight;
|
||||
sd = NULL;
|
||||
for_each_domain(cpu, tmp) {
|
||||
if (weight <= tmp->span_weight)
|
||||
break;
|
||||
if (tmp->flags & sd_flag)
|
||||
sd = tmp;
|
||||
}
|
||||
/* while loop will break here if sd == NULL */
|
||||
}
|
||||
|
||||
if (wu && (cas_cpu >= 0)) {
|
||||
schedstat_inc(p, se.statistics.nr_wakeups_cas_count);
|
||||
schedstat_inc(this_rq(), eas_stats.cas_count);
|
||||
}
|
||||
new_cpu = find_idlest_cpu(sd, p, cpu, prev_cpu, sd_flag);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
|
@ -10040,8 +10126,11 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
|
|||
if (busiest->group_type == group_imbalanced)
|
||||
goto force_balance;
|
||||
|
||||
/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
|
||||
if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
|
||||
/*
|
||||
* When dst_cpu is idle, prevent SMP nice and/or asymmetric group
|
||||
* capacities from resulting in underutilization due to avg_load.
|
||||
*/
|
||||
if (env->idle != CPU_NOT_IDLE && group_has_capacity(env, local) &&
|
||||
busiest->group_no_capacity)
|
||||
goto force_balance;
|
||||
|
||||
|
@ -10410,6 +10499,7 @@ redo:
|
|||
|
||||
more_balance:
|
||||
raw_spin_lock_irqsave(&busiest->lock, flags);
|
||||
update_rq_clock(busiest);
|
||||
|
||||
/* The world might have changed. Validate assumptions */
|
||||
if (busiest->nr_running <= 1) {
|
||||
|
@ -10867,6 +10957,7 @@ static int active_load_balance_cpu_stop(void *data)
|
|||
if (likely(sd)) {
|
||||
env.sd = sd;
|
||||
schedstat_inc(sd, alb_count);
|
||||
update_rq_clock(busiest_rq);
|
||||
|
||||
p = detach_one_task(&env);
|
||||
if (p) {
|
||||
|
@ -11530,31 +11621,17 @@ static void task_fork_fair(struct task_struct *p)
|
|||
{
|
||||
struct cfs_rq *cfs_rq;
|
||||
struct sched_entity *se = &p->se, *curr;
|
||||
int this_cpu = smp_processor_id();
|
||||
struct rq *rq = this_rq();
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&rq->lock, flags);
|
||||
|
||||
raw_spin_lock(&rq->lock);
|
||||
update_rq_clock(rq);
|
||||
|
||||
cfs_rq = task_cfs_rq(current);
|
||||
curr = cfs_rq->curr;
|
||||
|
||||
/*
|
||||
* Not only the cpu but also the task_group of the parent might have
|
||||
* been changed after parent->se.parent,cfs_rq were copied to
|
||||
* child->se.parent,cfs_rq. So call __set_task_cpu() to make those
|
||||
* of child point to valid ones.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
__set_task_cpu(p, this_cpu);
|
||||
rcu_read_unlock();
|
||||
|
||||
update_curr(cfs_rq);
|
||||
|
||||
if (curr)
|
||||
if (curr) {
|
||||
update_curr(cfs_rq);
|
||||
se->vruntime = curr->vruntime;
|
||||
}
|
||||
place_entity(cfs_rq, se, 1);
|
||||
|
||||
if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
|
||||
|
@ -11567,8 +11644,7 @@ static void task_fork_fair(struct task_struct *p)
|
|||
}
|
||||
|
||||
se->vruntime -= cfs_rq->min_vruntime;
|
||||
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
raw_spin_unlock(&rq->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -11760,6 +11836,14 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
static void task_set_group_fair(struct task_struct *p)
|
||||
{
|
||||
struct sched_entity *se = &p->se;
|
||||
|
||||
set_task_rq(p, task_cpu(p));
|
||||
se->depth = se->parent ? se->parent->depth + 1 : 0;
|
||||
}
|
||||
|
||||
static void task_move_group_fair(struct task_struct *p)
|
||||
{
|
||||
detach_task_cfs_rq(p);
|
||||
|
@ -11772,6 +11856,19 @@ static void task_move_group_fair(struct task_struct *p)
|
|||
attach_task_cfs_rq(p);
|
||||
}
|
||||
|
||||
static void task_change_group_fair(struct task_struct *p, int type)
|
||||
{
|
||||
switch (type) {
|
||||
case TASK_SET_GROUP:
|
||||
task_set_group_fair(p);
|
||||
break;
|
||||
|
||||
case TASK_MOVE_GROUP:
|
||||
task_move_group_fair(p);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void free_fair_sched_group(struct task_group *tg)
|
||||
{
|
||||
int i;
|
||||
|
@ -12003,7 +12100,7 @@ const struct sched_class fair_sched_class = {
|
|||
.update_curr = update_curr_fair,
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
.task_move_group = task_move_group_fair,
|
||||
.task_change_group = task_change_group_fair,
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
.inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
|
||||
|
|
|
@@ -9,7 +9,8 @@

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags,
		    int sibling_count_hint)
{
	return task_cpu(p); /* IDLE tasks as never migrated */
}
@@ -1479,7 +1479,8 @@ task_may_not_preempt(struct task_struct *task, int cpu)
}

static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags,
		  int sibling_count_hint)
{
	struct task_struct *curr;
	struct rq *rq;
@ -340,7 +340,15 @@ extern void sched_move_task(struct task_struct *tsk);
|
|||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
extern void set_task_rq_fair(struct sched_entity *se,
|
||||
struct cfs_rq *prev, struct cfs_rq *next);
|
||||
#else /* !CONFIG_SMP */
|
||||
static inline void set_task_rq_fair(struct sched_entity *se,
|
||||
struct cfs_rq *prev, struct cfs_rq *next) { }
|
||||
#endif /* CONFIG_SMP */
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
|
||||
extern struct task_group *css_tg(struct cgroup_subsys_state *css);
|
||||
#else /* CONFIG_CGROUP_SCHED */
|
||||
|
@ -804,6 +812,7 @@ struct rq {
|
|||
u64 cur_irqload;
|
||||
u64 avg_irqload;
|
||||
u64 irqload_ts;
|
||||
u64 cum_window_demand;
|
||||
#endif /* CONFIG_SCHED_WALT */
|
||||
|
||||
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
|
||||
|
@ -1750,6 +1759,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
|
||||
p->se.cfs_rq = tg->cfs_rq[cpu];
|
||||
p->se.parent = tg->se[cpu];
|
||||
#endif
|
||||
|
@ -2036,7 +2046,8 @@ struct sched_class {
|
|||
void (*put_prev_task) (struct rq *rq, struct task_struct *p);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
|
||||
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags,
|
||||
int subling_count_hint);
|
||||
void (*migrate_task_rq)(struct task_struct *p);
|
||||
|
||||
void (*task_waking) (struct task_struct *task);
|
||||
|
@ -2069,8 +2080,11 @@ struct sched_class {
|
|||
|
||||
void (*update_curr) (struct rq *rq);
|
||||
|
||||
#define TASK_SET_GROUP 0
|
||||
#define TASK_MOVE_GROUP 1
|
||||
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
void (*task_move_group) (struct task_struct *p);
|
||||
void (*task_change_group)(struct task_struct *p, int type);
|
||||
#endif
|
||||
#ifdef CONFIG_SCHED_HMP
|
||||
void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
|
||||
|
@ -2342,7 +2356,7 @@ static inline unsigned long capacity_orig_of(int cpu)
|
|||
|
||||
extern unsigned int sysctl_sched_use_walt_cpu_util;
|
||||
extern unsigned int walt_ravg_window;
|
||||
extern unsigned int walt_disabled;
|
||||
extern bool walt_disabled;
|
||||
|
||||
/*
|
||||
* cpu_util returns the amount of capacity of a CPU that is used by CFS
|
||||
|
@ -2418,6 +2432,10 @@ static inline bool sched_freq(void)
|
|||
return static_key_false(&__sched_freq);
|
||||
}
|
||||
|
||||
/*
|
||||
* sched_capacity_reqs expects capacity requests to be normalised.
|
||||
* All capacities should sum to the range of 0-1024.
|
||||
*/
|
||||
DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
|
||||
void update_cpu_capacity_request(int cpu, bool request);
|
||||
|
||||
|
@ -2887,6 +2905,17 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
|
|||
static inline void cpufreq_update_this_cpu(struct rq *rq, unsigned int flags) {}
|
||||
#endif /* CONFIG_CPU_FREQ */
|
||||
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
|
||||
static inline bool
|
||||
walt_task_in_cum_window_demand(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
return cpu_of(rq) == task_cpu(p) &&
|
||||
(p->on_rq || p->last_sleep_ts >= rq->window_start);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SCHED_WALT */
|
||||
|
||||
#ifdef arch_scale_freq_capacity
|
||||
#ifndef arch_scale_freq_invariant
|
||||
#define arch_scale_freq_invariant() (true)
|
||||
|
|
|
@@ -11,7 +11,8 @@

#ifdef CONFIG_SMP
static int
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags,
		    int sibling_count_hint)
{
	return task_cpu(p); /* stop tasks as never migrate */
}
@@ -41,25 +41,17 @@ static __read_mostly unsigned int walt_io_is_busy = 0;

unsigned int sysctl_sched_walt_init_task_load_pct = 15;

/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
unsigned int __read_mostly walt_disabled = 0;
/* true -> use PELT based load stats, false -> use window-based load stats */
bool __read_mostly walt_disabled = false;

/* Window size (in ns) */
__read_mostly unsigned int walt_ravg_window = 20000000;

/* Min window size (in ns) = 10ms */
#ifdef CONFIG_HZ_300
/*
 * Tick interval becomes to 3333333 due to
 * rounding error when HZ=300.
 * Window size (in ns). Adjust for the tick size so that the window
 * rollover occurs just before the tick boundary.
 */
#define MIN_SCHED_RAVG_WINDOW (3333333 * 6)
#else
#define MIN_SCHED_RAVG_WINDOW 10000000
#endif

/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000
__read_mostly unsigned int walt_ravg_window =
	(20000000 / TICK_NSEC) * TICK_NSEC;
#define MIN_SCHED_RAVG_WINDOW ((10000000 / TICK_NSEC) * TICK_NSEC)
#define MAX_SCHED_RAVG_WINDOW ((1000000000 / TICK_NSEC) * TICK_NSEC)

static unsigned int sync_cpu;
static ktime_t ktime_last;
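
The window-size rework above rounds the default window and its limits down to a whole number of scheduler ticks. A small standalone sketch of that arithmetic, assuming a 3333333 ns tick (roughly HZ=300); in the kernel TICK_NSEC depends on CONFIG_HZ.

#include <stdio.h>

#define TICK_NSEC_ASSUMED 3333333ULL	/* assumption for this demo only */

static unsigned long long tick_align(unsigned long long window_ns)
{
	/* Same shape as (20000000 / TICK_NSEC) * TICK_NSEC above. */
	return (window_ns / TICK_NSEC_ASSUMED) * TICK_NSEC_ASSUMED;
}

int main(void)
{
	printf("20ms window aligns to %llu ns\n", tick_align(20000000ULL));
	printf("10ms minimum aligns to %llu ns\n", tick_align(10000000ULL));
	return 0;
}
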
@ -70,11 +62,28 @@ static unsigned int task_load(struct task_struct *p)
|
|||
return p->ravg.demand;
|
||||
}
|
||||
|
||||
static inline void fixup_cum_window_demand(struct rq *rq, s64 delta)
|
||||
{
|
||||
rq->cum_window_demand += delta;
|
||||
if (unlikely((s64)rq->cum_window_demand < 0))
|
||||
rq->cum_window_demand = 0;
|
||||
}
|
||||
|
||||
void
|
||||
walt_inc_cumulative_runnable_avg(struct rq *rq,
|
||||
struct task_struct *p)
|
||||
{
|
||||
rq->cumulative_runnable_avg += p->ravg.demand;
|
||||
|
||||
/*
|
||||
* Add a task's contribution to the cumulative window demand when
|
||||
*
|
||||
* (1) task is enqueued with on_rq = 1 i.e migration,
|
||||
* prio/cgroup/class change.
|
||||
* (2) task is waking for the first time in this window.
|
||||
*/
|
||||
if (p->on_rq || (p->last_sleep_ts < rq->window_start))
|
||||
fixup_cum_window_demand(rq, p->ravg.demand);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -83,6 +92,14 @@ walt_dec_cumulative_runnable_avg(struct rq *rq,
|
|||
{
|
||||
rq->cumulative_runnable_avg -= p->ravg.demand;
|
||||
BUG_ON((s64)rq->cumulative_runnable_avg < 0);
|
||||
|
||||
/*
|
||||
* on_rq will be 1 for sleeping tasks. So check if the task
|
||||
* is migrating or dequeuing in RUNNING state to change the
|
||||
* prio/cgroup/class.
|
||||
*/
|
||||
if (task_on_rq_migrating(p) || p->state == TASK_RUNNING)
|
||||
fixup_cum_window_demand(rq, -(s64)p->ravg.demand);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -95,6 +112,8 @@ fixup_cumulative_runnable_avg(struct rq *rq,
|
|||
if ((s64)rq->cumulative_runnable_avg < 0)
|
||||
panic("cra less than zero: tld: %lld, task_load(p) = %u\n",
|
||||
task_load_delta, task_load(p));
|
||||
|
||||
fixup_cum_window_demand(rq, task_load_delta);
|
||||
}
|
||||
|
||||
u64 walt_ktime_clock(void)
|
||||
|
@ -153,10 +172,28 @@ static int exiting_task(struct task_struct *p)
|
|||
|
||||
static int __init set_walt_ravg_window(char *str)
|
||||
{
|
||||
unsigned int adj_window;
|
||||
bool no_walt = walt_disabled;
|
||||
|
||||
get_option(&str, &walt_ravg_window);
|
||||
|
||||
walt_disabled = (walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
|
||||
walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
|
||||
/* Adjust for CONFIG_HZ */
|
||||
adj_window = (walt_ravg_window / TICK_NSEC) * TICK_NSEC;
|
||||
|
||||
/* Warn if we're a bit too far away from the expected window size */
|
||||
WARN(adj_window < walt_ravg_window - NSEC_PER_MSEC,
|
||||
"tick-adjusted window size %u, original was %u\n", adj_window,
|
||||
walt_ravg_window);
|
||||
|
||||
walt_ravg_window = adj_window;
|
||||
|
||||
walt_disabled = walt_disabled ||
|
||||
(walt_ravg_window < MIN_SCHED_RAVG_WINDOW ||
|
||||
walt_ravg_window > MAX_SCHED_RAVG_WINDOW);
|
||||
|
||||
WARN(!no_walt && walt_disabled,
|
||||
"invalid window size, disabling WALT\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -180,6 +217,8 @@ update_window_start(struct rq *rq, u64 wallclock)
|
|||
|
||||
nr_windows = div64_u64(delta, walt_ravg_window);
|
||||
rq->window_start += (u64)nr_windows * (u64)walt_ravg_window;
|
||||
|
||||
rq->cum_window_demand = rq->cumulative_runnable_avg;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -568,10 +607,20 @@ static void update_history(struct rq *rq, struct task_struct *p,
|
|||
* A throttled deadline sched class task gets dequeued without
|
||||
* changing p->on_rq. Since the dequeue decrements hmp stats
|
||||
* avoid decrementing it here again.
|
||||
*
|
||||
* When window is rolled over, the cumulative window demand
|
||||
* is reset to the cumulative runnable average (contribution from
|
||||
* the tasks on the runqueue). If the current task is dequeued
|
||||
* already, it's demand is not included in the cumulative runnable
|
||||
* average. So add the task demand separately to cumulative window
|
||||
* demand.
|
||||
*/
|
||||
if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
|
||||
!p->dl.dl_throttled))
|
||||
fixup_cumulative_runnable_avg(rq, p, demand);
|
||||
if (!task_has_dl_policy(p) || !p->dl.dl_throttled) {
|
||||
if (task_on_rq_queued(p))
|
||||
fixup_cumulative_runnable_avg(rq, p, demand);
|
||||
else if (rq->curr == p)
|
||||
fixup_cum_window_demand(rq, demand);
|
||||
}
|
||||
|
||||
p->ravg.demand = demand;
|
||||
|
||||
|
@ -792,6 +841,17 @@ void walt_fixup_busy_time(struct task_struct *p, int new_cpu)
|
|||
|
||||
walt_update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
|
||||
|
||||
/*
|
||||
* When a task is migrating during the wakeup, adjust
|
||||
* the task's contribution towards cumulative window
|
||||
* demand.
|
||||
*/
|
||||
if (p->state == TASK_WAKING &&
|
||||
p->last_sleep_ts >= src_rq->window_start) {
|
||||
fixup_cum_window_demand(src_rq, -(s64)p->ravg.demand);
|
||||
fixup_cum_window_demand(dest_rq, p->ravg.demand);
|
||||
}
|
||||
|
||||
if (p->ravg.curr_window) {
|
||||
src_rq->curr_runnable_sum -= p->ravg.curr_window;
|
||||
dest_rq->curr_runnable_sum += p->ravg.curr_window;
|
||||
|
|
|
@@ -59,6 +59,6 @@ static inline u64 walt_ktime_clock(void) { return 0; }

#endif /* CONFIG_SCHED_WALT */

extern unsigned int walt_disabled;
extern bool walt_disabled;

#endif
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key,
	down_read(&key->sem);
	ukp = user_key_payload(key);

	if (!ukp) {
		/* key was revoked before we acquired its semaphore */
		err = -EKEYREVOKED;
		goto err1;
	}

	if (ukp->datalen < sizeof(*pkh))
		goto err1;
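
Several hunks in this series add the same defensive pattern: after taking the key's semaphore, the payload pointer is re-checked because a concurrent revoke may already have cleared it. A generic standalone sketch of that pattern using pthreads follows; the names are illustrative and this is not the keyring API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for an object whose payload can be revoked. */
struct guarded_obj {
	pthread_mutex_t lock;
	const char *payload;	/* NULL once revoked */
};

static int use_payload(struct guarded_obj *obj)
{
	int ret = 0;

	pthread_mutex_lock(&obj->lock);
	if (!obj->payload) {
		/* revoked before we acquired the lock */
		ret = -1;
		goto out;
	}
	printf("payload: %s\n", obj->payload);
out:
	pthread_mutex_unlock(&obj->lock);
	return ret;
}

int main(void)
{
	struct guarded_obj obj = { PTHREAD_MUTEX_INITIALIZER, "data" };

	use_payload(&obj);		/* prints the payload */
	obj.payload = NULL;		/* simulate a revoke */
	if (use_payload(&obj) < 0)
		printf("payload gone, bailing out\n");
	return 0;
}
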
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data)
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
	if (key_is_instantiated(key)) {
	if (key_is_positive(key)) {
		int err = PTR_ERR(key->payload.data[dns_key_error]);

		if (err)
@@ -138,7 +138,7 @@ void big_key_revoke(struct key *key)

	/* clear the quota */
	key_payload_reserve(key, 0);
	if (key_is_instantiated(key) &&
	if (key_is_positive(key) &&
	    (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD)
		vfs_truncate(path, 0);
}

@@ -170,7 +170,7 @@ void big_key_describe(const struct key *key, struct seq_file *m)

	seq_puts(m, key->description);

	if (key_is_instantiated(key))
	if (key_is_positive(key))
		seq_printf(m, ": %zu [%s]",
			   datalen,
			   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
@@ -315,6 +315,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k

	down_read(&ukey->sem);
	upayload = user_key_payload(ukey);
	if (!upayload) {
		/* key was revoked before we acquired its semaphore */
		up_read(&ukey->sem);
		key_put(ukey);
		ukey = ERR_PTR(-EKEYREVOKED);
		goto error;
	}
	*master_key = upayload->data;
	*master_keylen = upayload->datalen;
error:

@@ -845,7 +852,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
	size_t datalen = prep->datalen;
	int ret = 0;

	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
	if (key_is_negative(key))
		return -ENOKEY;
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		return -EINVAL;
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
	while (!list_empty(keys)) {
		struct key *key =
			list_entry(keys->next, struct key, graveyard_link);
		short state = key->state;

		list_del(&key->graveyard_link);

		kdebug("- %u", key->serial);
		key_check(key);

		/* Throw away the key data if the key is instantiated */
		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
		    key->type->destroy)
		if (state == KEY_IS_POSITIVE && key->type->destroy)
			key->type->destroy(key);

		security_key_free(key);

@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
		}

		atomic_dec(&key->user->nkeys);
		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		if (state != KEY_IS_UNINSTANTIATED)
			atomic_dec(&key->user->nikeys);

		key_user_put(key->user);
@@ -395,6 +395,18 @@ int key_payload_reserve(struct key *key, size_t datalen)
}
EXPORT_SYMBOL(key_payload_reserve);

/*
 * Change the key state to being instantiated.
 */
static void mark_key_instantiated(struct key *key, int reject_error)
{
	/* Commit the payload before setting the state; barrier versus
	 * key_read_state().
	 */
	smp_store_release(&key->state,
			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}

/*
 * Instantiate a key and link it into the target keyring atomically. Must be
 * called with the target keyring's semaphore writelocked. The target key's
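
The new helper above publishes the key payload and then release-stores the state, so a reader that observes a positive state also observes the payload. A minimal userspace sketch of that release/acquire pairing with C11 atomics; struct fake_key and the constants are illustrative, not the kernel types.

#include <stdatomic.h>
#include <stdio.h>

#define STATE_UNINSTANTIATED	0
#define STATE_POSITIVE		1	/* negative values stand in for a rejection error */

struct fake_key {
	const char *payload;
	_Atomic short state;
};

static void mark_instantiated(struct fake_key *k, const char *payload,
			      int reject_error)
{
	k->payload = payload;
	/* Release: the payload write is committed before the state is visible. */
	atomic_store_explicit(&k->state,
			      reject_error < 0 ? reject_error : STATE_POSITIVE,
			      memory_order_release);
}

static short read_state(struct fake_key *k)
{
	/* Acquire: pairs with the release store above. */
	return atomic_load_explicit(&k->state, memory_order_acquire);
}

int main(void)
{
	struct fake_key k = { .payload = NULL, .state = STATE_UNINSTANTIATED };

	mark_instantiated(&k, "secret", 0);
	if (read_state(&k) == STATE_POSITIVE)
		printf("payload: %s\n", k.payload);
	return 0;
}
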
@ -418,14 +430,14 @@ static int __key_instantiate_and_link(struct key *key,
|
|||
mutex_lock(&key_construction_mutex);
|
||||
|
||||
/* can't instantiate twice */
|
||||
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
|
||||
if (key->state == KEY_IS_UNINSTANTIATED) {
|
||||
/* instantiate the key */
|
||||
ret = key->type->instantiate(key, prep);
|
||||
|
||||
if (ret == 0) {
|
||||
/* mark the key as being instantiated */
|
||||
atomic_inc(&key->user->nikeys);
|
||||
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
|
||||
mark_key_instantiated(key, 0);
|
||||
|
||||
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
|
||||
awaken = 1;
|
||||
|
@ -553,13 +565,10 @@ int key_reject_and_link(struct key *key,
|
|||
mutex_lock(&key_construction_mutex);
|
||||
|
||||
/* can't instantiate twice */
|
||||
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
|
||||
if (key->state == KEY_IS_UNINSTANTIATED) {
|
||||
/* mark the key as being negatively instantiated */
|
||||
atomic_inc(&key->user->nikeys);
|
||||
key->reject_error = -error;
|
||||
smp_wmb();
|
||||
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
|
||||
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
|
||||
mark_key_instantiated(key, -error);
|
||||
now = current_kernel_time();
|
||||
key->expiry = now.tv_sec + timeout;
|
||||
key_schedule_gc(key->expiry + key_gc_delay);
|
||||
|
@ -731,8 +740,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
|
|||
|
||||
ret = key->type->update(key, prep);
|
||||
if (ret == 0)
|
||||
/* updating a negative key instantiates it */
|
||||
clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
|
||||
/* Updating a negative key positively instantiates it */
|
||||
mark_key_instantiated(key, 0);
|
||||
|
||||
up_write(&key->sem);
|
||||
|
||||
|
@ -907,6 +916,16 @@ error:
|
|||
*/
|
||||
__key_link_end(keyring, &index_key, edit);
|
||||
|
||||
key = key_ref_to_ptr(key_ref);
|
||||
if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
|
||||
ret = wait_for_key_construction(key, true);
|
||||
if (ret < 0) {
|
||||
key_ref_put(key_ref);
|
||||
key_ref = ERR_PTR(ret);
|
||||
goto error_free_prep;
|
||||
}
|
||||
}
|
||||
|
||||
key_ref = __key_update(key_ref, &prep);
|
||||
goto error_free_prep;
|
||||
}
|
||||
|
@ -957,8 +976,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen)
|
|||
|
||||
ret = key->type->update(key, &prep);
|
||||
if (ret == 0)
|
||||
/* updating a negative key instantiates it */
|
||||
clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
|
||||
/* Updating a negative key positively instantiates it */
|
||||
mark_key_instantiated(key, 0);
|
||||
|
||||
up_write(&key->sem);
|
||||
|
||||
|
|
|

@@ -738,10 +738,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)

key = key_ref_to_ptr(key_ref);

if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
ret = -ENOKEY;
goto error2;
}
ret = key_read_state(key);
if (ret < 0)
goto error2; /* Negatively instantiated */

/* see if we can read it directly */
ret = key_permission(key_ref, KEY_NEED_READ);

@@ -873,7 +872,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
atomic_dec(&key->user->nkeys);
atomic_inc(&newowner->nkeys);

if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
if (key->state != KEY_IS_UNINSTANTIATED) {
atomic_dec(&key->user->nikeys);
atomic_inc(&newowner->nikeys);
}

@@ -407,7 +407,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
else
seq_puts(m, "[anon]");

if (key_is_instantiated(keyring)) {
if (key_is_positive(keyring)) {
if (keyring->keys.nr_leaves_on_tree != 0)
seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else

@@ -522,7 +522,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
{
struct keyring_search_context *ctx = iterator_data;
const struct key *key = keyring_ptr_to_key(object);
unsigned long kflags = key->flags;
unsigned long kflags = READ_ONCE(key->flags);
short state = READ_ONCE(key->state);

kenter("{%d}", key->serial);

@@ -566,9 +567,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)

if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
/* we set a different error code if we pass a negative key */
if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
smp_rmb();
ctx->result = ERR_PTR(key->reject_error);
if (state < 0) {
ctx->result = ERR_PTR(state);
kleave(" = %d [neg]", ctx->skipped_ret);
goto skipped;
}

@@ -182,6 +182,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
unsigned long timo;
key_ref_t key_ref, skey_ref;
char xbuf[16];
short state;
int rc;

struct keyring_search_context ctx = {

@@ -240,17 +241,19 @@ static int proc_keys_show(struct seq_file *m, void *v)
sprintf(xbuf, "%luw", timo / (60*60*24*7));
}

state = key_read_state(key);

#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')

seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
showflag(key, 'I', KEY_FLAG_INSTANTIATED),
state != KEY_IS_UNINSTANTIATED ? 'I' : '-',
showflag(key, 'R', KEY_FLAG_REVOKED),
showflag(key, 'D', KEY_FLAG_DEAD),
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
showflag(key, 'N', KEY_FLAG_NEGATIVE),
state < 0 ? 'N' : '-',
showflag(key, 'i', KEY_FLAG_INVALIDATED),
atomic_read(&key->usage),
xbuf,

@@ -727,7 +727,7 @@ try_again:

ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
key_read_state(key) == KEY_IS_UNINSTANTIATED)
goto invalid_key;

/* check the permissions */

@@ -594,10 +594,9 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret)
return -ERESTARTSYS;
if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
smp_rmb();
return key->reject_error;
}
ret = key_read_state(key);
if (ret < 0)
return ret;
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);

@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key,

seq_puts(m, "key:");
seq_puts(m, key->description);
if (key_is_instantiated(key))
if (key_is_positive(key))
seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}

@@ -1014,7 +1014,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
char *datablob;
int ret = 0;

if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
if (key_is_negative(key))
return -ENOKEY;
p = key->payload.data[0];
if (!p->migratable)

@@ -120,7 +120,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep)

if (ret == 0) {
/* attach the new data, displacing the old */
if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags))
if (key_is_positive(key))
zap = key->payload.data[0];
else
zap = NULL;

@@ -174,7 +174,7 @@ EXPORT_SYMBOL_GPL(user_destroy);
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
if (key_is_instantiated(key))
if (key_is_positive(key))
seq_printf(m, ": %u", key->datalen);
}

@@ -23,8 +23,6 @@
#include <sound/core.h>
#include "seq_lock.h"

#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)

/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{

@@ -42,5 +40,3 @@ void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
}

EXPORT_SYMBOL(snd_use_lock_sync_helper);

#endif

@@ -3,8 +3,6 @@

#include <linux/sched.h>

#if defined(CONFIG_SMP) || defined(CONFIG_SND_DEBUG)

typedef atomic_t snd_use_lock_t;

/* initialize lock */

@@ -20,14 +18,4 @@ typedef atomic_t snd_use_lock_t;
void snd_use_lock_sync_helper(snd_use_lock_t *lock, const char *file, int line);
#define snd_use_lock_sync(lockp) snd_use_lock_sync_helper(lockp, __BASE_FILE__, __LINE__)

#else /* SMP || CONFIG_SND_DEBUG */

typedef spinlock_t snd_use_lock_t; /* dummy */
#define snd_use_lock_init(lockp) /**/
#define snd_use_lock_use(lockp) /**/
#define snd_use_lock_free(lockp) /**/
#define snd_use_lock_sync(lockp) /**/

#endif /* SMP || CONFIG_SND_DEBUG */

#endif /* __SND_SEQ_LOCK_H */

@@ -1755,7 +1755,7 @@ static int get_kctl_0dB_offset(struct hda_codec *codec,
return -1;
if (*step_to_check && *step_to_check != step) {
codec_err(codec, "Mismatching dB step for vmaster slave (%d!=%d)\n",
- *step_to_check, step);
*step_to_check, step);
return -1;
}
*step_to_check = step;

@@ -1305,6 +1305,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
case USB_ID(0x2772, 0x0230): /* Pro-Ject Pre Box S2 Digital */
if (fp->altsetting == 2)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;