From 1cb87c38cb81847938bbb4776d9c6f3afe8fd938 Mon Sep 17 00:00:00 2001 From: Ke Wang Date: Wed, 1 Nov 2017 14:11:06 +0800 Subject: [PATCH 01/50] sched: EAS: Fix the condition to distinguish energy before/after Before commit 5f8b3a757d65 ("sched/fair: consider task utilization in group_norm_util()"), eenv->util_delta was used to distinguish energy before from energy after in sched_group_energy(). After that commit, eenv->util_delta can no longer do that. In this commit, use trg_cpu to distinguish energy before/after in sched_group_energy(). Before applying this commit, cap_before/cap_delta are not correct: -0 [001] 147504.608920: sched_energy_diff: pid=7 comm=rcu_preempt src_cpu=1 dst_cpu=3 usage_delta=7 nrg_before=250 nrg_after=250 nrg_diff=0 cap_before=0 cap_after=528 cap_delta=1056 nrg_delta=0 nrg_payoff=0 After applying this commit, cap_before/cap_delta return to normal: -0 [001] 220.494011: sched_energy_diff: pid=7 comm=rcu_preempt src_cpu=1 dst_cpu=2 usage_delta=3 nrg_before=248 nrg_after=248 nrg_diff=0 cap_before=528 cap_after=528 cap_delta=0 nrg_delta=0 nrg_payoff=0 Signed-off-by: Ke Wang --- kernel/sched/fair.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ac22d32a6255..06b814b58d20 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5534,13 +5534,13 @@ static int sched_group_energy(struct energy_env *eenv) if (sg->group_weight == 1) { /* Remove capacity of src CPU (before task move) */ - if (eenv->util_delta == 0 && + if (eenv->trg_cpu == eenv->src_cpu && cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) { eenv->cap.before = sg->sge->cap_states[cap_idx].cap; eenv->cap.delta -= eenv->cap.before; } /* Add capacity of dst CPU (after task move) */ - if (eenv->util_delta != 0 && + if (eenv->trg_cpu == eenv->dst_cpu && cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) { eenv->cap.after = sg->sge->cap_states[cap_idx].cap; eenv->cap.delta += eenv->cap.after; From 7d5a251c66be3516c14cffa80e6b076b37736971 Mon Sep 17 00:00:00 2001 From: Ke Wang Date: Mon, 30 Oct 2017 17:38:16 +0800 Subject: [PATCH 02/50] sched: EAS: update trg_cpu to backup_cpu if no energy saving for target_cpu If no energy saving is found for target_cpu in the calculation of energy_diff(), backup_cpu will be set as the new dst_cpu for the next calculation. At this point, we also need to update trg_cpu to backup_cpu to make sure the subsequent calculation of energy_diff() is correct. Signed-off-by: Ke Wang --- kernel/sched/fair.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 06b814b58d20..3b429c5ce721 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6723,6 +6723,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync /* No energy saving for target_cpu, try backup */ target_cpu = tmp_backup; eenv.dst_cpu = target_cpu; + eenv.trg_cpu = target_cpu; if (tmp_backup < 0 || tmp_backup == prev_cpu || energy_diff(&eenv) >= 0) { From 47c87b2654376e7dda646ca5a2af067c5d368ca7 Mon Sep 17 00:00:00 2001 From: Ke Wang Date: Wed, 1 Nov 2017 16:07:38 +0800 Subject: [PATCH 03/50] sched: EAS: Fix the calculation of group util in group_idle_state() util_delta is no longer zero in eenv_before, which affects the calculation of grp_util in group_idle_state(). Fix the calculation under the new condition. 
Change-Id: Ic3853bb45876a8e388afcbe4e72d25fc42b1d7b0 Signed-off-by: Ke Wang --- kernel/sched/fair.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3b429c5ce721..5c65f3ad6da1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5429,13 +5429,6 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg) /* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */ state++; - /* - * Try to estimate if a deeper idle state is - * achievable when we move the task. - */ - for_each_cpu(i, sched_group_cpus(sg)) - grp_util += cpu_util(i); - src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg)); dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg)); if (src_in_grp == dst_in_grp) { @@ -5444,10 +5437,16 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg) */ goto end; } - /* add or remove util as appropriate to indicate what group util * will be (worst case - no concurrent execution) after moving the task + + /* + * Try to estimate if a deeper idle state is + * achievable when we move the task. */ - grp_util += src_in_grp ? -eenv->util_delta : eenv->util_delta; + for_each_cpu(i, sched_group_cpus(sg)) { + grp_util += cpu_util_wake(i, eenv->task); + if (unlikely(i == eenv->trg_cpu)) + grp_util += eenv->util_delta; + } if (grp_util <= ((long)sg->sgc->max_capacity * (int)sg->group_weight)) { From 05aa89aa8402ae319108731770a6c7d23f88c431 Mon Sep 17 00:00:00 2001 From: Evgenii Stepanov Date: Wed, 1 Nov 2017 15:04:42 -0700 Subject: [PATCH 04/50] ANDROID: Revert "arm: move ELF_ET_DYN_BASE to 4MB" This ARM mmap change breaks AddressSanitizer: Shadow memory range interleaves with an existing memory mapping. ASan cannot proceed correctly. ABORTING. Revert it until the ASAN runtime library is updated to handle it. Bug: 67425063 This reverts commit d2471b5e84f32de4e09b58f5436a4ce3ee935e32. --- arch/arm/include/asm/elf.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index f13ae153fb24..d2315ffd8f12 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,8 +112,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the base location for PIE (ET_DYN with INTERP) loads. */ -#define ELF_ET_DYN_BASE 0x400000UL +/* This is the location that an ET_DYN program is loaded if exec'ed. Typical use of this is to invoke "./ld.so someprog" to test out a new version of the loader. We need to make sure that it is out of the way of the program that it will "exec", and that there is sufficient room for the brk. */ + +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. A value of 0 means we From e080c59bfb7cf2e0498883d352c9053e5f2bf960 Mon Sep 17 00:00:00 2001 From: Evgenii Stepanov Date: Wed, 1 Nov 2017 15:10:12 -0700 Subject: [PATCH 05/50] ANDROID: Revert "arm64: move ELF_ET_DYN_BASE to 4GB / 4MB" Part of the above change was reverted in 240628085effc47e86f51fc3fb37bc0e628f9a85; this change reverts the rest. This ARM mmap change breaks AddressSanitizer: Shadow memory range interleaves with an existing memory mapping. ASan cannot proceed correctly. ABORTING. Revert it until the ASAN runtime library is updated to handle it. 
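For a feel of the addresses these two reverts restore, a minimal sketch follows. The 3G/1G user/kernel split assumed below is one common ARM configuration; the real TASK_SIZE (and TASK_SIZE_32 in the arm64 compat case) is configuration-dependent, so the exact base varies:

    #include <stdio.h>

    /* A back-of-the-envelope sketch only. TASK_SIZE here is an assumption
     * (the common ARM 3G/1G split); the real value depends on the kernel
     * configuration. */
    #define TASK_SIZE 0xC0000000UL

    int main(void)
    {
            unsigned long restored = TASK_SIZE / 3 * 2;  /* base the reverts restore */
            unsigned long reverted = 0x400000UL;         /* fixed base being reverted */

            printf("restored: %#lx (%lu MiB)\n", restored, restored >> 20);
            printf("reverted: %#lx (%lu MiB)\n", reverted, reverted >> 20);
            /* prints: restored: 0x80000000 (2048 MiB)
             *         reverted: 0x400000 (4 MiB)       */
            return 0;
    }

The point of the (now reverted) low fixed base was to keep PIE binaries out of the way of mmap-heavy sanitizers' shadow regions; the TASK_SIZE-based value puts them back near the top of the address space, which is the layout the current ASAN runtime expects.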
Bug: 67425063 --- arch/arm64/include/asm/elf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index cd3dfa346649..f9d64ed4fd2b 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -169,7 +169,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT /* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ -#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL +#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) /* AArch32 registers. */ #define COMPAT_ELF_NGREG 18 From cd04e987d1da0eadc25c2186bd6bd93f22c3e851 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Mon, 11 Sep 2017 17:05:49 -0700 Subject: [PATCH 06/50] ANDROID: sched/rt: add schedtune accounting This patch adds schedtune enqueue/dequeue accounting to the RT scheduling class. Change-Id: If416e64319d62191f3aedd675d3e9a21fe2102fb Signed-off-by: Joel Fernandes --- kernel/sched/rt.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 069f8982867f..88f28e996249 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -9,6 +9,7 @@ #include #include "walt.h" +#include "tune.h" int sched_rr_timeslice = RR_TIMESLICE; @@ -1321,6 +1322,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); + + schedtune_enqueue_task(p, cpu_of(rq)); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) @@ -1332,6 +1335,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) walt_dec_cumulative_runnable_avg(rq, p); dequeue_pushable_task(rq, p); + schedtune_dequeue_task(p, cpu_of(rq)); } /* From d194ba5d712f051ff6c025f3484bb72f219764e3 Mon Sep 17 00:00:00 2001 From: Joel Fernandes Date: Mon, 11 Sep 2017 17:10:37 -0700 Subject: [PATCH 07/50] ANDROID: sched/rt: schedtune: Add boost retention to RT Boosted RT tasks can be deboosted quickly, which makes boost useless for RT tasks and causes a lot of glitching. Use timers to prevent deboosting too soon, waiting long enough that the next enqueue happens after a threshold. While this could be solved in the governor, this approach has the following advantages (see the sketch after this list): - The approach used is governor-independent - Reduces boost group lock contention for frequent sleepers/wakers - Works with schedfreq without any other schedfreq hacks. 
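As referenced above, the retention policy itself is easy to state; here is a minimal userspace sketch of it (an illustration only: RETENTION_NS mirrors RT_SCHEDTUNE_INTERVAL from the diff that follows, but rq locking, task refcounting and the hrtimer are replaced with plain timestamps):

    #include <stdbool.h>
    #include <stdio.h>

    #define RETENTION_NS 50000000ULL        /* mirrors RT_SCHEDTUNE_INTERVAL */

    struct task_sim {
            bool boosted;
            unsigned long long deboost_at;  /* 0 = no deboost pending */
    };

    static void on_enqueue(struct task_sim *t, unsigned long long now)
    {
            t->deboost_at = 0;              /* woke in time: cancel the deboost */
            if (!t->boosted) {
                    t->boosted = true;
                    printf("%llu: boost\n", now);
            }
    }

    static void on_dequeue_sleep(struct task_sim *t, unsigned long long now)
    {
            if (t->boosted)
                    t->deboost_at = now + RETENTION_NS;  /* defer, don't drop */
    }

    static void on_timer(struct task_sim *t, unsigned long long now)
    {
            if (t->deboost_at && now >= t->deboost_at) {
                    t->boosted = false;
                    t->deboost_at = 0;
                    printf("%llu: deboost\n", now);
            }
    }

    int main(void)
    {
            struct task_sim t = { false, 0 };

            on_enqueue(&t, 0);              /* becomes runnable: boost */
            on_dequeue_sleep(&t, 10000000); /* sleeps: deboost armed for t=60ms */
            on_enqueue(&t, 30000000);       /* wakes inside the window: stays boosted */
            on_dequeue_sleep(&t, 40000000); /* sleeps again: armed for t=90ms */
            on_timer(&t, 95000000);         /* asleep past the threshold: deboost */
            return 0;
    }

A task that sleeps and wakes faster than the retention interval thus keeps its boost across the whole burst, which is exactly the frequent sleeper/waker case called out in the list above.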
Bug: 30210506 Change-Id: I41788b235586988be446505deb7c0529758a9898 Signed-off-by: Joel Fernandes --- include/linux/sched.h | 4 ++ kernel/sched/core.c | 1 + kernel/sched/rt.c | 154 ++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 1 + 4 files changed, 160 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index 60af9d2fa922..6197d9c8e5a0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1433,6 +1433,10 @@ struct sched_rt_entity { unsigned long watchdog_stamp; unsigned int time_slice; + /* Accesses for these must be guarded by rq->lock of the task's rq */ + bool schedtune_enqueued; + struct hrtimer schedtune_timer; + struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED struct sched_rt_entity *parent; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0c9e332ceb3b..3030633d8900 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2200,6 +2200,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) init_dl_task_timer(&p->dl); __dl_clear_params(p); + init_rt_schedtune_timer(&p->rt); INIT_LIST_HEAD(&p->rt.run_list); #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 88f28e996249..f41435e7f75d 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -7,6 +7,7 @@ #include #include +#include #include "walt.h" #include "tune.h" @@ -986,6 +987,73 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) return 0; } +#define RT_SCHEDTUNE_INTERVAL 50000000ULL + +static void sched_rt_update_capacity_req(struct rq *rq); + +static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer) +{ + struct sched_rt_entity *rt_se = container_of(timer, + struct sched_rt_entity, + schedtune_timer); + struct task_struct *p = rt_task_of(rt_se); + struct rq *rq = task_rq(p); + + raw_spin_lock(&rq->lock); + + /* + * Nothing to do if: + * - task has switched runqueues + * - task isn't RT anymore + */ + if (rq != task_rq(p) || (p->sched_class != &rt_sched_class)) + goto out; + + /* + * If task got enqueued back during callback time, it means we raced + * with the enqueue on another cpu, that's Ok, just do nothing as + * enqueue path would have tried to cancel us and we shouldn't run + * Also check the schedtune_enqueued flag as class-switch on a + * sleeping task may have already canceled the timer and done dq + */ + if (p->on_rq || !rt_se->schedtune_enqueued) + goto out; + + /* + * RT task is no longer active, cancel boost + */ + rt_se->schedtune_enqueued = false; + schedtune_dequeue_task(p, cpu_of(rq)); + sched_rt_update_capacity_req(rq); + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); +out: + raw_spin_unlock(&rq->lock); + + /* + * This can free the task_struct if no more references. + */ + put_task_struct(p); + + return HRTIMER_NORESTART; +} + +void init_rt_schedtune_timer(struct sched_rt_entity *rt_se) +{ + struct hrtimer *timer = &rt_se->schedtune_timer; + + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer->function = rt_schedtune_timer; + rt_se->schedtune_enqueued = false; +} + +static void start_schedtune_timer(struct sched_rt_entity *rt_se) +{ + struct hrtimer *timer = &rt_se->schedtune_timer; + + hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL), + HRTIMER_MODE_REL_PINNED); +} + /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. 
@@ -1323,7 +1391,33 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); + if (!schedtune_task_boost(p)) + return; + + /* + * If schedtune timer is active, that means a boost was already + * done, just cancel the timer so that deboost doesn't happen. + * Otherwise, increase the boost. If an enqueued timer was + * cancelled, put the task reference. + */ + if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1) + put_task_struct(p); + + /* + * schedtune_enqueued can be true in the following situation: + * enqueue_task_rt grabs rq lock before timer fires + * or before its callback acquires rq lock + * schedtune_enqueued can be false if timer callback is running + * and timer just released rq lock, or if the timer finished + * running and canceling the boost + */ + if (rt_se->schedtune_enqueued) + return; + + rt_se->schedtune_enqueued = true; schedtune_enqueue_task(p, cpu_of(rq)); + sched_rt_update_capacity_req(rq); + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) @@ -1335,7 +1429,20 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) walt_dec_cumulative_runnable_avg(rq, p); dequeue_pushable_task(rq, p); + + if (!rt_se->schedtune_enqueued) + return; + + if (flags == DEQUEUE_SLEEP) { + get_task_struct(p); + start_schedtune_timer(rt_se); + return; + } + + rt_se->schedtune_enqueued = false; schedtune_dequeue_task(p, cpu_of(rq)); + sched_rt_update_capacity_req(rq); + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); } /* @@ -1375,6 +1482,33 @@ static void yield_task_rt(struct rq *rq) #ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct *task); +/* + * Perform a schedtune dequeue and cancelation of boost timers if needed. + * Should be called only with the rq->lock held. + */ +static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p) +{ + struct sched_rt_entity *rt_se = &p->rt; + + BUG_ON(!raw_spin_is_locked(&rq->lock)); + + if (!rt_se->schedtune_enqueued) + return; + + /* + * Incase of class change cancel any active timers. If an enqueued + * timer was cancelled, put the task ref. + */ + if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1) + put_task_struct(p); + + /* schedtune_enqueued is true, deboost it */ + rt_se->schedtune_enqueued = false; + schedtune_dequeue_task(p, task_cpu(p)); + sched_rt_update_capacity_req(rq); + cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); +} + static int select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags, int sibling_count_hint) @@ -1429,6 +1563,19 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags, rcu_read_unlock(); out: + /* + * If previous CPU was different, make sure to cancel any active + * schedtune timers and deboost. + */ + if (task_cpu(p) != cpu) { + unsigned long fl; + struct rq *prq = task_rq(p); + + raw_spin_lock_irqsave(&prq->lock, fl); + schedtune_dequeue_rt(prq, p); + raw_spin_unlock_irqrestore(&prq->lock, fl); + } + return cpu; } @@ -2205,6 +2352,13 @@ static void rq_offline_rt(struct rq *rq) */ static void switched_from_rt(struct rq *rq, struct task_struct *p) { + /* + * On class switch from rt, always cancel active schedtune timers, + * this handles the cases where we switch class for a task that is + * already rt-dequeued but has a running timer. 
+ */ + schedtune_dequeue_rt(rq, p); + /* * If there are other RT tasks then we will reschedule * and the scheduling of the other RT tasks will handle diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0238e94b0a1e..028e232103c2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1398,6 +1398,7 @@ extern void resched_cpu(int cpu); extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); +extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se); extern struct dl_bandwidth def_dl_bandwidth; extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); From df147c9e336cfcb4183db1eb9552b0429060cd0d Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Thu, 2 Nov 2017 15:13:26 +0530 Subject: [PATCH 08/50] cpufreq: Drop schedfreq governor We all should be using (and improving) the schedutil governor now. Get rid of the non-upstream governor. Tested on Hikey. Change-Id: Ic660756536e5da51952738c3c18b94e31f58cd57 Signed-off-by: Viresh Kumar --- drivers/cpufreq/Kconfig | 13 - include/linux/sched/sysctl.h | 1 - kernel/sched/Makefile | 1 - kernel/sched/core.c | 86 ------ kernel/sched/cpufreq_sched.c | 525 ----------------------------------- kernel/sched/fair.c | 89 +----- kernel/sched/rt.c | 49 +--- kernel/sched/sched.h | 75 ----- kernel/sysctl.c | 7 - 9 files changed, 4 insertions(+), 842 deletions(-) delete mode 100644 kernel/sched/cpufreq_sched.c diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 91b05e2b8799..91e2743f057b 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -224,19 +224,6 @@ config CPU_FREQ_GOV_CONSERVATIVE If in doubt, say N. -config CPU_FREQ_GOV_SCHED - bool "'sched' cpufreq governor" - depends on CPU_FREQ - depends on SMP - select CPU_FREQ_GOV_COMMON - help - 'sched' - this governor scales cpu frequency from the - scheduler as a function of cpu capacity utilization. It does - not evaluate utilization on a periodic basis (as ondemand - does) but instead is event-driven by the scheduler. - - If in doubt, say N. 
- config CPU_FREQ_GOV_SCHEDUTIL bool "'schedutil' cpufreq policy governor" depends on CPU_FREQ && SMP diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 2bf4520d8089..167279a4e24b 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_min_granularity; extern unsigned int sysctl_sched_wakeup_granularity; extern unsigned int sysctl_sched_child_runs_first; extern unsigned int sysctl_sched_sync_hint_enable; -extern unsigned int sysctl_sched_initial_task_util; extern unsigned int sysctl_sched_cstate_aware; #ifdef CONFIG_SCHED_WALT extern unsigned int sysctl_sched_use_walt_cpu_util; diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index ca0d94096170..d7ec4f7dd0d9 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -22,5 +22,4 @@ obj-$(CONFIG_SCHED_DEBUG) += debug.o obj-$(CONFIG_SCHED_TUNE) += tune.o obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o obj-$(CONFIG_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3030633d8900..889fb1aff1e0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2983,91 +2983,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) return ns; } -#ifdef CONFIG_CPU_FREQ_GOV_SCHED - -static inline -unsigned long add_capacity_margin(unsigned long cpu_capacity) -{ - cpu_capacity = cpu_capacity * capacity_margin; - cpu_capacity /= SCHED_CAPACITY_SCALE; - return cpu_capacity; -} - -static inline -unsigned long sum_capacity_reqs(unsigned long cfs_cap, - struct sched_capacity_reqs *scr) -{ - unsigned long total = add_capacity_margin(cfs_cap + scr->rt); - return total += scr->dl; -} - -unsigned long boosted_cpu_util(int cpu); -static void sched_freq_tick_pelt(int cpu) -{ - unsigned long cpu_utilization = boosted_cpu_util(cpu); - unsigned long capacity_curr = capacity_curr_of(cpu); - struct sched_capacity_reqs *scr; - - scr = &per_cpu(cpu_sched_capacity_reqs, cpu); - if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr) - return; - - /* - * To make free room for a task that is building up its "real" - * utilization and to harm its performance the least, request - * a jump to a higher OPP as soon as the margin of free capacity - * is impacted (specified by capacity_margin). - * Remember CPU utilization in sched_capacity_reqs should be normalised. - */ - cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu); - set_cfs_cpu_capacity(cpu, true, cpu_utilization); -} - -#ifdef CONFIG_SCHED_WALT -static void sched_freq_tick_walt(int cpu) -{ - unsigned long cpu_utilization = cpu_util_freq(cpu); - unsigned long capacity_curr = capacity_curr_of(cpu); - - if (walt_disabled || !sysctl_sched_use_walt_cpu_util) - return sched_freq_tick_pelt(cpu); - - /* - * Add a margin to the WALT utilization to check if we will need to - * increase frequency. - * NOTE: WALT tracks a single CPU signal for all the scheduling - * classes, thus this margin is going to be added to the DL class as - * well, which is something we do not do in sched_freq_tick_pelt case. - */ - if (add_capacity_margin(cpu_utilization) <= capacity_curr) - return; - - /* - * It is likely that the load is growing so we - * keep the added margin in our request as an - * extra boost. - * Remember CPU utilization in sched_capacity_reqs should be normalised. 
- */ - cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu); - set_cfs_cpu_capacity(cpu, true, cpu_utilization); - -} -#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu) -#else -#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu) -#endif /* CONFIG_SCHED_WALT */ - -static void sched_freq_tick(int cpu) -{ - if (!sched_freq()) - return; - - _sched_freq_tick(cpu); -} -#else -static inline void sched_freq_tick(int cpu) { } -#endif /* CONFIG_CPU_FREQ_GOV_SCHED */ - /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. @@ -3088,7 +3003,6 @@ void scheduler_tick(void) curr->sched_class->task_tick(rq, curr, 0); update_cpu_load_active(rq); calc_global_load_tick(rq); - sched_freq_tick(cpu); raw_spin_unlock(&rq->lock); perf_event_task_tick(); diff --git a/kernel/sched/cpufreq_sched.c b/kernel/sched/cpufreq_sched.c deleted file mode 100644 index ec0aed7a8f96..000000000000 --- a/kernel/sched/cpufreq_sched.c +++ /dev/null @@ -1,525 +0,0 @@ -/* - * Copyright (C) 2015 Michael Turquette - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include - -#include "sched.h" - -#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */ -#define THROTTLE_UP_NSEC 500000 /* 500us default */ - -struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE; -static bool __read_mostly cpufreq_driver_slow; - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED -static struct cpufreq_governor cpufreq_gov_sched; -#endif - -static DEFINE_PER_CPU(unsigned long, enabled); -DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs); - -struct gov_tunables { - struct gov_attr_set attr_set; - unsigned int up_throttle_nsec; - unsigned int down_throttle_nsec; -}; - -/** - * gov_data - per-policy data internal to the governor - * @up_throttle: next throttling period expiry if increasing OPP - * @down_throttle: next throttling period expiry if decreasing OPP - * @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP - * @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP - * @task: worker thread for dvfs transition that may block/sleep - * @irq_work: callback used to wake up worker thread - * @requested_freq: last frequency requested by the sched governor - * - * struct gov_data is the per-policy cpufreq_sched-specific data structure. A - * per-policy instance of it is created when the cpufreq_sched governor receives - * the CPUFREQ_GOV_START condition and a pointer to it exists in the gov_data - * member of struct cpufreq_policy. - * - * Readers of this data must call down_read(policy->rwsem). Writers must - * call down_write(policy->rwsem). 
- */ -struct gov_data { - ktime_t up_throttle; - ktime_t down_throttle; - struct gov_tunables *tunables; - struct list_head tunables_hook; - struct task_struct *task; - struct irq_work irq_work; - unsigned int requested_freq; -}; - -static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy, - unsigned int freq) -{ - struct gov_data *gd = policy->governor_data; - - /* avoid race with cpufreq_sched_stop */ - if (!down_write_trylock(&policy->rwsem)) - return; - - __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L); - - gd->up_throttle = ktime_add_ns(ktime_get(), - gd->tunables->up_throttle_nsec); - gd->down_throttle = ktime_add_ns(ktime_get(), - gd->tunables->down_throttle_nsec); - up_write(&policy->rwsem); -} - -static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq) -{ - ktime_t now = ktime_get(); - - ktime_t throttle = gd->requested_freq < cur_freq ? - gd->down_throttle : gd->up_throttle; - - if (ktime_after(now, throttle)) - return false; - - while (1) { - int usec_left = ktime_to_ns(ktime_sub(throttle, now)); - - usec_left /= NSEC_PER_USEC; - trace_cpufreq_sched_throttled(usec_left); - usleep_range(usec_left, usec_left + 100); - now = ktime_get(); - if (ktime_after(now, throttle)) - return true; - } -} - -/* - * we pass in struct cpufreq_policy. This is safe because changing out the - * policy requires a call to __cpufreq_governor(policy, CPUFREQ_GOV_STOP), - * which tears down all of the data structures and __cpufreq_governor(policy, - * CPUFREQ_GOV_START) will do a full rebuild, including this kthread with the - * new policy pointer - */ -static int cpufreq_sched_thread(void *data) -{ - struct sched_param param; - struct cpufreq_policy *policy; - struct gov_data *gd; - unsigned int new_request = 0; - unsigned int last_request = 0; - int ret; - - policy = (struct cpufreq_policy *) data; - gd = policy->governor_data; - - param.sched_priority = 50; - ret = sched_setscheduler_nocheck(gd->task, SCHED_FIFO, ¶m); - if (ret) { - pr_warn("%s: failed to set SCHED_FIFO\n", __func__); - do_exit(-EINVAL); - } else { - pr_debug("%s: kthread (%d) set to SCHED_FIFO\n", - __func__, gd->task->pid); - } - - do { - new_request = gd->requested_freq; - if (new_request == last_request) { - set_current_state(TASK_INTERRUPTIBLE); - if (kthread_should_stop()) - break; - schedule(); - } else { - /* - * if the frequency thread sleeps while waiting to be - * unthrottled, start over to check for a newer request - */ - if (finish_last_request(gd, policy->cur)) - continue; - last_request = new_request; - cpufreq_sched_try_driver_target(policy, new_request); - } - } while (!kthread_should_stop()); - - return 0; -} - -static void cpufreq_sched_irq_work(struct irq_work *irq_work) -{ - struct gov_data *gd; - - gd = container_of(irq_work, struct gov_data, irq_work); - if (!gd) - return; - - wake_up_process(gd->task); -} - -static void update_fdomain_capacity_request(int cpu) -{ - unsigned int freq_new, index_new, cpu_tmp; - struct cpufreq_policy *policy; - struct gov_data *gd; - unsigned long capacity = 0; - - /* - * Avoid grabbing the policy if possible. A test is still - * required after locking the CPU's policy to avoid racing - * with the governor changing. 
- */ - if (!per_cpu(enabled, cpu)) - return; - - policy = cpufreq_cpu_get(cpu); - if (IS_ERR_OR_NULL(policy)) - return; - - if (policy->governor != &cpufreq_gov_sched || - !policy->governor_data) - goto out; - - gd = policy->governor_data; - - /* find max capacity requested by cpus in this policy */ - for_each_cpu(cpu_tmp, policy->cpus) { - struct sched_capacity_reqs *scr; - - scr = &per_cpu(cpu_sched_capacity_reqs, cpu_tmp); - capacity = max(capacity, scr->total); - } - - /* Convert the new maximum capacity request into a cpu frequency */ - freq_new = capacity * policy->cpuinfo.max_freq >> SCHED_CAPACITY_SHIFT; - if (cpufreq_frequency_table_target(policy, policy->freq_table, - freq_new, CPUFREQ_RELATION_L, - &index_new)) - goto out; - freq_new = policy->freq_table[index_new].frequency; - - if (freq_new > policy->max) - freq_new = policy->max; - - if (freq_new < policy->min) - freq_new = policy->min; - - trace_cpufreq_sched_request_opp(cpu, capacity, freq_new, - gd->requested_freq); - if (freq_new == gd->requested_freq) - goto out; - - gd->requested_freq = freq_new; - - /* - * Throttling is not yet supported on platforms with fast cpufreq - * drivers. - */ - if (cpufreq_driver_slow) - irq_work_queue_on(&gd->irq_work, cpu); - else - cpufreq_sched_try_driver_target(policy, freq_new); - -out: - cpufreq_cpu_put(policy); -} - -#ifdef CONFIG_SCHED_WALT -static inline unsigned long -requested_capacity(struct sched_capacity_reqs *scr) -{ - if (!walt_disabled && sysctl_sched_use_walt_cpu_util) - return scr->cfs; - return scr->cfs + scr->rt; -} -#else -#define requested_capacity(scr) (scr->cfs + scr->rt) -#endif - -void update_cpu_capacity_request(int cpu, bool request) -{ - unsigned long new_capacity; - struct sched_capacity_reqs *scr; - - /* The rq lock serializes access to the CPU's sched_capacity_reqs. 
*/ - lockdep_assert_held(&cpu_rq(cpu)->lock); - - scr = &per_cpu(cpu_sched_capacity_reqs, cpu); - - new_capacity = requested_capacity(scr); - new_capacity = new_capacity * capacity_margin - / SCHED_CAPACITY_SCALE; - new_capacity += scr->dl; - - if (new_capacity == scr->total) - return; - - trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity); - - scr->total = new_capacity; - if (request) - update_fdomain_capacity_request(cpu); -} - -static inline void set_sched_freq(void) -{ - static_key_slow_inc(&__sched_freq); -} - -static inline void clear_sched_freq(void) -{ - static_key_slow_dec(&__sched_freq); -} - -/* Tunables */ -static struct gov_tunables *global_tunables; - -static inline struct gov_tunables *to_tunables(struct gov_attr_set *attr_set) -{ - return container_of(attr_set, struct gov_tunables, attr_set); -} - -static ssize_t up_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf) -{ - struct gov_tunables *tunables = to_tunables(attr_set); - - return sprintf(buf, "%u\n", tunables->up_throttle_nsec); -} - -static ssize_t up_throttle_nsec_store(struct gov_attr_set *attr_set, - const char *buf, size_t count) -{ - struct gov_tunables *tunables = to_tunables(attr_set); - int ret; - long unsigned int val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - tunables->up_throttle_nsec = val; - return count; -} - -static ssize_t down_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf) -{ - struct gov_tunables *tunables = to_tunables(attr_set); - - return sprintf(buf, "%u\n", tunables->down_throttle_nsec); -} - -static ssize_t down_throttle_nsec_store(struct gov_attr_set *attr_set, - const char *buf, size_t count) -{ - struct gov_tunables *tunables = to_tunables(attr_set); - int ret; - long unsigned int val; - - ret = kstrtoul(buf, 0, &val); - if (ret < 0) - return ret; - tunables->down_throttle_nsec = val; - return count; -} - -static struct governor_attr up_throttle_nsec = __ATTR_RW(up_throttle_nsec); -static struct governor_attr down_throttle_nsec = __ATTR_RW(down_throttle_nsec); - -static struct attribute *schedfreq_attributes[] = { - &up_throttle_nsec.attr, - &down_throttle_nsec.attr, - NULL -}; - -static struct kobj_type tunables_ktype = { - .default_attrs = schedfreq_attributes, - .sysfs_ops = &governor_sysfs_ops, -}; - -static int cpufreq_sched_policy_init(struct cpufreq_policy *policy) -{ - struct gov_data *gd; - int cpu; - int rc; - - for_each_cpu(cpu, policy->cpus) - memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0, - sizeof(struct sched_capacity_reqs)); - - gd = kzalloc(sizeof(*gd), GFP_KERNEL); - if (!gd) - return -ENOMEM; - - policy->governor_data = gd; - - if (!global_tunables) { - gd->tunables = kzalloc(sizeof(*gd->tunables), GFP_KERNEL); - if (!gd->tunables) - goto free_gd; - - gd->tunables->up_throttle_nsec = - policy->cpuinfo.transition_latency ? 
- policy->cpuinfo.transition_latency : - THROTTLE_UP_NSEC; - gd->tunables->down_throttle_nsec = - THROTTLE_DOWN_NSEC; - - rc = kobject_init_and_add(&gd->tunables->attr_set.kobj, - &tunables_ktype, - get_governor_parent_kobj(policy), - "%s", cpufreq_gov_sched.name); - if (rc) - goto free_tunables; - - gov_attr_set_init(&gd->tunables->attr_set, - &gd->tunables_hook); - - pr_debug("%s: throttle_threshold = %u [ns]\n", - __func__, gd->tunables->up_throttle_nsec); - - if (!have_governor_per_policy()) - global_tunables = gd->tunables; - } else { - gd->tunables = global_tunables; - gov_attr_set_get(&global_tunables->attr_set, - &gd->tunables_hook); - } - - policy->governor_data = gd; - if (cpufreq_driver_is_slow()) { - cpufreq_driver_slow = true; - gd->task = kthread_create(cpufreq_sched_thread, policy, - "kschedfreq:%d", - cpumask_first(policy->related_cpus)); - if (IS_ERR_OR_NULL(gd->task)) { - pr_err("%s: failed to create kschedfreq thread\n", - __func__); - goto free_tunables; - } - get_task_struct(gd->task); - kthread_bind_mask(gd->task, policy->related_cpus); - wake_up_process(gd->task); - init_irq_work(&gd->irq_work, cpufreq_sched_irq_work); - } - - set_sched_freq(); - - return 0; - -free_tunables: - kfree(gd->tunables); -free_gd: - policy->governor_data = NULL; - kfree(gd); - return -ENOMEM; -} - -static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy) -{ - unsigned int count; - struct gov_data *gd = policy->governor_data; - - clear_sched_freq(); - if (cpufreq_driver_slow) { - kthread_stop(gd->task); - put_task_struct(gd->task); - } - - count = gov_attr_set_put(&gd->tunables->attr_set, &gd->tunables_hook); - if (!count) { - if (!have_governor_per_policy()) - global_tunables = NULL; - kfree(gd->tunables); - } - - policy->governor_data = NULL; - - kfree(gd); - return 0; -} - -static int cpufreq_sched_start(struct cpufreq_policy *policy) -{ - int cpu; - - for_each_cpu(cpu, policy->cpus) - per_cpu(enabled, cpu) = 1; - - return 0; -} - -static void cpufreq_sched_limits(struct cpufreq_policy *policy) -{ - unsigned int clamp_freq; - struct gov_data *gd = policy->governor_data;; - - pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n", - policy->cpu, policy->min, policy->max, - policy->cur); - - clamp_freq = clamp(gd->requested_freq, policy->min, policy->max); - - if (policy->cur != clamp_freq) - __cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L); -} - -static int cpufreq_sched_stop(struct cpufreq_policy *policy) -{ - int cpu; - - for_each_cpu(cpu, policy->cpus) - per_cpu(enabled, cpu) = 0; - - return 0; -} - -static int cpufreq_sched_setup(struct cpufreq_policy *policy, - unsigned int event) -{ - switch (event) { - case CPUFREQ_GOV_POLICY_INIT: - return cpufreq_sched_policy_init(policy); - case CPUFREQ_GOV_POLICY_EXIT: - return cpufreq_sched_policy_exit(policy); - case CPUFREQ_GOV_START: - return cpufreq_sched_start(policy); - case CPUFREQ_GOV_STOP: - return cpufreq_sched_stop(policy); - case CPUFREQ_GOV_LIMITS: - cpufreq_sched_limits(policy); - break; - } - return 0; -} - - -#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED -static -#endif -struct cpufreq_governor cpufreq_gov_sched = { - .name = "sched", - .governor = cpufreq_sched_setup, - .owner = THIS_MODULE, -}; - -static int __init cpufreq_sched_init(void) -{ - int cpu; - - for_each_cpu(cpu, cpu_possible_mask) - per_cpu(enabled, cpu) = 0; - return cpufreq_register_governor(&cpufreq_gov_sched); -} - -/* Try to make this the default governor */ -fs_initcall(cpufreq_sched_init); diff --git a/kernel/sched/fair.c 
b/kernel/sched/fair.c index 5c65f3ad6da1..b5ea66e5551c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -54,7 +54,6 @@ unsigned int sysctl_sched_latency = 6000000ULL; unsigned int normalized_sysctl_sched_latency = 6000000ULL; unsigned int sysctl_sched_sync_hint_enable = 1; -unsigned int sysctl_sched_initial_task_util = 0; unsigned int sysctl_sched_cstate_aware = 1; #ifdef CONFIG_SCHED_WALT @@ -750,9 +749,7 @@ void init_entity_runnable_average(struct sched_entity *se) sa->load_sum = sa->load_avg * LOAD_AVG_MAX; /* * In previous Android versions, we used to have: - * sa->util_avg = sched_freq() ? - * sysctl_sched_initial_task_util : - * scale_load_down(SCHED_LOAD_SCALE); + * sa->util_avg = scale_load_down(SCHED_LOAD_SCALE); * sa->util_sum = sa->util_avg * LOAD_AVG_MAX; * However, that functionality has been moved to enqueue. * It is unclear if we should restore this in enqueue. @@ -4668,21 +4665,6 @@ unsigned long boosted_cpu_util(int cpu); #define boosted_cpu_util(cpu) cpu_util_freq(cpu) #endif -#ifdef CONFIG_SMP -static void update_capacity_of(int cpu) -{ - unsigned long req_cap; - - if (!sched_freq()) - return; - - /* Normalize scale-invariant capacity to cpu. */ - req_cap = boosted_cpu_util(cpu); - req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu); - set_cfs_cpu_capacity(cpu, true, req_cap); -} -#endif - /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and @@ -4695,7 +4677,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct sched_entity *se = &p->se; #ifdef CONFIG_SMP int task_new = flags & ENQUEUE_WAKEUP_NEW; - int task_wakeup = flags & ENQUEUE_WAKEUP; #endif /* @@ -4769,16 +4750,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) rq->rd->overutilized = true; trace_sched_overutilized(true); } - - /* - * We want to potentially trigger a freq switch - * request only for tasks that are waking up; this is - * because we get here also during load balancing, but - * in these cases it seems wise to trigger as single - * request after load balancing is done. - */ - if (task_new || task_wakeup) - update_capacity_of(cpu_of(rq)); } #endif /* CONFIG_SMP */ @@ -4854,25 +4825,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) */ schedtune_dequeue_task(p, cpu_of(rq)); - if (!se) { + if (!se) walt_dec_cumulative_runnable_avg(rq, p); - - /* - * We want to potentially trigger a freq switch - * request only for tasks that are going to sleep; - * this is because we get here also during load - * balancing, but in these cases it seems wise to - * trigger as single request after load balancing is - * done. - */ - if (task_sleep) { - if (rq->cfs.nr_running) - update_capacity_of(cpu_of(rq)); - else if (sched_freq()) - set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */ - } - } - #endif /* CONFIG_SMP */ hrtick_update(rq); @@ -7709,10 +7663,6 @@ static void attach_one_task(struct rq *rq, struct task_struct *p) { raw_spin_lock(&rq->lock); attach_task(rq, p); - /* - * We want to potentially raise target_cpu's OPP. - */ - update_capacity_of(cpu_of(rq)); raw_spin_unlock(&rq->lock); } @@ -7734,11 +7684,6 @@ static void attach_tasks(struct lb_env *env) attach_task(env->dst_rq, p); } - /* - * We want to potentially raise env.dst_cpu's OPP. 
- */ - update_capacity_of(env->dst_cpu); - raw_spin_unlock(&env->dst_rq->lock); } @@ -9081,11 +9026,6 @@ more_balance: * ld_moved - cumulative load moved across iterations */ cur_ld_moved = detach_tasks(&env); - /* - * We want to potentially lower env.src_cpu's OPP. - */ - if (cur_ld_moved) - update_capacity_of(env.src_cpu); /* * We've detached some tasks from busiest_rq. Every @@ -9310,7 +9250,6 @@ static int idle_balance(struct rq *this_rq) struct sched_domain *sd; int pulled_task = 0; u64 curr_cost = 0; - long removed_util=0; idle_enter_fair(this_rq); @@ -9334,17 +9273,6 @@ static int idle_balance(struct rq *this_rq) raw_spin_unlock(&this_rq->lock); - /* - * If removed_util_avg is !0 we most probably migrated some task away - * from this_cpu. In this case we might be willing to trigger an OPP - * update, but we want to do so if we don't find anybody else to pull - * here (we will trigger an OPP update with the pulled task's enqueue - * anyway). - * - * Record removed_util before calling update_blocked_averages, and use - * it below (before returning) to see if an OPP update is required. - */ - removed_util = atomic_long_read(&(this_rq->cfs).removed_util_avg); update_blocked_averages(this_cpu); rcu_read_lock(); for_each_domain(this_cpu, sd) { @@ -9409,12 +9337,6 @@ out: if (pulled_task) { idle_exit_fair(this_rq); this_rq->idle_stamp = 0; - } else if (removed_util) { - /* - * No task pulled and someone has been migrated away. - * Good case to trigger an OPP update. - */ - update_capacity_of(this_cpu); } return pulled_task; @@ -9488,13 +9410,8 @@ static int active_load_balance_cpu_stop(void *data) update_rq_clock(busiest_rq); p = detach_one_task(&env); - if (p) { + if (p) schedstat_inc(sd, alb_pushed); - /* - * We want to potentially lower env.src_cpu's OPP. - */ - update_capacity_of(env.src_cpu); - } else schedstat_inc(sd, alb_failed); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f41435e7f75d..ebf0d9329c86 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1636,41 +1636,6 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag #endif } -#ifdef CONFIG_SMP -static void sched_rt_update_capacity_req(struct rq *rq) -{ - u64 total, used, age_stamp, avg; - s64 delta; - - if (!sched_freq()) - return; - - sched_avg_update(rq); - /* - * Since we're reading these variables without serialization make sure - * we read them once before doing sanity checks on them. - */ - age_stamp = READ_ONCE(rq->age_stamp); - avg = READ_ONCE(rq->rt_avg); - delta = rq_clock(rq) - age_stamp; - - if (unlikely(delta < 0)) - delta = 0; - - total = sched_avg_period() + delta; - - used = div_u64(avg, total); - if (unlikely(used > SCHED_CAPACITY_SCALE)) - used = SCHED_CAPACITY_SCALE; - - set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used)); -} -#else -static inline void sched_rt_update_capacity_req(struct rq *rq) -{ } - -#endif - static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq) { @@ -1739,17 +1704,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) if (prev->sched_class == &rt_sched_class) update_curr_rt(rq); - if (!rt_rq->rt_queued) { - /* - * The next task to be picked on this rq will have a lower - * priority than rt tasks so we can spend some time to update - * the capacity used by rt tasks based on the last activity. - * This value will be the used as an estimation of the next - * activity. 
- */ - sched_rt_update_capacity_req(rq); + if (!rt_rq->rt_queued) return NULL; - } put_prev_task(rq, prev); @@ -2476,9 +2432,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) update_curr_rt(rq); - if (rq->rt.rt_nr_running) - sched_rt_update_capacity_req(rq); - watchdog(rq, p); /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 028e232103c2..782746140711 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1629,81 +1629,6 @@ static inline unsigned long cpu_util_freq(int cpu) #endif -#ifdef CONFIG_CPU_FREQ_GOV_SCHED -#define capacity_max SCHED_CAPACITY_SCALE -extern unsigned int capacity_margin; -extern struct static_key __sched_freq; - -static inline bool sched_freq(void) -{ - return static_key_false(&__sched_freq); -} - -/* - * sched_capacity_reqs expects capacity requests to be normalised. - * All capacities should sum to the range of 0-1024. - */ -DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs); -void update_cpu_capacity_request(int cpu, bool request); - -static inline void set_cfs_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ - struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu); - -#ifdef CONFIG_SCHED_WALT - if (!walt_disabled && sysctl_sched_use_walt_cpu_util) { - int rtdl = scr->rt + scr->dl; - /* - * WALT tracks the utilization of a CPU considering the load - * generated by all the scheduling classes. - * Since the following call to: - * update_cpu_capacity - * is already adding the RT and DL utilizations let's remove - * these contributions from the WALT signal. - */ - if (capacity > rtdl) - capacity -= rtdl; - else - capacity = 0; - } -#endif - if (scr->cfs != capacity) { - scr->cfs = capacity; - update_cpu_capacity_request(cpu, request); - } -} - -static inline void set_rt_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ - if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) { - per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity; - update_cpu_capacity_request(cpu, request); - } -} - -static inline void set_dl_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ - if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) { - per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity; - update_cpu_capacity_request(cpu, request); - } -} -#else -static inline bool sched_freq(void) { return false; } -static inline void set_cfs_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ } -static inline void set_rt_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ } -static inline void set_dl_cpu_capacity(int cpu, bool request, - unsigned long capacity) -{ } -#endif - static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq)); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 55caf81a833f..4e2f98dd2052 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -342,13 +342,6 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, #endif - { - .procname = "sched_initial_task_util", - .data = &sysctl_sched_initial_task_util, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec, - }, { .procname = "sched_cstate_aware", .data = &sysctl_sched_cstate_aware, From 3822fe484cef0ef3e37e7106bfd684639f64e77b Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Wed, 8 Nov 2017 00:43:37 +0000 Subject: [PATCH 09/50] Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT" This reverts commit 
d194ba5d712f051ff6c025f3484bb72f219764e3. Reason for revert: Broke some builds. Will fix and resubmit. Change-Id: I4e6fa1562346eda1bbf058f1d5ace5ba6256ce07 --- include/linux/sched.h | 4 -- kernel/sched/core.c | 1 - kernel/sched/rt.c | 154 ------------------------------------------ kernel/sched/sched.h | 1 - 4 files changed, 160 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 6197d9c8e5a0..60af9d2fa922 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1433,10 +1433,6 @@ struct sched_rt_entity { unsigned long watchdog_stamp; unsigned int time_slice; - /* Accesses for these must be guarded by rq->lock of the task's rq */ - bool schedtune_enqueued; - struct hrtimer schedtune_timer; - struct sched_rt_entity *back; #ifdef CONFIG_RT_GROUP_SCHED struct sched_rt_entity *parent; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 889fb1aff1e0..1eb91a696069 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2200,7 +2200,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) init_dl_task_timer(&p->dl); __dl_clear_params(p); - init_rt_schedtune_timer(&p->rt); INIT_LIST_HEAD(&p->rt.run_list); #ifdef CONFIG_PREEMPT_NOTIFIERS diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index ebf0d9329c86..c8322ab130eb 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -7,7 +7,6 @@ #include #include -#include #include "walt.h" #include "tune.h" @@ -987,73 +986,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) return 0; } -#define RT_SCHEDTUNE_INTERVAL 50000000ULL - -static void sched_rt_update_capacity_req(struct rq *rq); - -static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer) -{ - struct sched_rt_entity *rt_se = container_of(timer, - struct sched_rt_entity, - schedtune_timer); - struct task_struct *p = rt_task_of(rt_se); - struct rq *rq = task_rq(p); - - raw_spin_lock(&rq->lock); - - /* - * Nothing to do if: - * - task has switched runqueues - * - task isn't RT anymore - */ - if (rq != task_rq(p) || (p->sched_class != &rt_sched_class)) - goto out; - - /* - * If task got enqueued back during callback time, it means we raced - * with the enqueue on another cpu, that's Ok, just do nothing as - * enqueue path would have tried to cancel us and we shouldn't run - * Also check the schedtune_enqueued flag as class-switch on a - * sleeping task may have already canceled the timer and done dq - */ - if (p->on_rq || !rt_se->schedtune_enqueued) - goto out; - - /* - * RT task is no longer active, cancel boost - */ - rt_se->schedtune_enqueued = false; - schedtune_dequeue_task(p, cpu_of(rq)); - sched_rt_update_capacity_req(rq); - cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); -out: - raw_spin_unlock(&rq->lock); - - /* - * This can free the task_struct if no more references. - */ - put_task_struct(p); - - return HRTIMER_NORESTART; -} - -void init_rt_schedtune_timer(struct sched_rt_entity *rt_se) -{ - struct hrtimer *timer = &rt_se->schedtune_timer; - - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer->function = rt_schedtune_timer; - rt_se->schedtune_enqueued = false; -} - -static void start_schedtune_timer(struct sched_rt_entity *rt_se) -{ - struct hrtimer *timer = &rt_se->schedtune_timer; - - hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL), - HRTIMER_MODE_REL_PINNED); -} - /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. 
@@ -1391,33 +1323,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) if (!task_current(rq, p) && p->nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); - if (!schedtune_task_boost(p)) - return; - - /* - * If schedtune timer is active, that means a boost was already - * done, just cancel the timer so that deboost doesn't happen. - * Otherwise, increase the boost. If an enqueued timer was - * cancelled, put the task reference. - */ - if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1) - put_task_struct(p); - - /* - * schedtune_enqueued can be true in the following situation: - * enqueue_task_rt grabs rq lock before timer fires - * or before its callback acquires rq lock - * schedtune_enqueued can be false if timer callback is running - * and timer just released rq lock, or if the timer finished - * running and canceling the boost - */ - if (rt_se->schedtune_enqueued) - return; - - rt_se->schedtune_enqueued = true; schedtune_enqueue_task(p, cpu_of(rq)); - sched_rt_update_capacity_req(rq); - cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) @@ -1429,20 +1335,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) walt_dec_cumulative_runnable_avg(rq, p); dequeue_pushable_task(rq, p); - - if (!rt_se->schedtune_enqueued) - return; - - if (flags == DEQUEUE_SLEEP) { - get_task_struct(p); - start_schedtune_timer(rt_se); - return; - } - - rt_se->schedtune_enqueued = false; schedtune_dequeue_task(p, cpu_of(rq)); - sched_rt_update_capacity_req(rq); - cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); } /* @@ -1482,33 +1375,6 @@ static void yield_task_rt(struct rq *rq) #ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct *task); -/* - * Perform a schedtune dequeue and cancelation of boost timers if needed. - * Should be called only with the rq->lock held. - */ -static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p) -{ - struct sched_rt_entity *rt_se = &p->rt; - - BUG_ON(!raw_spin_is_locked(&rq->lock)); - - if (!rt_se->schedtune_enqueued) - return; - - /* - * Incase of class change cancel any active timers. If an enqueued - * timer was cancelled, put the task ref. - */ - if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1) - put_task_struct(p); - - /* schedtune_enqueued is true, deboost it */ - rt_se->schedtune_enqueued = false; - schedtune_dequeue_task(p, task_cpu(p)); - sched_rt_update_capacity_req(rq); - cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT); -} - static int select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags, int sibling_count_hint) @@ -1563,19 +1429,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags, rcu_read_unlock(); out: - /* - * If previous CPU was different, make sure to cancel any active - * schedtune timers and deboost. - */ - if (task_cpu(p) != cpu) { - unsigned long fl; - struct rq *prq = task_rq(p); - - raw_spin_lock_irqsave(&prq->lock, fl); - schedtune_dequeue_rt(prq, p); - raw_spin_unlock_irqrestore(&prq->lock, fl); - } - return cpu; } @@ -2308,13 +2161,6 @@ static void rq_offline_rt(struct rq *rq) */ static void switched_from_rt(struct rq *rq, struct task_struct *p) { - /* - * On class switch from rt, always cancel active schedtune timers, - * this handles the cases where we switch class for a task that is - * already rt-dequeued but has a running timer. 
- */ - schedtune_dequeue_rt(rq, p); - /* * If there are other RT tasks then we will reschedule * and the scheduling of the other RT tasks will handle diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 782746140711..203d64a0c947 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1398,7 +1398,6 @@ extern void resched_cpu(int cpu); extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); -extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se); extern struct dl_bandwidth def_dl_bandwidth; extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); From faa82dcc9d4dc3cc362d28832216df54de963e6e Mon Sep 17 00:00:00 2001 From: Jonathan Basseri Date: Wed, 25 Oct 2017 09:52:27 -0700 Subject: [PATCH 10/50] BACKPORT: xfrm: Clear sk_dst_cache when applying per-socket policy. If a socket has a valid dst cache, then xfrm_lookup_route will get skipped. However, the cache is not invalidated when applying policy to a socket (i.e. IPV6_XFRM_POLICY). The result is that new policies are sometimes ignored on those sockets. (Note: This was broken for IPv4 and IPv6 at different times.) This can be demonstrated like so: 1. Create UDP socket. 2. connect() the socket. 3. Apply an outbound XFRM policy to the socket. (setsockopt) 4. send() data on the socket. Packets will continue to be sent in the clear instead of matching an xfrm or returning a no-match error (EAGAIN). This affects calls to send() and not sendto(). Invalidating the sk_dst_cache is necessary to correctly apply xfrm policies. Since we do this in xfrm_user_policy(), the sk_lock was already acquired in either do_ip_setsockopt() or do_ipv6_setsockopt(), and we may call __sk_dst_reset(). Performance impact should be negligible, since this code is only called when changing xfrm policy, and only affects the socket in question. Change-Id: I54b4ec422aa5f4e31652a8c6913696f0a5610a51 Fixes: 00bc0ef5880d ("ipv6: Skip XFRM lookup if dst_entry in socket cache is valid") Tested: https://android-review.googlesource.com/517555 Tested: https://android-review.googlesource.com/418659 Signed-off-by: Jonathan Basseri Signed-off-by: Steffen Klassert (cherry picked from commit 2b06cdf3e688b98fcc9945873b5d42792bd4eee0) --- net/xfrm/xfrm_state.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9895a8c56d8c..c0aa1b18fe8f 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1869,6 +1869,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen if (err >= 0) { xfrm_sk_policy_insert(sk, err, pol); xfrm_pol_put(pol); + __sk_dst_reset(sk); err = 0; } From 70358782743fbc2b2fa26b4e7370b359350cfba3 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 29 Oct 2017 11:02:04 +0100 Subject: [PATCH 11/50] ALSA: timer: Add missing mutex lock for compat ioctls commit 79fb0518fec8c8b4ea7f1729f54f293724b3dbb0 upstream. The races among ioctl and other operations were protected by the commit af368027a49a ("ALSA: timer: Fix race among timer ioctls") and later fixes, but one code path was forgotten in the scenario: the 32-bit compat ioctl. As syzkaller recently spotted, a very similar use-after-free may happen with the combination of compat ioctls. The fix is simply to apply the same ioctl_lock to the compat_ioctl callback, too. 
Fixes: af368027a49a ("ALSA: timer: Fix race among timer ioctls") Reference: http://lkml.kernel.org/r/089e082686ac9b482e055c832617@google.com Reported-by: syzbot Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/timer_compat.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c index 2e908225d754..0b4b028e8e98 100644 --- a/sound/core/timer_compat.c +++ b/sound/core/timer_compat.c @@ -106,7 +106,8 @@ enum { #endif /* CONFIG_X86_X32 */ }; -static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg) +static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, + unsigned long arg) { void __user *argp = compat_ptr(arg); @@ -127,7 +128,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns case SNDRV_TIMER_IOCTL_PAUSE: case SNDRV_TIMER_IOCTL_PAUSE_OLD: case SNDRV_TIMER_IOCTL_NEXT_DEVICE: - return snd_timer_user_ioctl(file, cmd, (unsigned long)argp); + return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp); case SNDRV_TIMER_IOCTL_INFO32: return snd_timer_user_info_compat(file, argp); case SNDRV_TIMER_IOCTL_STATUS32: @@ -139,3 +140,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns } return -ENOIOCTLCMD; } + +static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct snd_timer_user *tu = file->private_data; + long ret; + + mutex_lock(&tu->ioctl_lock); + ret = __snd_timer_user_ioctl_compat(file, cmd, arg); + mutex_unlock(&tu->ioctl_lock); + return ret; +} From 8142e9516d5dcb549b681491ca9af54407f4894c Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Sun, 29 Oct 2017 11:10:43 +0100 Subject: [PATCH 12/50] ALSA: seq: Fix nested rwsem annotation for lockdep splat commit 1f20f9ff57ca23b9f5502fca85ce3977e8496cb1 upstream. syzkaller reported the lockdep splat due to the possible deadlock of grp->list_mutex of each sequencer client object. Actually this is rather a false-positive report due to the missing nested lock annotations. The sequencer client may deliver the event directly to another client which takes another own lock. For addressing this issue, this patch replaces the simple down_read() with down_read_nested(). As a lock subclass, the already existing "hop" can be re-used, which indicates the depth of the call. Reference: http://lkml.kernel.org/r/089e082686ac9b482e055c832617@google.com Reported-by: syzbot Reported-by: Dmitry Vyukov Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/core/seq/seq_clientmgr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index e847b9923c19..b36de76f24e2 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -676,7 +676,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client, if (atomic) read_lock(&grp->list_lock); else - down_read(&grp->list_mutex); + down_read_nested(&grp->list_mutex, hop); list_for_each_entry(subs, &grp->list_head, src_list) { /* both ports ready? */ if (atomic_read(&subs->ref_count) != 2) From d304c9169b3879e7ea8a1eb48001c8f40f7e74ad Mon Sep 17 00:00:00 2001 From: Ronnie Sahlberg Date: Mon, 30 Oct 2017 13:28:03 +1100 Subject: [PATCH 13/50] cifs: check MaxPathNameComponentLength != 0 before using it commit f74bc7c6679200a4a83156bb89cbf6c229fe8ec0 upstream. And fix tcon leak in error path. 
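The zero check matters because a server that never fills in MaxPathNameComponentLength leaves the field at zero, and comparing every name length against a zero limit would reject all names as too long. A minimal standalone illustration of the guard (hypothetical helper name, not the cifs code itself):

    #include <stdio.h>

    /* Hypothetical helper: a zero (unreported) limit must mean "no limit
     * known", not "limit of zero". Mirrors the guard added in check_name(). */
    static int name_too_long(unsigned int max_component_len, unsigned int name_len)
    {
            return max_component_len && name_len > max_component_len;
    }

    int main(void)
    {
            printf("%d\n", name_too_long(0, 200));   /* 0: limit unreported, accept */
            printf("%d\n", name_too_long(255, 200)); /* 0: within the limit */
            printf("%d\n", name_too_long(255, 300)); /* 1: genuinely too long */
            return 0;
    }
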
Signed-off-by: Ronnie Sahlberg Signed-off-by: Steve French Reviewed-by: David Disseldorp Signed-off-by: Greg Kroah-Hartman --- fs/cifs/dir.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 297e05c9e2b0..49a0d6b027c1 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon) struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); int i; - if (unlikely(direntry->d_name.len > + if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength && + direntry->d_name.len > le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength))) return -ENAMETOOLONG; @@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, rc = check_name(direntry, tcon); if (rc) - goto out_free_xid; + goto out; server = tcon->ses->server; From 97c5668c972476bde719276ab3e836085367f8b8 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Nov 2017 00:47:03 +0000 Subject: [PATCH 14/50] KEYS: return full count in keyring_read() if buffer is too small commit 3239b6f29bdfb4b0a2ba59df995fc9e6f4df7f1f upstream. Commit e645016abc80 ("KEYS: fix writing past end of user-supplied buffer in keyring_read()") made keyring_read() stop corrupting userspace memory when the user-supplied buffer is too small. However it also made the return value in that case be the short buffer size rather than the size required, yet keyctl_read() is actually documented to return the size required. Therefore, switch it over to the documented behavior. Note that for now we continue to have it fill the short buffer, since it did that before (pre-v3.13) and dump_key_tree_aux() in keyutils arguably relies on it. Fixes: e645016abc80 ("KEYS: fix writing past end of user-supplied buffer in keyring_read()") Reported-by: Ben Hutchings Signed-off-by: Eric Biggers Signed-off-by: David Howells Reviewed-by: James Morris Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- security/keys/keyring.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/security/keys/keyring.c b/security/keys/keyring.c index ef828238cdc0..d5264f950ce1 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { struct keyring_read_iterator_context ctx; - unsigned long nr_keys; - int ret; + long ret; kenter("{%d},,%zu", key_serial(keyring), buflen); if (buflen & (sizeof(key_serial_t) - 1)) return -EINVAL; - nr_keys = keyring->keys.nr_leaves_on_tree; - if (nr_keys == 0) - return 0; - - /* Calculate how much data we could return */ - if (!buffer || !buflen) - return nr_keys * sizeof(key_serial_t); - - /* Copy the IDs of the subscribed keys into the buffer */ - ctx.buffer = (key_serial_t __user *)buffer; - ctx.buflen = buflen; - ctx.count = 0; - ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); - if (ret < 0) { - kleave(" = %d [iterate]", ret); - return ret; + /* Copy as many key IDs as fit into the buffer */ + if (buffer && buflen) { + ctx.buffer = (key_serial_t __user *)buffer; + ctx.buflen = buflen; + ctx.count = 0; + ret = assoc_array_iterate(&keyring->keys, + keyring_read_iterator, &ctx); + if (ret < 0) { + kleave(" = %ld [iterate]", ret); + return ret; + } } - kleave(" = %zu [ok]", ctx.count); - return ctx.count; + /* Return the size of the buffer needed */ + ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t); + if (ret <= buflen) + 
kleave("= %ld [ok]", ret); + else + kleave("= %ld [buffer too small]", ret); + return ret; } /* From 618b930317fbea5561f9e5b07b26468d595ec110 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 2 Nov 2017 00:47:19 +0000 Subject: [PATCH 15/50] KEYS: fix out-of-bounds read during ASN.1 parsing commit 2eb9eabf1e868fda15808954fb29b0f105ed65f1 upstream. syzkaller with KASAN reported an out-of-bounds read in asn1_ber_decoder(). It can be reproduced by the following command, assuming CONFIG_X509_CERTIFICATE_PARSER=y and CONFIG_KASAN=y: keyctl add asymmetric desc $'\x30\x30' @s The bug is that the length of an ASN.1 data value isn't validated in the case where it is encoded using the short form, causing the decoder to read past the end of the input buffer. Fix it by validating the length. The bug report was: BUG: KASAN: slab-out-of-bounds in asn1_ber_decoder+0x10cb/0x1730 lib/asn1_decoder.c:233 Read of size 1 at addr ffff88003cccfa02 by task syz-executor0/6818 CPU: 1 PID: 6818 Comm: syz-executor0 Not tainted 4.14.0-rc7-00008-g5f479447d983 #2 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:16 [inline] dump_stack+0xb3/0x10b lib/dump_stack.c:52 print_address_description+0x79/0x2a0 mm/kasan/report.c:252 kasan_report_error mm/kasan/report.c:351 [inline] kasan_report+0x236/0x340 mm/kasan/report.c:409 __asan_report_load1_noabort+0x14/0x20 mm/kasan/report.c:427 asn1_ber_decoder+0x10cb/0x1730 lib/asn1_decoder.c:233 x509_cert_parse+0x1db/0x650 crypto/asymmetric_keys/x509_cert_parser.c:89 x509_key_preparse+0x64/0x7a0 crypto/asymmetric_keys/x509_public_key.c:174 asymmetric_key_preparse+0xcb/0x1a0 crypto/asymmetric_keys/asymmetric_type.c:388 key_create_or_update+0x347/0xb20 security/keys/key.c:855 SYSC_add_key security/keys/keyctl.c:122 [inline] SyS_add_key+0x1cd/0x340 security/keys/keyctl.c:62 entry_SYSCALL_64_fastpath+0x1f/0xbe RIP: 0033:0x447c89 RSP: 002b:00007fca7a5d3bd8 EFLAGS: 00000246 ORIG_RAX: 00000000000000f8 RAX: ffffffffffffffda RBX: 00007fca7a5d46cc RCX: 0000000000447c89 RDX: 0000000020006f4a RSI: 0000000020006000 RDI: 0000000020001ff5 RBP: 0000000000000046 R08: fffffffffffffffd R09: 0000000000000000 R10: 0000000000000002 R11: 0000000000000246 R12: 0000000000000000 R13: 0000000000000000 R14: 00007fca7a5d49c0 R15: 00007fca7a5d4700 Fixes: 42d5ec27f873 ("X.509: Add an ASN.1 decoder") Signed-off-by: Eric Biggers Signed-off-by: David Howells Signed-off-by: James Morris Signed-off-by: Greg Kroah-Hartman --- lib/asn1_decoder.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 554522934c44..faa2a3f017f8 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -283,6 +283,9 @@ next_op: if (unlikely(len > datalen - dp)) goto data_overrun_error; } + } else { + if (unlikely(len > datalen - dp)) + goto data_overrun_error; } if (flags & FLAG_CONS) { From 97d64b7f2fdf945c40c5dc2004defec5f260cb6d Mon Sep 17 00:00:00 2001 From: Ricard Wanderlof Date: Thu, 7 Sep 2017 15:31:38 +0200 Subject: [PATCH 16/50] ASoC: adau17x1: Workaround for noise bug in ADC commit 1e6f4fc06f6411adf98bbbe7fcd79442cd2b2a75 upstream. The ADC in the ADAU1361 (and possibly other Analog Devices codecs) exhibits a cyclic variation in the noise floor (in our test setup between -87 and -93 dB), a new value being attained within this range whenever a new capture stream is started. The cycle repeats after about 10 or 11 restarts. 
The workaround recommended by the manufacturer is to toggle the ADOSR bit in the Converter Control 0 register each time a new capture stream is started. I have verified that the patch fixes this problem on the ADAU1361, and according to the manufacturer toggling the bit in question in this manner will at least have no detrimental effect on other chips served by this driver. Signed-off-by: Ricard Wanderlof Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/adau17x1.c | 24 +++++++++++++++++++++++- sound/soc/codecs/adau17x1.h | 2 ++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c index fcf05b254ecd..0b9e13eb0a0a 100644 --- a/sound/soc/codecs/adau17x1.c +++ b/sound/soc/codecs/adau17x1.c @@ -89,6 +89,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w, return 0; } +static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); + struct adau *adau = snd_soc_codec_get_drvdata(codec); + + /* + * If we are capturing, toggle the ADOSR bit in Converter Control 0 to + * avoid losing SNR (workaround from ADI). This must be done after + * the ADC(s) have been enabled. According to the data sheet, it is + * normally illegal to set this bit when the sampling rate is 96 kHz, + * but according to ADI it is acceptable for this workaround. + */ + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, + ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR); + regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, + ADAU17X1_CONVERTER0_ADOSR, 0); + + return 0; +} + static const char * const adau17x1_mono_stereo_text[] = { "Stereo", "Mono Left Channel (L+R)", @@ -120,7 +141,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = { SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0, &adau17x1_dac_mode_mux), - SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0), + SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0, + adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU), SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0), SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0), SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0), diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h index e13583e6ff56..6b46461cdc03 100644 --- a/sound/soc/codecs/adau17x1.h +++ b/sound/soc/codecs/adau17x1.h @@ -123,5 +123,7 @@ bool adau17x1_has_dsp(struct adau *adau); #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7 +#define ADAU17X1_CONVERTER0_ADOSR BIT(3) + #endif From 581ac5f431c9b01a820ee10581b042328d870230 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Thu, 2 Nov 2017 16:12:03 +0000 Subject: [PATCH 17/50] arm64: ensure __dump_instr() checks addr_limit commit 7a7003b1da010d2b0d1dc8bf21c10f5c73b389f1 upstream. It's possible for a user to deliberately trigger __dump_instr with a chosen kernel address. Let's avoid problems resulting from this by using get_user() rather than __get_user(), ensuring that we don't erroneously access kernel memory. Where we use __dump_instr() on kernel text, we already switch to KERNEL_DS, so this shouldn't adversely affect those cases. 
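As a conceptual sketch (not code from the patch): __get_user() trusts
that the caller already validated the pointer with access_ok(), while
get_user() performs that range check itself, so only the latter is safe
for an address that may have been chosen by the faulting context:

#include <linux/uaccess.h>

static int dump_word(const u32 __user *addr, u32 *out)
{
	/*
	 * __get_user(*out, addr) would skip the access_ok() check and
	 * dereference even a kernel address; get_user() verifies the
	 * address against the current addr_limit first and fails
	 * cleanly with -EFAULT.
	 */
	return get_user(*out, addr);
}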
Fixes: 60ffc30d5652810d ("arm64: Exception handling") Acked-by: Will Deacon Signed-off-by: Mark Rutland Signed-off-by: Catalin Marinas Signed-off-by: Greg Kroah-Hartman --- arch/arm64/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ca7f0ac5f708..210826d5bba5 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -129,7 +129,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs) for (i = -4; i < 1; i++) { unsigned int val, bad; - bad = __get_user(val, &((u32 *)addr)[i]); + bad = get_user(val, &((u32 *)addr)[i]); if (!bad) p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val); From 4e351b8dd8b773669c3f0c5e50e4e61031f9e43e Mon Sep 17 00:00:00 2001 From: Yan Markman Date: Sun, 16 Oct 2016 00:22:32 +0300 Subject: [PATCH 18/50] ARM: dts: mvebu: pl310-cache disable double-linefill commit cda80a82ac3e89309706c027ada6ab232be1d640 upstream. Under heavy system stress mvebu SoC using Cortex A9 sporadically encountered instability issues. The "double linefill" feature of L2 cache was identified as causing dependency between read and write which lead to the deadlock. Especially, it was the cause of deadlock seen under heavy PCIe traffic, as this dependency violates PCIE overtaking rule. Fixes: c8f5a878e554 ("ARM: mvebu: use DT properties to fine-tune the L2 configuration") Signed-off-by: Yan Markman Signed-off-by: Igal Liberman Signed-off-by: Nadav Haklai [gregory.clement@free-electrons.com: reformulate commit log, add Armada 375 and add Fixes tag] Signed-off-by: Gregory CLEMENT Signed-off-by: Greg Kroah-Hartman --- arch/arm/boot/dts/armada-375.dtsi | 4 ++-- arch/arm/boot/dts/armada-38x.dtsi | 4 ++-- arch/arm/boot/dts/armada-39x.dtsi | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi index cc952cf8ec30..024f1b75b0a3 100644 --- a/arch/arm/boot/dts/armada-375.dtsi +++ b/arch/arm/boot/dts/armada-375.dtsi @@ -176,9 +176,9 @@ reg = <0x8000 0x1000>; cache-unified; cache-level = <2>; - arm,double-linefill-incr = <1>; + arm,double-linefill-incr = <0>; arm,double-linefill-wrap = <0>; - arm,double-linefill = <1>; + arm,double-linefill = <0>; prefetch-data = <1>; }; diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index e8b7f6726772..bf20918f1fad 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi @@ -143,9 +143,9 @@ reg = <0x8000 0x1000>; cache-unified; cache-level = <2>; - arm,double-linefill-incr = <1>; + arm,double-linefill-incr = <0>; arm,double-linefill-wrap = <0>; - arm,double-linefill = <1>; + arm,double-linefill = <0>; prefetch-data = <1>; }; diff --git a/arch/arm/boot/dts/armada-39x.dtsi b/arch/arm/boot/dts/armada-39x.dtsi index dc6efd386dbc..e67f1fd7a4d1 100644 --- a/arch/arm/boot/dts/armada-39x.dtsi +++ b/arch/arm/boot/dts/armada-39x.dtsi @@ -104,9 +104,9 @@ reg = <0x8000 0x1000>; cache-unified; cache-level = <2>; - arm,double-linefill-incr = <1>; + arm,double-linefill-incr = <0>; arm,double-linefill-wrap = <0>; - arm,double-linefill = <1>; + arm,double-linefill = <0>; prefetch-data = <1>; }; From cc7d9933400f9b7f16e4d36cb6974395c3116695 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 20 Oct 2017 21:17:05 +0100 Subject: [PATCH 19/50] ARM: 8715/1: add a private asm/unaligned.h commit 1cce91dfc8f7990ca3aea896bfb148f240b12860 upstream. 
The asm-generic/unaligned.h header provides two different implementations for accessing unaligned variables: the access_ok.h version used when CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set pretends that all pointers are in fact aligned, while the le_struct.h version convinces gcc that the alignment of a pointer is '1', to make it issue the correct load/store instructions depending on the architecture flags. On ARMv5 and older, we always use the second version, to let the compiler use byte accesses. On ARMv6 and newer, we currently use the access_ok.h version, so the compiler can use any instruction including stm/ldm and ldrd/strd that will cause an alignment trap. This trap can significantly impact performance when we have to do a lot of fixups and, worse, has led to crashes in the LZ4 decompressor code that does not have a trap handler. This adds an ARM specific version of asm/unaligned.h that uses the le_struct.h/be_struct.h implementation unconditionally. This should lead to essentially the same code on ARMv6+ as before, with the exception of using regular load/store instructions instead of the trapping instructions multi-register variants. The crash in the LZ4 decompressor code was probably introduced by the patch replacing the LZ4 implementation, commit 4e1a33b105dd ("lib: update LZ4 compressor module"), so linux-4.11 and higher would be affected most. However, we probably want to have this backported to all older stable kernels as well, to help with the performance issues. There are two follow-ups that I think we should also work on, but not backport to stable kernels, first to change the asm-generic version of the header to remove the ARM special case, and second to review all other uses of CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS to see if they might be affected by the same problem on ARM. Signed-off-by: Arnd Bergmann Signed-off-by: Russell King Signed-off-by: Greg Kroah-Hartman --- arch/arm/include/asm/Kbuild | 1 - arch/arm/include/asm/unaligned.h | 27 +++++++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 arch/arm/include/asm/unaligned.h diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index bd425302c97a..628a38a11a70 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -36,4 +36,3 @@ generic-y += termbits.h generic-y += termios.h generic-y += timex.h generic-y += trace_clock.h -generic-y += unaligned.h diff --git a/arch/arm/include/asm/unaligned.h b/arch/arm/include/asm/unaligned.h new file mode 100644 index 000000000000..ab905ffcf193 --- /dev/null +++ b/arch/arm/include/asm/unaligned.h @@ -0,0 +1,27 @@ +#ifndef __ASM_ARM_UNALIGNED_H +#define __ASM_ARM_UNALIGNED_H + +/* + * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+, + * but we don't want to use linux/unaligned/access_ok.h since that can lead + * to traps on unaligned stm/ldm or strd/ldrd. 
+ */ +#include + +#if defined(__LITTLE_ENDIAN) +# include +# include +# include +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#elif defined(__BIG_ENDIAN) +# include +# include +# include +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#else +# error need to define endianess +#endif + +#endif /* __ASM_ARM_UNALIGNED_H */ From fa312b481b2b4e1eeb35aa9c436df1d39f1c8333 Mon Sep 17 00:00:00 2001 From: Ashish Samant Date: Thu, 2 Nov 2017 15:59:37 -0700 Subject: [PATCH 20/50] ocfs2: fstrim: Fix start offset of first cluster group during fstrim commit 105ddc93f06ebe3e553f58563d11ed63dbcd59f0 upstream. The first cluster group descriptor is not stored at the start of the group but at an offset from the start. We need to take this into account while doing fstrim on the first cluster group. Otherwise we will wrongly start fstrim a few blocks after the desired start block and the range can cross over into the next cluster group and zero out the group descriptor there. This can cause filesytem corruption that cannot be fixed by fsck. Link: http://lkml.kernel.org/r/1507835579-7308-1-git-send-email-ashish.samant@oracle.com Signed-off-by: Ashish Samant Reviewed-by: Junxiao Bi Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/ocfs2/alloc.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 86181d6526dc..93e6f029a322 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -7270,13 +7270,24 @@ out: static int ocfs2_trim_extent(struct super_block *sb, struct ocfs2_group_desc *gd, - u32 start, u32 count) + u64 group, u32 start, u32 count) { u64 discard, bcount; + struct ocfs2_super *osb = OCFS2_SB(sb); bcount = ocfs2_clusters_to_blocks(sb, count); - discard = le64_to_cpu(gd->bg_blkno) + - ocfs2_clusters_to_blocks(sb, start); + discard = ocfs2_clusters_to_blocks(sb, start); + + /* + * For the first cluster group, the gd->bg_blkno is not at the start + * of the group, but at an offset from the start. If we add it while + * calculating discard for first group, we will wrongly start fstrim a + * few blocks after the desried start block and the range can cross + * over into the next cluster group. So, add it only if this is not + * the first cluster group. 
+ */ + if (group != osb->first_cluster_group_blkno) + discard += le64_to_cpu(gd->bg_blkno); trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount); @@ -7284,7 +7295,7 @@ static int ocfs2_trim_extent(struct super_block *sb, } static int ocfs2_trim_group(struct super_block *sb, - struct ocfs2_group_desc *gd, + struct ocfs2_group_desc *gd, u64 group, u32 start, u32 max, u32 minbits) { int ret = 0, count = 0, next; @@ -7303,7 +7314,7 @@ static int ocfs2_trim_group(struct super_block *sb, next = ocfs2_find_next_bit(bitmap, max, start); if ((next - start) >= minbits) { - ret = ocfs2_trim_extent(sb, gd, + ret = ocfs2_trim_extent(sb, gd, group, start, next - start); if (ret < 0) { mlog_errno(ret); @@ -7401,7 +7412,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range) } gd = (struct ocfs2_group_desc *)gd_bh->b_data; - cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen); + cnt = ocfs2_trim_group(sb, gd, group, + first_bit, last_bit, minlen); brelse(gd_bh); gd_bh = NULL; if (cnt < 0) { From 6e6eba5ba14579f1fede964d189a7153444fb3b6 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 2 Aug 2016 11:43:17 +0900 Subject: [PATCH 21/50] perf tools: Fix build failure on perl script context commit b581c01fff646b5075d65359c8667de9c667da9e upstream. On my Archlinux machine, perf faild to build like below: CC scripts/perl/Perf-Trace-Util/Context.o In file included from /usr/lib/perl5/core/perl/CORE/perl.h:3905:0, from Context.xs:23: /usr/lib/perl5/core/perl/CORE/inline.h: In function : /usr/lib/perl5/core/perl/CORE/cop.h:612:13: warning: declaration of 'av' shadows a previous local [-Werror-shadow] AV *av =3D GvAV(PL_defgv); ^ /usr/lib/perl5/core/perl/CORE/inline.h:526:5: note: in expansion of macro 'CX_POP_SAVEARRAY' CX_POP_SAVEARRAY(cx); ^~~~~~~~~~~~~~~~ In file included from /usr/lib/perl5/core/perl/CORE/perl.h:5853:0, from Context.xs:23: /usr/lib/perl5/core/perl/CORE/inline.h:518:9: note: shadowed declaration is here AV *av; ^~ What I did to fix is adding '-Wno-shadow' as the error message said it's the cause of the failure. Since it's from the perl (not perf) code base, we don't have the control so I just wanted to ignore the warning when compiling perl scripting code. Committer note: This also fixes the build on Fedora Rawhide. 
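For reference, the class of warning being silenced looks like this
(a contrived example, not taken from the perl headers):

static void shadow_example(void)
{
	int av = 1;

	{
		int av = 2;	/* -Wshadow: declaration of 'av' shadows a previous local */

		(void)av;
	}
	(void)av;
}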
Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20160802024317.31725-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo Cc: Tuomas Tynkkynen Signed-off-by: Greg Kroah-Hartman --- tools/perf/scripts/perl/Perf-Trace-Util/Build | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Build b/tools/perf/scripts/perl/Perf-Trace-Util/Build index 928e110179cb..34faecf774ae 100644 --- a/tools/perf/scripts/perl/Perf-Trace-Util/Build +++ b/tools/perf/scripts/perl/Perf-Trace-Util/Build @@ -1,3 +1,5 @@ libperf-y += Context.o -CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default +CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes +CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef +CFLAGS_Context.o += -Wno-switch-default -Wno-shadow From 031b02bc16aeeb34c8038026cbbca1e6430c9d75 Mon Sep 17 00:00:00 2001 From: Kasin Li Date: Mon, 19 Jun 2017 15:36:53 -0600 Subject: [PATCH 22/50] drm/msm: Fix potential buffer overflow issue commit 4a630fadbb29d9efaedb525f1a8f7449ad107641 upstream. In function submit_create, if nr_cmds or nr_bos is assigned with negative value, the allocated buffer may be small than intended. Using this buffer will lead to buffer overflow issue. Signed-off-by: Kasin Li Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/msm/msm_gem_submit.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index fed44d4e5b72..34edb4a4ccd4 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -34,10 +34,13 @@ static inline void __user *to_user_ptr(u64 address) } static struct msm_gem_submit *submit_create(struct drm_device *dev, - struct msm_gpu *gpu, int nr) + struct msm_gpu *gpu, uint32_t nr) { struct msm_gem_submit *submit; - int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); + uint64_t sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); + + if (sz > SIZE_MAX) + return NULL; submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); if (submit) { From ded34f972348b0f252256bee161839c1aa5d8ae4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 30 Jun 2017 10:59:15 +0300 Subject: [PATCH 23/50] drm/msm: fix an integer overflow test commit 65e93108891e571f177c202add9288eda9ac4100 upstream. We recently added an integer overflow check but it needs an additional tweak to work properly on 32 bit systems. The problem is that we're doing the right hand side of the assignment as type unsigned long so the max it will have an integer overflow instead of being larger than SIZE_MAX. That means the "sz > SIZE_MAX" condition is never true even on 32 bit systems. We need to first cast it to u64 and then do the math. 
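A minimal sketch of the failure mode (illustrative, mirroring the
one-line change below): on a 32-bit system the multiplication and
addition are evaluated in 32-bit arithmetic, so the sum wraps before it
is ever compared, unless one operand is widened first:

#include <linux/kernel.h>

static u64 submit_size(u32 nr, size_t hdr, size_t elem)
{
	/*
	 * (u64)nr forces the multiplication (and the following add) to
	 * be done in 64 bits; without the cast the product wraps at
	 * 2^32 on 32-bit and the caller's "sz > SIZE_MAX" test can
	 * never fire there.
	 */
	return hdr + (u64)nr * elem;
}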
Fixes: 4a630fadbb29 ("drm/msm: Fix potential buffer overflow issue") Signed-off-by: Dan Carpenter Acked-by: Jordan Crouse Signed-off-by: Rob Clark Cc: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/msm/msm_gem_submit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 34edb4a4ccd4..f4eaccb191d4 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -37,7 +37,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, struct msm_gpu *gpu, uint32_t nr) { struct msm_gem_submit *submit; - uint64_t sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); + uint64_t sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0])); if (sz > SIZE_MAX) return NULL; From a48fce6623805e047ef15760dc0335a5c5b694a3 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (VMware)" Date: Tue, 17 Oct 2017 14:55:24 -0400 Subject: [PATCH 24/50] tracing/samples: Fix creation and deletion of simple_thread_fn creation commit 6575257c60e1a26a5319ccf2b5ce5b6449001017 upstream. Commit 7496946a8 ("tracing: Add samples of DECLARE_EVENT_CLASS() and DEFINE_EVENT()") added template examples for all the events. It created a DEFINE_EVENT_FN() example which reused the foo_bar_reg and foo_bar_unreg functions. Enabling both the TRACE_EVENT_FN() and DEFINE_EVENT_FN() example trace events caused the foo_bar_reg to be called twice, creating the test thread twice. The foo_bar_unreg would remove it only once, even if it was called multiple times, leaving a thread existing when the module is unloaded, causing an oops. Add a ref count and allow foo_bar_reg() and foo_bar_unreg() be called by multiple trace events. Fixes: 7496946a8 ("tracing: Add samples of DECLARE_EVENT_CLASS() and DEFINE_EVENT()") Signed-off-by: Steven Rostedt (VMware) Signed-off-by: Greg Kroah-Hartman --- samples/trace_events/trace-events-sample.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index 880a7d1d27d2..d69715d99e4d 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg) } static DEFINE_MUTEX(thread_mutex); +static bool simple_thread_cnt; void foo_bar_reg(void) { + mutex_lock(&thread_mutex); + if (simple_thread_cnt++) + goto out; + pr_info("Starting thread for foo_bar_fn\n"); /* * We shouldn't be able to start a trace when the module is * unloading (there's other locks to prevent that). But * for consistency sake, we still take the thread_mutex. */ - mutex_lock(&thread_mutex); simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); + out: mutex_unlock(&thread_mutex); } void foo_bar_unreg(void) { - pr_info("Killing thread for foo_bar_fn\n"); - /* protect against module unloading */ mutex_lock(&thread_mutex); + if (--simple_thread_cnt) + goto out; + + pr_info("Killing thread for foo_bar_fn\n"); if (simple_tsk_fn) kthread_stop(simple_tsk_fn); simple_tsk_fn = NULL; + out: mutex_unlock(&thread_mutex); } From b83c2880b34884c42bdd0ac28932d3e70743cc9b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 27 Oct 2017 20:35:31 -0700 Subject: [PATCH 25/50] Fix tracing sample code warning. commit a0cb2b5c390151837b08e5f7bca4a6ecddbcd39c upstream. 
Commit 6575257c60e1 ("tracing/samples: Fix creation and deletion of simple_thread_fn creation") introduced a new warning due to using a boolean as a counter. Just make it "int". Fixes: 6575257c60e1 ("tracing/samples: Fix creation and deletion of simple_thread_fn creation") Cc: Steven Rostedt Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- samples/trace_events/trace-events-sample.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index d69715d99e4d..4ccff66523c9 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c @@ -78,7 +78,7 @@ static int simple_thread_fn(void *arg) } static DEFINE_MUTEX(thread_mutex); -static bool simple_thread_cnt; +static int simple_thread_cnt; void foo_bar_reg(void) { From ba28f16cfa2a410ba8aff60c2918417b91e05871 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Sat, 7 Oct 2017 22:37:43 +0000 Subject: [PATCH 26/50] PM / wakeirq: report a wakeup_event on dedicated wekup irq [ Upstream commit 09bb6e93956ae5175b96905b723ec879c3ca0765 ] There are two reasons for reporting wakeup event when dedicated wakeup IRQ is triggered: - wakeup events accounting, so proper statistical data will be displayed in sysfs and debugfs; - there are small window when System is entering suspend during which dedicated wakeup IRQ can be lost: dpm_suspend_noirq() |- device_wakeup_arm_wake_irqs() |- dev_pm_arm_wake_irq(X) |- IRQ is enabled and marked as wakeup source [1]... |- suspend_device_irqs() |- suspend_device_irq(X) |- irqd_set(X, IRQD_WAKEUP_ARMED); |- wakup IRQ armed The wakeup IRQ can be lost if it's triggered at point [1] and not armed yet. Hence, fix above cases by adding simple pm_wakeup_event() call in handle_threaded_wake_irq(). Fixes: 4990d4fe327b (PM / Wakeirq: Add automated device wake IRQ handling) Signed-off-by: Grygorii Strashko Tested-by: Keerthy [ tony@atomide.com: added missing return to avoid warnings ] Tested-by: Tony Lindgren Signed-off-by: Tony Lindgren Signed-off-by: Rafael J. Wysocki Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/base/power/wakeirq.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 404d94c6c8bc..feba1b211898 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) struct wake_irq *wirq = _wirq; int res; + /* Maybe abort suspend? */ + if (irqd_is_wakeup_set(irq_get_irq_data(irq))) { + pm_wakeup_event(wirq->dev, 0); + + return IRQ_HANDLED; + } + /* We don't want RPM_ASYNC or RPM_NOWAIT here */ res = pm_runtime_resume(wirq->dev); if (res < 0) From a3e021504fcf75123eb05f10ce794d63e5a0c405 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 7 Oct 2017 22:37:44 +0000 Subject: [PATCH 27/50] mmc: s3cmci: include linux/interrupt.h for tasklet_struct [ Upstream commit e1c6ec26b853e9062f0b3daaf695c546d0702953 ] I got this new build error on today's linux-next drivers/mmc/host/s3cmci.h:69:24: error: field 'pio_tasklet' has incomplete type struct tasklet_struct pio_tasklet; drivers/mmc/host/s3cmci.c: In function 's3cmci_enable_irq': drivers/mmc/host/s3cmci.c:390:4: error: implicit declaration of function 'enable_irq';did you mean 'enable_imask'? 
[-Werror=implicit-function-declaration] While I haven't found out why this happened now and not earlier, the solution is obvious, we should include the header that defines the structure. Signed-off-by: Arnd Bergmann Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/s3cmci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 6291d5042ef2..6fed41bd016a 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include From ed414aeb5291ae1e1733bb24d8801b0167a454f0 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Sat, 7 Oct 2017 22:37:44 +0000 Subject: [PATCH 28/50] ARM: pxa: Don't rely on public mmc header to include leds.h [ Upstream commit 40d727a2defa176b78159d445bcf7afcc2ed9021 ] Some of the pxa platforms, balloon3, colibri-pxa270-income, corgi, trizeps4, vpac270, zeus and zylonite depends on leds.h. Explicitly include it instead of relying on the public mmc header host.h. Cc: Daniel Mack Cc: Haojian Zhuang Cc: Robert Jarzmik Cc: Signed-off-by: Ulf Hansson Acked-by: Robert Jarzmik Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/arm/mach-pxa/balloon3.c | 1 + arch/arm/mach-pxa/colibri-pxa270-income.c | 1 + arch/arm/mach-pxa/corgi.c | 1 + arch/arm/mach-pxa/trizeps4.c | 1 + arch/arm/mach-pxa/vpac270.c | 1 + arch/arm/mach-pxa/zeus.c | 1 + arch/arm/mach-pxa/zylonite.c | 1 + 7 files changed, 7 insertions(+) diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c index a727282bfa99..761d7d64d643 100644 --- a/arch/arm/mach-pxa/balloon3.c +++ b/arch/arm/mach-pxa/balloon3.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/colibri-pxa270-income.c b/arch/arm/mach-pxa/colibri-pxa270-income.c index db20d25daaab..1b92a4112bd1 100644 --- a/arch/arm/mach-pxa/colibri-pxa270-income.c +++ b/arch/arm/mach-pxa/colibri-pxa270-income.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 89f790dda93e..d1f12909f740 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c index 066e3a250ee0..5e50c53f1f4b 100644 --- a/arch/arm/mach-pxa/trizeps4.c +++ b/arch/arm/mach-pxa/trizeps4.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c index 54122a983ae3..2cce92924068 100644 --- a/arch/arm/mach-pxa/vpac270.c +++ b/arch/arm/mach-pxa/vpac270.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 30e62a3f0701..d757cfb5f8a6 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c index e20359a7433c..d7f0a7d87ef2 100644 --- a/arch/arm/mach-pxa/zylonite.c +++ b/arch/arm/mach-pxa/zylonite.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include From 8800aba5029239cdf209381e19b8d3a93fe4e765 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sat, 7 Oct 2017 22:37:44 +0000 Subject: [PATCH 
29/50] mfd: ab8500-sysctrl: Handle probe deferral [ Upstream commit 7e9c40c63933a643908d686bd89dfc2315e8c70a ] In the current boot, clients making use of the AB8500 sysctrl may be probed before the ab8500-sysctrl driver. This gives them -EINVAL, but should rather give -EPROBE_DEFER. Before this, the abx500 clock driver didn't probe properly, and as a result the codec driver in turn using the clocks did not probe properly. After this patch, everything probes properly. Also add OF compatible-string probing. This driver is all device tree, so let's just make a drive-by-fix of that as well. Signed-off-by: Linus Walleij Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/ab8500-sysctrl.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c index 0d1825696153..405ce78c1ef4 100644 --- a/drivers/mfd/ab8500-sysctrl.c +++ b/drivers/mfd/ab8500-sysctrl.c @@ -99,7 +99,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value) u8 bank; if (sysctrl_dev == NULL) - return -EINVAL; + return -EPROBE_DEFER; bank = (reg >> 8); if (!valid_bank(bank)) @@ -115,11 +115,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value) u8 bank; if (sysctrl_dev == NULL) - return -EINVAL; + return -EPROBE_DEFER; bank = (reg >> 8); - if (!valid_bank(bank)) + if (!valid_bank(bank)) { + pr_err("invalid bank\n"); return -EINVAL; + } return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank, (u8)(reg & 0xFF), mask, value); @@ -180,9 +182,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id ab8500_sysctrl_match[] = { + { .compatible = "stericsson,ab8500-sysctrl", }, + {} +}; + static struct platform_driver ab8500_sysctrl_driver = { .driver = { .name = "ab8500-sysctrl", + .of_match_table = ab8500_sysctrl_match, }, .probe = ab8500_sysctrl_probe, .remove = ab8500_sysctrl_remove, From f9776d7ee5f50da18bebf5f81e4d8b1f9a13a592 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 7 Oct 2017 22:37:44 +0000 Subject: [PATCH 30/50] mfd: axp20x: Fix axp288 PEK_DBR and PEK_DBF irqs being swapped [ Upstream commit 1af468ebe45591651ec3bafc2e9ddc6fdef70ae0 ] The R in PEK_DBR stands for rising, so it should be mapped to AXP288_IRQ_POKP where the last P stands for positive edge. Likewise PEK_DBF should be mapped to the falling edge, aka the _N_egative edge, so it should be mapped to AXP288_IRQ_POKN. This fixes the inverted powerbutton status reporting by the axp20x-pek driver. 
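For context, a sketch of how a consumer typically binds these resources
by name (simplified; assumes the standard platform_get_irq_byname()
helper): once the table below is corrected, "PEK_DBR" resolves to the
positive-edge (rising) interrupt and "PEK_DBF" to the negative-edge
(falling) one, as their names promise:

#include <linux/platform_device.h>

static int pek_probe_sketch(struct platform_device *pdev)
{
	int irq_r = platform_get_irq_byname(pdev, "PEK_DBR"); /* rising */
	int irq_f = platform_get_irq_byname(pdev, "PEK_DBF"); /* falling */

	if (irq_r < 0)
		return irq_r;
	if (irq_f < 0)
		return irq_f;

	/* request_threaded_irq() on each and report press/release */
	return 0;
}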
Signed-off-by: Hans de Goede Acked-by: Chen-Yu Tsai Signed-off-by: Lee Jones Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/mfd/axp20x.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index 9842199e2e6c..89a2dd4d212a 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -164,14 +164,14 @@ static struct resource axp22x_pek_resources[] = { static struct resource axp288_power_button_resources[] = { { .name = "PEK_DBR", - .start = AXP288_IRQ_POKN, - .end = AXP288_IRQ_POKN, + .start = AXP288_IRQ_POKP, + .end = AXP288_IRQ_POKP, .flags = IORESOURCE_IRQ, }, { .name = "PEK_DBF", - .start = AXP288_IRQ_POKP, - .end = AXP288_IRQ_POKP, + .start = AXP288_IRQ_POKN, + .end = AXP288_IRQ_POKN, .flags = IORESOURCE_IRQ, }, }; From a76eb0e8655ef8cf58917549f667e8c9a9be1fc6 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Sat, 7 Oct 2017 22:37:45 +0000 Subject: [PATCH 31/50] staging: rtl8712u: Fix endian settings for structs describing network packets [ Upstream commit 221c46d28957bd6e2158abc2179ce4a8c9ce07d3 ] The headers describing a number of network packets do not have the correct endian settings for several types of data. Signed-off-by: Larry Finger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8712/ieee80211.h | 84 ++++++++++++++--------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h index d374824c4f33..7b16c05b5e8b 100644 --- a/drivers/staging/rtl8712/ieee80211.h +++ b/drivers/staging/rtl8712/ieee80211.h @@ -143,52 +143,52 @@ struct ieee_ibss_seq { }; struct ieee80211_hdr { - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; + __le16 seq_ctl; u8 addr4[ETH_ALEN]; -} __packed; +} __packed __aligned(2); struct ieee80211_hdr_3addr { - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; -} __packed; + __le16 seq_ctl; +} __packed __aligned(2); struct ieee80211_hdr_qos { - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; + __le16 seq_ctl; u8 addr4[ETH_ALEN]; - u16 qc; -} __packed; + __le16 qc; +} __packed __aligned(2); struct ieee80211_hdr_3addr_qos { - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; - u16 seq_ctl; - u16 qc; + __le16 seq_ctl; + __le16 qc; } __packed; struct eapol { u8 snap[6]; - u16 ethertype; + __be16 ethertype; u8 version; u8 type; - u16 length; + __le16 length; } __packed; @@ -528,13 +528,13 @@ struct ieee80211_security { */ struct ieee80211_header_data { - u16 frame_ctl; - u16 duration_id; + __le16 frame_ctl; + __le16 duration_id; u8 addr1[6]; u8 addr2[6]; u8 addr3[6]; - u16 seq_ctrl; -}; + __le16 seq_ctrl; +} __packed __aligned(2); #define BEACON_PROBE_SSID_ID_POSITION 12 @@ -566,18 +566,18 @@ struct ieee80211_info_element { /* * These are the data types that can make up management packets * - u16 auth_algorithm; - u16 auth_sequence; - u16 beacon_interval; - u16 capability; + __le16 auth_algorithm; + __le16 auth_sequence; + __le16 beacon_interval; + __le16 capability; u8 current_ap[ETH_ALEN]; - u16 listen_interval; + __le16 
listen_interval; struct { u16 association_id:14, reserved:2; } __packed; - u32 time_stamp[2]; - u16 reason; - u16 status; + __le32 time_stamp[2]; + __le16 reason; + __le16 status; */ #define IEEE80211_DEFAULT_TX_ESSID "Penguin" @@ -585,16 +585,16 @@ struct ieee80211_info_element { struct ieee80211_authentication { struct ieee80211_header_data header; - u16 algorithm; - u16 transaction; - u16 status; + __le16 algorithm; + __le16 transaction; + __le16 status; } __packed; struct ieee80211_probe_response { struct ieee80211_header_data header; - u32 time_stamp[2]; - u16 beacon_interval; - u16 capability; + __le32 time_stamp[2]; + __le16 beacon_interval; + __le16 capability; struct ieee80211_info_element info_element; } __packed; @@ -604,16 +604,16 @@ struct ieee80211_probe_request { struct ieee80211_assoc_request_frame { struct ieee80211_hdr_3addr header; - u16 capability; - u16 listen_interval; + __le16 capability; + __le16 listen_interval; struct ieee80211_info_element_hdr info_element; } __packed; struct ieee80211_assoc_response_frame { struct ieee80211_hdr_3addr header; - u16 capability; - u16 status; - u16 aid; + __le16 capability; + __le16 status; + __le16 aid; } __packed; struct ieee80211_txb { From 5624ea1610407db30f1adaf896f6d9c3cb66f072 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 7 Oct 2017 22:37:45 +0000 Subject: [PATCH 32/50] ext4: fix stripe-unaligned allocations [ Upstream commit d9b22cf9f5466a057f2a4f1e642b469fa9d73117 ] When a filesystem is created using: mkfs.ext4 -b 4096 -E stride=512 and we try to allocate 64MB extent, we will end up directly in ext4_mb_complex_scan_group(). This is because the request is detected as power-of-two allocation (so we start in ext4_mb_regular_allocator() with ac_criteria == 0) however the check before ext4_mb_simple_scan_group() refuses the direct buddy scan because the allocation request is too large. Since cr == 0, the check whether we should use ext4_mb_scan_aligned() fails as well and we fall back to ext4_mb_complex_scan_group(). Fix the problem by checking for upper limit on power-of-two requests directly when detecting them. Reported-by: Ross Zwisler Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/ext4/mballoc.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 84cd77663e1f..1ba82dc5afa3 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) * We search using buddy data only if the order of the request * is greater than equal to the sbi_s_mb_order2_reqs * You can tune it via /sys/fs/ext4//mb_order2_req + * We also support searching for power-of-two requests only for + * requests upto maximum buddy size we have constructed. 
*/ - if (i >= sbi->s_mb_order2_reqs) { + if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) { /* * This should tell if fe_len is exactly power of 2 */ @@ -2207,7 +2209,7 @@ repeat: } ac->ac_groups_scanned++; - if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2) + if (cr == 0) ext4_mb_simple_scan_group(ac, &e4b); else if (cr == 1 && sbi->s_stripe && !(ac->ac_g_ex.fe_len % sbi->s_stripe)) From 358008062202cb21054cb8130c1e5f184a3784c2 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Sat, 7 Oct 2017 22:37:45 +0000 Subject: [PATCH 33/50] ext4: do not use stripe_width if it is not set [ Upstream commit 5469d7c3087ecaf760f54b447f11af6061b7c897 ] Avoid using stripe_width for sbi->s_stripe value if it is not actually set. It prevents using the stride for sbi->s_stripe. Signed-off-by: Jan Kara Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- fs/ext4/super.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 32941cd6d34b..8bdb0cc2722f 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2499,9 +2499,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi) if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group) ret = sbi->s_stripe; - else if (stripe_width <= sbi->s_blocks_per_group) + else if (stripe_width && stripe_width <= sbi->s_blocks_per_group) ret = stripe_width; - else if (stride <= sbi->s_blocks_per_group) + else if (stride && stride <= sbi->s_blocks_per_group) ret = stride; else ret = 0; From dec5fcf11b454ebdac7bdf40775bc589dffe2980 Mon Sep 17 00:00:00 2001 From: Chris Brandt Date: Sat, 7 Oct 2017 22:37:45 +0000 Subject: [PATCH 34/50] i2c: riic: correctly finish transfers [ Upstream commit 71ccea095ea1d4efd004dab971be6d599e06fc3f ] This fixes the condition where the controller has not fully completed its final transfer and leaves the bus and controller in a undesirable state. At the end of the last transmitted byte, the existing driver would just signal for a STOP condition to be transmitted then immediately signal completion. However, the full STOP procedure might not have fully taken place by the time the runtime PM shuts off the peripheral clock, leaving the bus in a suspended state. Alternatively, the STOP condition on the bus may have completed, but when the next transaction is requested by the upper layer, not all the necessary register cleanup was finished from the last transfer which made the driver return BUS BUSY when it really wasn't. This patch now makes all transmit and receive transactions wait for the STOP condition to fully complete before signaling a completed transaction. With this new method, runtime PM no longer seems to be an issue. 
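The resulting flow, as a condensed sketch (illustrative names; the real
handlers are in the diff below): the transmit/receive paths only arm the
STOP condition and enable the stop interrupt, and the completion that
the transfer function sleeps on is signalled from the stop handler,
after the bus has actually gone idle:

#include <linux/completion.h>
#include <linux/interrupt.h>

struct bus_xfer {
	struct completion msg_done;
};

static irqreturn_t stop_isr_sketch(int irq, void *data)
{
	struct bus_xfer *x = data;

	/* quiesce the status/interrupt-enable registers here, then: */
	complete(&x->msg_done);		/* only now is the bus idle */
	return IRQ_HANDLED;
}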
Fixes: 310c18a41450 ("i2c: riic: add driver") Signed-off-by: Chris Brandt Reviewed-by: Wolfram Sang Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-riic.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c index d7e3af671543..d8803c3bbfdc 100644 --- a/drivers/i2c/busses/i2c-riic.c +++ b/drivers/i2c/busses/i2c-riic.c @@ -80,6 +80,7 @@ #define ICIER_TEIE 0x40 #define ICIER_RIE 0x20 #define ICIER_NAKIE 0x10 +#define ICIER_SPIE 0x08 #define ICSR2_NACKF 0x10 @@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data) return IRQ_NONE; } - if (riic->is_last || riic->err) + if (riic->is_last || riic->err) { + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); - - writeb(0, riic->base + RIIC_ICIER); - complete(&riic->msg_done); + } return IRQ_HANDLED; } @@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data) if (riic->bytes_left == 1) { /* STOP must come before we set ACKBT! */ - if (riic->is_last) + if (riic->is_last) { + riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER); writeb(ICCR2_SP, riic->base + RIIC_ICCR2); + } riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3); - writeb(0, riic->base + RIIC_ICIER); - complete(&riic->msg_done); } else { riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3); } @@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data) return IRQ_HANDLED; } +static irqreturn_t riic_stop_isr(int irq, void *data) +{ + struct riic_dev *riic = data; + + /* read back registers to confirm writes have fully propagated */ + writeb(0, riic->base + RIIC_ICSR2); + readb(riic->base + RIIC_ICSR2); + writeb(0, riic->base + RIIC_ICIER); + readb(riic->base + RIIC_ICIER); + + complete(&riic->msg_done); + + return IRQ_HANDLED; +} + static u32 riic_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; @@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = { { .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" }, { .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" }, { .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" }, + { .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" }, { .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" }, }; From 3b7d9a95ccd38436295ab6598c526dac83504556 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sat, 7 Oct 2017 22:37:46 +0000 Subject: [PATCH 35/50] drm/amdgpu: when dpm disabled, also need to stop/start vce. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 28ed5504ab4b211a4e589e648e5ebd1e0caa7a6a ] Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index bb0da76051a1..e5da6f19b9b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -284,6 +284,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) amdgpu_dpm_enable_vce(adev, false); } else { amdgpu_asic_set_vce_clocks(adev, 0, 0); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); } } else { schedule_delayed_work(&adev->vce.idle_work, @@ -315,6 +319,11 @@ static void amdgpu_vce_note_usage(struct amdgpu_device *adev) amdgpu_dpm_enable_vce(adev, true); } else { amdgpu_asic_set_vce_clocks(adev, 53300, 40000); + amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } } } From 0806eaf13850334e477ea0589812b2c316e0b388 Mon Sep 17 00:00:00 2001 From: Taeung Song Date: Sat, 7 Oct 2017 22:37:46 +0000 Subject: [PATCH 36/50] perf tools: Only increase index if perf_evsel__new_idx() succeeds [ Upstream commit 75fc5ae5cc53fff71041ecadeb3354a2b4c9fe42 ] Signed-off-by: Taeung Song Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Wang Nan Link: http://lkml.kernel.org/r/1485952447-7013-2-git-send-email-treeze.taeung@gmail.com Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- tools/perf/util/parse-events.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 38304b7e4f81..e81dfb2e239c 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -291,10 +291,11 @@ __add_event(struct list_head *list, int *idx, event_attr_init(attr); - evsel = perf_evsel__new_idx(attr, (*idx)++); + evsel = perf_evsel__new_idx(attr, *idx); if (!evsel) return NULL; + (*idx)++; evsel->cpus = cpu_map__get(cpus); evsel->own_cpus = cpu_map__get(cpus); From 762d0762b9bb055962526c5fdd71a28d371c3df3 Mon Sep 17 00:00:00 2001 From: Oleh Kravchenko Date: Sat, 7 Oct 2017 22:37:46 +0000 Subject: [PATCH 37/50] cx231xx: Fix I2C on Internal Master 3 Bus [ Upstream commit 6c5da8031a3abfad259190d35f83d89568b72ee2 ] Internal Master 3 Bus can send and receive only 4 bytes per time. 
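A sketch of what honoring that limit looks like (illustrative; the
actual change below just routes bRequest == 0x2 into the driver's
existing split-transfer path for long requests):

#include <linux/kernel.h>

static void send_split_sketch(const u8 *buf, unsigned int len)
{
	while (len) {
		unsigned int n = min(len, 4U);	/* bus limit: 4 bytes */

		/* issue one vendor transfer of n bytes from buf here */
		buf += n;
		len -= n;
	}
}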
Signed-off-by: Oleh Kravchenko Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/usb/cx231xx/cx231xx-core.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/media/usb/cx231xx/cx231xx-core.c b/drivers/media/usb/cx231xx/cx231xx-core.c index 19b0293312a0..07670117f922 100644 --- a/drivers/media/usb/cx231xx/cx231xx-core.c +++ b/drivers/media/usb/cx231xx/cx231xx-core.c @@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev, */ if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) || (ven_req->bRequest == 0x5) || - (ven_req->bRequest == 0x6))) { + (ven_req->bRequest == 0x6) || + + /* Internal Master 3 Bus can send + * and receive only 4 bytes per time + */ + (ven_req->bRequest == 0x2))) { unsend_size = 0; pdata = ven_req->pBuff; From fff544c8cf6848c0ac1293d0f2e2b64629a64604 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Sat, 7 Oct 2017 22:37:47 +0000 Subject: [PATCH 38/50] xen/manage: correct return value check on xenbus_scanf() [ Upstream commit 4fed1b125eb6252bde478665fc05d4819f774fa8 ] A negative return value indicates an error; in fact the function at present won't ever return zero. Signed-off-by: Jan Beulich Reviewed-by: Juergen Gross Signed-off-by: Boris Ostrovsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/xen/manage.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index e12bd3635f83..e8850b0e3272 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -275,7 +275,7 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec, err = xenbus_transaction_start(&xbt); if (err) return; - if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) { + if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) < 0) { pr_err("Unable to read sysrq code in control/sysrq\n"); xenbus_transaction_end(xbt, 1); return; From d1f96c30ce2d33b3e138363417d4a2ba67b619b8 Mon Sep 17 00:00:00 2001 From: Raghava Aditya Renukunta Date: Sat, 7 Oct 2017 22:37:47 +0000 Subject: [PATCH 39/50] scsi: aacraid: Process Error for response I/O [ Upstream commit 4ec57fb4edaec523f0f78a0449a3b063749ac58b ] Make sure that the driver processes error conditions even in the fast response path for response from the adapter. Signed-off-by: Raghava Aditya Renukunta Signed-off-by: Dave Carroll Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/aacraid/aachba.c | 297 ++++++++++++++++++---------------- 1 file changed, 155 insertions(+), 142 deletions(-) diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c index e4c243748a97..de33801ca31e 100644 --- a/drivers/scsi/aacraid/aachba.c +++ b/drivers/scsi/aacraid/aachba.c @@ -2977,16 +2977,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr) return; BUG_ON(fibptr == NULL); + dev = fibptr->dev; - scsi_dma_unmap(scsicmd); - - /* expose physical device if expose_physicald flag is on */ - if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) - && expose_physicals > 0) - aac_expose_phy_device(scsicmd); - srbreply = (struct aac_srb_reply *) fib_data(fibptr); + scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { @@ -2999,158 +2994,176 @@ static void aac_srb_callback(void *context, struct fib * fibptr) */ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) - le32_to_cpu(srbreply->data_xfer_length)); - /* - * First check the fib status - */ + } - if (le32_to_cpu(srbreply->status) != ST_OK) { - int len; - printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status)); - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), - SCSI_SENSE_BUFFERSIZE); + scsi_dma_unmap(scsicmd); + + /* expose physical device if expose_physicald flag is on */ + if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) + && expose_physicals > 0) + aac_expose_phy_device(scsicmd); + + /* + * First check the fib status + */ + + if (le32_to_cpu(srbreply->status) != ST_OK) { + int len; + + pr_warn("aac_srb_callback: srb failed, status = %d\n", + le32_to_cpu(srbreply->status)); + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); + scsicmd->result = DID_ERROR << 16 + | COMMAND_COMPLETE << 8 + | SAM_STAT_CHECK_CONDITION; + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); + } + + /* + * Next check the srb status + */ + switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { + case SRB_STATUS_ERROR_RECOVERY: + case SRB_STATUS_PENDING: + case SRB_STATUS_SUCCESS: + scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; + break; + case SRB_STATUS_DATA_OVERRUN: + switch (scsicmd->cmnd[0]) { + case READ_6: + case WRITE_6: + case READ_10: + case WRITE_10: + case READ_12: + case WRITE_12: + case READ_16: + case WRITE_16: + if (le32_to_cpu(srbreply->data_xfer_length) + < scsicmd->underflow) + pr_warn("aacraid: SCSI CMD underflow\n"); + else + pr_warn("aacraid: SCSI CMD Data Overrun\n"); scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8 - | SAM_STAT_CHECK_CONDITION; - memcpy(scsicmd->sense_buffer, - srbreply->sense_data, len); - } - - /* - * Next check the srb status - */ - switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { - case SRB_STATUS_ERROR_RECOVERY: - case SRB_STATUS_PENDING: - case SRB_STATUS_SUCCESS: + | COMMAND_COMPLETE << 8; + break; + case INQUIRY: + scsicmd->result = DID_OK << 16 + | COMMAND_COMPLETE << 8; + break; + default: scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; break; - case SRB_STATUS_DATA_OVERRUN: - switch (scsicmd->cmnd[0]) { - case READ_6: - case WRITE_6: - case READ_10: - case WRITE_10: - case READ_12: - case WRITE_12: - case READ_16: - case WRITE_16: - if (le32_to_cpu(srbreply->data_xfer_length) - < scsicmd->underflow) - printk(KERN_WARNING"aacraid: SCSI CMD underflow\n"); - else - 
printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n"); - scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8; - break; - case INQUIRY: { - scsicmd->result = DID_OK << 16 - | COMMAND_COMPLETE << 8; - break; - } - default: - scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; - break; - } - break; - case SRB_STATUS_ABORTED: - scsicmd->result = DID_ABORT << 16 | ABORT << 8; - break; - case SRB_STATUS_ABORT_FAILED: - /* - * Not sure about this one - but assuming the - * hba was trying to abort for some reason - */ - scsicmd->result = DID_ERROR << 16 | ABORT << 8; - break; - case SRB_STATUS_PARITY_ERROR: - scsicmd->result = DID_PARITY << 16 - | MSG_PARITY_ERROR << 8; - break; - case SRB_STATUS_NO_DEVICE: - case SRB_STATUS_INVALID_PATH_ID: - case SRB_STATUS_INVALID_TARGET_ID: - case SRB_STATUS_INVALID_LUN: - case SRB_STATUS_SELECTION_TIMEOUT: - scsicmd->result = DID_NO_CONNECT << 16 - | COMMAND_COMPLETE << 8; - break; + } + break; + case SRB_STATUS_ABORTED: + scsicmd->result = DID_ABORT << 16 | ABORT << 8; + break; + case SRB_STATUS_ABORT_FAILED: + /* + * Not sure about this one - but assuming the + * hba was trying to abort for some reason + */ + scsicmd->result = DID_ERROR << 16 | ABORT << 8; + break; + case SRB_STATUS_PARITY_ERROR: + scsicmd->result = DID_PARITY << 16 + | MSG_PARITY_ERROR << 8; + break; + case SRB_STATUS_NO_DEVICE: + case SRB_STATUS_INVALID_PATH_ID: + case SRB_STATUS_INVALID_TARGET_ID: + case SRB_STATUS_INVALID_LUN: + case SRB_STATUS_SELECTION_TIMEOUT: + scsicmd->result = DID_NO_CONNECT << 16 + | COMMAND_COMPLETE << 8; + break; - case SRB_STATUS_COMMAND_TIMEOUT: - case SRB_STATUS_TIMEOUT: - scsicmd->result = DID_TIME_OUT << 16 - | COMMAND_COMPLETE << 8; - break; + case SRB_STATUS_COMMAND_TIMEOUT: + case SRB_STATUS_TIMEOUT: + scsicmd->result = DID_TIME_OUT << 16 + | COMMAND_COMPLETE << 8; + break; - case SRB_STATUS_BUSY: - scsicmd->result = DID_BUS_BUSY << 16 - | COMMAND_COMPLETE << 8; - break; + case SRB_STATUS_BUSY: + scsicmd->result = DID_BUS_BUSY << 16 + | COMMAND_COMPLETE << 8; + break; - case SRB_STATUS_BUS_RESET: - scsicmd->result = DID_RESET << 16 - | COMMAND_COMPLETE << 8; - break; + case SRB_STATUS_BUS_RESET: + scsicmd->result = DID_RESET << 16 + | COMMAND_COMPLETE << 8; + break; - case SRB_STATUS_MESSAGE_REJECTED: - scsicmd->result = DID_ERROR << 16 - | MESSAGE_REJECT << 8; - break; - case SRB_STATUS_REQUEST_FLUSHED: - case SRB_STATUS_ERROR: - case SRB_STATUS_INVALID_REQUEST: - case SRB_STATUS_REQUEST_SENSE_FAILED: - case SRB_STATUS_NO_HBA: - case SRB_STATUS_UNEXPECTED_BUS_FREE: - case SRB_STATUS_PHASE_SEQUENCE_FAILURE: - case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: - case SRB_STATUS_DELAYED_RETRY: - case SRB_STATUS_BAD_FUNCTION: - case SRB_STATUS_NOT_STARTED: - case SRB_STATUS_NOT_IN_USE: - case SRB_STATUS_FORCE_ABORT: - case SRB_STATUS_DOMAIN_VALIDATION_FAIL: - default: + case SRB_STATUS_MESSAGE_REJECTED: + scsicmd->result = DID_ERROR << 16 + | MESSAGE_REJECT << 8; + break; + case SRB_STATUS_REQUEST_FLUSHED: + case SRB_STATUS_ERROR: + case SRB_STATUS_INVALID_REQUEST: + case SRB_STATUS_REQUEST_SENSE_FAILED: + case SRB_STATUS_NO_HBA: + case SRB_STATUS_UNEXPECTED_BUS_FREE: + case SRB_STATUS_PHASE_SEQUENCE_FAILURE: + case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: + case SRB_STATUS_DELAYED_RETRY: + case SRB_STATUS_BAD_FUNCTION: + case SRB_STATUS_NOT_STARTED: + case SRB_STATUS_NOT_IN_USE: + case SRB_STATUS_FORCE_ABORT: + case SRB_STATUS_DOMAIN_VALIDATION_FAIL: + default: #ifdef AAC_DETAILED_STATUS_INFO - printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi 
status 0x%x\n", - le32_to_cpu(srbreply->srb_status) & 0x3F, - aac_get_status_string( - le32_to_cpu(srbreply->srb_status) & 0x3F), - scsicmd->cmnd[0], - le32_to_cpu(srbreply->scsi_status)); + pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n", + le32_to_cpu(srbreply->srb_status) & 0x3F, + aac_get_status_string( + le32_to_cpu(srbreply->srb_status) & 0x3F), + scsicmd->cmnd[0], + le32_to_cpu(srbreply->scsi_status)); #endif - if ((scsicmd->cmnd[0] == ATA_12) - || (scsicmd->cmnd[0] == ATA_16)) { - if (scsicmd->cmnd[2] & (0x01 << 5)) { - scsicmd->result = DID_OK << 16 - | COMMAND_COMPLETE << 8; - break; - } else { - scsicmd->result = DID_ERROR << 16 - | COMMAND_COMPLETE << 8; - break; - } + /* + * When the CC bit is SET by the host in ATA pass thru CDB, + * driver is supposed to return DID_OK + * + * When the CC bit is RESET by the host, driver should + * return DID_ERROR + */ + if ((scsicmd->cmnd[0] == ATA_12) + || (scsicmd->cmnd[0] == ATA_16)) { + + if (scsicmd->cmnd[2] & (0x01 << 5)) { + scsicmd->result = DID_OK << 16 + | COMMAND_COMPLETE << 8; + break; } else { scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; - break; + break; } - } - if (le32_to_cpu(srbreply->scsi_status) - == SAM_STAT_CHECK_CONDITION) { - int len; - - scsicmd->result |= SAM_STAT_CHECK_CONDITION; - len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), - SCSI_SENSE_BUFFERSIZE); -#ifdef AAC_DETAILED_STATUS_INFO - printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n", - le32_to_cpu(srbreply->status), len); -#endif - memcpy(scsicmd->sense_buffer, - srbreply->sense_data, len); + } else { + scsicmd->result = DID_ERROR << 16 + | COMMAND_COMPLETE << 8; + break; } } + if (le32_to_cpu(srbreply->scsi_status) + == SAM_STAT_CHECK_CONDITION) { + int len; + + scsicmd->result |= SAM_STAT_CHECK_CONDITION; + len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), + SCSI_SENSE_BUFFERSIZE); +#ifdef AAC_DETAILED_STATUS_INFO + pr_warn("aac_srb_callback: check condition, status = %d len=%d\n", + le32_to_cpu(srbreply->status), len); +#endif + memcpy(scsicmd->sense_buffer, + srbreply->sense_data, len); + } + /* * OR in the scsi status (already shifted up a bit) */ From 28a8fc6416ab191ce5ee34e709067dde9ec0e563 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Sat, 7 Oct 2017 22:37:47 +0000 Subject: [PATCH 40/50] platform/x86: intel_mid_thermal: Fix module autoload [ Upstream commit a93151a72061e944a4915458b1b1d6d505c03bbf ] If the driver is built as a module, autoload won't work because the module alias information is not filled. So user-space can't match the registered device with the corresponding module. Export the module alias information using the MODULE_DEVICE_TABLE() macro. 
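As a hedged aside (not part of the patch itself): this fix follows the standard device-table export pattern. A minimal sketch, in which the driver name "example_dev" and the table name are hypothetical:

	#include <linux/module.h>
	#include <linux/platform_device.h>

	/* id table the platform bus matches against device names */
	static const struct platform_device_id example_id_table[] = {
		{ "example_dev", 0 },
		{ }	/* sentinel terminating the table */
	};
	/* Emits the "platform:example_dev" alias into the module info so
	 * that user space (udev/modprobe) can autoload the module when
	 * the matching device is registered. */
	MODULE_DEVICE_TABLE(platform, example_id_table);

With the table exported, the alias becomes visible to modinfo, as the before/after output below shows.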
Before this patch: $ modinfo drivers/platform/x86/intel_mid_thermal.ko | grep alias $ After this patch: $ modinfo drivers/platform/x86/intel_mid_thermal.ko | grep alias alias: platform:msic_thermal Signed-off-by: Javier Martinez Canillas Signed-off-by: Andy Shevchenko Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/intel_mid_thermal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 9f713b832ba3..5c768c4627d3 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c @@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = { { "msic_thermal", 1 }, { } }; +MODULE_DEVICE_TABLE(platform, therm_id_table); static struct platform_driver mid_thermal_driver = { .driver = { From 2d1d45396585549eaa6d01d9889a31c46237a96e Mon Sep 17 00:00:00 2001 From: Yang Sheng Date: Sat, 7 Oct 2017 22:37:48 +0000 Subject: [PATCH 41/50] staging: lustre: llite: don't invoke direct_IO for the EOF case [ Upstream commit 77759771fb95420d23876cb104ab65c022613325 ] The function generic_file_read_iter() does not check for EOF before invoking the direct_IO callback, so we have to check it ourselves. Signed-off-by: Yang Sheng Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-8969 Reviewed-on: https://review.whamcloud.com/24552 Reviewed-by: Bob Glossman Reviewed-by: Bobi Jam Reviewed-by: Oleg Drokin Signed-off-by: James Simmons Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/lustre/lustre/llite/rw26.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 3da4c01e2159..adeefb31cbad 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -376,6 +376,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, if (!lli->lli_has_smd) return -EBADF; + /* Check EOF by ourselves */ + if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode)) + return 0; + /* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */ if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK)) return -EINVAL; From efa8f1b7a65a9e603821b9f7952bd7f74127dcd3 Mon Sep 17 00:00:00 2001 From: frank zago Date: Sat, 7 Oct 2017 22:37:48 +0000 Subject: [PATCH 42/50] staging: lustre: hsm: stack overrun in hai_dump_data_field [ Upstream commit 22aadb91c0a0055935109c175f5446abfb130702 ] The function hai_dump_data_field will cause a stack buffer overrun when cat'ing /sys/fs/lustre/.../hsm/actions if an action has some data in it. hai_dump_data_field uses snprintf, but there is no check for truncation, and the value returned by snprintf is used as-is. The coordinator code calls hai_dump_data_field with 12 bytes in the buffer. The 6th byte of data is printed incompletely to make room for the terminating NUL. However snprintf still returns 2, so when hai_dump_data_field writes the final NUL, it does so outside the reserved buffer, in the 13th byte. This stack buffer overrun hangs my VM. Fix by checking that there is enough room for the next 2 characters plus the NUL terminator, and don't print half bytes. Change the format to 02X instead of .2X, which makes more sense. Signed-off-by: frank zago Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-8171 Reviewed-on: http://review.whamcloud.com/20338 Reviewed-by: John L.
Hammond Reviewed-by: Jean-Baptiste Riaux Reviewed-by: Oleg Drokin Signed-off-by: James Simmons Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../lustre/lustre/include/lustre/lustre_user.h | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h index 80f8ec529424..8ed4558238fc 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h @@ -1063,23 +1063,21 @@ struct hsm_action_item { * \retval buffer */ static inline char *hai_dump_data_field(struct hsm_action_item *hai, - char *buffer, int len) + char *buffer, size_t len) { - int i, sz, data_len; + int i, data_len; char *ptr; ptr = buffer; - sz = len; data_len = hai->hai_len - sizeof(*hai); - for (i = 0 ; (i < data_len) && (sz > 0) ; i++) { - int cnt; - - cnt = snprintf(ptr, sz, "%.2X", - (unsigned char)hai->hai_data[i]); - ptr += cnt; - sz -= cnt; + for (i = 0; (i < data_len) && (len > 2); i++) { + snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]); + ptr += 2; + len -= 2; } + *ptr = '\0'; + return buffer; } From 2d097e5f5c039618a49a37f7ee457bc4e5f48d1d Mon Sep 17 00:00:00 2001 From: Alexander Boyko Date: Sat, 7 Oct 2017 22:37:48 +0000 Subject: [PATCH 43/50] staging: lustre: ptlrpc: skip lock if export failed [ Upstream commit 4c43c27ddc461d8473cedd70f2549614641dfbc7 ] This patch resolves an IO vs. eviction race. After a failed eviction, the export stayed on the stale list while a client with IO in flight reconnected. The client sent a brw RPC carrying the last lock cookie over the new connection. The lock with the failed export was found and the assertion was triggered: (ost_handler.c:1812:ost_prolong_lock_one()) ASSERTION( lock->l_export == opd->opd_exp ) failed: 1. Skip the lock in ldlm_handle2lock() if the lock's export has failed. 2. Validation of the lock for IO was added at hpreq_check(). The lock search is based on the granted interval tree. If the server doesn't have a valid lock, it replies to the client with ESTALE.
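As an illustration of point 1, a condensed, hedged sketch of the guard pattern; handle2object() stands in for the real lookup, while the actual hunk below operates on __ldlm_handle2lock():

	struct ldlm_lock *lock = handle2object(handle);	/* lookup takes a ref */

	if (lock && lock->l_export && lock->l_export->exp_failed) {
		LDLM_LOCK_PUT(lock);	/* drop the lookup's reference */
		lock = NULL;		/* caller now treats the handle as stale */
	}

The diff adds exactly this check and makes the high-priority request path turn a failed validation into an ESTALE reply instead of an assertion.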
Signed-off-by: Alexander Boyko Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-7702 Seagate-bug-id: MRP-2787 Reviewed-on: http://review.whamcloud.com/18120 Reviewed-by: Fan Yong Reviewed-by: Vitaly Fertman Reviewed-by: Oleg Drokin Signed-off-by: James Simmons Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- .../staging/lustre/lustre/ldlm/ldlm_lock.c | 7 +++++++ .../staging/lustre/lustre/ptlrpc/service.c | 21 +++++++------------ 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c index 7f8c70056ffd..040553d6e316 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c @@ -550,6 +550,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle, if (lock == NULL) return NULL; + if (lock->l_export && lock->l_export->exp_failed) { + CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n", + lock, lock->l_export); + LDLM_LOCK_PUT(lock); + return NULL; + } + /* It's unlikely but possible that someone marked the lock as * destroyed after we did handle2object on it */ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) { diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c index f45898f17793..6d3c25ccb297 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/service.c +++ b/drivers/staging/lustre/lustre/ptlrpc/service.c @@ -1240,20 +1240,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, * it may hit swab race at LU-1044. */ if (req->rq_ops->hpreq_check) { rc = req->rq_ops->hpreq_check(req); - /** - * XXX: Out of all current - * ptlrpc_hpreq_ops::hpreq_check(), only - * ldlm_cancel_hpreq_check() can return an error code; - * other functions assert in similar places, which seems - * odd. What also does not seem right is that handlers - * for those RPCs do not assert on the same checks, but - * rather handle the error cases. e.g. see - * ost_rw_hpreq_check(), and ost_brw_read(), - * ost_brw_write(). + if (rc == -ESTALE) { + req->rq_status = rc; + ptlrpc_error(req); + } + /** can only return error, + * 0 for normal request, + * or 1 for high priority request */ - if (rc < 0) - return rc; - LASSERT(rc == 0 || rc == 1); + LASSERT(rc <= 1); } spin_lock_bh(&req->rq_export->exp_rpc_lock); From 0e97077574c6e991274b37972d1c0241332610fb Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Sat, 7 Oct 2017 22:37:48 +0000 Subject: [PATCH 44/50] exynos4-is: fimc-is: Unmap region obtained by of_iomap() [ Upstream commit 4742575cde1f3cee0ea6b41af42781672315b04b ] Free the memory mapping if fimc_is_probe() is not successful.
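A hedged sketch of the goto-based unwinding this patch applies; example_probe() and example_setup() are hypothetical stand-ins for the driver's probe steps:

	#include <linux/io.h>
	#include <linux/of_address.h>
	#include <linux/platform_device.h>

	static int example_setup(struct platform_device *pdev)
	{
		return 0;	/* hypothetical later probe step */
	}

	static int example_probe(struct platform_device *pdev)
	{
		void __iomem *regs;
		int ret;

		regs = of_iomap(pdev->dev.of_node, 0);	/* map the region */
		if (!regs)
			return -ENOMEM;

		ret = example_setup(pdev);
		if (ret)
			goto err_iounmap;

		return 0;

	err_iounmap:
		iounmap(regs);	/* undo of_iomap() so the mapping is not leaked */
		return ret;
	}

The remove() path must mirror this and call iounmap() as well, which is the second half of the diff below.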
Signed-off-by: Arvind Yadav Signed-off-by: Sylwester Nawrocki Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/exynos4-is/fimc-is.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 49658ca39e51..a851f20dca23 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -815,12 +815,13 @@ static int fimc_is_probe(struct platform_device *pdev) is->irq = irq_of_parse_and_map(dev->of_node, 0); if (!is->irq) { dev_err(dev, "no irq found\n"); - return -EINVAL; + ret = -EINVAL; + goto err_iounmap; } ret = fimc_is_get_clocks(is); if (ret < 0) - return ret; + goto err_iounmap; platform_set_drvdata(pdev, is); @@ -880,6 +881,8 @@ err_irq: free_irq(is->irq, is); err_clk: fimc_is_put_clocks(is); +err_iounmap: + iounmap(is->pmu_regs); return ret; } @@ -935,6 +938,7 @@ static int fimc_is_remove(struct platform_device *pdev) fimc_is_unregister_subdevs(is); vb2_dma_contig_cleanup_ctx(is->alloc_ctx); fimc_is_put_clocks(is); + iounmap(is->pmu_regs); fimc_is_debugfs_remove(is); release_firmware(is->fw.f_w); fimc_is_free_cpu_memory(is); From 45a012ace954efc08670fd6cd5698facbcdb119c Mon Sep 17 00:00:00 2001 From: Alexander Usyskin Date: Sat, 7 Oct 2017 22:37:48 +0000 Subject: [PATCH 45/50] mei: return error on notification request to a disconnected client [ Upstream commit 7c47d2ca0feca767479329da23523ed798acb854 ] Request for a notification from a disconnected client will be ignored silently by the FW but the caller should know that the operation hasn't succeeded. Signed-off-by: Alexander Usyskin Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/client.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 2ff39fbc70d1..df268365e04e 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c @@ -1300,6 +1300,9 @@ int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request) return -EOPNOTSUPP; } + if (!mei_cl_is_connected(cl)) + return -ENODEV; + rets = pm_runtime_get(dev->dev); if (rets < 0 && rets != -EINPROGRESS) { pm_runtime_put_noidle(dev->dev); From 4b7a35866b0ca4205ec5bacd2a5531caf19a463f Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Sat, 7 Oct 2017 22:37:49 +0000 Subject: [PATCH 46/50] s390/dasd: check for device error pointer within state change interrupts [ Upstream commit 2202134e48a3b50320aeb9e3dd1186833e9d7e66 ] Check if the device pointer is valid. Just a sanity check since we already are in the int handler of the device. 
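For background: dasd_device_from_cdev_locked() follows the kernel's ERR_PTR() convention rather than returning NULL, so validity is tested with IS_ERR(). A generic hedged sketch of that convention; lookup_thing(), do_work() and put_thing() are hypothetical:

	#include <linux/err.h>

	struct thing;
	struct thing *lookup_thing(int id);	/* returns ERR_PTR(-errno) on failure */
	void do_work(struct thing *t);
	void put_thing(struct thing *t);

	static void handle_event(int id)
	{
		struct thing *t = lookup_thing(id);

		if (IS_ERR(t))
			return;		/* never dereference an error pointer */
		do_work(t);
		put_thing(t);	/* drop the reference taken by the lookup */
	}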
Signed-off-by: Stefan Haberland Signed-off-by: Martin Schwidefsky Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/s390/block/dasd.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 84c13dffa3a8..e7a6f1222642 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1635,8 +1635,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, /* check for for attention message */ if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { device = dasd_device_from_cdev_locked(cdev); - device->discipline->check_attention(device, irb->esw.esw1.lpum); - dasd_put_device(device); + if (!IS_ERR(device)) { + device->discipline->check_attention(device, + irb->esw.esw1.lpum); + dasd_put_device(device); + } } if (!cqr) From 298455f466cbdf41cff7be9ec8c1dd6a3c5ed6b0 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Sat, 7 Oct 2017 22:37:49 +0000 Subject: [PATCH 47/50] bt8xx: fix memory leak [ Upstream commit 6792eb0cf9310ec240b7e7c9bfa86dff4c758c68 ] If dvb_attach() fails then we were just printing an error message and exiting but the memory allocated to state was not released. Signed-off-by: Sudip Mukherjee Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/media/pci/bt8xx/dvb-bt8xx.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/pci/bt8xx/dvb-bt8xx.c b/drivers/media/pci/bt8xx/dvb-bt8xx.c index d407244fd1bc..bd0f5b195188 100644 --- a/drivers/media/pci/bt8xx/dvb-bt8xx.c +++ b/drivers/media/pci/bt8xx/dvb-bt8xx.c @@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type) /* DST is not a frontend, attaching the ASIC */ if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) { pr_err("%s: Could not find a Twinhan DST\n", __func__); + kfree(state); break; } /* Attach other DST peripherals if any */ From 883706dcda17fdecce9c308d1743a90f9780f383 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 30 May 2017 20:52:26 +0200 Subject: [PATCH 48/50] xen: don't print error message in case of missing Xenstore entry [ Upstream commit 4e93b6481c87ea5afde944a32b4908357ec58992 ] When registering for the Xenstore watch of the node control/sysrq the handler will be called at once. Don't issue an error message if the Xenstore node isn't there, as it will be created only when an event is being triggered. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Juergen Gross Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/xen/manage.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index e8850b0e3272..2dd285827169 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -275,8 +275,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec, err = xenbus_transaction_start(&xbt); if (err) return; - if (xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key) < 0) { - pr_err("Unable to read sysrq code in control/sysrq\n"); + err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key); + if (err < 0) { + /* + * The Xenstore watch fires directly after registering it and + * after a suspend/resume cycle. So ENOENT is no error but + * might happen in those cases. 
+ */ + if (err != -ENOENT) + pr_err("Error %d reading sysrq code in control/sysrq\n", + err); xenbus_transaction_end(xbt, 1); return; } From 2ed81e62b2ab5b4942bbeb533469d88663e84a9f Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 10 Feb 2017 21:30:27 -0600 Subject: [PATCH 49/50] staging: r8712u: Fix Sparse warning in rtl871x_xmit.c [ Upstream commit 07222e535831b916221dd2a48a3047ec7e45dc72 ] Sparse reports the following: CHECK drivers/staging/rtl8712/rtl871x_xmit.c drivers/staging/rtl8712/rtl871x_xmit.c:350:44: warning: restricted __le32 degrades to integer drivers/staging/rtl8712/rtl871x_xmit.c:491:23: warning: incorrect type in initializer (different base types) drivers/staging/rtl8712/rtl871x_xmit.c:491:23: expected unsigned short [usertype] *fctrl drivers/staging/rtl8712/rtl871x_xmit.c:491:23: got restricted __le16 * drivers/staging/rtl8712/rtl871x_xmit.c:580:36: warning: incorrect type in assignment (different base types) drivers/staging/rtl8712/rtl871x_xmit.c:580:36: expected unsigned short [unsigned] [short] [usertype] drivers/staging/rtl8712/rtl871x_xmit.c:580:36: got restricted __be16 [usertype] Signed-off-by: Larry Finger Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8712/rtl871x_xmit.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c index 68d65d230fe3..d3ad89c7b8af 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.c +++ b/drivers/staging/rtl8712/rtl871x_xmit.c @@ -339,7 +339,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt, /* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite * some settings above.*/ if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) - pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f; + pattrib->priority = + (le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f; return _SUCCESS; } @@ -479,7 +480,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr, struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; - u16 *fctrl = &pwlanhdr->frame_ctl; + __le16 *fctrl = &pwlanhdr->frame_ctl; memset(hdr, 0, WLANHDR_OFFSET); SetFrameSubType(fctrl, pattrib->subtype); @@ -568,7 +569,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto) snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; - *(u16 *)(data + SNAP_SIZE) = htons(h_proto); + *(__be16 *)(data + SNAP_SIZE) = htons(h_proto); return SNAP_SIZE + sizeof(u16); } From c54d0707aa09a824413ebb4195c98bfb9b9e1fc0 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 8 Nov 2017 10:06:31 +0100 Subject: [PATCH 50/50] Linux 4.4.97 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 12dfe1dcbaca..fb1a40d64ba8 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ VERSION = 4 PATCHLEVEL = 4 -SUBLEVEL = 96 +SUBLEVEL = 97 EXTRAVERSION = NAME = Blurry Fish Butt