Merge android-4.4.97 (46d256d) into msm-4.4

* refs/heads/tmp-46d256d:
  Linux 4.4.97
  staging: r8712u: Fix Sparse warning in rtl871x_xmit.c
  xen: don't print error message in case of missing Xenstore entry
  bt8xx: fix memory leak
  s390/dasd: check for device error pointer within state change interrupts
  mei: return error on notification request to a disconnected client
  exynos4-is: fimc-is: Unmap region obtained by of_iomap()
  staging: lustre: ptlrpc: skip lock if export failed
  staging: lustre: hsm: stack overrun in hai_dump_data_field
  staging: lustre: llite: don't invoke direct_IO for the EOF case
  platform/x86: intel_mid_thermal: Fix module autoload
  scsi: aacraid: Process Error for response I/O
  xen/manage: correct return value check on xenbus_scanf()
  cx231xx: Fix I2C on Internal Master 3 Bus
  perf tools: Only increase index if perf_evsel__new_idx() succeeds
  drm/amdgpu: when dpm disabled, also need to stop/start vce.
  i2c: riic: correctly finish transfers
  ext4: do not use stripe_width if it is not set
  ext4: fix stripe-unaligned allocations
  staging: rtl8712u: Fix endian settings for structs describing network packets
  mfd: axp20x: Fix axp288 PEK_DBR and PEK_DBF irqs being swapped
  mfd: ab8500-sysctrl: Handle probe deferral
  ARM: pxa: Don't rely on public mmc header to include leds.h
  mmc: s3cmci: include linux/interrupt.h for tasklet_struct
  PM / wakeirq: report a wakeup_event on dedicated wekup irq
  Fix tracing sample code warning.
  tracing/samples: Fix creation and deletion of simple_thread_fn creation
  drm/msm: fix an integer overflow test
  drm/msm: Fix potential buffer overflow issue
  perf tools: Fix build failure on perl script context
  ocfs2: fstrim: Fix start offset of first cluster group during fstrim
  ARM: 8715/1: add a private asm/unaligned.h
  ARM: dts: mvebu: pl310-cache disable double-linefill
  arm64: ensure __dump_instr() checks addr_limit
  ASoC: adau17x1: Workaround for noise bug in ADC
  KEYS: fix out-of-bounds read during ASN.1 parsing
  KEYS: return full count in keyring_read() if buffer is too small
  cifs: check MaxPathNameComponentLength != 0 before using it
  ALSA: seq: Fix nested rwsem annotation for lockdep splat
  ALSA: timer: Add missing mutex lock for compat ioctls
  BACKPORT: xfrm: Clear sk_dst_cache when applying per-socket policy.
  Revert "ANDROID: sched/rt: schedtune: Add boost retention to RT"
  cpufreq: Drop schedfreq governor
  ANDROID: sched/rt: schedtune: Add boost retention to RT
  ANDROID: sched/rt: add schedtune accounting
  ANDROID: Revert "arm64: move ELF_ET_DYN_BASE to 4GB / 4MB"
  ANDROID: Revert "arm: move ELF_ET_DYN_BASE to 4MB"
  sched: EAS: Fix the calculation of group util in group_idle_state()
  sched: EAS: update trg_cpu to backup_cpu if no energy saving for target_cpu
  sched: EAS: Fix the condition to distinguish energy before/after

Conflicts:
    drivers/cpufreq/Kconfig
    drivers/gpu/drm/msm/msm_gem_submit.c
    kernel/sched/core.c
    kernel/sched/fair.c
    kernel/sched/rt.c
    kernel/sched/sched.h

Change-Id: I0d8c5287cb67fd47c8944a002c0ca71adcdef537
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
commit bb6e807311

60 changed files with 492 additions and 1125 deletions

Makefile (2 lines changed)
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 96
+SUBLEVEL = 97
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -176,9 +176,9 @@
     reg = <0x8000 0x1000>;
     cache-unified;
     cache-level = <2>;
-    arm,double-linefill-incr = <1>;
+    arm,double-linefill-incr = <0>;
     arm,double-linefill-wrap = <0>;
-    arm,double-linefill = <1>;
+    arm,double-linefill = <0>;
     prefetch-data = <1>;
 };
@@ -143,9 +143,9 @@
     reg = <0x8000 0x1000>;
     cache-unified;
     cache-level = <2>;
-    arm,double-linefill-incr = <1>;
+    arm,double-linefill-incr = <0>;
     arm,double-linefill-wrap = <0>;
-    arm,double-linefill = <1>;
+    arm,double-linefill = <0>;
     prefetch-data = <1>;
 };
@@ -104,9 +104,9 @@
     reg = <0x8000 0x1000>;
     cache-unified;
     cache-level = <2>;
-    arm,double-linefill-incr = <1>;
+    arm,double-linefill-incr = <0>;
     arm,double-linefill-wrap = <0>;
-    arm,double-linefill = <1>;
+    arm,double-linefill = <0>;
     prefetch-data = <1>;
 };
@@ -37,4 +37,3 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += unaligned.h
@@ -112,8 +112,12 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 #define CORE_DUMP_USE_REGSET
 #define ELF_EXEC_PAGESIZE 4096
 
-/* This is the base location for PIE (ET_DYN with INTERP) loads. */
-#define ELF_ET_DYN_BASE 0x400000UL
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader. We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk. */
+
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
 
 /* When the program starts, a1 contains a pointer to a function to be
    registered with atexit, as per the SVR4 ABI. A value of 0 means we
arch/arm/include/asm/unaligned.h (new file, 27 lines)
@@ -0,0 +1,27 @@
+#ifndef __ASM_ARM_UNALIGNED_H
+#define __ASM_ARM_UNALIGNED_H
+
+/*
+ * We generally want to set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS on ARMv6+,
+ * but we don't want to use linux/unaligned/access_ok.h since that can lead
+ * to traps on unaligned stm/ldm or strd/ldrd.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianess
+#endif
+
+#endif /* __ASM_ARM_UNALIGNED_H */
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/fb.h>
@@ -17,6 +17,7 @@
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/ioport.h>
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -18,6 +18,7 @@
 #include <linux/major.h>
 #include <linux/fs.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/mmc/host.h>
 #include <linux/mtd/physmap.h>
 #include <linux/pm.h>
@@ -16,6 +16,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/bitops.h>
@@ -15,6 +15,7 @@
 #include <linux/irq.h>
 #include <linux/gpio_keys.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <linux/gpio.h>
 #include <linux/usb/gpio_vbus.h>
 #include <linux/mtd/mtd.h>
@@ -13,6 +13,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/gpio.h>
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/leds.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
@@ -170,7 +170,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #ifdef CONFIG_COMPAT
 
-/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
+#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
 
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG 18
@@ -124,7 +124,7 @@ static void __dump_instr(const char *lvl, struct pt_regs *regs)
 	for (i = -4; i < 1; i++) {
 		unsigned int val, bad;
 
-		bad = __get_user(val, &((u32 *)addr)[i]);
+		bad = get_user(val, &((u32 *)addr)[i]);
 
 		if (!bad)
 			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
@@ -141,6 +141,13 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
 	struct wake_irq *wirq = _wirq;
 	int res;
 
+	/* Maybe abort suspend? */
+	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
+		pm_wakeup_event(wirq->dev, 0);
+
+		return IRQ_HANDLED;
+	}
+
 	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
 	res = pm_runtime_resume(wirq->dev);
 	if (res < 0)
@@ -235,19 +235,6 @@ config CPU_BOOST
 
 	  If in doubt, say N.
 
-config CPU_FREQ_GOV_SCHED
-	bool "'sched' cpufreq governor"
-	depends on CPU_FREQ
-	depends on SMP
-	select CPU_FREQ_GOV_COMMON
-	help
-	  'sched' - this governor scales cpu frequency from the
-	  scheduler as a function of cpu capacity utilization. It does
-	  not evaluate utilization on a periodic basis (as ondemand
-	  does) but instead is event-driven by the scheduler.
-
-	  If in doubt, say N.
-
 config CPU_FREQ_GOV_SCHEDUTIL
 	bool "'schedutil' cpufreq policy governor"
 	depends on CPU_FREQ && SMP
@@ -284,6 +284,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
 			amdgpu_dpm_enable_vce(adev, false);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 0, 0);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_GATE);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_GATE);
 		}
 	} else {
 		schedule_delayed_work(&adev->vce.idle_work,
@@ -315,6 +319,11 @@ static void amdgpu_vce_note_usage(struct amdgpu_device *adev)
 			amdgpu_dpm_enable_vce(adev, true);
 		} else {
 			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
+			amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_CG_STATE_UNGATE);
+			amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+							AMD_PG_STATE_UNGATE);
+
 		}
 	}
 }
@@ -40,8 +40,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu_submitqueue *queue)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-		(nr_cmds * sizeof(submit->cmd[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;
@@ -80,6 +80,7 @@
 #define ICIER_TEIE 0x40
 #define ICIER_RIE 0x20
 #define ICIER_NAKIE 0x10
+#define ICIER_SPIE 0x08
 
 #define ICSR2_NACKF 0x10
 
@@ -216,11 +217,10 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
 		return IRQ_NONE;
 	}
 
-	if (riic->is_last || riic->err)
+	if (riic->is_last || riic->err) {
+		riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
 		writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
-
-	writeb(0, riic->base + RIIC_ICIER);
-	complete(&riic->msg_done);
+	}
 
 	return IRQ_HANDLED;
 }
@@ -240,13 +240,13 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
 
 	if (riic->bytes_left == 1) {
 		/* STOP must come before we set ACKBT! */
-		if (riic->is_last)
+		if (riic->is_last) {
+			riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
 			writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+		}
 
 		riic_clear_set_bit(riic, 0, ICMR3_ACKBT, RIIC_ICMR3);
 
-		writeb(0, riic->base + RIIC_ICIER);
-		complete(&riic->msg_done);
 	} else {
 		riic_clear_set_bit(riic, ICMR3_ACKBT, 0, RIIC_ICMR3);
 	}
@@ -259,6 +259,21 @@ static irqreturn_t riic_rdrf_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t riic_stop_isr(int irq, void *data)
+{
+	struct riic_dev *riic = data;
+
+	/* read back registers to confirm writes have fully propagated */
+	writeb(0, riic->base + RIIC_ICSR2);
+	readb(riic->base + RIIC_ICSR2);
+	writeb(0, riic->base + RIIC_ICIER);
+	readb(riic->base + RIIC_ICIER);
+
+	complete(&riic->msg_done);
+
+	return IRQ_HANDLED;
+}
+
 static u32 riic_func(struct i2c_adapter *adap)
 {
 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
@@ -326,6 +341,7 @@ static struct riic_irq_desc riic_irqs[] = {
 	{ .res_num = 0, .isr = riic_tend_isr, .name = "riic-tend" },
 	{ .res_num = 1, .isr = riic_rdrf_isr, .name = "riic-rdrf" },
 	{ .res_num = 2, .isr = riic_tdre_isr, .name = "riic-tdre" },
+	{ .res_num = 3, .isr = riic_stop_isr, .name = "riic-stop" },
 	{ .res_num = 5, .isr = riic_tend_isr, .name = "riic-nack" },
 };
 
@@ -680,6 +680,7 @@ static void frontend_init(struct dvb_bt8xx_card *card, u32 type)
 		/* DST is not a frontend, attaching the ASIC */
 		if (dvb_attach(dst_attach, state, &card->dvb_adapter) == NULL) {
 			pr_err("%s: Could not find a Twinhan DST\n", __func__);
+			kfree(state);
 			break;
 		}
 		/* Attach other DST peripherals if any */
@@ -815,12 +815,13 @@ static int fimc_is_probe(struct platform_device *pdev)
 	is->irq = irq_of_parse_and_map(dev->of_node, 0);
 	if (!is->irq) {
 		dev_err(dev, "no irq found\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto err_iounmap;
 	}
 
 	ret = fimc_is_get_clocks(is);
 	if (ret < 0)
-		return ret;
+		goto err_iounmap;
 
 	platform_set_drvdata(pdev, is);
 
@@ -880,6 +881,8 @@ err_irq:
 	free_irq(is->irq, is);
 err_clk:
 	fimc_is_put_clocks(is);
+err_iounmap:
+	iounmap(is->pmu_regs);
 	return ret;
 }
 
@@ -935,6 +938,7 @@ static int fimc_is_remove(struct platform_device *pdev)
 	fimc_is_unregister_subdevs(is);
 	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
 	fimc_is_put_clocks(is);
+	iounmap(is->pmu_regs);
 	fimc_is_debugfs_remove(is);
 	release_firmware(is->fw.f_w);
 	fimc_is_free_cpu_memory(is);
@@ -356,7 +356,12 @@ int cx231xx_send_vendor_cmd(struct cx231xx *dev,
 	 */
 	if ((ven_req->wLength > 4) && ((ven_req->bRequest == 0x4) ||
 					(ven_req->bRequest == 0x5) ||
-					(ven_req->bRequest == 0x6))) {
+					(ven_req->bRequest == 0x6) ||
+
+					/* Internal Master 3 Bus can send
+					 * and receive only 4 bytes per time
+					 */
+					(ven_req->bRequest == 0x2))) {
 		unsend_size = 0;
 		pdata = ven_req->pBuff;
 
@@ -99,7 +99,7 @@ int ab8500_sysctrl_read(u16 reg, u8 *value)
 	u8 bank;
 
 	if (sysctrl_dev == NULL)
-		return -EINVAL;
+		return -EPROBE_DEFER;
 
 	bank = (reg >> 8);
 	if (!valid_bank(bank))
@@ -115,11 +115,13 @@ int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value)
 	u8 bank;
 
 	if (sysctrl_dev == NULL)
-		return -EINVAL;
+		return -EPROBE_DEFER;
 
 	bank = (reg >> 8);
-	if (!valid_bank(bank))
+	if (!valid_bank(bank)) {
+		pr_err("invalid bank\n");
 		return -EINVAL;
+	}
 
 	return abx500_mask_and_set_register_interruptible(sysctrl_dev, bank,
 		(u8)(reg & 0xFF), mask, value);
@@ -180,9 +182,15 @@ static int ab8500_sysctrl_remove(struct platform_device *pdev)
 	return 0;
 }
 
+static const struct of_device_id ab8500_sysctrl_match[] = {
+	{ .compatible = "stericsson,ab8500-sysctrl", },
+	{}
+};
+
 static struct platform_driver ab8500_sysctrl_driver = {
 	.driver = {
 		.name = "ab8500-sysctrl",
+		.of_match_table = ab8500_sysctrl_match,
 	},
 	.probe = ab8500_sysctrl_probe,
 	.remove = ab8500_sysctrl_remove,
@@ -164,14 +164,14 @@ static struct resource axp22x_pek_resources[] = {
 static struct resource axp288_power_button_resources[] = {
 	{
 		.name = "PEK_DBR",
-		.start = AXP288_IRQ_POKN,
-		.end = AXP288_IRQ_POKN,
+		.start = AXP288_IRQ_POKP,
+		.end = AXP288_IRQ_POKP,
 		.flags = IORESOURCE_IRQ,
 	},
 	{
 		.name = "PEK_DBF",
-		.start = AXP288_IRQ_POKP,
-		.end = AXP288_IRQ_POKP,
+		.start = AXP288_IRQ_POKN,
+		.end = AXP288_IRQ_POKN,
 		.flags = IORESOURCE_IRQ,
 	},
 };
@@ -1300,6 +1300,9 @@ int mei_cl_notify_request(struct mei_cl *cl, struct file *file, u8 request)
 		return -EOPNOTSUPP;
 	}
 
+	if (!mei_cl_is_connected(cl))
+		return -ENODEV;
+
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
 		pm_runtime_put_noidle(dev->dev);
@@ -21,6 +21,7 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/gpio.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 
@@ -550,6 +550,7 @@ static const struct platform_device_id therm_id_table[] = {
 	{ "msic_thermal", 1 },
 	{ }
 };
+MODULE_DEVICE_TABLE(platform, therm_id_table);
 
 static struct platform_driver mid_thermal_driver = {
 	.driver = {
@@ -1635,9 +1635,12 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	/* check for for attention message */
 	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
 		device = dasd_device_from_cdev_locked(cdev);
-		device->discipline->check_attention(device, irb->esw.esw1.lpum);
-		dasd_put_device(device);
+		if (!IS_ERR(device)) {
+			device->discipline->check_attention(device,
+							irb->esw.esw1.lpum);
+			dasd_put_device(device);
+		}
 	}
 
 	if (!cqr)
 		return;
@@ -2977,16 +2977,11 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 		return;
 
 	BUG_ON(fibptr == NULL);
 
 	dev = fibptr->dev;
 
-	scsi_dma_unmap(scsicmd);
 
-	/* expose physical device if expose_physicald flag is on */
-	if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
-		&& expose_physicals > 0)
-		aac_expose_phy_device(scsicmd);
 
 	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
 
 	scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
 
 	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
@@ -2999,6 +2994,16 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 		 */
 		scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
 				- le32_to_cpu(srbreply->data_xfer_length));
 	}
 
+
+	scsi_dma_unmap(scsicmd);
+
+	/* expose physical device if expose_physicald flag is on */
+	if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+		&& expose_physicals > 0)
+		aac_expose_phy_device(scsicmd);
+
 	/*
 	 * First check the fib status
 	 */
@@ -3006,7 +3011,8 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	if (le32_to_cpu(srbreply->status) != ST_OK) {
 		int len;
 
-		printk(KERN_WARNING "aac_srb_callback: srb failed, status = %d\n", le32_to_cpu(srbreply->status));
+		pr_warn("aac_srb_callback: srb failed, status = %d\n",
+			le32_to_cpu(srbreply->status));
 		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
 			    SCSI_SENSE_BUFFERSIZE);
 		scsicmd->result = DID_ERROR << 16
@@ -3037,17 +3043,16 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	case WRITE_16:
 		if (le32_to_cpu(srbreply->data_xfer_length)
 					< scsicmd->underflow)
-			printk(KERN_WARNING"aacraid: SCSI CMD underflow\n");
+			pr_warn("aacraid: SCSI CMD underflow\n");
 		else
-			printk(KERN_WARNING"aacraid: SCSI CMD Data Overrun\n");
+			pr_warn("aacraid: SCSI CMD Data Overrun\n");
 		scsicmd->result = DID_ERROR << 16
 				| COMMAND_COMPLETE << 8;
 		break;
-	case INQUIRY: {
+	case INQUIRY:
 		scsicmd->result = DID_OK << 16
 				| COMMAND_COMPLETE << 8;
 		break;
-	}
 	default:
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
 		break;
@@ -3112,15 +3117,23 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
 	default:
 #ifdef AAC_DETAILED_STATUS_INFO
-		printk(KERN_INFO "aacraid: SRB ERROR(%u) %s scsi cmd 0x%x - scsi status 0x%x\n",
+		pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
 			le32_to_cpu(srbreply->srb_status) & 0x3F,
 			aac_get_status_string(
 				le32_to_cpu(srbreply->srb_status) & 0x3F),
 			scsicmd->cmnd[0],
 			le32_to_cpu(srbreply->scsi_status));
 #endif
+		/*
+		 * When the CC bit is SET by the host in ATA pass thru CDB,
+		 * driver is supposed to return DID_OK
+		 *
+		 * When the CC bit is RESET by the host, driver should
+		 * return DID_ERROR
+		 */
 		if ((scsicmd->cmnd[0] == ATA_12)
 			|| (scsicmd->cmnd[0] == ATA_16)) {
+
 			if (scsicmd->cmnd[2] & (0x01 << 5)) {
 				scsicmd->result = DID_OK << 16
 					| COMMAND_COMPLETE << 8;
@@ -3144,13 +3157,13 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
 		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
 			    SCSI_SENSE_BUFFERSIZE);
 #ifdef AAC_DETAILED_STATUS_INFO
-		printk(KERN_WARNING "aac_srb_callback: check condition, status = %d len=%d\n",
+		pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
 			le32_to_cpu(srbreply->status), len);
 #endif
 		memcpy(scsicmd->sense_buffer,
 			srbreply->sense_data, len);
 	}
 }
 
 /*
  * OR in the scsi status (already shifted up a bit)
  */
@@ -1063,23 +1063,21 @@ struct hsm_action_item {
  * \retval buffer
  */
 static inline char *hai_dump_data_field(struct hsm_action_item *hai,
-					char *buffer, int len)
+					char *buffer, size_t len)
 {
-	int i, sz, data_len;
+	int i, data_len;
 	char *ptr;
 
 	ptr = buffer;
-	sz = len;
 	data_len = hai->hai_len - sizeof(*hai);
-	for (i = 0 ; (i < data_len) && (sz > 0) ; i++) {
-		int cnt;
-
-		cnt = snprintf(ptr, sz, "%.2X",
-			(unsigned char)hai->hai_data[i]);
-		ptr += cnt;
-		sz -= cnt;
+	for (i = 0; (i < data_len) && (len > 2); i++) {
+		snprintf(ptr, 3, "%02X", (unsigned char)hai->hai_data[i]);
+		ptr += 2;
+		len -= 2;
 	}
+
 	*ptr = '\0';
 
 	return buffer;
 }
 
@@ -550,6 +550,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
 	if (lock == NULL)
 		return NULL;
 
+	if (lock->l_export && lock->l_export->exp_failed) {
+		CDEBUG(D_INFO, "lock export failed: lock %p, exp %p\n",
+			lock, lock->l_export);
+		LDLM_LOCK_PUT(lock);
+		return NULL;
+	}
+
 	/* It's unlikely but possible that someone marked the lock as
 	 * destroyed after we did handle2object on it */
 	if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
@@ -376,6 +376,10 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
 	if (!lli->lli_has_smd)
 		return -EBADF;
 
+	/* Check EOF by ourselves */
+	if (iov_iter_rw(iter) == READ && file_offset >= i_size_read(inode))
+		return 0;
+
 	/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
 	if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
 		return -EINVAL;
@@ -1240,20 +1240,15 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
 		 * it may hit swab race at LU-1044. */
 		if (req->rq_ops->hpreq_check) {
 			rc = req->rq_ops->hpreq_check(req);
-			/**
-			 * XXX: Out of all current
-			 * ptlrpc_hpreq_ops::hpreq_check(), only
-			 * ldlm_cancel_hpreq_check() can return an error code;
-			 * other functions assert in similar places, which seems
-			 * odd. What also does not seem right is that handlers
-			 * for those RPCs do not assert on the same checks, but
-			 * rather handle the error cases. e.g. see
-			 * ost_rw_hpreq_check(), and ost_brw_read(),
-			 * ost_brw_write().
+			if (rc == -ESTALE) {
+				req->rq_status = rc;
+				ptlrpc_error(req);
+			}
+			/** can only return error,
+			 * 0 for normal request,
+			 * or 1 for high priority request
 			 */
-			if (rc < 0)
-				return rc;
-			LASSERT(rc == 0 || rc == 1);
+			LASSERT(rc <= 1);
 		}
 
 		spin_lock_bh(&req->rq_export->exp_rpc_lock);
@@ -143,52 +143,52 @@ struct ieee_ibss_seq {
 };
 
 struct ieee80211_hdr {
-	u16 frame_ctl;
-	u16 duration_id;
+	__le16 frame_ctl;
+	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	u8 addr3[ETH_ALEN];
-	u16 seq_ctl;
+	__le16 seq_ctl;
 	u8 addr4[ETH_ALEN];
-} __packed;
+} __packed __aligned(2);
 
 struct ieee80211_hdr_3addr {
-	u16 frame_ctl;
-	u16 duration_id;
+	__le16 frame_ctl;
+	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	u8 addr3[ETH_ALEN];
-	u16 seq_ctl;
-} __packed;
+	__le16 seq_ctl;
+} __packed __aligned(2);
 
 
 struct ieee80211_hdr_qos {
-	u16 frame_ctl;
-	u16 duration_id;
+	__le16 frame_ctl;
+	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	u8 addr3[ETH_ALEN];
-	u16 seq_ctl;
+	__le16 seq_ctl;
 	u8 addr4[ETH_ALEN];
-	u16 qc;
-} __packed;
+	__le16 qc;
+} __packed __aligned(2);
 
 struct ieee80211_hdr_3addr_qos {
-	u16 frame_ctl;
-	u16 duration_id;
+	__le16 frame_ctl;
+	__le16 duration_id;
 	u8 addr1[ETH_ALEN];
 	u8 addr2[ETH_ALEN];
 	u8 addr3[ETH_ALEN];
-	u16 seq_ctl;
-	u16 qc;
+	__le16 seq_ctl;
+	__le16 qc;
 } __packed;
 
 struct eapol {
 	u8 snap[6];
-	u16 ethertype;
+	__be16 ethertype;
 	u8 version;
 	u8 type;
-	u16 length;
+	__le16 length;
 } __packed;
 
 
@@ -528,13 +528,13 @@ struct ieee80211_security {
  */
 
 struct ieee80211_header_data {
-	u16 frame_ctl;
-	u16 duration_id;
+	__le16 frame_ctl;
+	__le16 duration_id;
 	u8 addr1[6];
 	u8 addr2[6];
 	u8 addr3[6];
-	u16 seq_ctrl;
-};
+	__le16 seq_ctrl;
+} __packed __aligned(2);
 
 #define BEACON_PROBE_SSID_ID_POSITION 12
 
@@ -566,18 +566,18 @@ struct ieee80211_info_element {
 /*
  * These are the data types that can make up management packets
  *
- u16 auth_algorithm;
- u16 auth_sequence;
- u16 beacon_interval;
- u16 capability;
+ __le16 auth_algorithm;
+ __le16 auth_sequence;
+ __le16 beacon_interval;
+ __le16 capability;
  u8 current_ap[ETH_ALEN];
- u16 listen_interval;
+ __le16 listen_interval;
  struct {
   u16 association_id:14, reserved:2;
  } __packed;
- u32 time_stamp[2];
- u16 reason;
- u16 status;
+ __le32 time_stamp[2];
+ __le16 reason;
+ __le16 status;
 */
 
 #define IEEE80211_DEFAULT_TX_ESSID "Penguin"
@@ -585,16 +585,16 @@ struct ieee80211_info_element {
 
 struct ieee80211_authentication {
 	struct ieee80211_header_data header;
-	u16 algorithm;
-	u16 transaction;
-	u16 status;
+	__le16 algorithm;
+	__le16 transaction;
+	__le16 status;
 } __packed;
 
 struct ieee80211_probe_response {
 	struct ieee80211_header_data header;
-	u32 time_stamp[2];
-	u16 beacon_interval;
-	u16 capability;
+	__le32 time_stamp[2];
+	__le16 beacon_interval;
+	__le16 capability;
 	struct ieee80211_info_element info_element;
 } __packed;
 
@@ -604,16 +604,16 @@ struct ieee80211_probe_request {
 
 struct ieee80211_assoc_request_frame {
 	struct ieee80211_hdr_3addr header;
-	u16 capability;
-	u16 listen_interval;
+	__le16 capability;
+	__le16 listen_interval;
 	struct ieee80211_info_element_hdr info_element;
 } __packed;
 
 struct ieee80211_assoc_response_frame {
 	struct ieee80211_hdr_3addr header;
-	u16 capability;
-	u16 status;
-	u16 aid;
+	__le16 capability;
+	__le16 status;
+	__le16 aid;
 } __packed;
 
 struct ieee80211_txb {
@@ -339,7 +339,8 @@ sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
 	/* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite
 	 * some settings above.*/
 	if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
-		pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
+		pattrib->priority =
+			(le32_to_cpu(txdesc.txdw1) >> QSEL_SHT) & 0x1f;
 	return _SUCCESS;
 }
 
@@ -479,7 +480,7 @@ static sint make_wlanhdr(struct _adapter *padapter, u8 *hdr,
 	struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
 	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
 	struct qos_priv *pqospriv = &pmlmepriv->qospriv;
-	u16 *fctrl = &pwlanhdr->frame_ctl;
+	__le16 *fctrl = &pwlanhdr->frame_ctl;
 
 	memset(hdr, 0, WLANHDR_OFFSET);
 	SetFrameSubType(fctrl, pattrib->subtype);
@@ -568,7 +569,7 @@ static sint r8712_put_snap(u8 *data, u16 h_proto)
 	snap->oui[0] = oui[0];
 	snap->oui[1] = oui[1];
 	snap->oui[2] = oui[2];
-	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+	*(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
 	return SNAP_SIZE + sizeof(u16);
 }
 
@@ -275,8 +275,16 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
 	err = xenbus_transaction_start(&xbt);
 	if (err)
 		return;
-	if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
-		pr_err("Unable to read sysrq code in control/sysrq\n");
+	err = xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key);
+	if (err < 0) {
+		/*
+		 * The Xenstore watch fires directly after registering it and
+		 * after a suspend/resume cycle. So ENOENT is no error but
+		 * might happen in those cases.
+		 */
+		if (err != -ENOENT)
+			pr_err("Error %d reading sysrq code in control/sysrq\n",
+				err);
 		xenbus_transaction_end(xbt, 1);
 		return;
 	}
@@ -193,7 +193,8 @@ check_name(struct dentry *direntry, struct cifs_tcon *tcon)
 	struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
 	int i;
 
-	if (unlikely(direntry->d_name.len >
+	if (unlikely(tcon->fsAttrInfo.MaxPathNameComponentLength &&
+		     direntry->d_name.len >
 		     le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
 		return -ENAMETOOLONG;
 
@@ -509,7 +510,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
 
 	rc = check_name(direntry, tcon);
 	if (rc)
-		goto out_free_xid;
+		goto out;
 
 	server = tcon->ses->server;
 
@@ -2136,8 +2136,10 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 	 * We search using buddy data only if the order of the request
 	 * is greater than equal to the sbi_s_mb_order2_reqs
 	 * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req
+	 * We also support searching for power-of-two requests only for
+	 * requests upto maximum buddy size we have constructed.
 	 */
-	if (i >= sbi->s_mb_order2_reqs) {
+	if (i >= sbi->s_mb_order2_reqs && i <= sb->s_blocksize_bits + 2) {
 		/*
 		 * This should tell if fe_len is exactly power of 2
 		 */
@@ -2207,7 +2209,7 @@ repeat:
 		}
 
 		ac->ac_groups_scanned++;
-		if (cr == 0 && ac->ac_2order < sb->s_blocksize_bits+2)
+		if (cr == 0)
 			ext4_mb_simple_scan_group(ac, &e4b);
 		else if (cr == 1 && sbi->s_stripe &&
 			 !(ac->ac_g_ex.fe_len % sbi->s_stripe))
@@ -2498,9 +2498,9 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
 
 	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
 		ret = sbi->s_stripe;
-	else if (stripe_width <= sbi->s_blocks_per_group)
+	else if (stripe_width && stripe_width <= sbi->s_blocks_per_group)
 		ret = stripe_width;
-	else if (stride <= sbi->s_blocks_per_group)
+	else if (stride && stride <= sbi->s_blocks_per_group)
 		ret = stride;
 	else
 		ret = 0;
@@ -7270,13 +7270,24 @@ out:
 
 static int ocfs2_trim_extent(struct super_block *sb,
 			     struct ocfs2_group_desc *gd,
-			     u32 start, u32 count)
+			     u64 group, u32 start, u32 count)
 {
 	u64 discard, bcount;
+	struct ocfs2_super *osb = OCFS2_SB(sb);
 
 	bcount = ocfs2_clusters_to_blocks(sb, count);
-	discard = le64_to_cpu(gd->bg_blkno) +
-			ocfs2_clusters_to_blocks(sb, start);
+	discard = ocfs2_clusters_to_blocks(sb, start);
+
+	/*
+	 * For the first cluster group, the gd->bg_blkno is not at the start
+	 * of the group, but at an offset from the start. If we add it while
+	 * calculating discard for first group, we will wrongly start fstrim a
+	 * few blocks after the desried start block and the range can cross
+	 * over into the next cluster group. So, add it only if this is not
+	 * the first cluster group.
+	 */
+	if (group != osb->first_cluster_group_blkno)
+		discard += le64_to_cpu(gd->bg_blkno);
 
 	trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
 
@@ -7284,7 +7295,7 @@ static int ocfs2_trim_extent(struct super_block *sb,
 }
 
 static int ocfs2_trim_group(struct super_block *sb,
-			    struct ocfs2_group_desc *gd,
+			    struct ocfs2_group_desc *gd, u64 group,
 			    u32 start, u32 max, u32 minbits)
 {
 	int ret = 0, count = 0, next;
@@ -7303,7 +7314,7 @@ static int ocfs2_trim_group(struct super_block *sb,
 		next = ocfs2_find_next_bit(bitmap, max, start);
 
 		if ((next - start) >= minbits) {
-			ret = ocfs2_trim_extent(sb, gd,
+			ret = ocfs2_trim_extent(sb, gd, group,
 						start, next - start);
 			if (ret < 0) {
 				mlog_errno(ret);
@@ -7401,7 +7412,8 @@ int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	}
 
 	gd = (struct ocfs2_group_desc *)gd_bh->b_data;
-	cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+	cnt = ocfs2_trim_group(sb, gd, group,
+			       first_bit, last_bit, minlen);
 	brelse(gd_bh);
 	gd_bh = NULL;
 	if (cnt < 0) {
@@ -40,7 +40,6 @@ extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
 extern unsigned int sysctl_sched_sync_hint_enable;
-extern unsigned int sysctl_sched_initial_task_util;
 extern unsigned int sysctl_sched_cstate_aware;
 
 #ifdef CONFIG_SCHED_HMP
@@ -23,5 +23,4 @@ obj-$(CONFIG_SCHED_TUNE) += tune.o
 obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o
 obj-$(CONFIG_SCHED_CORE_CTL) += core_ctl.o
 obj-$(CONFIG_CPU_FREQ) += cpufreq.o
-obj-$(CONFIG_CPU_FREQ_GOV_SCHED) += cpufreq_sched.o
 obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
@ -3167,91 +3167,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
|
|||
return ns;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ_GOV_SCHED
|
||||
|
||||
static inline
|
||||
unsigned long add_capacity_margin(unsigned long cpu_capacity)
|
||||
{
|
||||
cpu_capacity = cpu_capacity * capacity_margin;
|
||||
cpu_capacity /= SCHED_CAPACITY_SCALE;
|
||||
return cpu_capacity;
|
||||
}
|
||||
|
||||
static inline
|
||||
unsigned long sum_capacity_reqs(unsigned long cfs_cap,
|
||||
struct sched_capacity_reqs *scr)
|
||||
{
|
||||
unsigned long total = add_capacity_margin(cfs_cap + scr->rt);
|
||||
return total += scr->dl;
|
||||
}
|
||||
|
||||
unsigned long boosted_cpu_util(int cpu);
|
||||
static void sched_freq_tick_pelt(int cpu)
|
||||
{
|
||||
unsigned long cpu_utilization = boosted_cpu_util(cpu);
|
||||
unsigned long capacity_curr = capacity_curr_of(cpu);
|
||||
struct sched_capacity_reqs *scr;
|
||||
|
||||
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
|
||||
if (sum_capacity_reqs(cpu_utilization, scr) < capacity_curr)
|
||||
return;
|
||||
|
||||
/*
|
||||
* To make free room for a task that is building up its "real"
|
||||
* utilization and to harm its performance the least, request
|
||||
* a jump to a higher OPP as soon as the margin of free capacity
|
||||
* is impacted (specified by capacity_margin).
|
||||
* Remember CPU utilization in sched_capacity_reqs should be normalised.
|
||||
*/
|
||||
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
|
||||
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
static void sched_freq_tick_walt(int cpu)
|
||||
{
|
||||
unsigned long cpu_utilization = cpu_util_freq(cpu);
|
||||
unsigned long capacity_curr = capacity_curr_of(cpu);
|
||||
|
||||
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
|
||||
return sched_freq_tick_pelt(cpu);
|
||||
|
||||
/*
|
||||
* Add a margin to the WALT utilization to check if we will need to
|
||||
* increase frequency.
|
||||
* NOTE: WALT tracks a single CPU signal for all the scheduling
|
||||
* classes, thus this margin is going to be added to the DL class as
|
||||
* well, which is something we do not do in sched_freq_tick_pelt case.
|
||||
*/
|
||||
if (add_capacity_margin(cpu_utilization) <= capacity_curr)
|
||||
return;
|
||||
|
||||
/*
|
||||
* It is likely that the load is growing so we
|
||||
* keep the added margin in our request as an
|
||||
* extra boost.
|
||||
* Remember CPU utilization in sched_capacity_reqs should be normalised.
|
||||
*/
|
||||
cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
|
||||
set_cfs_cpu_capacity(cpu, true, cpu_utilization);
|
||||
|
||||
}
|
||||
#define _sched_freq_tick(cpu) sched_freq_tick_walt(cpu)
|
||||
#else
|
||||
#define _sched_freq_tick(cpu) sched_freq_tick_pelt(cpu)
|
||||
#endif /* CONFIG_SCHED_WALT */
|
||||
|
||||
static void sched_freq_tick(int cpu)
|
||||
{
|
||||
if (!sched_freq())
|
||||
return;
|
||||
|
||||
_sched_freq_tick(cpu);
|
||||
}
|
||||
#else
|
||||
static inline void sched_freq_tick(int cpu) { }
|
||||
#endif /* CONFIG_CPU_FREQ_GOV_SCHED */
|
||||
|
||||
/*
|
||||
* This function gets called by the timer code, with HZ frequency.
|
||||
* We call it with interrupts disabled.
|
||||
|
@@ -3278,7 +3193,6 @@ void scheduler_tick(void)
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	early_notif = early_detection_notify(rq, wallclock);
-	sched_freq_tick(cpu);
 	raw_spin_unlock(&rq->lock);
 
 	if (early_notif)
@ -1,525 +0,0 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Michael Turquette <mturquette@linaro.org>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/cpufreq_sched.h>
|
||||
|
||||
#include "sched.h"
|
||||
|
||||
#define THROTTLE_DOWN_NSEC 50000000 /* 50ms default */
|
||||
#define THROTTLE_UP_NSEC 500000 /* 500us default */
|
||||
|
||||
struct static_key __read_mostly __sched_freq = STATIC_KEY_INIT_FALSE;
|
||||
static bool __read_mostly cpufreq_driver_slow;
|
||||
|
||||
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
|
||||
static struct cpufreq_governor cpufreq_gov_sched;
|
||||
#endif
|
||||
|
||||
static DEFINE_PER_CPU(unsigned long, enabled);
|
||||
DEFINE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
|
||||
|
||||
struct gov_tunables {
|
||||
struct gov_attr_set attr_set;
|
||||
unsigned int up_throttle_nsec;
|
||||
unsigned int down_throttle_nsec;
|
||||
};
|
||||
|
||||
/**
|
||||
* gov_data - per-policy data internal to the governor
|
||||
* @up_throttle: next throttling period expiry if increasing OPP
|
||||
* @down_throttle: next throttling period expiry if decreasing OPP
|
||||
* @up_throttle_nsec: throttle period length in nanoseconds if increasing OPP
|
||||
* @down_throttle_nsec: throttle period length in nanoseconds if decreasing OPP
|
||||
* @task: worker thread for dvfs transition that may block/sleep
|
||||
* @irq_work: callback used to wake up worker thread
|
||||
* @requested_freq: last frequency requested by the sched governor
|
||||
*
|
||||
* struct gov_data is the per-policy cpufreq_sched-specific data structure. A
|
||||
* per-policy instance of it is created when the cpufreq_sched governor receives
|
||||
* the CPUFREQ_GOV_START condition and a pointer to it exists in the gov_data
|
||||
* member of struct cpufreq_policy.
|
||||
*
|
||||
* Readers of this data must call down_read(policy->rwsem). Writers must
|
||||
* call down_write(policy->rwsem).
|
||||
*/
|
||||
struct gov_data {
|
||||
ktime_t up_throttle;
|
||||
ktime_t down_throttle;
|
||||
struct gov_tunables *tunables;
|
||||
struct list_head tunables_hook;
|
||||
struct task_struct *task;
|
||||
struct irq_work irq_work;
|
||||
unsigned int requested_freq;
|
||||
};
|
||||
|
||||
static void cpufreq_sched_try_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int freq)
|
||||
{
|
||||
struct gov_data *gd = policy->governor_data;
|
||||
|
||||
/* avoid race with cpufreq_sched_stop */
|
||||
if (!down_write_trylock(&policy->rwsem))
|
||||
return;
|
||||
|
||||
__cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
|
||||
|
||||
gd->up_throttle = ktime_add_ns(ktime_get(),
|
||||
gd->tunables->up_throttle_nsec);
|
||||
gd->down_throttle = ktime_add_ns(ktime_get(),
|
||||
gd->tunables->down_throttle_nsec);
|
||||
up_write(&policy->rwsem);
|
||||
}
|
||||
|
||||
static bool finish_last_request(struct gov_data *gd, unsigned int cur_freq)
|
||||
{
|
||||
ktime_t now = ktime_get();
|
||||
|
||||
ktime_t throttle = gd->requested_freq < cur_freq ?
|
||||
gd->down_throttle : gd->up_throttle;
|
||||
|
||||
if (ktime_after(now, throttle))
|
||||
return false;
|
||||
|
||||
while (1) {
|
||||
int usec_left = ktime_to_ns(ktime_sub(throttle, now));
|
||||
|
||||
usec_left /= NSEC_PER_USEC;
|
||||
trace_cpufreq_sched_throttled(usec_left);
|
||||
usleep_range(usec_left, usec_left + 100);
|
||||
now = ktime_get();
|
||||
if (ktime_after(now, throttle))
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* we pass in struct cpufreq_policy. This is safe because changing out the
|
||||
* policy requires a call to __cpufreq_governor(policy, CPUFREQ_GOV_STOP),
|
||||
* which tears down all of the data structures and __cpufreq_governor(policy,
|
||||
* CPUFREQ_GOV_START) will do a full rebuild, including this kthread with the
|
||||
* new policy pointer
|
||||
*/
|
||||
static int cpufreq_sched_thread(void *data)
|
||||
{
|
||||
struct sched_param param;
|
||||
struct cpufreq_policy *policy;
|
||||
struct gov_data *gd;
|
||||
unsigned int new_request = 0;
|
||||
unsigned int last_request = 0;
|
||||
int ret;
|
||||
|
||||
policy = (struct cpufreq_policy *) data;
|
||||
gd = policy->governor_data;
|
||||
|
||||
param.sched_priority = 50;
|
||||
ret = sched_setscheduler_nocheck(gd->task, SCHED_FIFO, ¶m);
|
||||
if (ret) {
|
||||
pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
|
||||
do_exit(-EINVAL);
|
||||
} else {
|
||||
pr_debug("%s: kthread (%d) set to SCHED_FIFO\n",
|
||||
__func__, gd->task->pid);
|
||||
}
|
||||
|
||||
do {
|
||||
new_request = gd->requested_freq;
|
||||
if (new_request == last_request) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
schedule();
|
||||
} else {
|
||||
/*
|
||||
* if the frequency thread sleeps while waiting to be
|
||||
* unthrottled, start over to check for a newer request
|
||||
*/
|
||||
if (finish_last_request(gd, policy->cur))
|
||||
continue;
|
||||
last_request = new_request;
|
||||
cpufreq_sched_try_driver_target(policy, new_request);
|
||||
}
|
||||
} while (!kthread_should_stop());
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void cpufreq_sched_irq_work(struct irq_work *irq_work)
|
||||
{
|
||||
struct gov_data *gd;
|
||||
|
||||
gd = container_of(irq_work, struct gov_data, irq_work);
|
||||
if (!gd)
|
||||
return;
|
||||
|
||||
wake_up_process(gd->task);
|
||||
}
|
||||
|
||||
static void update_fdomain_capacity_request(int cpu)
|
||||
{
|
||||
unsigned int freq_new, index_new, cpu_tmp;
|
||||
struct cpufreq_policy *policy;
|
||||
struct gov_data *gd;
|
||||
unsigned long capacity = 0;
|
||||
|
||||
/*
|
||||
* Avoid grabbing the policy if possible. A test is still
|
||||
* required after locking the CPU's policy to avoid racing
|
||||
* with the governor changing.
|
||||
*/
|
||||
if (!per_cpu(enabled, cpu))
|
||||
return;
|
||||
|
||||
policy = cpufreq_cpu_get(cpu);
|
||||
if (IS_ERR_OR_NULL(policy))
|
||||
return;
|
||||
|
||||
if (policy->governor != &cpufreq_gov_sched ||
|
||||
!policy->governor_data)
|
||||
goto out;
|
||||
|
||||
gd = policy->governor_data;
|
||||
|
||||
/* find max capacity requested by cpus in this policy */
|
||||
for_each_cpu(cpu_tmp, policy->cpus) {
|
||||
struct sched_capacity_reqs *scr;
|
||||
|
||||
scr = &per_cpu(cpu_sched_capacity_reqs, cpu_tmp);
|
||||
capacity = max(capacity, scr->total);
|
||||
}
|
||||
|
||||
/* Convert the new maximum capacity request into a cpu frequency */
|
||||
freq_new = capacity * policy->cpuinfo.max_freq >> SCHED_CAPACITY_SHIFT;
|
||||
if (cpufreq_frequency_table_target(policy, policy->freq_table,
|
||||
freq_new, CPUFREQ_RELATION_L,
|
||||
&index_new))
|
||||
goto out;
|
||||
freq_new = policy->freq_table[index_new].frequency;
|
||||
|
||||
if (freq_new > policy->max)
|
||||
freq_new = policy->max;
|
||||
|
||||
if (freq_new < policy->min)
|
||||
freq_new = policy->min;
|
||||
|
||||
trace_cpufreq_sched_request_opp(cpu, capacity, freq_new,
|
||||
gd->requested_freq);
|
||||
if (freq_new == gd->requested_freq)
|
||||
goto out;
|
||||
|
||||
gd->requested_freq = freq_new;
|
||||
|
||||
/*
|
||||
* Throttling is not yet supported on platforms with fast cpufreq
|
||||
* drivers.
|
||||
*/
|
||||
if (cpufreq_driver_slow)
|
||||
irq_work_queue_on(&gd->irq_work, cpu);
|
||||
else
|
||||
cpufreq_sched_try_driver_target(policy, freq_new);
|
||||
|
||||
out:
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SCHED_WALT
|
||||
static inline unsigned long
|
||||
requested_capacity(struct sched_capacity_reqs *scr)
|
||||
{
|
||||
if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
|
||||
return scr->cfs;
|
||||
return scr->cfs + scr->rt;
|
||||
}
|
||||
#else
|
||||
#define requested_capacity(scr) (scr->cfs + scr->rt)
|
||||
#endif
|
||||
|
||||
void update_cpu_capacity_request(int cpu, bool request)
|
||||
{
|
||||
unsigned long new_capacity;
|
||||
struct sched_capacity_reqs *scr;
|
||||
|
||||
/* The rq lock serializes access to the CPU's sched_capacity_reqs. */
|
||||
lockdep_assert_held(&cpu_rq(cpu)->lock);
|
||||
|
||||
scr = &per_cpu(cpu_sched_capacity_reqs, cpu);
|
||||
|
||||
new_capacity = requested_capacity(scr);
|
||||
new_capacity = new_capacity * capacity_margin
|
||||
/ SCHED_CAPACITY_SCALE;
|
||||
new_capacity += scr->dl;
|
||||
|
||||
if (new_capacity == scr->total)
|
||||
return;
|
||||
|
||||
trace_cpufreq_sched_update_capacity(cpu, request, scr, new_capacity);
|
||||
|
||||
scr->total = new_capacity;
|
||||
if (request)
|
||||
update_fdomain_capacity_request(cpu);
|
||||
}
|
||||
|
||||
static inline void set_sched_freq(void)
|
||||
{
|
||||
static_key_slow_inc(&__sched_freq);
|
||||
}
|
||||
|
||||
static inline void clear_sched_freq(void)
|
||||
{
|
||||
static_key_slow_dec(&__sched_freq);
|
||||
}
|
||||
|
||||
/* Tunables */
|
||||
static struct gov_tunables *global_tunables;
|
||||
|
||||
static inline struct gov_tunables *to_tunables(struct gov_attr_set *attr_set)
|
||||
{
|
||||
return container_of(attr_set, struct gov_tunables, attr_set);
|
||||
}
|
||||
|
||||
static ssize_t up_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
|
||||
{
|
||||
struct gov_tunables *tunables = to_tunables(attr_set);
|
||||
|
||||
return sprintf(buf, "%u\n", tunables->up_throttle_nsec);
|
||||
}
|
||||
|
||||
static ssize_t up_throttle_nsec_store(struct gov_attr_set *attr_set,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct gov_tunables *tunables = to_tunables(attr_set);
|
||||
int ret;
|
||||
long unsigned int val;
|
||||
|
||||
ret = kstrtoul(buf, 0, &val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
tunables->up_throttle_nsec = val;
|
||||
return count;
|
||||
}
|
||||
|
||||
static ssize_t down_throttle_nsec_show(struct gov_attr_set *attr_set, char *buf)
|
||||
{
|
||||
struct gov_tunables *tunables = to_tunables(attr_set);
|
||||
|
||||
return sprintf(buf, "%u\n", tunables->down_throttle_nsec);
|
||||
}
|
||||
|
||||
static ssize_t down_throttle_nsec_store(struct gov_attr_set *attr_set,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct gov_tunables *tunables = to_tunables(attr_set);
|
||||
int ret;
|
||||
long unsigned int val;
|
||||
|
||||
ret = kstrtoul(buf, 0, &val);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
tunables->down_throttle_nsec = val;
|
||||
return count;
|
||||
}
|
||||
|
||||
static struct governor_attr up_throttle_nsec = __ATTR_RW(up_throttle_nsec);
|
||||
static struct governor_attr down_throttle_nsec = __ATTR_RW(down_throttle_nsec);
|
||||
|
||||
static struct attribute *schedfreq_attributes[] = {
|
||||
&up_throttle_nsec.attr,
|
||||
&down_throttle_nsec.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct kobj_type tunables_ktype = {
|
||||
.default_attrs = schedfreq_attributes,
|
||||
.sysfs_ops = &governor_sysfs_ops,
|
||||
};
|
||||
|
||||
static int cpufreq_sched_policy_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct gov_data *gd;
|
||||
int cpu;
|
||||
int rc;
|
||||
|
||||
for_each_cpu(cpu, policy->cpus)
|
||||
memset(&per_cpu(cpu_sched_capacity_reqs, cpu), 0,
|
||||
sizeof(struct sched_capacity_reqs));
|
||||
|
||||
gd = kzalloc(sizeof(*gd), GFP_KERNEL);
|
||||
if (!gd)
|
||||
return -ENOMEM;
|
||||
|
||||
policy->governor_data = gd;
|
||||
|
||||
if (!global_tunables) {
|
||||
gd->tunables = kzalloc(sizeof(*gd->tunables), GFP_KERNEL);
|
||||
if (!gd->tunables)
|
||||
goto free_gd;
|
||||
|
||||
gd->tunables->up_throttle_nsec =
|
||||
policy->cpuinfo.transition_latency ?
|
||||
policy->cpuinfo.transition_latency :
|
||||
THROTTLE_UP_NSEC;
|
||||
gd->tunables->down_throttle_nsec =
|
||||
THROTTLE_DOWN_NSEC;
|
||||
|
||||
rc = kobject_init_and_add(&gd->tunables->attr_set.kobj,
|
||||
&tunables_ktype,
|
||||
get_governor_parent_kobj(policy),
|
||||
"%s", cpufreq_gov_sched.name);
|
||||
if (rc)
|
||||
goto free_tunables;
|
||||
|
||||
gov_attr_set_init(&gd->tunables->attr_set,
|
||||
&gd->tunables_hook);
|
||||
|
||||
pr_debug("%s: throttle_threshold = %u [ns]\n",
|
||||
__func__, gd->tunables->up_throttle_nsec);
|
||||
|
||||
if (!have_governor_per_policy())
|
||||
global_tunables = gd->tunables;
|
||||
} else {
|
||||
gd->tunables = global_tunables;
|
||||
gov_attr_set_get(&global_tunables->attr_set,
&gd->tunables_hook);
}

policy->governor_data = gd;
if (cpufreq_driver_is_slow()) {
cpufreq_driver_slow = true;
gd->task = kthread_create(cpufreq_sched_thread, policy,
"kschedfreq:%d",
cpumask_first(policy->related_cpus));
if (IS_ERR_OR_NULL(gd->task)) {
pr_err("%s: failed to create kschedfreq thread\n",
__func__);
goto free_tunables;
}
get_task_struct(gd->task);
kthread_bind_mask(gd->task, policy->related_cpus);
wake_up_process(gd->task);
init_irq_work(&gd->irq_work, cpufreq_sched_irq_work);
}

set_sched_freq();

return 0;

free_tunables:
kfree(gd->tunables);
free_gd:
policy->governor_data = NULL;
kfree(gd);
return -ENOMEM;
}

static int cpufreq_sched_policy_exit(struct cpufreq_policy *policy)
{
unsigned int count;
struct gov_data *gd = policy->governor_data;

clear_sched_freq();
if (cpufreq_driver_slow) {
kthread_stop(gd->task);
put_task_struct(gd->task);
}

count = gov_attr_set_put(&gd->tunables->attr_set, &gd->tunables_hook);
if (!count) {
if (!have_governor_per_policy())
global_tunables = NULL;
kfree(gd->tunables);
}

policy->governor_data = NULL;

kfree(gd);
return 0;
}

static int cpufreq_sched_start(struct cpufreq_policy *policy)
{
int cpu;

for_each_cpu(cpu, policy->cpus)
per_cpu(enabled, cpu) = 1;

return 0;
}

static void cpufreq_sched_limits(struct cpufreq_policy *policy)
{
unsigned int clamp_freq;
struct gov_data *gd = policy->governor_data;;

pr_debug("limit event for cpu %u: %u - %u kHz, currently %u kHz\n",
policy->cpu, policy->min, policy->max,
policy->cur);

clamp_freq = clamp(gd->requested_freq, policy->min, policy->max);

if (policy->cur != clamp_freq)
__cpufreq_driver_target(policy, clamp_freq, CPUFREQ_RELATION_L);
}

static int cpufreq_sched_stop(struct cpufreq_policy *policy)
{
int cpu;

for_each_cpu(cpu, policy->cpus)
per_cpu(enabled, cpu) = 0;

return 0;
}

static int cpufreq_sched_setup(struct cpufreq_policy *policy,
unsigned int event)
{
switch (event) {
case CPUFREQ_GOV_POLICY_INIT:
return cpufreq_sched_policy_init(policy);
case CPUFREQ_GOV_POLICY_EXIT:
return cpufreq_sched_policy_exit(policy);
case CPUFREQ_GOV_START:
return cpufreq_sched_start(policy);
case CPUFREQ_GOV_STOP:
return cpufreq_sched_stop(policy);
case CPUFREQ_GOV_LIMITS:
cpufreq_sched_limits(policy);
break;
}
return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED
static
#endif
struct cpufreq_governor cpufreq_gov_sched = {
.name = "sched",
.governor = cpufreq_sched_setup,
.owner = THIS_MODULE,
};

static int __init cpufreq_sched_init(void)
{
int cpu;

for_each_cpu(cpu, cpu_possible_mask)
per_cpu(enabled, cpu) = 0;
return cpufreq_register_governor(&cpufreq_gov_sched);
}

/* Try to make this the default governor */
fs_initcall(cpufreq_sched_init);
@@ -53,7 +53,6 @@ unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

unsigned int sysctl_sched_sync_hint_enable = 1;
unsigned int sysctl_sched_initial_task_util = 0;
unsigned int sysctl_sched_cstate_aware = 1;

/*
@@ -746,9 +745,7 @@ void init_entity_runnable_average(struct sched_entity *se)
sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
/*
* In previous Android versions, we used to have:
* sa->util_avg = sched_freq() ?
* sysctl_sched_initial_task_util :
* scale_load_down(SCHED_LOAD_SCALE);
* sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
* sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
* However, that functionality has been moved to enqueue.
* It is unclear if we should restore this in enqueue.
@@ -5759,23 +5756,6 @@ unsigned long boosted_cpu_util(int cpu);
#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
static void update_capacity_of(int cpu)
{
unsigned long req_cap;

if (!sched_freq())
return;

/* Normalize scale-invariant capacity to cpu. */
req_cap = boosted_cpu_util(cpu);
req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
set_cfs_cpu_capacity(cpu, true, req_cap);
}
#else
#define update_capacity_of(X) do {} while(0)
#endif /* SMP and CPU_FREQ_GOV_SCHED */

/*
* The enqueue_task method is called before nr_running is
* increased. Here we update the fair scheduling stats and
@@ -5788,7 +5768,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
#ifdef CONFIG_SMP
int task_new = flags & ENQUEUE_WAKEUP_NEW;
int task_wakeup = flags & ENQUEUE_WAKEUP;
#endif

/*
@@ -5863,19 +5842,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
rq->rd->overutilized = true;
trace_sched_overutilized(true);
}

}

if (!se) {
/*
* We want to potentially trigger a freq switch
* request only for tasks that are waking up; this is
* because we get here also during load balancing, but
* in these cases it seems wise to trigger as single
* request after load balancing is done.
*/
if (task_new || task_wakeup)
update_capacity_of(cpu_of(rq));
}

#endif /* CONFIG_SMP */
@@ -5953,23 +5919,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
*/
schedtune_dequeue_task(p, cpu_of(rq));

if (!se) {
/*
* We want to potentially trigger a freq switch
* request only for tasks that are going to sleep;
* this is because we get here also during load
* balancing, but in these cases it seems wise to
* trigger as single request after load balancing is
* done.
*/
if (task_sleep) {
if (rq->cfs.nr_running)
update_capacity_of(cpu_of(rq));
else if (sched_freq())
set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
}
}

#endif /* CONFIG_SMP */

hrtick_update(rq);
@@ -6521,13 +6470,6 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
/* Take non-cpuidle idling into account (active idle/arch_cpu_idle()) */
state++;

/*
* Try to estimate if a deeper idle state is
* achievable when we move the task.
*/
for_each_cpu(i, sched_group_cpus(sg))
grp_util += cpu_util(i);

src_in_grp = cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg));
dst_in_grp = cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg));
if (src_in_grp == dst_in_grp) {
@@ -6536,10 +6478,16 @@ static int group_idle_state(struct energy_env *eenv, struct sched_group *sg)
*/
goto end;
}
/* add or remove util as appropriate to indicate what group util
* will be (worst case - no concurrent execution) after moving the task

/*
* Try to estimate if a deeper idle state is
* achievable when we move the task.
*/
grp_util += src_in_grp ? -eenv->util_delta : eenv->util_delta;
for_each_cpu(i, sched_group_cpus(sg)) {
grp_util += cpu_util_wake(i, eenv->task);
if (unlikely(i == eenv->trg_cpu))
grp_util += eenv->util_delta;
}

if (grp_util <=
((long)sg->sgc->max_capacity * (int)sg->group_weight)) {
@@ -6626,13 +6574,13 @@ static int sched_group_energy(struct energy_env *eenv)

if (sg->group_weight == 1) {
/* Remove capacity of src CPU (before task move) */
if (eenv->util_delta == 0 &&
if (eenv->trg_cpu == eenv->src_cpu &&
cpumask_test_cpu(eenv->src_cpu, sched_group_cpus(sg))) {
eenv->cap.before = sg->sge->cap_states[cap_idx].cap;
eenv->cap.delta -= eenv->cap.before;
}
/* Add capacity of dst CPU (after task move) */
if (eenv->util_delta != 0 &&
if (eenv->trg_cpu == eenv->dst_cpu &&
cpumask_test_cpu(eenv->dst_cpu, sched_group_cpus(sg))) {
eenv->cap.after = sg->sge->cap_states[cap_idx].cap;
eenv->cap.delta += eenv->cap.after;
@@ -7813,6 +7761,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
/* No energy saving for target_cpu, try backup */
target_cpu = tmp_backup;
eenv.dst_cpu = target_cpu;
eenv.trg_cpu = target_cpu;
if (tmp_backup < 0 ||
tmp_backup == prev_cpu ||
energy_diff(&eenv) >= 0) {
@@ -8865,10 +8814,6 @@ static void attach_one_task(struct rq *rq, struct task_struct *p)
{
raw_spin_lock(&rq->lock);
attach_task(rq, p);
/*
* We want to potentially raise target_cpu's OPP.
*/
update_capacity_of(cpu_of(rq));
raw_spin_unlock(&rq->lock);
}

@@ -8890,11 +8835,6 @@ static void attach_tasks(struct lb_env *env)
attach_task(env->dst_rq, p);
}

/*
* We want to potentially raise env.dst_cpu's OPP.
*/
update_capacity_of(env->dst_cpu);

raw_spin_unlock(&env->dst_rq->lock);
}

@@ -10454,11 +10394,6 @@ more_balance:
* ld_moved - cumulative load moved across iterations
*/
cur_ld_moved = detach_tasks(&env);
/*
* We want to potentially lower env.src_cpu's OPP.
*/
if (cur_ld_moved)
update_capacity_of(env.src_cpu);

/*
* We've detached some tasks from busiest_rq. Every
@@ -10708,7 +10643,6 @@ static int idle_balance(struct rq *this_rq)
struct sched_domain *sd;
int pulled_task = 0;
u64 curr_cost = 0;
long removed_util=0;

if (cpu_isolated(this_cpu))
return 0;
@@ -10735,17 +10669,6 @@ static int idle_balance(struct rq *this_rq)

raw_spin_unlock(&this_rq->lock);

/*
* If removed_util_avg is !0 we most probably migrated some task away
* from this_cpu. In this case we might be willing to trigger an OPP
* update, but we want to do so if we don't find anybody else to pull
* here (we will trigger an OPP update with the pulled task's enqueue
* anyway).
*
* Record removed_util before calling update_blocked_averages, and use
* it below (before returning) to see if an OPP update is required.
*/
removed_util = atomic_long_read(&(this_rq->cfs).removed_util_avg);
update_blocked_averages(this_cpu);
rcu_read_lock();
for_each_domain(this_cpu, sd) {
@@ -10813,12 +10736,6 @@ out:
if (pulled_task) {
idle_exit_fair(this_rq);
this_rq->idle_stamp = 0;
} else if (removed_util) {
/*
* No task pulled and someone has been migrated away.
* Good case to trigger an OPP update.
*/
update_capacity_of(this_cpu);
}

return pulled_task;
@@ -10903,10 +10820,6 @@ static int active_load_balance_cpu_stop(void *data)
p = detach_one_task(&env);
if (p) {
schedstat_inc(sd, alb_pushed);
/*
* We want to potentially lower env.src_cpu's OPP.
*/
update_capacity_of(env.src_cpu);
moved = true;
} else {
schedstat_inc(sd, alb_failed);
@@ -10,6 +10,8 @@
#include <linux/irq_work.h>
#include <trace/events/sched.h>

#include "tune.h"

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
@@ -1394,6 +1396,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)

if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);

schedtune_enqueue_task(p, cpu_of(rq));
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1405,6 +1409,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
dec_hmp_sched_stats_rt(rq, p);

dequeue_pushable_task(rq, p);
schedtune_dequeue_task(p, cpu_of(rq));
}

/*
@@ -1612,41 +1617,6 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
#endif
}

#if defined(CONFIG_SMP) && defined(CONFIG_CPU_FREQ_GOV_SCHED)
static void sched_rt_update_capacity_req(struct rq *rq)
{
u64 total, used, age_stamp, avg;
s64 delta;

if (!sched_freq())
return;

sched_avg_update(rq);
/*
* Since we're reading these variables without serialization make sure
* we read them once before doing sanity checks on them.
*/
age_stamp = READ_ONCE(rq->age_stamp);
avg = READ_ONCE(rq->rt_avg);
delta = rq_clock(rq) - age_stamp;

if (unlikely(delta < 0))
delta = 0;

total = sched_avg_period() + delta;

used = div_u64(avg, total);
if (unlikely(used > SCHED_CAPACITY_SCALE))
used = SCHED_CAPACITY_SCALE;

set_rt_cpu_capacity(rq->cpu, 1, (unsigned long)(used));
}
#else
static inline void sched_rt_update_capacity_req(struct rq *rq)
{ }

#endif

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
struct rt_rq *rt_rq)
{
@@ -1715,17 +1685,8 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
if (prev->sched_class == &rt_sched_class)
update_curr_rt(rq);

if (!rt_rq->rt_queued) {
/*
* The next task to be picked on this rq will have a lower
* priority than rt tasks so we can spend some time to update
* the capacity used by rt tasks based on the last activity.
* This value will be the used as an estimation of the next
* activity.
*/
sched_rt_update_capacity_req(rq);
if (!rt_rq->rt_queued)
return NULL;
}

put_prev_task(rq, prev);

@@ -2558,9 +2519,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)

update_curr_rt(rq);

if (rq->rt.rt_nr_running)
sched_rt_update_capacity_req(rq);

watchdog(rq, p);

/*
@@ -2422,64 +2422,6 @@ static inline unsigned long cpu_util_freq(int cpu)

#endif

#ifdef CONFIG_CPU_FREQ_GOV_SCHED
#define capacity_max SCHED_CAPACITY_SCALE
extern unsigned int capacity_margin;
extern struct static_key __sched_freq;

static inline bool sched_freq(void)
{
return static_key_false(&__sched_freq);
}

/*
* sched_capacity_reqs expects capacity requests to be normalised.
* All capacities should sum to the range of 0-1024.
*/
DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
void update_cpu_capacity_request(int cpu, bool request);

static inline void set_cfs_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{
struct sched_capacity_reqs *scr = &per_cpu(cpu_sched_capacity_reqs, cpu);

if (scr->cfs != capacity) {
scr->cfs = capacity;
update_cpu_capacity_request(cpu, request);
}
}

static inline void set_rt_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{
if (per_cpu(cpu_sched_capacity_reqs, cpu).rt != capacity) {
per_cpu(cpu_sched_capacity_reqs, cpu).rt = capacity;
update_cpu_capacity_request(cpu, request);
}
}

static inline void set_dl_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{
if (per_cpu(cpu_sched_capacity_reqs, cpu).dl != capacity) {
per_cpu(cpu_sched_capacity_reqs, cpu).dl = capacity;
update_cpu_capacity_request(cpu, request);
}
}
#else
#define sched_freq() false
static inline void set_cfs_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{ }
static inline void set_rt_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{ }
static inline void set_dl_cpu_capacity(int cpu, bool request,
unsigned long capacity)
{ }
#endif

#ifdef CONFIG_SCHED_HMP
/*
* HMP and EAS are orthogonal. Hopefully the compiler just elides out all code
@@ -528,13 +528,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sched_initial_task_util",
.data = &sysctl_sched_initial_task_util,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "sched_cstate_aware",
.data = &sysctl_sched_cstate_aware,
@@ -283,6 +283,9 @@ next_op:
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}
} else {
if (unlikely(len > datalen - dp))
goto data_overrun_error;
}

if (flags & FLAG_CONS) {
@@ -1869,6 +1869,7 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
if (err >= 0) {
xfrm_sk_policy_insert(sk, err, pol);
xfrm_pol_put(pol);
__sk_dst_reset(sk);
err = 0;
}
@@ -78,28 +78,36 @@ static int simple_thread_fn(void *arg)
}

static DEFINE_MUTEX(thread_mutex);
static int simple_thread_cnt;

void foo_bar_reg(void)
{
mutex_lock(&thread_mutex);
if (simple_thread_cnt++)
goto out;

pr_info("Starting thread for foo_bar_fn\n");
/*
* We shouldn't be able to start a trace when the module is
* unloading (there's other locks to prevent that). But
* for consistency sake, we still take the thread_mutex.
*/
mutex_lock(&thread_mutex);
simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn");
out:
mutex_unlock(&thread_mutex);
}

void foo_bar_unreg(void)
{
pr_info("Killing thread for foo_bar_fn\n");
/* protect against module unloading */
mutex_lock(&thread_mutex);
if (--simple_thread_cnt)
goto out;

pr_info("Killing thread for foo_bar_fn\n");
if (simple_tsk_fn)
kthread_stop(simple_tsk_fn);
simple_tsk_fn = NULL;
out:
mutex_unlock(&thread_mutex);
}
@@ -452,34 +452,33 @@ static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
struct keyring_read_iterator_context ctx;
unsigned long nr_keys;
int ret;
long ret;

kenter("{%d},,%zu", key_serial(keyring), buflen);

if (buflen & (sizeof(key_serial_t) - 1))
return -EINVAL;

nr_keys = keyring->keys.nr_leaves_on_tree;
if (nr_keys == 0)
return 0;

/* Calculate how much data we could return */
if (!buffer || !buflen)
return nr_keys * sizeof(key_serial_t);

/* Copy the IDs of the subscribed keys into the buffer */
/* Copy as many key IDs as fit into the buffer */
if (buffer && buflen) {
ctx.buffer = (key_serial_t __user *)buffer;
ctx.buflen = buflen;
ctx.count = 0;
ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
ret = assoc_array_iterate(&keyring->keys,
keyring_read_iterator, &ctx);
if (ret < 0) {
kleave(" = %d [iterate]", ret);
kleave(" = %ld [iterate]", ret);
return ret;
}
}

kleave(" = %zu [ok]", ctx.count);
return ctx.count;
/* Return the size of the buffer needed */
ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t);
if (ret <= buflen)
kleave("= %ld [ok]", ret);
else
kleave("= %ld [buffer too small]", ret);
return ret;
}

/*
@@ -676,7 +676,7 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
if (atomic)
read_lock(&grp->list_lock);
else
down_read(&grp->list_mutex);
down_read_nested(&grp->list_mutex, hop);
list_for_each_entry(subs, &grp->list_head, src_list) {
/* both ports ready? */
if (atomic_read(&subs->ref_count) != 2)
@@ -106,7 +106,8 @@ enum {
#endif /* CONFIG_X86_X32 */
};

static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
static long __snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
void __user *argp = compat_ptr(arg);

@@ -127,7 +128,7 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
case SNDRV_TIMER_IOCTL_PAUSE:
case SNDRV_TIMER_IOCTL_PAUSE_OLD:
case SNDRV_TIMER_IOCTL_NEXT_DEVICE:
return snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
return __snd_timer_user_ioctl(file, cmd, (unsigned long)argp);
case SNDRV_TIMER_IOCTL_INFO32:
return snd_timer_user_info_compat(file, argp);
case SNDRV_TIMER_IOCTL_STATUS32:
@@ -139,3 +140,15 @@ static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd, uns
}
return -ENOIOCTLCMD;
}

static long snd_timer_user_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct snd_timer_user *tu = file->private_data;
long ret;

mutex_lock(&tu->ioctl_lock);
ret = __snd_timer_user_ioctl_compat(file, cmd, arg);
mutex_unlock(&tu->ioctl_lock);
return ret;
}
@@ -89,6 +89,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
return 0;
}

static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
struct adau *adau = snd_soc_codec_get_drvdata(codec);

/*
* If we are capturing, toggle the ADOSR bit in Converter Control 0 to
* avoid losing SNR (workaround from ADI). This must be done after
* the ADC(s) have been enabled. According to the data sheet, it is
* normally illegal to set this bit when the sampling rate is 96 kHz,
* but according to ADI it is acceptable for this workaround.
*/
regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR);
regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0,
ADAU17X1_CONVERTER0_ADOSR, 0);

return 0;
}

static const char * const adau17x1_mono_stereo_text[] = {
"Stereo",
"Mono Left Channel (L+R)",
@@ -120,7 +141,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = {
SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0,
&adau17x1_dac_mode_mux),

SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0),
SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0,
adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0),
SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0),
SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0),
@@ -123,5 +123,7 @@ bool adau17x1_has_dsp(struct adau *adau);

#define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7

#define ADAU17X1_CONVERTER0_ADOSR BIT(3)


#endif
@@ -1,3 +1,5 @@
libperf-y += Context.o

CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default
CFLAGS_Context.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes
CFLAGS_Context.o += -Wno-unused-parameter -Wno-nested-externs -Wno-undef
CFLAGS_Context.o += -Wno-switch-default -Wno-shadow
@@ -292,10 +292,11 @@ __add_event(struct list_head *list, int *idx,

event_attr_init(attr);

evsel = perf_evsel__new_idx(attr, (*idx)++);
evsel = perf_evsel__new_idx(attr, *idx);
if (!evsel)
return NULL;

(*idx)++;
evsel->cpus = cpu_map__get(cpus);
evsel->own_cpus = cpu_map__get(cpus);