Merge android-4.4.96 (aed4c54) into msm-4.4

* refs/heads/tmp-aed4c54
  Linux 4.4.96
  Revert "drm: bridge: add DT bindings for TI ths8135"
  ecryptfs: fix dereference of NULL user_key_payload
  x86/microcode/intel: Disable late loading on model 79
  regulator: fan53555: fix I2C device ids
  can: kvaser_usb: Ignore CMD_FLUSH_QUEUE_REPLY messages
  can: kvaser_usb: Correct return value in printout
  can: sun4i: fix loopback mode
  scsi: sg: Re-fix off by one in sg_fill_request_table()
  scsi: zfcp: fix erp_action use-before-initialize in REC action trace
  assoc_array: Fix a buggy node-splitting case
  Input: gtco - fix potential out-of-bound access
  Input: elan_i2c - add ELAN0611 to the ACPI table
  xen/gntdev: avoid out of bounds access in case of partial gntdev_mmap()
  fuse: fix READDIRPLUS skipping an entry
  spi: uapi: spidev: add missing ioctl header
  usb: xhci: Handle error condition in xhci_stop_device()
  ceph: unlock dangling spinlock in try_flush_caps()
  ALSA: hda - fix headset mic problem for Dell machines with alc236
  ALSA: hda/realtek - Add support for ALC236/ALC3204
  workqueue: replace pool->manager_arb mutex with a flag
  sched: EAS: upmigrate misfit current task
  sched: avoid pushing tasks to an offline CPU
  sched: Extend active balance to accept 'push_task' argument
  Revert "sched/core: Warn if ENERGY_AWARE is enabled but data is missing"
  Revert "sched/core: fix have_sched_energy_data build warning"
  FROMLIST: kbuild: clang: fix build failures with sparse check
  Revert "Revert "UPSTREAM: efi/libstub/arm64: Set -fpie when building the EFI stub""
  BACKPORT: efi/libstub: Unify command line param parsing

Conflicts:
	drivers/usb/host/xhci-hub.c
	kernel/sched/core.c
	kernel/sched/fair.c

Change-Id: Ie36ce5de516f02b2d553043009d9afee64e7ff24
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>

--- a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt
+++ /dev/null
@@ -1,46 +0,0 @@
-THS8135 Video DAC
------------------
-
-This is the binding for Texas Instruments THS8135 Video DAC bridge.
-
-Required properties:
-
-- compatible: Must be "ti,ths8135"
-
-Required nodes:
-
-This device has two video ports. Their connections are modelled using the OF
-graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-
-- Video port 0 for RGB input
-- Video port 1 for VGA output
-
-Example
--------
-
-vga-bridge {
-	compatible = "ti,ths8135";
-	#address-cells = <1>;
-	#size-cells = <0>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			reg = <0>;
-
-			vga_bridge_in: endpoint {
-				remote-endpoint = <&lcdc_out_vga>;
-			};
-		};
-
-		port@1 {
-			reg = <1>;
-
-			vga_bridge_out: endpoint {
-				remote-endpoint = <&vga_con_in>;
-			};
-		};
-	};
-};


--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 95
+SUBLEVEL = 96
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -700,11 +700,11 @@ KBUILD_CFLAGS += $(stackp-flag)
 ifeq ($(cc-name),clang)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TRIPLE	?= $(CROSS_COMPILE)
-CLANG_TARGET	:= -target $(notdir $(CLANG_TRIPLE:%-=%))
+CLANG_TARGET	:= --target=$(notdir $(CLANG_TRIPLE:%-=%))
 GCC_TOOLCHAIN	:= $(realpath $(dir $(shell which $(LD)))/..)
 endif
 ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC	:= -gcc-toolchain $(GCC_TOOLCHAIN)
+CLANG_GCC_TC	:= --gcc-toolchain=$(GCC_TOOLCHAIN)
 endif
 KBUILD_CFLAGS	+= $(CLANG_TARGET) $(CLANG_GCC_TC)
 KBUILD_AFLAGS	+= $(CLANG_TARGET) $(CLANG_GCC_TC)


--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -990,6 +990,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n)
 	return 0;
 }
 
+static bool is_blacklisted(unsigned int cpu)
+{
+	struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+	if (c->x86 == 6 && c->x86_model == 79) {
+		pr_err_once("late loading on model 79 is disabled.\n");
+		return true;
+	}
+
+	return false;
+}
+
 static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 					     bool refresh_fw)
 {
@@ -998,6 +1010,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
 	const struct firmware *firmware;
 	enum ucode_state ret;
 
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	sprintf(name, "intel-ucode/%02x-%02x-%02x",
 		c->x86, c->x86_model, c->x86_mask);
 
@@ -1022,6 +1037,9 @@ static int get_ucode_user(void *to, const void *from, size_t n)
 
 static enum ucode_state
 request_microcode_user(int cpu, const void __user *buf, size_t size)
 {
+	if (is_blacklisted(cpu))
+		return UCODE_NFOUND;
+
 	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
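Aside on the pattern above: both microcode load paths (firmware and user copy) call the same deny-list helper before doing any work, so a blacklisted CPU never reaches the loader. A minimal userspace sketch of that gate; the struct, function names and return strings are illustrative, only the family-6/model-79 test comes from the patch:

    #include <stdbool.h>
    #include <stdio.h>

    struct cpuinfo { unsigned int x86, x86_model; };

    /* family 6, model 79 (0x4f), as in the patch above */
    static bool is_blacklisted(const struct cpuinfo *c)
    {
            return c->x86 == 6 && c->x86_model == 79;
    }

    static const char *request_microcode(const struct cpuinfo *c)
    {
            if (is_blacklisted(c))
                    return "UCODE_NFOUND"; /* refuse before touching firmware */
            return "loaded";
    }

    int main(void)
    {
            struct cpuinfo bdx = { 6, 79 }, other = { 6, 85 };

            printf("model 79: %s\n", request_microcode(&bdx));
            printf("model 85: %s\n", request_microcode(&other));
            return 0;
    }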


--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -10,7 +10,7 @@ cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
 				   -fPIC -fno-strict-aliasing -mno-red-zone \
 				   -mno-mmx -mno-sse
 
-cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS))
+cflags-$(CONFIG_ARM64)		:= $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
 cflags-$(CONFIG_ARM)		:= $(subst -pg,,$(KBUILD_CFLAGS)) \
 				   -fno-builtin -fpic -mno-single-pic-base


--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -18,8 +18,6 @@
 
 #include "efistub.h"
 
-bool __nokaslr;
-
 static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
 {
 	static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
@@ -221,18 +219,6 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
 		goto fail;
 	}
 
-	/* check whether 'nokaslr' was passed on the command line */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
-		static const u8 default_cmdline[] = CONFIG_CMDLINE;
-		const u8 *str, *cmdline = cmdline_ptr;
-
-		if (IS_ENABLED(CONFIG_CMDLINE_FORCE))
-			cmdline = default_cmdline;
-		str = strstr(cmdline, "nokaslr");
-		if (str == cmdline || (str > cmdline && *(str - 1) == ' '))
-			__nokaslr = true;
-	}
-
 	status = handle_kernel_image(sys_table, image_addr, &image_size,
 				     &reserve_addr,
 				     &reserve_size,
@@ -242,9 +228,13 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
 		goto fail_free_cmdline;
 	}
 
-	status = efi_parse_options(cmdline_ptr);
-	if (status != EFI_SUCCESS)
-		pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
+	if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
+	    IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
+	    cmdline_size == 0)
+		efi_parse_options(CONFIG_CMDLINE);
+
+	if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && cmdline_size > 0)
+		efi_parse_options(cmdline_ptr);
 
 	/*
 	 * Unauthenticated device tree data is a security hazard, so


--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -23,8 +23,6 @@
 
 #include "efistub.h"
 
-extern bool __nokaslr;
-
 efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 					unsigned long *image_addr,
 					unsigned long *image_size,
@@ -40,7 +38,7 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
 	u64 phys_seed = 0;
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
-		if (!__nokaslr) {
+		if (!nokaslr()) {
 			status = efi_get_random_bytes(sys_table_arg,
 						      sizeof(phys_seed),
 						      (u8 *)&phys_seed);


--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -41,6 +41,13 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
 #define EFI_ALLOC_ALIGN		EFI_PAGE_SIZE
 #endif
 
+static int __section(.data) __nokaslr;
+
+int __pure nokaslr(void)
+{
+	return __nokaslr;
+}
+
 struct file_info {
 	efi_file_handle_t *handle;
 	u64 size;
@@ -313,10 +320,14 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
  * environments, first in the early boot environment of the EFI boot
  * stub, and subsequently during the kernel boot.
  */
-efi_status_t efi_parse_options(char *cmdline)
+efi_status_t efi_parse_options(char const *cmdline)
 {
 	char *str;
 
+	str = strstr(cmdline, "nokaslr");
+	if (str == cmdline || (str && str > cmdline && *(str - 1) == ' '))
+		__nokaslr = 1;
+
 	/*
 	 * If no EFI parameters were specified on the cmdline we've got
 	 * nothing to do.
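Aside: the strstr() test added above accepts "nokaslr" only at the very start of the command line or immediately after a space, so a token such as "xnokaslr" is ignored (note that it does not check the trailing boundary, so "nokaslrfoo" would still match). A self-contained sketch of the same test; cmdline_has_nokaslr() is an illustrative wrapper, not a kernel function:

    #include <stdio.h>
    #include <string.h>

    static int cmdline_has_nokaslr(const char *cmdline)
    {
            const char *str = strstr(cmdline, "nokaslr");

            /* same boundary test as efi_parse_options() above */
            return str == cmdline || (str && str > cmdline && *(str - 1) == ' ');
    }

    int main(void)
    {
            printf("%d\n", cmdline_has_nokaslr("nokaslr root=/dev/vda")); /* 1 */
            printf("%d\n", cmdline_has_nokaslr("quiet nokaslr"));         /* 1 */
            printf("%d\n", cmdline_has_nokaslr("xnokaslr"));              /* 0 */
            printf("%d\n", cmdline_has_nokaslr("quiet"));                 /* 0 */
            return 0;
    }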


--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,6 +5,8 @@
 /* error code which can't be mistaken for valid address */
 #define EFI_ERROR	(~0UL)
 
+extern int __pure nokaslr(void);
+
 void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
 
 efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,


--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1240,6 +1240,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0605", 0 },
 	{ "ELAN0609", 0 },
 	{ "ELAN060B", 0 },
+	{ "ELAN0611", 0 },
 	{ "ELAN1000", 0 },
 	{ }
 };


--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -231,13 +231,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
 
 	/* Walk this report and pull out the info we need */
 	while (i < length) {
-		prefix = report[i];
-		/* Skip over prefix */
-		i++;
+		prefix = report[i++];
 
 		/* Determine data size and save the data in the proper variable */
-		size = PREF_SIZE(prefix);
+		size = (1U << PREF_SIZE(prefix)) >> 1;
+		if (i + size > length) {
+			dev_err(ddev,
+				"Not enough data (need %d, have %d)\n",
+				i + size, length);
+			break;
+		}
 
 		switch (size) {
 		case 1:
 			data = report[i];
@@ -245,8 +249,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
 		case 2:
 			data16 = get_unaligned_le16(&report[i]);
 			break;
 		case 3:
-			size = 4;
 		case 4:
 			data32 = get_unaligned_le32(&report[i]);
 			break;
 		}
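Background for the size fix: in a HID report descriptor, the low two bits of a short item's prefix (bSize) encode a payload of 0, 1, 2 or 4 bytes, so the raw field value 3 means a 4-byte payload. The old code used the raw value directly as a byte count and never checked it against the remaining report length. A small sketch of the corrected mapping; PREF_SIZE here is a local stand-in for the driver's macro:

    #include <stdio.h>

    #define PREF_SIZE(x) ((x) & 0x03)   /* low two bits of the item prefix */

    int main(void)
    {
            unsigned int bsize;

            for (bsize = 0; bsize < 4; bsize++)
                    printf("bSize=%u: old=%u byte(s), new=%u byte(s)\n",
                           bsize,
                           PREF_SIZE(bsize),               /* 0, 1, 2, 3 (wrong for 3) */
                           (1U << PREF_SIZE(bsize)) >> 1); /* 0, 1, 2, 4 */
            return 0;
    }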


--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev)
 
 	/* enter the selected mode */
 	mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR);
-	if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
 		mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE;
 	else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
 		mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE;
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev)
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING |
 				       CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_LOOPBACK |
-				       CAN_CTRLMODE_PRESUME_ACK |
 				       CAN_CTRLMODE_3_SAMPLES;
 	priv->base = addr;
 	priv->clk = clk;


--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -134,6 +134,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id)
 #define CMD_RESET_ERROR_COUNTER		49
 #define CMD_TX_ACKNOWLEDGE		50
 #define CMD_CAN_ERROR_EVENT		51
+#define CMD_FLUSH_QUEUE_REPLY		68
 
 #define CMD_LEAF_USB_THROTTLE		77
 #define CMD_LEAF_LOG_MESSAGE		106
@@ -1297,6 +1298,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev,
 			goto warn;
 		break;
 
+	case CMD_FLUSH_QUEUE_REPLY:
+		if (dev->family != KVASER_LEAF)
+			goto warn;
+		break;
+
 	default:
 warn:		dev_warn(dev->udev->dev.parent,
 			 "Unhandled message (%d)\n", msg->id);
@@ -1607,7 +1613,8 @@ static int kvaser_usb_close(struct net_device *netdev)
 	if (err)
 		netdev_warn(netdev, "Cannot flush queue, error %d\n", err);
 
-	if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel))
+	err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel);
+	if (err)
 		netdev_warn(netdev, "Cannot reset card, error %d\n", err);
 
 	err = kvaser_usb_stop_chip(priv);


--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -449,7 +449,10 @@ static const struct i2c_device_id fan53555_id[] = {
 		.name = "fan53555",
 		.driver_data = FAN53555_VENDOR_FAIRCHILD
 	}, {
-		.name = "syr82x",
+		.name = "syr827",
+		.driver_data = FAN53555_VENDOR_SILERGY
+	}, {
+		.name = "syr828",
 		.driver_data = FAN53555_VENDOR_SILERGY
 	}, {
 		.name = "hl7509",


--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -358,6 +358,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 
 	adapter->next_port_scan = jiffies;
 
+	adapter->erp_action.adapter = adapter;
+
 	if (zfcp_qdio_setup(adapter))
 		goto failed;
 
@@ -514,6 +516,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	port->dev.groups = zfcp_port_attr_groups;
 	port->dev.release = zfcp_port_release;
 
+	port->erp_action.adapter = adapter;
+	port->erp_action.port = port;
+
 	if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) {
 		kfree(port);
 		goto err_out;


--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE,
 			  &zfcp_sdev->status);
 		erp_action = &zfcp_sdev->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
-		erp_action->sdev = sdev;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != sdev);
 		if (!(atomic_read(&zfcp_sdev->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		zfcp_erp_action_dismiss_port(port);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
-		erp_action->port = port;
+		WARN_ON_ONCE(erp_action->port != port);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		zfcp_erp_action_dismiss_adapter(adapter);
 		atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
 		erp_action = &adapter->erp_action;
-		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
+		WARN_ON_ONCE(erp_action->port != NULL);
+		WARN_ON_ONCE(erp_action->sdev != NULL);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
 			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 		return NULL;
 	}
 
-	erp_action->adapter = adapter;
+	WARN_ON_ONCE(erp_action->adapter != adapter);
+	memset(&erp_action->list, 0, sizeof(erp_action->list));
+	memset(&erp_action->timer, 0, sizeof(erp_action->timer));
+	erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED;
+	erp_action->fsf_req_id = 0;
 	erp_action->action = need;
 	erp_action->status = act_status;
 


--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 	struct zfcp_unit *unit;
 	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;
 
+	zfcp_sdev->erp_action.adapter = adapter;
+	zfcp_sdev->erp_action.sdev = sdev;
+
 	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (!port)
 		return -ENXIO;
 
+	zfcp_sdev->erp_action.port = port;
+
 	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
 	if (unit)
 		put_device(&unit->dev);


--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -848,7 +848,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
 
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		rinfo[val].req_state = srp->done + 1;
 		rinfo[val].problem =
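The one-character change above matters because rinfo[] has exactly SG_MAX_QUEUE entries, so the last valid index is SG_MAX_QUEUE - 1; with '>' the loop still wrote one element past the end before breaking. A toy demonstration of the corrected guard (the array size is illustrative):

    #include <stdio.h>

    #define SG_MAX_QUEUE 16

    int main(void)
    {
            int rinfo[SG_MAX_QUEUE];
            int val;

            for (val = 0; ; val++) {
                    if (val >= SG_MAX_QUEUE) /* 'val > SG_MAX_QUEUE' would still
                                                write rinfo[16], one past the end */
                            break;
                    rinfo[val] = val;
            }
            printf("filled %d slots, last index %d\n", val, val - 1);
            return 0;
    }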


--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -397,25 +397,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 					 GFP_NOWAIT);
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
-				xhci_free_command(xhci, cmd);
-				return -ENOMEM;
+				ret = -ENOMEM;
+				goto cmd_cleanup;
 			}
 
 			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
-						       i, suspend);
+							i, suspend);
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, command);
-				goto err_cmd_queue;
+				goto cmd_cleanup;
 			}
 		}
 	}
 
 	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
 	if (ret) {
 		spin_unlock_irqrestore(&xhci->lock, flags);
-		goto err_cmd_queue;
+		goto cmd_cleanup;
 	}
 
 	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
@@ -427,7 +427,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 		ret = -ETIME;
 	}
 
-err_cmd_queue:
+cmd_cleanup:
 	xhci_free_command(xhci, cmd);
 	return ret;
 }
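The label rename goes with the structural change: every failure path now funnels through one cleanup label that frees 'cmd', instead of one path freeing it inline and returning. A schematic userspace sketch of that single-exit shape; names and allocation sizes are made up:

    #include <stdlib.h>
    #include <stdio.h>

    static int stop_device(void)
    {
            int ret = 0;
            char *cmd = malloc(64);
            char *command;

            if (!cmd)
                    return -1;

            command = malloc(64);
            if (!command) {
                    ret = -1;
                    goto cmd_cleanup;   /* was: free(cmd); return; duplicated */
            }
            free(command);

    cmd_cleanup:
            free(cmd);                  /* one place frees cmd on every path */
            return ret;
    }

    int main(void)
    {
            printf("stop_device() = %d\n", stop_device());
            return 0;
    }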


--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -827,6 +827,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	mutex_unlock(&priv->lock);
 
 	if (use_ptemod) {
+		map->pages_vm_start = vma->vm_start;
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
 					  vma->vm_end - vma->vm_start,
 					  find_grant_ptes, map);
@@ -864,7 +865,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 				    set_grant_ptes_as_special, NULL);
 		}
 #endif
-		map->pages_vm_start = vma->vm_start;
 	}
 
 	return 0;


--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1850,6 +1850,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid)
 retry:
 	spin_lock(&ci->i_ceph_lock);
 	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
+		spin_unlock(&ci->i_ceph_lock);
 		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
 		goto out;
 	}
@@ -1867,8 +1868,10 @@ retry:
 			mutex_lock(&session->s_mutex);
 			goto retry;
 		}
-		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
+			spin_unlock(&ci->i_ceph_lock);
 			goto out;
+		}
 
 		flushing = __mark_caps_flushing(inode, session, &flush_tid,
 						&oldest_flush_tid);


--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context {
 static inline struct ecryptfs_auth_tok *
 ecryptfs_get_encrypted_key_payload_data(struct key *key)
 {
-	if (key->type == &key_type_encrypted)
-		return (struct ecryptfs_auth_tok *)
-			(&((struct encrypted_key_payload *)key->payload.data[0])->payload_data);
-	else
+	struct encrypted_key_payload *payload;
+
+	if (key->type != &key_type_encrypted)
 		return NULL;
+
+	payload = key->payload.data[0];
+	if (!payload)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)payload->payload_data;
 }
 
 static inline struct key *ecryptfs_get_encrypted_key(char *sig)
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok *
 ecryptfs_get_key_payload_data(struct key *key)
 {
 	struct ecryptfs_auth_tok *auth_tok;
+	const struct user_key_payload *ukp;
 
 	auth_tok = ecryptfs_get_encrypted_key_payload_data(key);
-	if (!auth_tok)
-		return (struct ecryptfs_auth_tok *)user_key_payload(key)->data;
-	else
+	if (auth_tok)
 		return auth_tok;
+
+	ukp = user_key_payload(key);
+	if (!ukp)
+		return ERR_PTR(-EKEYREVOKED);
+
+	return (struct ecryptfs_auth_tok *)ukp->data;
 }
 
 #define ECRYPTFS_MAX_KEYSET_SIZE 1024
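Background for both helpers above: when a key is revoked, its payload pointer is NULL, so blindly dereferencing user_key_payload() or key->payload.data[0] is a NULL pointer dereference. The fix makes the helpers return ERR_PTR(-EKEYREVOKED) and lets the caller unwrap it. A userspace model of the ERR_PTR convention; the macros are re-implemented here for the demo (the real ones live in include/linux/err.h) and 128 stands in for EKEYREVOKED:

    #include <stdio.h>

    #define EKEYREVOKED 128

    static void *ERR_PTR(long err)     { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }

    struct payload { const char *data; };

    static const char *get_payload_data(const struct payload *p)
    {
            if (!p)                       /* revoked key: payload is NULL */
                    return ERR_PTR(-EKEYREVOKED);
            return p->data;
    }

    int main(void)
    {
            struct payload live = { "auth tok" };
            const char *d = get_payload_data(&live);

            if (!IS_ERR(d))
                    printf("payload: %s\n", d);

            d = get_payload_data(NULL);
            if (IS_ERR(d))
                    printf("error: %ld\n", PTR_ERR(d));
            return 0;
    }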


--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -461,7 +461,8 @@ out:
 * @auth_tok_key: key containing the authentication token
 * @auth_tok: authentication token
 *
- * Returns zero on valid auth tok; -EINVAL otherwise
+ * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or
+ * -EKEYREVOKED if the key was revoked before we acquired its semaphore.
 */
 static int
 ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
@@ -470,6 +471,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key,
 	int rc = 0;
 
 	(*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key);
+	if (IS_ERR(*auth_tok)) {
+		rc = PTR_ERR(*auth_tok);
+		*auth_tok = NULL;
+		goto out;
+	}
+
 	if (ecryptfs_verify_version((*auth_tok)->version)) {
 		printk(KERN_ERR "Data structure version mismatch. Userspace "
 		       "tools must match eCryptfs kernel module with major "


--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1343,7 +1343,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
 		 */
 		over = !dir_emit(ctx, dirent->name, dirent->namelen,
 			       dirent->ino, dirent->type);
-		ctx->pos = dirent->off;
+		if (!over)
+			ctx->pos = dirent->off;
 	}
 
 	buf += reclen;
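The point of the fix: dir_emit() returns false when the caller's buffer cannot take another entry, and the old code advanced ctx->pos anyway, so that entry was silently skipped by the next READDIRPLUS request. A toy model of the corrected loop; dir_emit() and the offsets are simplified stand-ins:

    #include <stdio.h>

    struct ctx { int pos; };

    static int dir_emit(int *room, const char *name)
    {
            if (*room == 0)
                    return 0;    /* buffer full: entry NOT consumed */
            (*room)--;
            printf("emitted %s\n", name);
            return 1;
    }

    int main(void)
    {
            const char *names[] = { "a", "b", "c" };
            int offs[] = { 1, 2, 3 };
            struct ctx ctx = { 0 };
            int room = 2, i;

            for (i = 0; i < 3; i++) {
                    int over = !dir_emit(&room, names[i]);

                    if (!over)           /* the fix: advance only on success */
                            ctx.pos = offs[i];
            }
            printf("resume at off %d, so \"c\" is re-sent next time\n", ctx.pos);
            return 0;
    }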


--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1292,7 +1292,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 				  unsigned long *load_addr,
 				  unsigned long *load_size);
 
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
 
 bool efi_runtime_disabled(void);
 #endif /* _LINUX_EFI_H */


--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -23,6 +23,7 @@
 #define SPIDEV_H
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 /* User space versions of kernel symbols for SPI clocking modes,
  * matching <linux/spi/spi.h>


--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -99,10 +99,6 @@
 
 ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
 
-#ifdef CONFIG_SMP
-static bool have_sched_energy_data(void);
-#endif
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -205,11 +201,6 @@ static int sched_feat_set(char *cmp)
 			sysctl_sched_features &= ~(1UL << i);
 			sched_feat_disable(i);
 		} else {
-#ifdef CONFIG_SMP
-			if (i == __SCHED_FEAT_ENERGY_AWARE)
-				WARN(!have_sched_energy_data(),
-				     "Missing sched energy data\n");
-#endif
 			sysctl_sched_features |= (1UL << i);
 			sched_feat_enable(i);
 		}
@@ -7202,19 +7193,6 @@ static void init_sched_groups_capacity(int cpu, struct sched_domain *sd)
 	atomic_set(&sg->sgc->nr_busy_cpus, sg->group_weight);
 }
 
-static bool have_sched_energy_data(void)
-{
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		if (!rcu_dereference(per_cpu(sd_scs, cpu)) ||
-		    !rcu_dereference(per_cpu(sd_ea, cpu)))
-			return false;
-	}
-
-	return true;
-}
-
 /*
  * Check that the per-cpu provided sd energy data is consistent for all cpus
  * within the mask.
@@ -8031,9 +8009,6 @@ static int build_sched_domains(const struct cpumask *cpu_map,
 	}
 	rcu_read_unlock();
 
-	WARN(sched_feat(ENERGY_AWARE) && !have_sched_energy_data(),
-	     "Missing data for energy aware scheduling\n");
-
 	ret = 0;
 error:
 	__free_domain_allocs(&d, alloc_state, cpu_map);


--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3660,68 +3660,6 @@ static inline int migration_needed(struct task_struct *p, int cpu)
 	return 0;
 }
 
-static inline int
-kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
-{
-	unsigned long flags;
-	int rc = 0;
-
-	/* Invoke active balance to force migrate currently running task */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	if (!rq->active_balance) {
-		rq->active_balance = 1;
-		rq->push_cpu = new_cpu;
-		get_task_struct(p);
-		rq->push_task = p;
-		rc = 1;
-	}
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-
-	return rc;
-}
-
-static DEFINE_RAW_SPINLOCK(migration_lock);
-
-static bool do_migration(int reason, int new_cpu, int cpu)
-{
-	if ((reason == UP_MIGRATION || reason == DOWN_MIGRATION)
-				&& same_cluster(new_cpu, cpu))
-		return false;
-
-	/* Inter cluster high irqload migrations are OK */
-	return new_cpu != cpu;
-}
-
-/*
- * Check if currently running task should be migrated to a better cpu.
- *
- * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
- */
-void check_for_migration(struct rq *rq, struct task_struct *p)
-{
-	int cpu = cpu_of(rq), new_cpu;
-	int active_balance = 0, reason;
-
-	reason = migration_needed(p, cpu);
-	if (!reason)
-		return;
-
-	raw_spin_lock(&migration_lock);
-	new_cpu = select_best_cpu(p, cpu, reason, 0);
-
-	if (do_migration(reason, new_cpu, cpu)) {
-		active_balance = kick_active_balance(rq, p, new_cpu);
-		if (active_balance)
-			mark_reserved(new_cpu);
-	}
-
-	raw_spin_unlock(&migration_lock);
-
-	if (active_balance)
-		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
-					&rq->active_balance_work);
-}
-
 #ifdef CONFIG_CFS_BANDWIDTH
 static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
@@ -7464,7 +7402,9 @@ done:
 
 /*
  * cpu_util_wake: Compute cpu utilization with any contributions from
- * the waking task p removed.
+ * the waking task p removed. check_for_migration() looks for a better CPU of
+ * rq->curr. For that case we should return cpu util with contributions from
+ * currently running task p removed.
 */
 static int cpu_util_wake(int cpu, struct task_struct *p)
 {
@@ -7477,7 +7417,8 @@ static int cpu_util_wake(int cpu, struct task_struct *p)
 	 * utilization from cpu utilization. Instead just use
 	 * cpu_util for this case.
 	 */
-	if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
+	    p->state == TASK_WAKING)
 		return cpu_util(cpu);
 #endif
 	/* Task has no contribution or is new */
@@ -11582,6 +11523,47 @@ static void rq_offline_fair(struct rq *rq)
 	unthrottle_offline_cfs_rqs(rq);
 }
 
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	int rc = 0;
+
+	/* Invoke active balance to force migrate currently running task */
+	raw_spin_lock(&rq->lock);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock(&rq->lock);
+
+	return rc;
+}
+
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int new_cpu;
+	int active_balance;
+	int cpu = task_cpu(p);
+
+	if (rq->misfit_task) {
+		if (rq->curr->state != TASK_RUNNING ||
+		    rq->curr->nr_cpus_allowed == 1)
+			return;
+
+		new_cpu = select_energy_cpu_brute(p, cpu, 0);
+		if (capacity_orig_of(new_cpu) > capacity_orig_of(cpu)) {
+			active_balance = kick_active_balance(rq, p, new_cpu);
+			if (active_balance)
+				stop_one_cpu_nowait(cpu,
+						active_load_balance_cpu_stop,
+						rq, &rq->active_balance_work);
+		}
+	}
+}
+
 #endif /* CONFIG_SMP */
 
 /*


--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -32,8 +32,10 @@ extern long calc_load_fold_active(struct rq *this_rq);
 
 #ifdef CONFIG_SMP
 extern void update_cpu_load_active(struct rq *this_rq);
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
 #else
 static inline void update_cpu_load_active(struct rq *this_rq) { }
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 #endif
 
 /*
@@ -1447,7 +1449,6 @@ static inline bool is_short_burst_task(struct task_struct *p)
 		p->ravg.avg_sleep_time > sysctl_sched_short_sleep;
 }
 
-extern void check_for_migration(struct rq *rq, struct task_struct *p);
 extern void pre_big_task_count_change(const struct cpumask *cpus);
 extern void post_big_task_count_change(const struct cpumask *cpus);
 extern void set_hmp_defaults(void);
@@ -1707,7 +1708,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
 	return 1;
 }
 
-static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
 static inline void pre_big_task_count_change(void) { }
 static inline void post_big_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }


--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -70,6 +70,7 @@ enum {
 	 * attach_mutex to avoid changing binding state while
 	 * worker_attach_to_pool() is in progress.
 	 */
+	POOL_MANAGER_ACTIVE	= 1 << 0,	/* being managed */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
 
 	/* worker flags */
@@ -167,7 +168,6 @@ struct worker_pool {
 						/* L: hash of busy workers */
 
 	/* see manage_workers() for details on the two manager mutexes */
-	struct mutex		manager_arb;	/* manager arbitration */
 	struct worker		*manager;	/* L: purely informational */
 	struct mutex		attach_mutex;	/* attach/detach exclusion */
 	struct list_head	workers;	/* A: attached workers */
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
+static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 
 static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
@@ -812,7 +813,7 @@ static bool need_to_create_worker(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-	bool managing = mutex_is_locked(&pool->manager_arb);
+	bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1964,24 +1965,17 @@ static bool manage_workers(struct worker *worker)
 {
 	struct worker_pool *pool = worker->pool;
 
-	/*
-	 * Anyone who successfully grabs manager_arb wins the arbitration
-	 * and becomes the manager.  mutex_trylock() on pool->manager_arb
-	 * failure while holding pool->lock reliably indicates that someone
-	 * else is managing the pool and the worker which failed trylock
-	 * can proceed to executing work items.  This means that anyone
-	 * grabbing manager_arb is responsible for actually performing
-	 * manager duties.  If manager_arb is grabbed and released without
-	 * actual management, the pool may stall indefinitely.
-	 */
-	if (!mutex_trylock(&pool->manager_arb))
+	if (pool->flags & POOL_MANAGER_ACTIVE)
 		return false;
 
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	pool->manager = worker;
 
 	maybe_create_worker(pool);
 
 	pool->manager = NULL;
-	mutex_unlock(&pool->manager_arb);
+	pool->flags &= ~POOL_MANAGER_ACTIVE;
+	wake_up(&wq_manager_wait);
+
 	return true;
 }
@@ -3141,7 +3135,6 @@ static int init_worker_pool(struct worker_pool *pool)
 	setup_timer(&pool->mayday_timer, pool_mayday_timeout,
 		    (unsigned long)pool);
 
-	mutex_init(&pool->manager_arb);
 	mutex_init(&pool->attach_mutex);
 	INIT_LIST_HEAD(&pool->workers);
 
@@ -3211,13 +3204,15 @@ static void put_unbound_pool(struct worker_pool *pool)
 	hash_del(&pool->hash_node);
 
 	/*
-	 * Become the manager and destroy all workers.  Grabbing
-	 * manager_arb prevents @pool's workers from blocking on
-	 * attach_mutex.
+	 * Become the manager and destroy all workers.  This prevents
+	 * @pool's workers from blocking on attach_mutex.  We're the last
+	 * manager and @pool gets freed with the flag set.
 	 */
-	mutex_lock(&pool->manager_arb);
 	spin_lock_irq(&pool->lock);
+	wait_event_lock_irq(wq_manager_wait,
+			    !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+	pool->flags |= POOL_MANAGER_ACTIVE;
 	while ((worker = first_idle_worker(pool)))
 		destroy_worker(worker);
 	WARN_ON(pool->nr_workers || pool->nr_idle);
@@ -3231,8 +3226,6 @@ static void put_unbound_pool(struct worker_pool *pool)
 	if (pool->detach_completion)
 		wait_for_completion(pool->detach_completion);
 
-	mutex_unlock(&pool->manager_arb);
-
 	/* shut down the timers */
 	del_timer_sync(&pool->idle_timer);
 	del_timer_sync(&pool->mayday_timer);
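Shape of the workqueue change: manager arbitration becomes a pool flag tested and set under pool->lock, and put_unbound_pool() sleeps on wq_manager_wait until the flag clears, so no mutex lifetime is tied to the pool. A pthreads analog of the flag-plus-waitqueue pattern, purely illustrative (the kernel uses wait_event_lock_irq(), not condition variables):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t manager_wait = PTHREAD_COND_INITIALIZER;
    static bool manager_active;            /* analog of POOL_MANAGER_ACTIVE */

    static bool manage_workers(void)
    {
            pthread_mutex_lock(&pool_lock);
            if (manager_active) {          /* someone else is managing */
                    pthread_mutex_unlock(&pool_lock);
                    return false;
            }
            manager_active = true;
            pthread_mutex_unlock(&pool_lock);

            /* ... create/destroy workers here ... */

            pthread_mutex_lock(&pool_lock);
            manager_active = false;
            pthread_cond_broadcast(&manager_wait); /* wake_up(&wq_manager_wait) */
            pthread_mutex_unlock(&pool_lock);
            return true;
    }

    static void put_unbound_pool(void)
    {
            pthread_mutex_lock(&pool_lock);
            while (manager_active)         /* wait for the last manager */
                    pthread_cond_wait(&manager_wait, &pool_lock);
            manager_active = true;         /* become the final manager */
            pthread_mutex_unlock(&pool_lock);
    }

    int main(void)
    {
            manage_workers();
            put_unbound_pool();
            printf("pool torn down\n");
            return 0;
    }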


--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
 		if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
 			goto all_leaves_cluster_together;
 
-		/* Otherwise we can just insert a new node ahead of the old
-		 * one.
+		/* Otherwise all the old leaves cluster in the same slot, but
+		 * the new leaf wants to go into a different slot - so we
+		 * create a new node (n0) to hold the new leaf and a pointer to
+		 * a new node (n1) holding all the old leaves.
+		 *
+		 * This can be done by falling through to the node splitting
+		 * path.
 		 */
-		goto present_leaves_cluster_but_not_new_leaf;
+		pr_devel("present leaves cluster but not new leaf\n");
 	}
 
 split_node:
 	pr_devel("split node\n");
 
-	/* We need to split the current node; we know that the node doesn't
-	 * simply contain a full set of leaves that cluster together (it
-	 * contains meta pointers and/or non-clustering leaves).
+	/* We need to split the current node.  The node must contain anything
+	 * from a single leaf (in the one leaf case, this leaf will cluster
+	 * with the new leaf) and the rest meta-pointers, to all leaves, some
+	 * of which may cluster.
+	 *
+	 * It won't contain the case in which all the current leaves plus the
+	 * new leaves want to cluster in the same slot.
 	 *
 	 * We need to expel at least two leaves out of a set consisting of the
-	 * leaves in the node and the new leaf.
+	 * leaves in the node and the new leaf.  The current meta pointers can
+	 * just be copied as they shouldn't cluster with any of the leaves.
 	 *
 	 * We need a new node (n0) to replace the current one and a new node to
 	 * take the expelled nodes (n1).
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy:
 	pr_devel("<--%s() = ok [split node]\n", __func__);
 	return true;
 
-present_leaves_cluster_but_not_new_leaf:
-	/* All the old leaves cluster in the same slot, but the new leaf wants
-	 * to go into a different slot, so we create a new node to hold the new
-	 * leaf and a pointer to a new node holding all the old leaves.
-	 */
-	pr_devel("present leaves cluster but not new leaf\n");
-
-	new_n0->back_pointer = node->back_pointer;
-	new_n0->parent_slot = node->parent_slot;
-	new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
-	new_n1->parent_slot = edit->segment_cache[0];
-	new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
-	edit->adjust_count_on = new_n0;
-
-	for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
-		new_n1->slots[i] = node->slots[i];
-
-	new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
-	edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-
-	edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
-	edit->set[0].to = assoc_array_node_to_ptr(new_n0);
-	edit->excised_meta[0] = assoc_array_node_to_ptr(node);
-	pr_devel("<--%s() = ok [insert node before]\n", __func__);
-	return true;
-
 all_leaves_cluster_together:
 	/* All the leaves, new and old, want to cluster together in this node
 	 * in the same slot, so we have to replace this node with a shortcut to


--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
 		break;
 	case 0x10ec0225:
 	case 0x10ec0233:
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 	case 0x10ec0282:
@@ -909,6 +910,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
 	{ 0x10ec0275, 0x1028, 0, "ALC3260" },
 	{ 0x10ec0899, 0x1028, 0, "ALC3861" },
 	{ 0x10ec0298, 0x1028, 0, "ALC3266" },
+	{ 0x10ec0236, 0x1028, 0, "ALC3204" },
 	{ 0x10ec0256, 0x1028, 0, "ALC3246" },
 	{ 0x10ec0225, 0x1028, 0, "ALC3253" },
 	{ 0x10ec0295, 0x1028, 0, "ALC3254" },
@@ -3694,6 +3696,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 		alc_process_coef_fw(codec, coef0255_1);
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		alc_process_coef_fw(codec, coef0255);
@@ -3774,6 +3777,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 	};
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
@@ -3879,6 +3883,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
 	case 0x10ec0295:
 		alc_process_coef_fw(codec, coef0225);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -3962,6 +3967,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4052,6 +4058,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, coef0255);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0256);
 		break;
@@ -4119,6 +4126,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
 	};
 
 	switch (codec->core.vendor_id) {
+	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, coef0255);
@@ -4320,6 +4328,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
 	case 0x10ec0255:
 		alc_process_coef_fw(codec, alc255fw);
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		alc_process_coef_fw(codec, alc256fw);
 		break;
@@ -5834,6 +5843,14 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
 		ALC225_STANDARD_PINS,
 		{0x12, 0xb7a60130},
 		{0x1b, 0x90170110}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170110},
+		{0x21, 0x02211020}),
+	SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+		{0x12, 0x90a60140},
+		{0x14, 0x90170150},
+		{0x21, 0x02211020}),
 	SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
 		{0x14, 0x90170110},
 		{0x21, 0x02211020}),
@@ -6208,6 +6225,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0255:
 		spec->codec_variant = ALC269_TYPE_ALC255;
 		break;
+	case 0x10ec0236:
 	case 0x10ec0256:
 		spec->codec_variant = ALC269_TYPE_ALC256;
 		spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
@@ -7147,6 +7165,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0234, "ALC234", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
+	HDA_CODEC_ENTRY(0x10ec0236, "ALC236", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0255, "ALC255", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0256, "ALC256", patch_alc269),
 	HDA_CODEC_ENTRY(0x10ec0260, "ALC260", patch_alc260),