Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
commit 19192a140a
66 changed files with 630 additions and 255 deletions
Makefile (2 changes)
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 39
+SUBLEVEL = 40
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@@ -239,8 +239,7 @@ static int __init xen_guest_init(void)
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
-	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
-				       sizeof(struct vcpu_info));
+	xen_vcpu_info = alloc_percpu(struct vcpu_info);
	if (xen_vcpu_info == NULL)
		return -ENOMEM;
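A note on the hunk above: the old call reused sizeof(struct vcpu_info) as the alignment argument of __alloc_percpu(), which is not in general a valid alignment request, while alloc_percpu() derives the alignment from the type itself. A standalone sketch of the distinction (plain C with a hypothetical stand-in struct; the real struct vcpu_info lives in the Xen interface headers):

#include <stdio.h>

struct vcpu_info_like {			/* hypothetical stand-in */
	unsigned char evtchn_upcall_pending;
	unsigned char evtchn_upcall_mask;
	unsigned long evtchn_pending_sel;
	unsigned long pad[6];
};

int main(void)
{
	/* old: __alloc_percpu(sizeof(T), sizeof(T)) asked for sizeof(T)-byte
	 * alignment; new: alloc_percpu(T) asks for __alignof__(T) instead */
	printf("sizeof  = %zu\n", sizeof(struct vcpu_info_like));
	printf("alignof = %zu\n", _Alignof(struct vcpu_info_like));
	return 0;
}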
@@ -1313,9 +1313,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
		blk_mq_put_ctx(data.ctx);
		if (!old_rq)
			goto done;
-		if (!blk_mq_direct_issue_request(old_rq, &cookie))
-			goto done;
-		blk_mq_insert_request(old_rq, false, true, true);
+		if (test_bit(BLK_MQ_S_STOPPED, &data.hctx->state) ||
+		    blk_mq_direct_issue_request(old_rq, &cookie) != 0)
+			blk_mq_insert_request(old_rq, false, true, true);
		goto done;
	}
@@ -836,11 +836,29 @@ static struct kobject *get_device_parent(struct device *dev,
	return NULL;
 }
 
+static inline bool live_in_glue_dir(struct kobject *kobj,
+				    struct device *dev)
+{
+	if (!kobj || !dev->class ||
+	    kobj->kset != &dev->class->p->glue_dirs)
+		return false;
+	return true;
+}
+
+static inline struct kobject *get_glue_dir(struct device *dev)
+{
+	return dev->kobj.parent;
+}
+
 /*
  * make sure cleaning up dir as the last step, we need to make
  * sure .release handler of kobject is run with holding the
  * global lock
  */
 static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 {
	/* see if we live in a "glue" directory */
-	if (!glue_dir || !dev->class ||
-	    glue_dir->kset != &dev->class->p->glue_dirs)
+	if (!live_in_glue_dir(glue_dir, dev))
		return;
 
	mutex_lock(&gdp_mutex);
@@ -848,11 +866,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
	mutex_unlock(&gdp_mutex);
 }
 
-static void cleanup_device_parent(struct device *dev)
-{
-	cleanup_glue_dir(dev, dev->kobj.parent);
-}
-
 static int device_add_class_symlinks(struct device *dev)
 {
	struct device_node *of_node = dev_of_node(dev);
@@ -1028,6 +1041,7 @@ int device_add(struct device *dev)
	struct kobject *kobj;
	struct class_interface *class_intf;
	int error = -EINVAL;
+	struct kobject *glue_dir = NULL;
 
	dev = get_device(dev);
	if (!dev)
@@ -1072,8 +1086,10 @@ int device_add(struct device *dev)
	/* first, register with generic layer. */
	/* we require the name to be set before, and pass NULL */
	error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
-	if (error)
+	if (error) {
+		glue_dir = get_glue_dir(dev);
		goto Error;
+	}
 
	/* notify platform of device entry */
	if (platform_notify)
@@ -1154,9 +1170,10 @@ done:
	device_remove_file(dev, &dev_attr_uevent);
 attrError:
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
 Error:
-	cleanup_device_parent(dev);
+	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
 name_error:
	kfree(dev->p);
@@ -1232,6 +1249,7 @@ EXPORT_SYMBOL_GPL(put_device);
 void device_del(struct device *dev)
 {
	struct device *parent = dev->parent;
+	struct kobject *glue_dir = NULL;
	struct class_interface *class_intf;
 
	/* Notify clients of device removal.  This call must come
@@ -1276,8 +1294,9 @@ void device_del(struct device *dev)
		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
					     BUS_NOTIFY_REMOVED_DEVICE, dev);
	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
-	cleanup_device_parent(dev);
+	glue_dir = get_glue_dir(dev);
	kobject_del(&dev->kobj);
+	cleanup_glue_dir(dev, glue_dir);
	put_device(parent);
 }
 EXPORT_SYMBOL_GPL(device_del);
@@ -1657,7 +1657,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
	blk_mq_start_request(bd->rq);
 
	if (lo->lo_state != Lo_bound)
-		return -EIO;
+		return BLK_MQ_RQ_QUEUE_ERROR;
 
	if (lo->use_dio && !(cmd->rq->cmd_flags & (REQ_FLUSH |
			REQ_DISCARD)))
@@ -305,7 +305,6 @@ static int tpmfront_probe(struct xenbus_device *dev,
	rv = setup_ring(dev, priv);
	if (rv) {
-		chip = dev_get_drvdata(&dev->dev);
-		tpm_chip_unregister(chip);
		ring_free(priv);
		return rv;
	}
@@ -22,13 +22,6 @@
 
 #include "clock.h"
 
-/*
- * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
- * that are sourced by DPLL5, and both of these require this clock
- * to be at 120 MHz for proper operation.
- */
-#define DPLL5_FREQ_FOR_USBHOST	120000000
-
 #define OMAP3430ES2_ST_DSS_IDLE_SHIFT	1
 #define OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT	5
 #define OMAP3430ES2_ST_SSI_IDLE_SHIFT	8
@@ -546,14 +539,21 @@ void __init omap3_clk_lock_dpll5(void)
	struct clk *dpll5_clk;
	struct clk *dpll5_m2_clk;
 
+	/*
+	 * Errata sprz319f advisory 2.1 documents a USB host clock drift issue
+	 * that can be worked around using specially crafted dpll5 settings
+	 * with a dpll5_m2 divider set to 8. Set the dpll5 rate to 8x the USB
+	 * host clock rate, its .set_rate handler() will detect that frequency
+	 * and use the errata settings.
+	 */
	dpll5_clk = clk_get(NULL, "dpll5_ck");
-	clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
+	clk_set_rate(dpll5_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST * 8);
	clk_prepare_enable(dpll5_clk);
 
-	/* Program dpll5_m2_clk divider for no division */
+	/* Program dpll5_m2_clk divider */
	dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
	clk_prepare_enable(dpll5_m2_clk);
-	clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
+	clk_set_rate(dpll5_m2_clk, OMAP3_DPLL5_FREQ_FOR_USBHOST);
 
	clk_disable_unprepare(dpll5_m2_clk);
	clk_disable_unprepare(dpll5_clk);
@@ -257,11 +257,20 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
 unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
				    unsigned long parent_rate);
+
+/*
+ * OMAP3_DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
+ * that are sourced by DPLL5, and both of these require this clock
+ * to be at 120 MHz for proper operation.
+ */
+#define OMAP3_DPLL5_FREQ_FOR_USBHOST	120000000
+
 unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
 int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
			 unsigned long parent_rate);
 int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate, u8 index);
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long parent_rate);
 void omap3_clk_lock_dpll5(void);
 
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -114,6 +114,18 @@ static const struct clk_ops omap3_dpll_ck_ops = {
	.round_rate	= &omap2_dpll_round_rate,
 };
 
+static const struct clk_ops omap3_dpll5_ck_ops = {
+	.enable		= &omap3_noncore_dpll_enable,
+	.disable	= &omap3_noncore_dpll_disable,
+	.get_parent	= &omap2_init_dpll_parent,
+	.recalc_rate	= &omap3_dpll_recalc,
+	.set_rate	= &omap3_dpll5_set_rate,
+	.set_parent	= &omap3_noncore_dpll_set_parent,
+	.set_rate_and_parent	= &omap3_noncore_dpll_set_rate_and_parent,
+	.determine_rate	= &omap3_noncore_dpll_determine_rate,
+	.round_rate	= &omap2_dpll_round_rate,
+};
+
 static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable		= &omap3_noncore_dpll_enable,
	.disable	= &omap3_noncore_dpll_disable,
@@ -461,7 +473,12 @@ static void __init of_ti_omap3_dpll_setup(struct device_node *node)
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};
 
-	of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
+	if ((of_machine_is_compatible("ti,omap3630") ||
+	     of_machine_is_compatible("ti,omap36xx")) &&
+	    !strcmp(node->name, "dpll5_ck"))
+		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
+	else
+		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
 }
 CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);
@@ -815,3 +815,70 @@ int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
	return omap3_noncore_dpll_set_rate_and_parent(hw, rate, parent_rate,
						      index);
 }
+
+/* Apply DM3730 errata sprz319 advisory 2.1. */
+static bool omap3_dpll5_apply_errata(struct clk_hw *hw,
+				     unsigned long parent_rate)
+{
+	struct omap3_dpll5_settings {
+		unsigned int rate, m, n;
+	};
+
+	static const struct omap3_dpll5_settings precomputed[] = {
+		/*
+		 * From DM3730 errata advisory 2.1, table 35 and 36.
+		 * The N value is increased by 1 compared to the tables as the
+		 * errata lists register values while last_rounded_field is the
+		 * real divider value.
+		 */
+		{ 12000000, 80, 0 + 1 },
+		{ 13000000, 443, 5 + 1 },
+		{ 19200000, 50, 0 + 1 },
+		{ 26000000, 443, 11 + 1 },
+		{ 38400000, 25, 0 + 1 }
+	};
+
+	const struct omap3_dpll5_settings *d;
+	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
+	struct dpll_data *dd;
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(precomputed); ++i) {
+		if (parent_rate == precomputed[i].rate)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(precomputed))
+		return false;
+
+	d = &precomputed[i];
+
+	/* Update the M, N and rounded rate values and program the DPLL. */
+	dd = clk->dpll_data;
+	dd->last_rounded_m = d->m;
+	dd->last_rounded_n = d->n;
+	dd->last_rounded_rate = div_u64((u64)parent_rate * d->m, d->n);
+	omap3_noncore_dpll_program(clk, 0);
+
+	return true;
+}
+
+/**
+ * omap3_dpll5_set_rate - set rate for omap3 dpll5
+ * @hw: clock to change
+ * @rate: target rate for clock
+ * @parent_rate: rate of the parent clock
+ *
+ * Set rate for the DPLL5 clock. Apply the sprz319 advisory 2.1 on OMAP36xx if
+ * the DPLL is used for USB host (detected through the requested rate).
+ */
+int omap3_dpll5_set_rate(struct clk_hw *hw, unsigned long rate,
+			 unsigned long parent_rate)
+{
+	if (rate == OMAP3_DPLL5_FREQ_FOR_USBHOST * 8) {
+		if (omap3_dpll5_apply_errata(hw, parent_rate))
+			return 0;
+	}
+
+	return omap3_noncore_dpll_set_rate(hw, rate, parent_rate);
+}
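As a sanity check on the errata table above: the DPLL output is parent_rate × M / N (the relation used for last_rounded_rate), and the dpll5_m2 post-divider of 8 described in the omap3_clk_lock_dpll5() comment brings it back to roughly the 120 MHz USBHOST rate. A standalone computation over the table values (plain C, illustration only, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* { parent rate, M, N } with N already converted to a real divider */
	static const struct { unsigned int rate, m, n; } precomputed[] = {
		{ 12000000, 80, 1 }, { 13000000, 443, 6 }, { 19200000, 50, 1 },
		{ 26000000, 443, 12 }, { 38400000, 25, 1 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(precomputed) / sizeof(precomputed[0]); i++) {
		uint64_t dpll = (uint64_t)precomputed[i].rate *
				precomputed[i].m / precomputed[i].n;
		/* e.g. 12 MHz * 80 / 1 = 960 MHz, and 960 MHz / 8 = 120 MHz */
		printf("parent %8u Hz -> dpll5 %9llu Hz -> m2 /8 %9llu Hz\n",
		       precomputed[i].rate, (unsigned long long)dpll,
		       (unsigned long long)(dpll / 8));
	}
	return 0;
}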
@@ -702,7 +702,9 @@ copy_iv:
 
	/* Will read cryptlen */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
-	aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2);
+	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+			     FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+	append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
 
	/* Write ICV */
	append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
@@ -1500,12 +1500,15 @@ static int crypt_set_key(struct crypt_config *cc, char *key)
	if (!cc->key_size && strcmp(key, "-"))
		goto out;
 
+	/* clear the flag since following operations may invalidate previously valid key */
+	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
+
	if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0)
		goto out;
 
-	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
-
	r = crypt_setkey_allcpus(cc);
+	if (!r)
+		set_bit(DM_CRYPT_KEY_VALID, &cc->flags);
 
 out:
	/* Hex key string not needed after here, so wipe it. */
@@ -200,11 +200,13 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
	if (!(fc->up_interval + fc->down_interval)) {
		ti->error = "Total (up + down) interval is zero";
+		r = -EINVAL;
		goto bad;
	}
 
	if (fc->up_interval + fc->down_interval < fc->up_interval) {
		ti->error = "Interval overflow";
+		r = -EINVAL;
		goto bad;
	}
 
@@ -775,16 +775,14 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
 
	r = sm_ll_new_metadata(&smm->ll, tm);
-	if (r)
-		return r;
-
-	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
-		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
-	r = sm_ll_extend(&smm->ll, nr_blocks);
-	if (r)
-		return r;
-
+	if (!r) {
+		if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+			nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
+		r = sm_ll_extend(&smm->ll, nr_blocks);
+	}
	memcpy(&smm->sm, &ops, sizeof(smm->sm));
+	if (r)
+		return r;
 
	/*
	 * Now we need to update the newly created data structures with the
@@ -1708,6 +1708,7 @@ static const struct usb_device_id acm_ids[] = {
	{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
	.driver_info = QUIRK_CONTROL_LINE_STATE, },
	{ USB_DEVICE(0x2184, 0x001c) },	/* GW Instek AFG-2225 */
+	{ USB_DEVICE(0x2184, 0x0036) },	/* GW Instek AFG-125 */
	{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
	},
	/* Motorola H24 HSPA module: */
@@ -101,6 +101,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
 
 static void hub_release(struct kref *kref);
 static int usb_reset_and_verify_device(struct usb_device *udev);
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+					  struct usb_port *port_dev);
 
 static inline char *portspeed(struct usb_hub *hub, int portstatus)
 {
@@ -883,82 +885,28 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1,
 }
 
 /*
- * If USB 3.0 ports are placed into the Disabled state, they will no longer
- * detect any device connects or disconnects. This is generally not what the
- * USB core wants, since it expects a disabled port to produce a port status
- * change event when a new device connects.
- *
- * Instead, set the link state to Disabled, wait for the link to settle into
- * that state, clear any change bits, and then put the port into the RxDetect
- * state.
+ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating
+ * a connection with a plugged-in cable but will signal the host when the cable
+ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices
  */
-static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
-{
-	int ret;
-	int total_time;
-	u16 portchange, portstatus;
-
-	if (!hub_is_superspeed(hub->hdev))
-		return -EINVAL;
-
-	ret = hub_port_status(hub, port1, &portstatus, &portchange);
-	if (ret < 0)
-		return ret;
-
-	/*
-	 * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI
-	 * Controller [1022:7814] will have spurious result making the following
-	 * usb 3.0 device hotplugging route to the 2.0 root hub and recognized
-	 * as high-speed device if we set the usb 3.0 port link state to
-	 * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we
-	 * check the state here to avoid the bug.
-	 */
-	if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
-				USB_SS_PORT_LS_RX_DETECT) {
-		dev_dbg(&hub->ports[port1 - 1]->dev,
-			 "Not disabling port; link state is RxDetect\n");
-		return ret;
-	}
-
-	ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
-	if (ret)
-		return ret;
-
-	/* Wait for the link to enter the disabled state. */
-	for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) {
-		ret = hub_port_status(hub, port1, &portstatus, &portchange);
-		if (ret < 0)
-			return ret;
-
-		if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
-		    USB_SS_PORT_LS_SS_DISABLED)
-			break;
-		if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-			break;
-		msleep(HUB_DEBOUNCE_STEP);
-	}
-	if (total_time >= HUB_DEBOUNCE_TIMEOUT)
-		dev_warn(&hub->ports[port1 - 1]->dev,
-			"Could not disable after %d ms\n", total_time);
-
-	return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT);
-}
-
 static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
 {
	struct usb_port *port_dev = hub->ports[port1 - 1];
	struct usb_device *hdev = hub->hdev;
	int ret = 0;
 
-	if (port_dev->child && set_state)
-		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
	if (!hub->error) {
-		if (hub_is_superspeed(hub->hdev))
-			ret = hub_usb3_port_disable(hub, port1);
-		else
+		if (hub_is_superspeed(hub->hdev)) {
+			hub_usb3_port_prepare_disable(hub, port_dev);
+			ret = hub_set_port_link_state(hub, port_dev->portnum,
+						      USB_SS_PORT_LS_U3);
+		} else {
			ret = usb_clear_port_feature(hdev, port1,
					USB_PORT_FEAT_ENABLE);
+		}
	}
+	if (port_dev->child && set_state)
+		usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED);
	if (ret && ret != -ENODEV)
		dev_err(&port_dev->dev, "cannot disable (err = %d)\n", ret);
	return ret;
@@ -4073,6 +4021,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev)
 }
 EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
 
+/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */
+static void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+					  struct usb_port *port_dev)
+{
+	struct usb_device *udev = port_dev->child;
+	int ret;
+
+	if (udev && udev->port_is_suspended && udev->do_remote_wakeup) {
+		ret = hub_set_port_link_state(hub, port_dev->portnum,
+					      USB_SS_PORT_LS_U0);
+		if (!ret) {
+			msleep(USB_RESUME_TIMEOUT);
+			ret = usb_disable_remote_wakeup(udev);
+		}
+		if (ret)
+			dev_warn(&udev->dev,
+				 "Port disable: can't disable remote wake\n");
+		udev->do_remote_wakeup = 0;
+	}
+}
+
 #else	/* CONFIG_PM */
 
@@ -4080,6 +4048,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm);
 #define hub_resume		NULL
 #define hub_reset_resume	NULL
 
+static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub,
+						 struct usb_port *port_dev) { }
+
 int usb_disable_lpm(struct usb_device *udev)
 {
	return 0;
@@ -144,11 +144,16 @@ int config_ep_by_speed(struct usb_gadget *g,
 
 ep_found:
	/* commit results */
-	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+	_ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff;
	_ep->desc = chosen_desc;
	_ep->comp_desc = NULL;
	_ep->maxburst = 0;
-	_ep->mult = 0;
+	_ep->mult = 1;
+
+	if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) ||
+				usb_endpoint_xfer_int(_ep->desc)))
+		_ep->mult = usb_endpoint_maxp(_ep->desc) & 0x7ff;
+
	if (!want_comp_desc)
		return 0;
 
@@ -165,7 +170,7 @@ ep_found:
	switch (usb_endpoint_type(_ep->desc)) {
	case USB_ENDPOINT_XFER_ISOC:
		/* mult: bits 1:0 of bmAttributes */
-		_ep->mult = comp_desc->bmAttributes & 0x3;
+		_ep->mult = (comp_desc->bmAttributes & 0x3) + 1;
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		_ep->maxburst = comp_desc->bMaxBurst + 1;
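Both config_ep_by_speed() hunks revolve around the packing that the USB 2.0 specification defines for a high-speed endpoint's wMaxPacketSize: bits 10:0 carry the packet size (hence the & 0x7ff mask) and bits 12:11 the number of additional transactions per microframe. A standalone decode of one made-up sample value (plain C, illustration only):

#include <stdio.h>

int main(void)
{
	unsigned short w_max_packet_size = 0x1400;	/* hypothetical value */

	unsigned maxp = w_max_packet_size & 0x7ff;	  /* 1024 bytes */
	unsigned extra = (w_max_packet_size >> 11) & 0x3; /* 2 additional */

	printf("maxpacket = %u, transactions per microframe = %u\n",
	       maxp, extra + 1);	/* prints 1024 and 3 */
	return 0;
}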
@@ -1079,13 +1079,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
	agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
	if (!agdev->out_ep) {
		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		goto err;
+		return ret;
	}
 
	agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
	if (!agdev->in_ep) {
		dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
-		goto err;
+		return ret;
	}
 
	uac2->p_prm.uac2 = uac2;
@@ -1102,7 +1102,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
 
	ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL);
	if (ret)
-		goto err;
+		return ret;
 
	prm = &agdev->uac2.c_prm;
	prm->max_psize = hs_epout_desc.wMaxPacketSize;
@@ -1117,19 +1117,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
	prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
	if (!prm->rbuf) {
		prm->max_psize = 0;
-		goto err_free_descs;
+		goto err;
	}
 
	ret = alsa_uac2_init(agdev);
	if (ret)
-		goto err_free_descs;
+		goto err;
	return 0;
 
-err_free_descs:
-	usb_free_all_descriptors(fn);
 err:
	kfree(agdev->uac2.p_prm.rbuf);
	kfree(agdev->uac2.c_prm.rbuf);
+err_free_descs:
+	usb_free_all_descriptors(fn);
	return -EINVAL;
 }
 
@@ -243,7 +243,7 @@ uvc_video_alloc_requests(struct uvc_video *video)
 
	req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
-		 * (video->ep->mult + 1);
+		 * (video->ep->mult);
 
	for (i = 0; i < UVC_NUM_REQUESTS; ++i) {
		video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL);
@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd)
	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP)
		uhci->wait_for_hp = 1;
 
+	/* Intel controllers use non-PME wakeup signalling */
+	if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL)
+		device_set_run_wake(uhci_dev(uhci), 1);
+
	/* Set up pointers to PCI-specific functions */
	uhci->reset_hc = uhci_pci_reset_hc;
	uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc;
@@ -296,7 +296,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
	rc = usb_serial_generic_open(tty, port);
	if (rc) {
		retval = rc;
-		goto exit;
+		goto err_free_cfg;
	}
 
	rc = usb_control_msg(port->serial->dev,
@@ -315,17 +315,32 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
	dev_dbg(&port->dev, "%s - enabled reading\n", __func__);
 
	rc = klsi_105_get_line_state(port, &line_state);
-	if (rc >= 0) {
-		spin_lock_irqsave(&priv->lock, flags);
-		priv->line_state = line_state;
-		spin_unlock_irqrestore(&priv->lock, flags);
-		dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state);
-		retval = 0;
-	} else
+	if (rc < 0) {
		retval = rc;
+		goto err_disable_read;
+	}
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->line_state = line_state;
+	spin_unlock_irqrestore(&priv->lock, flags);
+	dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__,
+			line_state);
+
+	return 0;
 
-exit:
+err_disable_read:
+	usb_control_msg(port->serial->dev,
+			usb_sndctrlpipe(port->serial->dev, 0),
+			KL5KUSB105A_SIO_CONFIGURE,
+			USB_TYPE_VENDOR | USB_DIR_OUT,
+			KL5KUSB105A_SIO_CONFIGURE_READ_OFF,
+			0, /* index */
+			NULL, 0,
+			KLSI_TIMEOUT);
+	usb_serial_generic_close(port);
+err_free_cfg:
+	kfree(cfg);
+
	return retval;
 }
 
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_SINGLE		0x1006
 #define TELIT_PRODUCT_DE910_DUAL		0x1010
 #define TELIT_PRODUCT_UE910_V2			0x1012
+#define TELIT_PRODUCT_LE922_USBCFG1		0x1040
+#define TELIT_PRODUCT_LE922_USBCFG2		0x1041
 #define TELIT_PRODUCT_LE922_USBCFG0		0x1042
 #define TELIT_PRODUCT_LE922_USBCFG3		0x1043
 #define TELIT_PRODUCT_LE922_USBCFG5		0x1045
@@ -1210,6 +1212,10 @@ static const struct usb_device_id option_ids[] = {
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
+		.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
+		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
		.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
@@ -1989,6 +1995,7 @@ static const struct usb_device_id option_ids[] = {
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) },	/* D-Link DWM-158 */
	{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff),	/* D-Link DWM-221 B1 */
	  .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },	/* D-Link DWM-152/C1 */
@@ -804,7 +804,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 
	vma->vm_ops = &gntdev_vmops;
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
 
	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;
@@ -759,7 +759,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
-	else if (bdev->bd_contains == bdev)
+	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */
 
	else if (whole->bd_holder == bd_may_claim)
@@ -70,6 +70,20 @@ void btrfs_##name(struct work_struct *arg) \
	normal_work_helper(work);					\
 }
 
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq)
+{
+	/*
+	 * We could compare wq->normal->pending with num_online_cpus()
+	 * to support "thresh == NO_THRESHOLD" case, but it requires
+	 * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
+	 * postpone it until someone needs the support of that case.
+	 */
+	if (wq->normal->thresh == NO_THRESHOLD)
+		return false;
+
+	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
+}
+
 BTRFS_WORK_HELPER(worker_helper);
 BTRFS_WORK_HELPER(delalloc_helper);
 BTRFS_WORK_HELPER(flush_delalloc_helper);
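The heuristic above is simple enough to check by hand: a thresholded queue reports congestion once more than twice its threshold of works is pending. A standalone sketch (plain C; the NO_THRESHOLD sentinel mirrors the async-thread code, the numbers are made up):

#include <stdio.h>

#define NO_THRESHOLD (-1)	/* assumed sentinel for unthrottled queues */

static int congested(int pending, int thresh)
{
	if (thresh == NO_THRESHOLD)
		return 0;	/* unthrottled queues never report congestion */
	return pending > thresh * 2;
}

int main(void)
{
	printf("pending=100 thresh=64 -> %d\n", congested(100, 64)); /* 0 */
	printf("pending=129 thresh=64 -> %d\n", congested(129, 64)); /* 1 */
	return 0;
}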
@@ -80,4 +80,5 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
 void btrfs_set_work_high_priority(struct btrfs_work *work);
+bool btrfs_workqueue_normal_congested(struct btrfs_workqueue *wq);
 #endif
@@ -3070,6 +3070,8 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
	cpu->target = le64_to_cpu(disk->target);
	cpu->flags = le64_to_cpu(disk->flags);
	cpu->limit = le64_to_cpu(disk->limit);
+	cpu->stripes_min = le32_to_cpu(disk->stripes_min);
+	cpu->stripes_max = le32_to_cpu(disk->stripes_max);
 }
 
 static inline void
@@ -3088,6 +3090,8 @@ btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
	disk->target = cpu_to_le64(cpu->target);
	disk->flags = cpu_to_le64(cpu->flags);
	disk->limit = cpu_to_le64(cpu->limit);
+	disk->stripes_min = cpu_to_le32(cpu->stripes_min);
+	disk->stripes_max = cpu_to_le32(cpu->stripes_max);
 }
 
 /* struct btrfs_super_block */
@@ -1375,7 +1375,8 @@ release_path:
	total_done++;
 
	btrfs_release_prepared_delayed_node(delayed_node);
-	if (async_work->nr == 0 || total_done < async_work->nr)
+	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
+	    total_done < async_work->nr)
		goto again;
 
 free_path:
@@ -1391,7 +1392,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 {
	struct btrfs_async_delayed_work *async_work;
 
-	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
+	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;
 
	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
@@ -8486,14 +8486,13 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
		ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
					       &wc->refs[level - 1],
					       &wc->flags[level - 1]);
-		if (ret < 0) {
-			btrfs_tree_unlock(next);
-			return ret;
-		}
+		if (ret < 0)
+			goto out_unlock;
 
		if (unlikely(wc->refs[level - 1] == 0)) {
			btrfs_err(root->fs_info, "Missing references.");
-			BUG();
+			ret = -EIO;
+			goto out_unlock;
		}
		*lookup_info = 0;
 
@@ -8545,7 +8544,12 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
	}
 
	level--;
-	BUG_ON(level != btrfs_header_level(next));
+	ASSERT(level == btrfs_header_level(next));
+	if (level != btrfs_header_level(next)) {
+		btrfs_err(root->fs_info, "mismatched level");
+		ret = -EIO;
+		goto out_unlock;
+	}
	path->nodes[level] = next;
	path->slots[level] = 0;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
@@ -8560,8 +8564,15 @@ skip:
		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			parent = path->nodes[level]->start;
		} else {
-			BUG_ON(root->root_key.objectid !=
+			ASSERT(root->root_key.objectid ==
			       btrfs_header_owner(path->nodes[level]));
+			if (root->root_key.objectid !=
+			    btrfs_header_owner(path->nodes[level])) {
+				btrfs_err(root->fs_info,
+					  "mismatched block owner");
+				ret = -EIO;
+				goto out_unlock;
+			}
			parent = 0;
		}
 
@@ -8578,12 +8589,18 @@ skip:
		}
		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
				root->root_key.objectid, level - 1, 0);
-		BUG_ON(ret); /* -ENOMEM */
+		if (ret)
+			goto out_unlock;
	}
+
+	*lookup_info = 1;
+	ret = 1;
+
+out_unlock:
	btrfs_tree_unlock(next);
	free_extent_buffer(next);
-	*lookup_info = 1;
-	return 1;
+
+	return ret;
 }
 
 /*
@@ -9686,6 +9703,11 @@ int btrfs_read_block_groups(struct btrfs_root *root)
	struct extent_buffer *leaf;
	int need_clear = 0;
	u64 cache_gen;
+	u64 feature;
+	int mixed;
+
+	feature = btrfs_super_incompat_flags(info->super_copy);
+	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
 
	root = info->extent_root;
	key.objectid = 0;
@@ -9739,6 +9761,15 @@ int btrfs_read_block_groups(struct btrfs_root *root)
			   btrfs_item_ptr_offset(leaf, path->slots[0]),
			   sizeof(cache->item));
		cache->flags = btrfs_block_group_flags(&cache->item);
+		if (!mixed &&
+		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
+		     (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
+			btrfs_err(info,
+"bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
+				  cache->key.objectid);
+			ret = -EINVAL;
+			goto error;
+		}
 
		key.objectid = found_key.objectid + found_key.offset;
		btrfs_release_path(path);
@@ -5294,11 +5294,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
			lock_page(page);
		}
		locked_pages++;
+	}
+	/*
+	 * We need to firstly lock all pages to make sure that
+	 * the uptodate bit of our pages won't be affected by
+	 * clear_extent_buffer_uptodate().
+	 */
+	for (i = start_i; i < num_pages; i++) {
+		page = eb->pages[i];
		if (!PageUptodate(page)) {
			num_reads++;
			all_uptodate = 0;
		}
	}
+
	if (all_uptodate) {
		if (start_i == 0)
			set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3825,6 +3825,11 @@ process_slot:
		}
		btrfs_release_path(path);
		key.offset = next_key_min_offset;
+
+		if (fatal_signal_pending(current)) {
+			ret = -EINTR;
+			goto out;
+		}
	}
	ret = 0;
 
@@ -2283,10 +2283,6 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
	int err = -ENOMEM;
	int ret = 0;
 
-	mutex_lock(&fs_info->qgroup_rescan_lock);
-	fs_info->qgroup_rescan_running = true;
-	mutex_unlock(&fs_info->qgroup_rescan_lock);
-
	path = btrfs_alloc_path();
	if (!path)
		goto out;
@@ -2397,6 +2393,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
	       sizeof(fs_info->qgroup_rescan_progress));
	fs_info->qgroup_rescan_progress.objectid = progress_objectid;
	init_completion(&fs_info->qgroup_rescan_completion);
+	fs_info->qgroup_rescan_running = true;
 
	spin_unlock(&fs_info->qgroup_lock);
	mutex_unlock(&fs_info->qgroup_rescan_lock);
@@ -921,9 +921,16 @@ again:
			path2->slots[level]--;
 
		eb = path2->nodes[level];
-		WARN_ON(btrfs_node_blockptr(eb, path2->slots[level]) !=
-			cur->bytenr);
-
+		if (btrfs_node_blockptr(eb, path2->slots[level]) !=
+		    cur->bytenr) {
+			btrfs_err(root->fs_info,
+"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
+				  cur->bytenr, level - 1, root->objectid,
+				  node_key->objectid, node_key->type,
+				  node_key->offset);
+			err = -ENOENT;
+			goto out;
+		}
		lower = cur;
		need_check = true;
		for (; level < BTRFS_MAX_LEVEL; level++) {
@@ -2343,6 +2350,10 @@ void free_reloc_roots(struct list_head *list)
	while (!list_empty(list)) {
		reloc_root = list_entry(list->next, struct btrfs_root,
					root_list);
+		free_extent_buffer(reloc_root->node);
+		free_extent_buffer(reloc_root->commit_root);
+		reloc_root->node = NULL;
+		reloc_root->commit_root = NULL;
		__del_reloc_root(reloc_root);
	}
 }
@@ -2676,11 +2687,15 @@ static int do_relocation(struct btrfs_trans_handle *trans,
 
		if (!upper->eb) {
			ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-			if (ret < 0) {
-				err = ret;
+			if (ret) {
+				if (ret < 0)
+					err = ret;
+				else
+					err = -ENOENT;
+
+				btrfs_release_path(path);
				break;
			}
-			BUG_ON(ret > 0);
 
			if (!upper->eb) {
				upper->eb = path->nodes[upper->level];
@@ -1923,12 +1923,11 @@ static noinline int find_dir_range(struct btrfs_root *root,
 next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
+	path->slots[0]++;
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
-	} else {
-		path->slots[0]++;
	}
 
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
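The reordering in find_dir_range() is easy to miss: the slot is now advanced first and then checked against the item count, while the old code tested the stale index and only incremented in the else branch, so a cursor sitting on the last valid item could be followed by a read one past the end instead of a hop to the next leaf. A standalone toy version of the fixed iteration (plain C, not btrfs structures):

#include <stdio.h>

#define NRITEMS 3	/* items per toy "leaf" */

static int advance(int *slot)
{
	(*slot)++;			/* advance first ... */
	if (*slot >= NRITEMS) {		/* ... then check the bound */
		*slot = 0;		/* stand-in for btrfs_next_leaf() */
		return 1;
	}
	return 0;
}

int main(void)
{
	int slot = NRITEMS - 1;		/* cursor on the last valid item */
	int hopped = advance(&slot);
	printf("hopped=%d slot=%d (the old order would read slot %d)\n",
	       hopped, slot, NRITEMS);
	return 0;
}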
@@ -627,6 +627,8 @@ struct TCP_Server_Info {
 #ifdef CONFIG_CIFS_SMB2
	unsigned int	max_read;
	unsigned int	max_write;
+	struct delayed_work reconnect; /* reconnect workqueue job */
+	struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
 #endif /* CONFIG_CIFS_SMB2 */
 };
 
@@ -826,6 +828,7 @@ cap_unix(struct cifs_ses *ses)
 struct cifs_tcon {
	struct list_head tcon_list;
	int tc_count;
+	struct list_head rlist; /* reconnect list */
	struct list_head openFileList;
	spinlock_t open_file_lock; /* protects list above */
	struct cifs_ses *ses;	/* pointer to session associated with */
@@ -205,6 +205,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid,
					 struct tcon_link *tlink,
					 struct cifs_pending_open *open);
 extern void cifs_del_pending_open(struct cifs_pending_open *open);
+extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
+				 int from_reconnect);
+extern void cifs_put_tcon(struct cifs_tcon *tcon);
 
 #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL)
 extern void cifs_dfs_release_automount_timer(void);
@@ -52,6 +52,9 @@
 #include "nterr.h"
 #include "rfc1002pdu.h"
 #include "fscache.h"
+#ifdef CONFIG_CIFS_SMB2
+#include "smb2proto.h"
+#endif
 
 #define CIFS_PORT 445
 #define RFC1001_PORT 139
@@ -2113,8 +2116,8 @@ cifs_find_tcp_session(struct smb_vol *vol)
	return NULL;
 }
 
-static void
-cifs_put_tcp_session(struct TCP_Server_Info *server)
+void
+cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
 {
	struct task_struct *task;
 
@@ -2131,6 +2134,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server)
 
	cancel_delayed_work_sync(&server->echo);
 
+#ifdef CONFIG_CIFS_SMB2
+	if (from_reconnect)
+		/*
+		 * Avoid deadlock here: reconnect work calls
+		 * cifs_put_tcp_session() at its end. Need to be sure
+		 * that reconnect work does nothing with server pointer after
+		 * that step.
+		 */
+		cancel_delayed_work(&server->reconnect);
+	else
+		cancel_delayed_work_sync(&server->reconnect);
+#endif
+
	spin_lock(&GlobalMid_Lock);
	server->tcpStatus = CifsExiting;
	spin_unlock(&GlobalMid_Lock);
@@ -2195,6 +2211,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
	INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
	INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
	INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
+#ifdef CONFIG_CIFS_SMB2
+	INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
+	mutex_init(&tcp_ses->reconnect_mutex);
+#endif
	memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr,
	       sizeof(tcp_ses->srcaddr));
	memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr,
@@ -2347,7 +2367,7 @@ cifs_put_smb_ses(struct cifs_ses *ses)
	spin_unlock(&cifs_tcp_ses_lock);
 
	sesInfoFree(ses);
-	cifs_put_tcp_session(server);
+	cifs_put_tcp_session(server, 0);
 }
 
 #ifdef CONFIG_KEYS
@@ -2521,7 +2541,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
		mutex_unlock(&ses->session_mutex);
 
		/* existing SMB ses has a server reference already */
-		cifs_put_tcp_session(server);
+		cifs_put_tcp_session(server, 0);
		free_xid(xid);
		return ses;
	}
@@ -2611,7 +2631,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc)
	return NULL;
 }
 
-static void
+void
 cifs_put_tcon(struct cifs_tcon *tcon)
 {
	unsigned int xid;
@@ -3767,7 +3787,7 @@ mount_fail_check:
		else if (ses)
			cifs_put_smb_ses(ses);
		else
-			cifs_put_tcp_session(server);
+			cifs_put_tcp_session(server, 0);
		bdi_destroy(&cifs_sb->bdi);
	}
 
@@ -4078,7 +4098,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
	ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
	if (IS_ERR(ses)) {
		tcon = (struct cifs_tcon *)ses;
-		cifs_put_tcp_session(master_tcon->ses->server);
+		cifs_put_tcp_session(master_tcon->ses->server, 0);
		goto out;
	}
 
@@ -260,7 +260,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
	 * and check it for zero before using.
	 */
	max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf;
-	if (!max_buf) {
+	if (max_buf < sizeof(struct smb2_lock_element)) {
		free_xid(xid);
		return -EINVAL;
	}
@@ -278,7 +278,7 @@ out:
	case SMB2_CHANGE_NOTIFY:
	case SMB2_QUERY_INFO:
	case SMB2_SET_INFO:
-		return -EAGAIN;
+		rc = -EAGAIN;
	}
	unload_nls(nls_codepage);
	return rc;
@@ -1822,6 +1822,54 @@ smb2_echo_callback(struct mid_q_entry *mid)
	add_credits(server, credits_received, CIFS_ECHO_OP);
 }
 
+void smb2_reconnect_server(struct work_struct *work)
+{
+	struct TCP_Server_Info *server = container_of(work,
+					struct TCP_Server_Info, reconnect.work);
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon, *tcon2;
+	struct list_head tmp_list;
+	int tcon_exist = false;
+
+	/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
+	mutex_lock(&server->reconnect_mutex);
+
+	INIT_LIST_HEAD(&tmp_list);
+	cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+
+	spin_lock(&cifs_tcp_ses_lock);
+	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+			if (tcon->need_reconnect) {
+				tcon->tc_count++;
+				list_add_tail(&tcon->rlist, &tmp_list);
+				tcon_exist = true;
+			}
+		}
+	}
+	/*
+	 * Get the reference to server struct to be sure that the last call of
+	 * cifs_put_tcon() in the loop below won't release the server pointer.
+	 */
+	if (tcon_exist)
+		server->srv_count++;
+
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
+		smb2_reconnect(SMB2_ECHO, tcon);
+		list_del_init(&tcon->rlist);
+		cifs_put_tcon(tcon);
+	}
+
+	cifs_dbg(FYI, "Reconnecting tcons finished\n");
+	mutex_unlock(&server->reconnect_mutex);
+
+	/* now we can safely release srv struct */
+	if (tcon_exist)
+		cifs_put_tcp_session(server, 1);
+}
+
 int
 SMB2_echo(struct TCP_Server_Info *server)
 {
@@ -1834,32 +1882,11 @@ SMB2_echo(struct TCP_Server_Info *server)
	cifs_dbg(FYI, "In echo request\n");
 
	if (server->tcpStatus == CifsNeedNegotiate) {
-		struct list_head *tmp, *tmp2;
-		struct cifs_ses *ses;
-		struct cifs_tcon *tcon;
-
-		cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
-		spin_lock(&cifs_tcp_ses_lock);
-		list_for_each(tmp, &server->smb_ses_list) {
-			ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
-			list_for_each(tmp2, &ses->tcon_list) {
-				tcon = list_entry(tmp2, struct cifs_tcon,
-						  tcon_list);
-				/* add check for persistent handle reconnect */
-				if (tcon && tcon->need_reconnect) {
-					spin_unlock(&cifs_tcp_ses_lock);
-					rc = smb2_reconnect(SMB2_ECHO, tcon);
-					spin_lock(&cifs_tcp_ses_lock);
-				}
-			}
-		}
-		spin_unlock(&cifs_tcp_ses_lock);
+		/* No need to send echo on newly established connections */
+		queue_delayed_work(cifsiod_wq, &server->reconnect, 0);
+		return rc;
	}
 
-	/* if no session, renegotiate failed above */
-	if (server->tcpStatus == CifsNeedNegotiate)
-		return -EIO;
-
	rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
	if (rc)
		return rc;
@@ -95,6 +95,7 @@ extern int smb2_open_file(const unsigned int xid,
 extern int smb2_unlock_range(struct cifsFileInfo *cfile,
			     struct file_lock *flock, const unsigned int xid);
 extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
+extern void smb2_reconnect_server(struct work_struct *work);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
fs/exec.c (30 changes)
@@ -56,6 +56,7 @@
 #include <linux/pipe_fs_i.h>
 #include <linux/oom.h>
 #include <linux/compat.h>
+#include <linux/user_namespace.h>
 
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
@@ -1114,6 +1115,13 @@ int flush_old_exec(struct linux_binprm * bprm)
	flush_thread();
	current->personality &= ~bprm->per_clear;
 
+	/*
+	 * We have to apply CLOEXEC before we change whether the process is
+	 * dumpable (in setup_new_exec) to avoid a race with a process in userspace
+	 * trying to access the should-be-closed file descriptors of a process
+	 * undergoing exec(2).
+	 */
+	do_close_on_exec(current->files);
	return 0;
 
 out:
@@ -1123,8 +1131,22 @@ EXPORT_SYMBOL(flush_old_exec);
 
 void would_dump(struct linux_binprm *bprm, struct file *file)
 {
-	if (inode_permission(file_inode(file), MAY_READ) < 0)
+	struct inode *inode = file_inode(file);
+	if (inode_permission(inode, MAY_READ) < 0) {
+		struct user_namespace *old, *user_ns;
		bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
+
+		/* Ensure mm->user_ns contains the executable */
+		user_ns = old = bprm->mm->user_ns;
+		while ((user_ns != &init_user_ns) &&
+		       !privileged_wrt_inode_uidgid(user_ns, inode))
+			user_ns = user_ns->parent;
+
+		if (old != user_ns) {
+			bprm->mm->user_ns = get_user_ns(user_ns);
+			put_user_ns(old);
+		}
+	}
 }
 EXPORT_SYMBOL(would_dump);
 
@@ -1154,7 +1176,6 @@ void setup_new_exec(struct linux_binprm * bprm)
	    !gid_eq(bprm->cred->gid, current_egid())) {
		current->pdeath_signal = 0;
	} else {
-		would_dump(bprm, bprm->file);
		if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)
			set_dumpable(current->mm, suid_dumpable);
	}
@@ -1163,7 +1184,6 @@ void setup_new_exec(struct linux_binprm * bprm)
	   group */
	current->self_exec_id++;
	flush_signal_handlers(current, 0);
-	do_close_on_exec(current->files);
 }
 EXPORT_SYMBOL(setup_new_exec);
 
@@ -1254,7 +1274,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
	unsigned n_fs;
 
	if (p->ptrace) {
-		if (p->ptrace & PT_PTRACE_CAP)
+		if (ptracer_capable(p, current_user_ns()))
			bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
		else
			bprm->unsafe |= LSM_UNSAFE_PTRACE;
@@ -1587,6 +1607,8 @@ static int do_execveat_common(int fd, struct filename *filename,
	if (retval < 0)
		goto out;
 
+	would_dump(bprm, bprm->file);
+
	retval = exec_binprm(bprm);
	if (retval < 0)
		goto out;
@@ -395,17 +395,19 @@ static inline int ext4_inode_journal_mode(struct inode *inode)
		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
	/* We do not support data journalling with delayed allocation */
	if (!S_ISREG(inode->i_mode) ||
-	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
-		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
-	if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
-	    !test_opt(inode->i_sb, DELALLOC))
+	    test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
+	    (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) &&
+	    !test_opt(inode->i_sb, DELALLOC))) {
+		/* We do not support data journalling for encrypted data */
+		if (S_ISREG(inode->i_mode) && ext4_encrypted_inode(inode))
+			return EXT4_INODE_ORDERED_DATA_MODE;  /* ordered */
		return EXT4_INODE_JOURNAL_DATA_MODE;	/* journal data */
+	}
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
		return EXT4_INODE_ORDERED_DATA_MODE;	/* ordered */
	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
		return EXT4_INODE_WRITEBACK_DATA_MODE;	/* writeback */
-	else
-		BUG();
+	BUG();
 }
 
 static inline int ext4_should_journal_data(struct inode *inode)
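The rewritten ext4_inode_journal_mode() folds the two journal-data conditions into one branch so the encrypted-file exception can sit in a single place. A standalone sketch of the resulting decision order (plain C over booleans; the predicate names are stand-ins for the real ext4 tests):

#include <stdio.h>

enum mode { WRITEBACK, ORDERED, JOURNAL };

static enum mode journal_mode(int is_reg, int mount_journal_data,
			      int inode_journal_data, int delalloc,
			      int encrypted)
{
	if (!is_reg || mount_journal_data ||
	    (inode_journal_data && !delalloc)) {
		/* data journalling is not supported for encrypted data */
		if (is_reg && encrypted)
			return ORDERED;
		return JOURNAL;
	}
	return ORDERED;		/* simplified: assume a data=ordered mount */
}

int main(void)
{
	printf("encrypted file, data=journal -> %d (ORDERED=1)\n",
	       journal_mode(1, 1, 0, 0, 1));
	printf("plain file, data=journal     -> %d (JOURNAL=2)\n",
	       journal_mode(1, 1, 0, 0, 0));
	return 0;
}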
@@ -337,8 +337,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode,
 
	len -= EXT4_MIN_INLINE_DATA_SIZE;
	value = kzalloc(len, GFP_NOFS);
-	if (!value)
+	if (!value) {
+		error = -ENOMEM;
		goto out;
+	}
 
	error = ext4_xattr_ibody_get(inode, i.name_index, i.name,
				     value, len);
@@ -4202,6 +4202,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
+	loff_t size;
	int block;
	uid_t i_uid;
	gid_t i_gid;
@@ -4293,6 +4294,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
+	if ((size = i_size_read(inode)) < 0) {
+		EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+		ret = -EFSCORRUPTED;
+		goto bad_inode;
+	}
	ei->i_disksize = inode->i_size;
 #ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
@@ -669,7 +669,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb,
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
-	unsigned short border;
+	unsigned int border;
 
	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));
 
@@ -2287,7 +2287,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
	struct ext4_group_info *grinfo;
	struct sg {
		struct ext4_group_info info;
-		ext4_grpblk_t counters[16];
+		ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2];
	} sg;
 
	group--;
@@ -3037,10 +3037,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
			ext4_set_bit(s++, buf);
			count++;
		}
-		for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
-			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
-			count++;
+		j = ext4_bg_num_gdb(sb, grp);
+		if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) {
+			ext4_error(sb, "Invalid number of block group "
+				   "descriptor blocks: %d", j);
+			j = EXT4_BLOCKS_PER_GROUP(sb) - s;
		}
+		count += j;
+		for (; j > 0; j--)
+			ext4_set_bit(EXT4_B2C(sbi, s++), buf);
	}
	if (!count)
		return 0;
@@ -3130,7 +3135,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
	char *orig_data = kstrdup(data, GFP_KERNEL);
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
-	struct ext4_sb_info *sbi;
+	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
	ext4_fsblk_t block;
	ext4_fsblk_t sb_block = get_sb_block(&data);
	ext4_fsblk_t logical_sb_block;
@@ -3149,16 +3154,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
	ext4_group_t first_not_zeroed;
 
-	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-	if (!sbi)
-		goto out_free_orig;
+	if ((data && !orig_data) || !sbi)
+		goto out_free_base;
 
	sbi->s_blockgroup_lock =
		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
-	if (!sbi->s_blockgroup_lock) {
-		kfree(sbi);
-		goto out_free_orig;
-	}
+	if (!sbi->s_blockgroup_lock)
+		goto out_free_base;
 
	sb->s_fs_info = sbi;
	sbi->s_sb = sb;
	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
@@ -3304,11 +3307,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
	 */
	sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
 
-	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
-			   &journal_devnum, &journal_ioprio, 0)) {
-		ext4_msg(sb, KERN_WARNING,
-			 "failed to parse options in superblock: %s",
-			 sbi->s_es->s_mount_opts);
+	if (sbi->s_es->s_mount_opts[0]) {
+		char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts,
+					      sizeof(sbi->s_es->s_mount_opts),
+					      GFP_KERNEL);
+		if (!s_mount_opts)
+			goto failed_mount;
+		if (!parse_options(s_mount_opts, sb, &journal_devnum,
+				   &journal_ioprio, 0)) {
+			ext4_msg(sb, KERN_WARNING,
+				 "failed to parse options in superblock: %s",
+				 s_mount_opts);
+		}
+		kfree(s_mount_opts);
	}
	sbi->s_def_mount_opt = sbi->s_mount_opt;
	if (!parse_options((char *) data, sb, &journal_devnum,
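The kstrndup() dance above exists because s_mount_opts is a fixed-size superblock field with no guarantee of NUL termination, so it must never reach strlen-style parsing or a bare %s. A standalone illustration of the bounded copy and the bounded print (plain C; strndup() and printf() stand in for the kernel helpers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char s_mount_opts[8];
	memcpy(s_mount_opts, "abcdefgh", 8);	/* field full: no NUL inside */

	/* bounded duplicate, as kstrndup() does in the hunk above */
	char *copy = strndup(s_mount_opts, sizeof(s_mount_opts));
	printf("parse-safe copy: %s\n", copy);	/* strndup() terminated it */

	/* bounded print, as the "%.*s" mount message a few hunks below */
	printf("raw field      : %.*s\n",
	       (int)sizeof(s_mount_opts), s_mount_opts);

	free(copy);
	return 0;
}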
@@ -3334,6 +3345,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
			 "both data=journal and dax");
			goto failed_mount;
		}
+		if (ext4_has_feature_encrypt(sb)) {
+			ext4_msg(sb, KERN_WARNING,
+				 "encrypted files will use data=ordered "
+				 "instead of data journaling mode");
+		}
		if (test_opt(sb, DELALLOC))
			clear_opt(sb, DELALLOC);
	} else {
@@ -3496,12 +3512,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
-	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
-		goto cantfind_ext4;
 
	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
	if (sbi->s_inodes_per_block == 0)
		goto cantfind_ext4;
+	if (sbi->s_inodes_per_group < sbi->s_inodes_per_block ||
+	    sbi->s_inodes_per_group > blocksize * 8) {
+		ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n",
+			 sbi->s_blocks_per_group);
+		goto failed_mount;
+	}
	sbi->s_itb_per_group = sbi->s_inodes_per_group /
					sbi->s_inodes_per_block;
	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
@@ -3584,13 +3604,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
	}
	sbi->s_cluster_ratio = clustersize / blocksize;
 
-	if (sbi->s_inodes_per_group > blocksize * 8) {
-		ext4_msg(sb, KERN_ERR,
-		       "#inodes per group too big: %lu",
-		       sbi->s_inodes_per_group);
-		goto failed_mount;
-	}
-
	/* Do we have standard group size of clustersize * 8 blocks ? */
	if (sbi->s_blocks_per_group == clustersize << 3)
		set_opt2(sb, STD_GROUP_SIZE);
@@ -3994,7 +4007,9 @@ no_journal:
 
	if (___ratelimit(&ext4_mount_msg_ratelimit, "EXT4-fs mount"))
		ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
-			 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
+			 "Opts: %.*s%s%s", descr,
+			 (int) sizeof(sbi->s_es->s_mount_opts),
+			 sbi->s_es->s_mount_opts,
			 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
 
	if (es->s_error_count)
@@ -4064,8 +4079,8 @@ failed_mount:
 out_fail:
	sb->s_fs_info = NULL;
	kfree(sbi->s_blockgroup_lock);
+out_free_base:
	kfree(sbi);
-out_free_orig:
	kfree(orig_data);
	return err ? err : ret;
 }
@@ -352,6 +352,7 @@ static int stat_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations stat_fops = {
+	.owner = THIS_MODULE,
	.open = stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
@@ -3980,6 +3980,7 @@ xlog_recover_clear_agi_bucket(
agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket);
xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
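The logged byte range above is computed with offsetof() plus an element-sized slice for the one bucket that changed. A userspace sketch of the same arithmetic; the struct layout below is illustrative, not XFS's on-disk AGI:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_agi {
    uint32_t agi_magic;
    uint32_t agi_unlinked[64];
};

int main(void)
{
    int bucket = 3;
    size_t first = offsetof(struct demo_agi, agi_unlinked) +
                   sizeof(uint32_t) * bucket;
    size_t last = first + sizeof(uint32_t) - 1;

    /* log exactly the bytes of agi_unlinked[bucket] */
    printf("log bytes %zu..%zu\n", first, last);
    return 0;
}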
@@ -247,8 +247,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap)
return true;
}
#endif /* CONFIG_MULTIUSER */
extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode);
extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap);
extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
extern bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns);

/* audit system wants to get cap info from files as well */
extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
@@ -476,6 +476,7 @@ struct mm_struct {
 */
struct task_struct __rcu *owner;
#endif
struct user_namespace *user_ns;

/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
@@ -19,7 +19,6 @@
#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_PTRACE_CAP 0x00000004 /* ptracer can follow suid-exec */

#define PT_OPT_FLAG_SHIFT 3
/* PT_TRACE_* event enable flags */
@@ -1626,6 +1626,7 @@ struct task_struct {
struct list_head cpu_timers[3];

/* process credentials */
const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
const struct cred __rcu *real_cred; /* objective and real subjective task
 * credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
@@ -456,6 +456,19 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
}
EXPORT_SYMBOL(file_ns_capable);

/**
 * privileged_wrt_inode_uidgid - Do capabilities in the namespace work over the inode?
 * @ns: The user namespace in question
 * @inode: The inode in question
 *
 * Return true if the inode uid and gid are within the namespace.
 */
bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode)
{
return kuid_has_mapping(ns, inode->i_uid) &&
kgid_has_mapping(ns, inode->i_gid);
}

/**
 * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
 * @inode: The inode in question
@@ -469,7 +482,26 @@ bool capable_wrt_inode_uidgid(const struct inode *inode, int cap)
{
struct user_namespace *ns = current_user_ns();

return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid) &&
kgid_has_mapping(ns, inode->i_gid);
return ns_capable(ns, cap) && privileged_wrt_inode_uidgid(ns, inode);
}
EXPORT_SYMBOL(capable_wrt_inode_uidgid);

/**
 * ptracer_capable - Determine if the ptracer holds CAP_SYS_PTRACE in the namespace
 * @tsk: The task that may be ptraced
 * @ns: The user namespace to search for CAP_SYS_PTRACE in
 *
 * Return true if the task that is ptracing the current task had CAP_SYS_PTRACE
 * in the specified user namespace.
 */
bool ptracer_capable(struct task_struct *tsk, struct user_namespace *ns)
{
int ret = 0; /* An absent tracer adds no restrictions */
const struct cred *cred;
rcu_read_lock();
cred = rcu_dereference(tsk->ptracer_cred);
if (cred)
ret = security_capable_noaudit(cred, ns, CAP_SYS_PTRACE);
rcu_read_unlock();
return (ret == 0);
}
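ptracer_capable() above treats an absent tracer as unrestricted (ret stays 0) and converts the security hook's 0-means-allowed convention into a bool. A standalone sketch of that shape, with a stub standing in for security_capable_noaudit():

#include <stdbool.h>
#include <stdio.h>
#include <stddef.h>

struct cred { bool has_cap_sys_ptrace; };

/* stub for the real security hook: 0 means permitted */
static int check_cap(const struct cred *cred)
{
    return cred->has_cap_sys_ptrace ? 0 : -1;
}

static bool tracer_capable(const struct cred *tracer_cred)
{
    int ret = 0; /* an absent tracer adds no restrictions */

    if (tracer_cred)
        ret = check_cap(tracer_cred);
    return ret == 0;
}

int main(void)
{
    struct cred weak = { false }, strong = { true };

    printf("%d %d %d\n",
           tracer_capable(NULL),    /* 1: no tracer, allowed */
           tracer_capable(&strong), /* 1 */
           tracer_capable(&weak));  /* 0 */
    return 0;
}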
@@ -598,11 +598,11 @@ return_normal:
/*
 * Wait for the other CPUs to be notified and be waiting for us:
 */
time_left = loops_per_jiffy * HZ;
time_left = MSEC_PER_SEC;
while (kgdb_do_roundup && --time_left &&
(atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
online_cpus)
cpu_relax();
udelay(1000);
if (!time_left)
pr_crit("Timed out waiting for secondary CPUs.\n");
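The fix replaces a spin count calibrated from loops_per_jiffy with a millisecond budget and a fixed 1 ms delay per iteration, so the timeout no longer scales with CPU speed. A userspace analogue, assuming POSIX nanosleep() in place of udelay():

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define MSEC_PER_SEC 1000

static bool all_cpus_checked_in(void) { return false; } /* stub condition */

int main(void)
{
    long time_left = MSEC_PER_SEC; /* wall-clock budget, not a loop count */

    while (--time_left && !all_cpus_checked_in()) {
        struct timespec ms = { 0, 1000000 }; /* 1 ms, like udelay(1000) */
        nanosleep(&ms, NULL);
    }
    if (!time_left)
        printf("timed out after ~1 second\n");
    return 0;
}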
@@ -585,7 +585,8 @@ static void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
#endif
}

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
struct user_namespace *user_ns)
{
mm->mmap = NULL;
mm->mm_rb = RB_ROOT;

@@ -625,6 +626,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
if (init_new_context(p, mm))
goto fail_nocontext;

mm->user_ns = get_user_ns(user_ns);
return mm;

fail_nocontext:

@@ -670,7 +672,7 @@ struct mm_struct *mm_alloc(void)
return NULL;

memset(mm, 0, sizeof(*mm));
return mm_init(mm, current);
return mm_init(mm, current, current_user_ns());
}

/*

@@ -685,6 +687,7 @@ void __mmdrop(struct mm_struct *mm)
destroy_context(mm);
mmu_notifier_mm_destroy(mm);
check_mm(mm);
put_user_ns(mm->user_ns);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);

@@ -943,7 +946,7 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)

memcpy(mm, oldmm, sizeof(*mm));

if (!mm_init(mm, tsk))
if (!mm_init(mm, tsk, mm->user_ns))
goto fail_nomem;

err = dup_mmap(mm, oldmm);
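mm_init() now takes a reference on the namespace it records and __mmdrop() releases it, so the mm owns its user_ns for its whole lifetime. A standalone sketch of that get/put pairing, with a plain counter in place of the kernel's refcounting:

#include <stdio.h>
#include <stddef.h>

struct ns { int refcount; };

static struct ns *get_ns(struct ns *n) { n->refcount++; return n; }
static void put_ns(struct ns *n) { n->refcount--; }

struct mm { struct ns *user_ns; };

static void mm_setup(struct mm *mm, struct ns *user_ns)
{
    mm->user_ns = get_ns(user_ns); /* owner holds a reference */
}

static void mm_drop(struct mm *mm)
{
    put_ns(mm->user_ns); /* released exactly once, at teardown */
    mm->user_ns = NULL;
}

int main(void)
{
    struct ns init_ns = { 1 };
    struct mm mm;

    mm_setup(&mm, &init_ns);
    printf("refs after init: %d\n", init_ns.refcount); /* 2 */
    mm_drop(&mm);
    printf("refs after drop: %d\n", init_ns.refcount); /* 1 */
    return 0;
}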
@@ -39,6 +39,9 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
BUG_ON(!list_empty(&child->ptrace_entry));
list_add(&child->ptrace_entry, &new_parent->ptraced);
child->parent = new_parent;
rcu_read_lock();
child->ptracer_cred = get_cred(__task_cred(new_parent));
rcu_read_unlock();
}

/**

@@ -71,11 +74,15 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
 */
void __ptrace_unlink(struct task_struct *child)
{
const struct cred *old_cred;
BUG_ON(!child->ptrace);

child->ptrace = 0;
child->parent = child->real_parent;
list_del_init(&child->ptrace_entry);
old_cred = child->ptracer_cred;
child->ptracer_cred = NULL;
put_cred(old_cred);

spin_lock(&child->sighand->siglock);

@@ -219,7 +226,7 @@ static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
{
const struct cred *cred = current_cred(), *tcred;
int dumpable = 0;
struct mm_struct *mm;
kuid_t caller_uid;
kgid_t caller_gid;

@@ -270,16 +277,11 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
return -EPERM;
ok:
rcu_read_unlock();
smp_rmb();
if (task->mm)
dumpable = get_dumpable(task->mm);
rcu_read_lock();
if (dumpable != SUID_DUMP_USER &&
!ptrace_has_cap(__task_cred(task)->user_ns, mode)) {
rcu_read_unlock();
return -EPERM;
}
rcu_read_unlock();
mm = task->mm;
if (mm &&
((get_dumpable(mm) != SUID_DUMP_USER) &&
!ptrace_has_cap(mm->user_ns, mode)))
return -EPERM;

return security_ptrace_access_check(task, mode);
}

@@ -343,10 +345,6 @@ static int ptrace_attach(struct task_struct *task, long request,

if (seize)
flags |= PT_SEIZED;
rcu_read_lock();
if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
flags |= PT_PTRACE_CAP;
rcu_read_unlock();
task->ptrace = flags;

__ptrace_link(task, current);
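The rewritten access check reads task->mm into a local once and derives both the dumpable state and the namespace to test from that single snapshot, rather than consulting the task in two separate steps. A minimal sketch of the load-once pattern; the types and the capability stub below are illustrative:

#include <stdio.h>
#include <stdbool.h>

struct mm { int dumpable; int ns_id; };
struct task { struct mm *mm; };

#define SUID_DUMP_USER 1

static bool caller_has_cap_in(int ns_id) { return ns_id == 0; } /* stub */

static int may_access(struct task *t)
{
    struct mm *mm = t->mm; /* snapshot once; do not re-read t->mm */

    if (mm && mm->dumpable != SUID_DUMP_USER &&
        !caller_has_cap_in(mm->ns_id))
        return -1; /* like -EPERM */
    return 0;
}

int main(void)
{
    struct mm m = { 0 /* not dumpable */, 5 };
    struct task t = { &m };

    printf("%d\n", may_access(&t)); /* -1: undumpable, no cap in ns 5 */
    m.dumpable = SUID_DUMP_USER;
    printf("%d\n", may_access(&t)); /* 0 */
    return 0;
}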
@@ -403,7 +403,6 @@ static void watchdog_overflow_callback(struct perf_event *event,
 */
if (is_hardlockup()) {
int this_cpu = smp_processor_id();
struct pt_regs *regs = get_irq_regs();

/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
@@ -6,6 +6,7 @@
#include <linux/cpumask.h>

#include <linux/atomic.h>
#include <linux/user_namespace.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>

@@ -21,5 +22,6 @@ struct mm_struct init_mm = {
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
.user_ns = &init_user_ns,
INIT_MM_CONTEXT(init_mm)
};

14
mm/vmscan.c
@@ -277,6 +277,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
int nid = shrinkctl->nid;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
long scanned = 0, next_deferred;

freeable = shrinker->count_objects(shrinker, shrinkctl);
if (freeable == 0)

@@ -298,7 +299,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
shrinker->scan_objects, total_scan);
total_scan = freeable;
}
next_deferred = nr;
} else
next_deferred = total_scan;

/*
 * We need to avoid excessive windup on filesystem shrinkers

@@ -355,17 +358,22 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,

count_vm_events(SLABS_SCANNED, nr_to_scan);
total_scan -= nr_to_scan;
scanned += nr_to_scan;

cond_resched();
}

if (next_deferred >= scanned)
next_deferred -= scanned;
else
next_deferred = 0;
/*
 * move the unused scan count back into the shrinker in a
 * manner that handles concurrent updates. If we exhausted the
 * scan, there is no need to do an update.
 */
if (total_scan > 0)
new_nr = atomic_long_add_return(total_scan,
if (next_deferred > 0)
new_nr = atomic_long_add_return(next_deferred,
&shrinker->nr_deferred[nid]);
else
new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
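The shrinker fix carries only the unscanned remainder forward: subtract what was actually scanned, clamp at zero, and skip the atomic write-back when nothing is deferred. The arithmetic in isolation:

#include <stdio.h>

int main(void)
{
    long next_deferred = 700; /* work owed from previous passes */
    long scanned = 512;       /* work completed this pass */

    if (next_deferred >= scanned)
        next_deferred -= scanned;
    else
        next_deferred = 0; /* never carry a negative balance */

    if (next_deferred > 0)
        printf("defer %ld objects to the next pass\n", next_deferred);
    else
        printf("nothing deferred\n");
    return 0;
}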
@@ -884,6 +884,8 @@ void snd_hda_apply_fixup(struct hda_codec *codec, int action)
}
EXPORT_SYMBOL_GPL(snd_hda_apply_fixup);

#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))

static bool pin_config_match(struct hda_codec *codec,
const struct hda_pintbl *pins)
{

@@ -901,7 +903,7 @@ static bool pin_config_match(struct hda_codec *codec,
for (; t_pins->nid; t_pins++) {
if (t_pins->nid == nid) {
found = 1;
if (t_pins->val == cfg)
if ((t_pins->val & IGNORE_SEQ_ASSOC) == (cfg & IGNORE_SEQ_ASSOC))
break;
else if ((cfg & 0xf0000000) == 0x40000000 && (t_pins->val & 0xf0000000) == 0x40000000)
break;
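IGNORE_SEQ_ASSOC masks off the sequence and default-association nibbles, the low byte of an HDA pin default config, so two configs that differ only there still match. A self-contained demo assuming the standard field masks (sequence in bits 0-3, association in bits 4-7):

#include <stdio.h>
#include <stdint.h>

#define AC_DEFCFG_SEQUENCE  0x0000000f
#define AC_DEFCFG_DEF_ASSOC 0x000000f0
#define IGNORE_SEQ_ASSOC (~(AC_DEFCFG_SEQUENCE | AC_DEFCFG_DEF_ASSOC))

int main(void)
{
    uint32_t table_val = 0x042140ff; /* entry from a pin table */
    uint32_t codec_cfg = 0x04214010; /* same pin, different seq/assoc */

    if ((table_val & IGNORE_SEQ_ASSOC) == (codec_cfg & IGNORE_SEQ_ASSOC))
        printf("pin configs match (seq/assoc ignored)\n");
    return 0;
}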
@@ -780,6 +780,7 @@ static const struct hda_pintbl alienware_pincfgs[] = {
static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
{}
};
@@ -262,6 +262,7 @@ enum {
CXT_FIXUP_CAP_MIX_AMP_5047,
CXT_FIXUP_MUTE_LED_EAPD,
CXT_FIXUP_HP_SPECTRE,
CXT_FIXUP_HP_GATE_MIC,
};

/* for hda_fixup_thinkpad_acpi() */

@@ -633,6 +634,17 @@ static void cxt_fixup_cap_mix_amp_5047(struct hda_codec *codec,
(1 << AC_AMPCAP_MUTE_SHIFT));
}

static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
{
/* the mic pin (0x19) doesn't give an unsolicited event;
 * probe the mic pin together with the headphone pin (0x16)
 */
if (action == HDA_FIXUP_ACT_PROBE)
snd_hda_jack_set_gating_jack(codec, 0x19, 0x16);
}

/* ThinkPad X200 & co with cxt5051 */
static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
{ 0x16, 0x042140ff }, /* HP (seq# overridden) */

@@ -774,6 +786,10 @@ static const struct hda_fixup cxt_fixups[] = {
{ }
}
},
[CXT_FIXUP_HP_GATE_MIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt_fixup_hp_gate_mic_jack,
},
};

static const struct snd_pci_quirk cxt5045_fixups[] = {

@@ -824,6 +840,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
@@ -5899,6 +5899,9 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60180},
{0x14, 0x90170120},
{0x21, 0x02211030}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x1b, 0x01011020},
{0x21, 0x02211010}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60160},
{0x14, 0x90170120},
@@ -762,6 +762,9 @@ static int sst_soc_prepare(struct device *dev)
struct sst_data *drv = dev_get_drvdata(dev);
int i;

if (!drv->soc_card)
return 0;

/* suspend all pcms first */
snd_soc_suspend(drv->soc_card->dev);
snd_soc_poweroff(drv->soc_card->dev);

@@ -784,6 +787,9 @@ static void sst_soc_complete(struct device *dev)
struct sst_data *drv = dev_get_drvdata(dev);
int i;

if (!drv->soc_card)
return;

/* restart SSPs */
for (i = 0; i < drv->soc_card->num_rtd; i++) {
struct snd_soc_dai *dai = drv->soc_card->rtd[i].cpu_dai;
@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub)

mutex_lock(&rt->stream_mutex);

hiface_pcm_stream_stop(rt);

sub->dma_off = 0;
sub->period_off = 0;
@@ -931,9 +931,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
case USB_ID(0x046d, 0x0991):
case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */
/* Most audio usb devices lie about volume resolution.
 * Most Logitech webcams have res = 384.
 * Proboly there is some logitech magic behind this number --fishor
 * Probably there is some logitech magic behind this number --fishor
 */
if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
usb_audio_info(chip,
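The quirk list above exists because these devices report a bogus volume resolution, and the code below the comment overrides it with a known-good step. A sketch of what such an override does to the usable control range; the min/max values here are illustrative, only res = 384 comes from the comment above:

#include <stdio.h>

int main(void)
{
    int min = -8064, max = 0; /* illustrative raw range from the device */
    int res = 384;            /* the step the quirk forces */

    printf("%d usable steps of %d units each\n", (max - min) / res, res);
    return 0;
}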