This is the 4.4.199 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl3CqekACgkQONu9yGCS
 aT4uYQ//a2T+5VIxlJ3ZaTLRVXTqDjo7atLuL4OkeqJtbqsBy/1LcRQps8G+cXEd
 eZ4kmi8PcGQ+flles96LA6fMmL1MmIZcg05HJ/kJYBoOSz98Jb6/jckGu8FMuv8W
 WXCPNbuigL8IX3Qy6xGtrevJ+rBO+hRpBrRzjc1NeFZpJ00lOSF/S+e09GjpXxP6
 /fiDl/ZT2Iy+RgK769kmDHz7CkZHnTIu6MFteg4MireUsK2vWpopGdoM0UDWVWL9
 7DY+sxh7lIfNde1d8v0bX6YBfpy63ck82woh7QyuA5JWVNzQeGoOmoNAtn5HO0jz
 G46y+54ruDEangdQzBDld759o4aW2MfplLhwLbkvy4u8dx9XKQsYAi2oPofVUAnp
 Hx00/0h4NqNtNDn6TL1BWXAdTHwgyTJmm4kFAbYahepEzJvXI1l+1BpR0Rqcs3QU
 7Fzv9rWL3hUTi12NyjYBPrAJIYP2eKp04fgKNhAQFCoyycNHjvwbwdEvhLdCgJsI
 I1tq/LA1H9gRHA/mE1AQm4YVAEmO1KHOKJHS0HppQgA0k6i4KsUeO0ghz1kAdY0Q
 l2olxVtaUugQ21QLzxivTn6WhTgSk9cxgrGQ8GxCu+4jsP5CYTnp2H7jEJNB8kjp
 iVit1j14Tz4msRW+wgLud1jiIh4D9tWxh5Domtm05ZrwtUu+xcY=
 =NJpm
 -----END PGP SIGNATURE-----

Merge 4.4.199 into android-4.4-p

Changes in 4.4.199
	dm snapshot: use mutex instead of rw_semaphore
	dm snapshot: introduce account_start_copy() and account_end_copy()
	dm snapshot: rework COW throttling to fix deadlock
	dm: Use kzalloc for all structs with embedded biosets/mempools
	sc16is7xx: Fix for "Unexpected interrupt: 8"
	x86/cpu: Add Atom Tremont (Jacobsville)
	scripts/setlocalversion: Improve -dirty check with git-status --no-optional-locks
	usb: handle warm-reset port requests on hub resume
	exec: load_script: Do not exec truncated interpreter path
	iio: fix center temperature of bmc150-accel-core
	perf map: Fix overlapped map handling
	RDMA/iwcm: Fix a lock inversion issue
	fs: cifs: mute -Wunused-const-variable message
	serial: mctrl_gpio: Check for NULL pointer
	efi/cper: Fix endianness of PCIe class code
	efi/x86: Do not clean dummy variable in kexec path
	fs: ocfs2: fix possible null-pointer dereferences in ocfs2_xa_prepare_entry()
	fs: ocfs2: fix a possible null-pointer dereference in ocfs2_info_scan_inode_alloc()
	MIPS: fw: sni: Fix out of bounds init of o32 stack
	NFSv4: Fix leak of clp->cl_acceptor string
	tracing: Initialize iter->seq after zeroing in tracing_read_pipe()
	USB: legousbtower: fix a signedness bug in tower_probe()
	thunderbolt: Use 32-bit writes when writing ring producer/consumer
	ath6kl: fix a NULL-ptr-deref bug in ath6kl_usb_alloc_urb_from_pipe()
	fuse: flush dirty data/metadata before non-truncate setattr
	fuse: truncate pending writes on O_TRUNC
	ALSA: bebob: Fix prototype of helper function to return negative value
	UAS: Revert commit 3ae62a42090f ("UAS: fix alignment of scatter/gather segments")
	USB: gadget: Reject endpoints with 0 maxpacket value
	USB: ldusb: fix ring-buffer locking
	USB: ldusb: fix control-message timeout
	USB: serial: whiteheat: fix potential slab corruption
	USB: serial: whiteheat: fix line-speed endianness
	HID: Fix assumption that devices have inputs
	HID: fix error message in hid_open_report()
	nl80211: fix validation of mesh path nexthop
	s390/cmm: fix information leak in cmm_timeout_handler()
	rtlwifi: Fix potential overflow on P2P code
	llc: fix sk_buff leak in llc_sap_state_process()
	llc: fix sk_buff leak in llc_conn_service()
	bonding: fix potential NULL deref in bond_update_slave_arr
	net: usb: sr9800: fix uninitialized local variable
	sch_netem: fix rcu splat in netem_enqueue()
	sctp: fix the issue that flags are ignored when using kernel_connect
	sctp: not bind the socket in sctp_connect
	xfs: Correctly invert xfs_buftarg LRU isolation logic
	Revert "ALSA: hda: Flush interrupts on disabling"
	Linux 4.4.199

Change-Id: Ia26458456401f9ec050f4c11bd5bdf24b8a21b24
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman 2019-11-06 12:50:36 +01:00
commit 3f5703c80e
66 changed files with 526 additions and 279 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 198
SUBLEVEL = 199
EXTRAVERSION =
NAME = Blurry Fish Butt

View file

@ -42,7 +42,7 @@
/* O32 stack has to be 8-byte aligned. */
static u64 o32_stk[4096];
#define O32_STK &o32_stk[sizeof(o32_stk)]
#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
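The macro fix above swaps a byte count for an element count: o32_stk is declared u64[4096], so sizeof(o32_stk) is 32768 and the old O32_STK pointed at &o32_stk[32768], 28672 elements past the end of the array. A minimal sketch of the corrected definition, under the declaration shown in the hunk:

static u64 o32_stk[4096];
/* ARRAY_SIZE() yields the element count (4096), so O32_STK is the
 * legal one-past-the-end address the o32 call stack grows down from. */
#define O32_STK (&o32_stk[ARRAY_SIZE(o32_stk)])	/* == o32_stk + 4096 */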

View file

@ -306,16 +306,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
}
if (write) {
len = *lenp;
if (copy_from_user(buf, buffer,
len > sizeof(buf) ? sizeof(buf) : len))
len = min(*lenp, sizeof(buf));
if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
*ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
@ -323,9 +323,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += len;
}
*lenp = len;
*ppos += len;
return 0;
}
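The shape of the hardened write path, as a minimal sketch (the buffer size and function name are illustrative, not the exact cmm code):

static int handle_write(const char __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[64];				/* hypothetical parse buffer */
	size_t len = min(*lenp, sizeof(buf));	/* clamp before copying */

	if (!len)
		return 0;
	if (copy_from_user(buf, buffer, len))
		return -EFAULT;
	buf[len - 1] = '\0';	/* terminate at the copied length, so the
				 * parser can never run into uninitialized
				 * stack bytes past what userspace wrote */
	/* ... simple_strtoul() parsing as above ... */
	*ppos += *lenp;		/* consume the entire write */
	return 0;
}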

View file

@ -5,7 +5,7 @@
* "Big Core" Processors (Branded as Core, Xeon, etc...)
*
* The "_X" parts are generally the EP and EX Xeons, or the
* "Extreme" ones, like Broadwell-E.
* "Extreme" ones, like Broadwell-E, or Atom microserver.
*
* Things ending in "2" are usually because we have no better
* name for them. There's no processor called "WESTMERE2".
@ -67,6 +67,7 @@
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */
#define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */
#define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */
#define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */
/* Xeon Phi */

View file

@ -859,9 +859,6 @@ static void __init kexec_enter_virtual_mode(void)
if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
runtime_code_page_mkexec();
/* clean DUMMY object */
efi_delete_dummy_variable();
#endif
}

View file

@ -375,7 +375,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
pcie->device_id.vendor_id, pcie->device_id.device_id);
p = pcie->device_id.class_code;
printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
}
if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,

View file

@ -75,13 +75,20 @@ static int axff_init(struct hid_device *hid)
{
struct axff_device *axff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int field_count = 0;
int i, j;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
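The same guard recurs in every HID force-feedback driver patched below (hid-dr, hid-emsff, hid-gaff, hid-holtekff, the Logitech lg*ff family, hid-sony, hid-tmff and hid-zpff). A condensed sketch of the shared pattern, with xxff_init standing in for each driver's init function:

static int xxff_init(struct hid_device *hid)
{
	struct hid_input *hidinput;
	struct input_dev *dev;

	/* A malicious or broken device can bind with no input devices
	 * at all, in which case list_first_entry() on hid->inputs
	 * returns garbage; bail out before dereferencing it. */
	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
	dev = hidinput->input;

	/* ... validate output reports and set up force feedback ... */
	return 0;
}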

View file

@ -959,6 +959,7 @@ int hid_open_report(struct hid_device *device)
__u8 *start;
__u8 *buf;
__u8 *end;
__u8 *next;
int ret;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
@ -1012,7 +1013,8 @@ int hid_open_report(struct hid_device *device)
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
ret = -EINVAL;
while ((start = fetch_item(start, end, &item)) != NULL) {
while ((next = fetch_item(start, end, &item)) != NULL) {
start = next;
if (item.format != HID_ITEM_FORMAT_SHORT) {
hid_err(device, "unexpected long global item\n");
@ -1041,7 +1043,8 @@ int hid_open_report(struct hid_device *device)
}
}
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
hid_err(device, "item fetching failed at offset %u/%u\n",
size - (unsigned int)(end - start), size);
err:
vfree(parser);
hid_close_report(device);

View file

@ -87,13 +87,19 @@ static int drff_init(struct hid_device *hid)
{
struct drff_device *drff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -59,13 +59,19 @@ static int emsff_init(struct hid_device *hid)
{
struct emsff_device *emsff;
struct hid_report *report;
struct hid_input *hidinput = list_first_entry(&hid->inputs,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -77,14 +77,20 @@ static int gaff_init(struct hid_device *hid)
{
struct gaff_device *gaff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct list_head *report_ptr = report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;

View file

@ -140,13 +140,19 @@ static int holtekff_init(struct hid_device *hid)
{
struct holtekff_device *holtekff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
if (list_empty(report_list)) {
hid_err(hid, "no output report found\n");
return -ENODEV;

View file

@ -62,11 +62,17 @@ int lg2ff_init(struct hid_device *hid)
{
struct lg2ff_device *lg2ff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
if (!report)

View file

@ -129,12 +129,19 @@ static const signed short ff3_joystick_ac[] = {
int lg3ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff3_joystick_ac;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
return -ENODEV;

View file

@ -1158,8 +1158,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@ -1171,6 +1171,13 @@ int lg4ff_init(struct hid_device *hid)
int mmode_ret, mmode_idx = -1;
u16 real_product_id;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -1;

View file

@ -127,12 +127,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
int lgff_init(struct hid_device* hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
const signed short *ff_bits = ff_joystick;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
/* Check that the report looks ok */
if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;

View file

@ -1960,9 +1960,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
static int sony_init_ff(struct sony_sc *sc)
{
struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *input_dev;
if (list_empty(&sc->hdev->inputs)) {
hid_err(sc->hdev, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
input_dev = hidinput->input;
input_set_capability(input_dev, EV_FF, FF_RUMBLE);
return input_ff_create_memless(input_dev, NULL, sony_play_effect);

View file

@ -136,12 +136,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
struct tmff_device *tmff;
struct hid_report *report;
struct list_head *report_list;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *input_dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *input_dev;
int error;
int i;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
input_dev = hidinput->input;
tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
if (!tmff)
return -ENOMEM;

View file

@ -66,11 +66,17 @@ static int zpff_init(struct hid_device *hid)
{
struct zpff_device *zpff;
struct hid_report *report;
struct hid_input *hidinput = list_entry(hid->inputs.next,
struct hid_input, list);
struct input_dev *dev = hidinput->input;
struct hid_input *hidinput;
struct input_dev *dev;
int i, error;
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
return -ENODEV;
}
hidinput = list_entry(hid->inputs.next, struct hid_input, list);
dev = hidinput->input;
for (i = 0; i < 4; i++) {
report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
if (!report)

View file

@ -126,7 +126,7 @@
#define BMC150_ACCEL_SLEEP_1_SEC 0x0F
#define BMC150_ACCEL_REG_TEMP 0x08
#define BMC150_ACCEL_TEMP_CENTER_VAL 24
#define BMC150_ACCEL_TEMP_CENTER_VAL 23
#define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
#define BMC150_AUTO_SUSPEND_DELAY_MS 2000

View file

@ -1976,9 +1976,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
conn_id->cm_id.iw = NULL;
cma_exch(conn_id, RDMA_CM_DESTROYING);
mutex_unlock(&conn_id->handler_mutex);
mutex_unlock(&listen_id->handler_mutex);
cma_deref_id(conn_id);
rdma_destroy_id(&conn_id->id);
goto out;
return ret;
}
mutex_unlock(&conn_id->handler_mutex);

View file

@ -32,7 +32,7 @@ static struct kmem_cache *_cell_cache;
*/
struct dm_bio_prison *dm_bio_prison_create(void)
{
struct dm_bio_prison *prison = kmalloc(sizeof(*prison), GFP_KERNEL);
struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);
if (!prison)
return NULL;

View file

@ -50,7 +50,7 @@ struct dm_io_client *dm_io_client_create(void)
struct dm_io_client *client;
unsigned min_ios = dm_get_reserved_bio_based_ios();
client = kmalloc(sizeof(*client), GFP_KERNEL);
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
return ERR_PTR(-ENOMEM);

View file

@ -827,7 +827,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
kc = kmalloc(sizeof(*kc), GFP_KERNEL);
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
if (!kc)
return ERR_PTR(-ENOMEM);

View file

@ -179,7 +179,7 @@ struct dm_region_hash *dm_region_hash_create(
;
nr_buckets >>= 1;
rh = kmalloc(sizeof(*rh), GFP_KERNEL);
rh = kzalloc(sizeof(*rh), GFP_KERNEL);
if (!rh) {
DMERR("unable to allocate region hash memory");
return ERR_PTR(-ENOMEM);

View file

@ -19,7 +19,6 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/semaphore.h>
#include "dm.h"
@ -48,7 +47,7 @@ struct dm_exception_table {
};
struct dm_snapshot {
struct rw_semaphore lock;
struct mutex lock;
struct dm_dev *origin;
struct dm_dev *cow;
@ -106,8 +105,8 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
/* Maximum number of in-flight COW jobs. */
struct semaphore cow_count;
unsigned in_progress;
wait_queue_head_t in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@ -158,8 +157,8 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
static int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@ -456,9 +455,9 @@ static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
continue;
down_read(&s->lock);
mutex_lock(&s->lock);
active = s->active;
up_read(&s->lock);
mutex_unlock(&s->lock);
if (active) {
if (snap_src)
@ -926,7 +925,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
int r;
chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;
down_write(&s->lock);
mutex_lock(&s->lock);
/*
* Process chunks (and associated exceptions) in reverse order
@ -941,7 +940,7 @@ static int remove_single_exception_chunk(struct dm_snapshot *s)
b = __release_queued_bios_after_merge(s);
out:
up_write(&s->lock);
mutex_unlock(&s->lock);
if (b)
flush_bios(b);
@ -1000,9 +999,9 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
if (linear_chunks < 0) {
DMERR("Read error in exception store: "
"shutting down merge");
down_write(&s->lock);
mutex_lock(&s->lock);
s->merge_failed = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
}
goto shut;
}
@ -1043,10 +1042,10 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
previous_count = read_pending_exceptions_done_count();
}
down_write(&s->lock);
mutex_lock(&s->lock);
s->first_merging_chunk = old_chunk;
s->num_merging_chunks = linear_chunks;
up_write(&s->lock);
mutex_unlock(&s->lock);
/* Wait until writes to all 'linear_chunks' drain */
for (i = 0; i < linear_chunks; i++)
@ -1088,10 +1087,10 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
return;
shut:
down_write(&s->lock);
mutex_lock(&s->lock);
s->merge_failed = 1;
b = __release_queued_bios_after_merge(s);
up_write(&s->lock);
mutex_unlock(&s->lock);
error_bios(b);
merge_shutdown(s);
@ -1137,7 +1136,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
origin_mode = FMODE_WRITE;
}
s = kmalloc(sizeof(*s), GFP_KERNEL);
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s) {
ti->error = "Cannot allocate private snapshot structure";
r = -ENOMEM;
@ -1190,7 +1189,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->exception_start_sequence = 0;
s->exception_complete_sequence = 0;
INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock);
mutex_init(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
s->state_bits = 0;
@ -1206,7 +1205,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
init_waitqueue_head(&s->in_progress_wait);
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
@ -1357,9 +1356,9 @@ static void snapshot_dtr(struct dm_target *ti)
/* Check whether exception handover must be cancelled */
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest && (s == snap_src)) {
down_write(&snap_dest->lock);
mutex_lock(&snap_dest->lock);
snap_dest->valid = 0;
up_write(&snap_dest->lock);
mutex_unlock(&snap_dest->lock);
DMERR("Cancelling snapshot handover.");
}
up_read(&_origins_lock);
@ -1390,13 +1389,62 @@ static void snapshot_dtr(struct dm_target *ti)
dm_exception_store_destroy(s->store);
mutex_destroy(&s->lock);
dm_put_device(ti, s->cow);
dm_put_device(ti, s->origin);
WARN_ON(s->in_progress);
kfree(s);
}
static void account_start_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
s->in_progress++;
spin_unlock(&s->in_progress_wait.lock);
}
static void account_end_copy(struct dm_snapshot *s)
{
spin_lock(&s->in_progress_wait.lock);
BUG_ON(!s->in_progress);
s->in_progress--;
if (likely(s->in_progress <= cow_threshold) &&
unlikely(waitqueue_active(&s->in_progress_wait)))
wake_up_locked(&s->in_progress_wait);
spin_unlock(&s->in_progress_wait.lock);
}
static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
if (unlikely(s->in_progress > cow_threshold)) {
spin_lock(&s->in_progress_wait.lock);
if (likely(s->in_progress > cow_threshold)) {
/*
* NOTE: this throttle doesn't account for whether
* the caller is servicing an IO that will trigger a COW
* so excess throttling may result for chunks not required
* to be COW'd. But if cow_threshold was reached, extra
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
if (unlock_origins)
up_read(&_origins_lock);
io_schedule();
remove_wait_queue(&s->in_progress_wait, &wait);
return false;
}
spin_unlock(&s->in_progress_wait.lock);
}
return true;
}
/*
* Flush a list of buffers.
*/
@ -1412,7 +1460,7 @@ static void flush_bios(struct bio *bio)
}
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
/*
* Flush a list of buffers.
@ -1425,7 +1473,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
r = do_origin(s->origin, bio);
r = do_origin(s->origin, bio, false);
if (r == DM_MAPIO_REMAPPED)
generic_make_request(bio);
bio = n;
@ -1477,7 +1525,7 @@ static void pending_complete(void *context, int success)
if (!success) {
/* Read/write error - snapshot is unusable */
down_write(&s->lock);
mutex_lock(&s->lock);
__invalidate_snapshot(s, -EIO);
error = 1;
goto out;
@ -1485,14 +1533,14 @@ static void pending_complete(void *context, int success)
e = alloc_completed_exception(GFP_NOIO);
if (!e) {
down_write(&s->lock);
mutex_lock(&s->lock);
__invalidate_snapshot(s, -ENOMEM);
error = 1;
goto out;
}
*e = pe->e;
down_write(&s->lock);
mutex_lock(&s->lock);
if (!s->valid) {
free_completed_exception(e);
error = 1;
@ -1517,7 +1565,7 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
increment_pending_exceptions_done_count();
up_write(&s->lock);
mutex_unlock(&s->lock);
/* Submit any pending write bios */
if (error) {
@ -1579,7 +1627,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
}
list_add(&pe->out_of_order_entry, lh);
}
up(&s->cow_count);
account_end_copy(s);
}
/*
@ -1603,7 +1651,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
down(&s->cow_count);
account_start_copy(s);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@ -1623,7 +1671,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
down(&s->cow_count);
account_start_copy(s);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
@ -1714,9 +1762,12 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!s->valid)
return -EIO;
/* FIXME: should only take write lock if we need
* to copy an exception */
down_write(&s->lock);
if (bio_data_dir(bio) == WRITE) {
while (unlikely(!wait_for_in_progress(s, false)))
; /* wait_for_in_progress() has slept */
}
mutex_lock(&s->lock);
if (!s->valid || (unlikely(s->snapshot_overflowed) && bio_rw(bio) == WRITE)) {
r = -EIO;
@ -1738,9 +1789,9 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (bio_rw(bio) == WRITE) {
pe = __lookup_pending_exception(s, chunk);
if (!pe) {
up_write(&s->lock);
mutex_unlock(&s->lock);
pe = alloc_pending_exception(s);
down_write(&s->lock);
mutex_lock(&s->lock);
if (!s->valid || s->snapshot_overflowed) {
free_pending_exception(pe);
@ -1775,7 +1826,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_size ==
(s->store->chunk_size << SECTOR_SHIFT)) {
pe->started = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
start_full_bio(pe, bio);
goto out;
}
@ -1785,7 +1836,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
if (!pe->started) {
/* this is protected by snap->lock */
pe->started = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
start_copy(pe);
goto out;
}
@ -1795,7 +1846,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
}
out_unlock:
up_write(&s->lock);
mutex_unlock(&s->lock);
out:
return r;
}
@ -1831,7 +1882,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
down_write(&s->lock);
mutex_lock(&s->lock);
/* Full merging snapshots are redirected to the origin */
if (!s->valid)
@ -1862,12 +1913,12 @@ redirect_to_origin:
bio->bi_bdev = s->origin->bdev;
if (bio_rw(bio) == WRITE) {
up_write(&s->lock);
return do_origin(s->origin, bio);
mutex_unlock(&s->lock);
return do_origin(s->origin, bio, false);
}
out_unlock:
up_write(&s->lock);
mutex_unlock(&s->lock);
return r;
}
@ -1898,7 +1949,7 @@ static int snapshot_preresume(struct dm_target *ti)
down_read(&_origins_lock);
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
down_read(&snap_src->lock);
mutex_lock(&snap_src->lock);
if (s == snap_src) {
DMERR("Unable to resume snapshot source until "
"handover completes.");
@ -1908,7 +1959,7 @@ static int snapshot_preresume(struct dm_target *ti)
"source is suspended.");
r = -EINVAL;
}
up_read(&snap_src->lock);
mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@ -1954,11 +2005,11 @@ static void snapshot_resume(struct dm_target *ti)
(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
if (snap_src && snap_dest) {
down_write(&snap_src->lock);
down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
mutex_lock(&snap_src->lock);
mutex_lock_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
__handover_exceptions(snap_src, snap_dest);
up_write(&snap_dest->lock);
up_write(&snap_src->lock);
mutex_unlock(&snap_dest->lock);
mutex_unlock(&snap_src->lock);
}
up_read(&_origins_lock);
@ -1973,9 +2024,9 @@ static void snapshot_resume(struct dm_target *ti)
/* Now we have correct chunk size, reregister */
reregister_snapshot(s);
down_write(&s->lock);
mutex_lock(&s->lock);
s->active = 1;
up_write(&s->lock);
mutex_unlock(&s->lock);
}
static uint32_t get_origin_minimum_chunksize(struct block_device *bdev)
@ -2015,7 +2066,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
down_write(&snap->lock);
mutex_lock(&snap->lock);
if (!snap->valid)
DMEMIT("Invalid");
@ -2040,7 +2091,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
DMEMIT("Unknown");
}
up_write(&snap->lock);
mutex_unlock(&snap->lock);
break;
@ -2106,7 +2157,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
if (dm_target_is_snapshot_merge(snap->ti))
continue;
down_write(&snap->lock);
mutex_lock(&snap->lock);
/* Only deal with valid and active snapshots */
if (!snap->valid || !snap->active)
@ -2133,9 +2184,9 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
pe = __lookup_pending_exception(snap, chunk);
if (!pe) {
up_write(&snap->lock);
mutex_unlock(&snap->lock);
pe = alloc_pending_exception(snap);
down_write(&snap->lock);
mutex_lock(&snap->lock);
if (!snap->valid) {
free_pending_exception(pe);
@ -2178,7 +2229,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
}
next_snapshot:
up_write(&snap->lock);
mutex_unlock(&snap->lock);
if (pe_to_start_now) {
start_copy(pe_to_start_now);
@ -2199,15 +2250,24 @@ next_snapshot:
/*
* Called on a write from the origin driver.
*/
static int do_origin(struct dm_dev *origin, struct bio *bio)
static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
struct origin *o;
int r = DM_MAPIO_REMAPPED;
again:
down_read(&_origins_lock);
o = __lookup_origin(origin->bdev);
if (o)
if (o) {
if (limit) {
struct dm_snapshot *s;
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
}
r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
}
up_read(&_origins_lock);
return r;
@ -2320,7 +2380,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, available_sectors);
/* Only tell snapshots if this is a write */
return do_origin(o->dev, bio);
return do_origin(o->dev, bio, true);
}
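Taken together, the three dm-snapshot changes replace the semaphore-based COW throttle with a plain counter and wait queue, and the snapshot rw_semaphore with a mutex. The ordering is the point of the rework: a writer now waits for in-flight COW jobs to drop below the threshold before taking s->lock, instead of sleeping on a semaphore while holding the lock, and the origin path may drop _origins_lock while sleeping (the unlock_origins argument). A condensed sketch of the caller-side shape from the map path above:

if (bio_data_dir(bio) == WRITE) {
	while (unlikely(!wait_for_in_progress(s, false)))
		;			/* slept; re-check the counter */
}
mutex_lock(&s->lock);			/* throttling already done: no
					 * sleeping on COW slots while
					 * holding the lock */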

View file

@ -2882,7 +2882,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
return (struct pool *)pmd;
}
pool = kmalloc(sizeof(*pool), GFP_KERNEL);
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool) {
*error = "Error allocating memory for pool";
err_p = ERR_PTR(-ENOMEM);

View file

@ -3889,7 +3889,7 @@ out:
* this to-be-skipped slave to send a packet out.
*/
old_arr = rtnl_dereference(bond->slave_arr);
for (idx = 0; idx < old_arr->count; idx++) {
for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
if (skipslave == old_arr->arr[idx]) {
old_arr->arr[idx] =
old_arr->arr[old_arr->count-1];

View file

@ -336,7 +336,7 @@ static void sr_set_multicast(struct net_device *net)
static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
{
struct usbnet *dev = netdev_priv(net);
__le16 res;
__le16 res = 0;
mutex_lock(&dev->phy_mutex);
sr_set_sw_mii(dev);

View file

@ -132,6 +132,10 @@ ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
struct ath6kl_urb_context *urb_context = NULL;
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return NULL;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
if (!list_empty(&pipe->urb_list_head)) {
urb_context =
@ -150,6 +154,10 @@ static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
{
unsigned long flags;
/* bail if this pipe is not initialized */
if (!pipe->ar_usb)
return;
spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
pipe->urb_cnt++;

View file

@ -94,9 +94,20 @@ static void __iomem *ring_options_base(struct tb_ring *ring)
return io;
}
static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
iowrite16(value, ring_desc_base(ring) + offset);
/*
* The other 16-bits in the register is read-only and writes to it
* are ignored by the hardware so we can save one ioread32() by
* filling the read-only bits with zeroes.
*/
iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
/* See ring_iowrite_cons() above for explanation */
iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
@ -148,7 +159,10 @@ static void ring_write_descriptors(struct tb_ring *ring)
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
if (ring->is_tx)
ring_iowrite_prod(ring, ring->head);
else
ring_iowrite_cons(ring, ring->head);
}
}
@ -368,7 +382,7 @@ void ring_stop(struct tb_ring *ring)
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
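The producer and consumer indices share a single 32-bit register at descriptor offset 8, which is what makes the stores above safe. A sketch of the layout the new helpers assume, per the comment in the hunk:

/*	 31          16 15           0
 *	+--------------+--------------+
 *	|   producer   |   consumer   |   (ring descriptor base + 8)
 *	+--------------+--------------+
 *
 * ring_iowrite_cons(): iowrite32(cons, base + 8)
 * ring_iowrite_prod(): iowrite32(prod << 16, base + 8)
 *
 * On each ring the other half is read-only and writes to it are
 * ignored, so a full 32-bit write with zeroes in that half is
 * harmless; it avoids both the problematic 16-bit write and an
 * ioread32()-based read-modify-write.
 */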

View file

@ -332,6 +332,7 @@ struct sc16is7xx_port {
struct kthread_worker kworker;
struct task_struct *kworker_task;
struct kthread_work irq_work;
struct mutex efr_lock;
struct sc16is7xx_one p[0];
};
@ -496,6 +497,21 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
div /= 4;
}
/* In an amazing feat of design, the Enhanced Features Register shares
* the address of the Interrupt Identification Register, and is
* switched in by writing a magic value (0xbf) to the Line Control
* Register. Any interrupt firing during this time will see the EFR
* where it expects the IIR to be, leading to "Unexpected interrupt"
* messages.
*
* Prevent this possibility by claiming a mutex while accessing the
* EFR, and claiming the same mutex from within the interrupt handler.
* This is similar to disabling the interrupt, but that doesn't work
* because the bulk of the interrupt processing is run as a workqueue
* job in thread context.
*/
mutex_lock(&s->efr_lock);
lcr = sc16is7xx_port_read(port, SC16IS7XX_LCR_REG);
/* Open the LCR divisors for configuration */
@ -511,6 +527,8 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud)
/* Put LCR back to the normal mode */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
sc16is7xx_port_update(port, SC16IS7XX_MCR_REG,
SC16IS7XX_MCR_CLKSEL_BIT,
prescaler);
@ -693,6 +711,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
mutex_lock(&s->efr_lock);
while (1) {
bool keep_polling = false;
int i;
@ -702,6 +722,8 @@ static void sc16is7xx_ist(struct kthread_work *ws)
if (!keep_polling)
break;
}
mutex_unlock(&s->efr_lock);
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
@ -888,6 +910,9 @@ static void sc16is7xx_set_termios(struct uart_port *port,
if (!(termios->c_cflag & CREAD))
port->ignore_status_mask |= SC16IS7XX_LSR_BRK_ERROR_MASK;
/* As above, claim the mutex while accessing the EFR. */
mutex_lock(&s->efr_lock);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
SC16IS7XX_LCR_CONF_MODE_B);
@ -909,6 +934,8 @@ static void sc16is7xx_set_termios(struct uart_port *port,
/* Update LCR register */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);
mutex_unlock(&s->efr_lock);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
port->uartclk / 16 / 4 / 0xffff,
@ -1172,6 +1199,7 @@ static int sc16is7xx_probe(struct device *dev,
s->regmap = regmap;
s->devtype = devtype;
dev_set_drvdata(dev, s);
mutex_init(&s->efr_lock);
init_kthread_worker(&s->kworker);
init_kthread_work(&s->irq_work, sc16is7xx_ist);
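A condensed sketch of the locking pattern the new comment describes: every path that banks the EFR in via the LCR magic takes the same mutex the threaded interrupt handler holds, so the IIR can never be sampled while the register window points at the EFR:

/* Configuration paths (sc16is7xx_set_baud(), sc16is7xx_set_termios()): */
mutex_lock(&s->efr_lock);
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG,
		     SC16IS7XX_LCR_CONF_MODE_B);	/* the 0xbf magic */
/* ... program enhanced features / divisors ... */
sc16is7xx_port_write(port, SC16IS7XX_LCR_REG, lcr);	/* restore bank */
mutex_unlock(&s->efr_lock);

/* Threaded interrupt work (sc16is7xx_ist()): */
mutex_lock(&s->efr_lock);
/* ... poll and service interrupts: the IIR is guaranteed visible ... */
mutex_unlock(&s->efr_lock);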

View file

@ -67,6 +67,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
enum mctrl_gpio_idx gidx)
{
if (gpios == NULL)
return NULL;
return gpios->gpio[gidx];
}
EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);

View file

@ -102,6 +102,8 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static int hub_port_disable(struct usb_hub *hub, int port1, int set_state);
static bool hub_port_warm_reset_required(struct usb_hub *hub, int port1,
u16 portstatus);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
{
@ -1092,6 +1094,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
/* Make sure a warm-reset request is handled by port_event */
if (type == HUB_RESUME &&
hub_port_warm_reset_required(hub, port1, portstatus))
set_bit(port1, hub->event_bits);
/*
* Add debounce if USB3 link is in polling/link training state.
* Link will automatically transition to Enabled state after

View file

@ -499,11 +499,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
retval = -EFAULT;
goto unlock_exit;
}
dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
retval = bytes_to_read;
spin_lock_irq(&dev->rbsl);
dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
if (dev->buffer_overflow) {
dev->buffer_overflow = 0;
spin_unlock_irq(&dev->rbsl);
@ -584,7 +584,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
1 << 8, 0,
dev->interrupt_out_buffer,
bytes_to_write,
USB_CTRL_SET_TIMEOUT * HZ);
USB_CTRL_SET_TIMEOUT);
if (retval < 0)
dev_err(&dev->intf->dev,
"Couldn't submit HID_REQ_SET_REPORT %d\n",

View file

@ -910,7 +910,7 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
get_version_reply,
sizeof(*get_version_reply),
1000);
if (result < sizeof(*get_version_reply)) {
if (result != sizeof(*get_version_reply)) {
if (result >= 0)
result = -EIO;
dev_err(idev, "get version request failed: %d\n", result);

View file

@ -604,6 +604,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
command_port = port->serial->port[COMMAND_PORT];
command_info = usb_get_serial_port_data(command_port);
if (command_port->bulk_out_size < datasize + 1)
return -EIO;
mutex_lock(&command_info->mutex);
command_info->command_finished = false;
@ -677,6 +681,7 @@ static void firm_setup_port(struct tty_struct *tty)
struct device *dev = &port->dev;
struct whiteheat_port_settings port_settings;
unsigned int cflag = tty->termios.c_cflag;
speed_t baud;
port_settings.port = port->port_number + 1;
@ -737,11 +742,13 @@ static void firm_setup_port(struct tty_struct *tty)
dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
/* get the baud rate wanted */
port_settings.baud = tty_get_baud_rate(tty);
dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
baud = tty_get_baud_rate(tty);
port_settings.baud = cpu_to_le32(baud);
dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
/* fixme: should set validated settings */
tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
tty_encode_baud_rate(tty, baud, baud);
/* handle any settings that aren't specified in the tty structure */
port_settings.lloop = 0;

View file

@ -91,7 +91,7 @@ struct whiteheat_simple {
struct whiteheat_port_settings {
__u8 port; /* port number (1 to N) */
__u32 baud; /* any value 7 - 460800, firmware calculates
__le32 baud; /* any value 7 - 460800, firmware calculates
best fit; arrives little endian */
__u8 bits; /* 5, 6, 7, or 8 */
__u8 stop; /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */

View file

@ -772,29 +772,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
{
struct uas_dev_info *devinfo =
(struct uas_dev_info *)sdev->host->hostdata;
int maxp;
sdev->hostdata = devinfo;
/*
* We have two requirements here. We must satisfy the requirements
* of the physical HC and the demands of the protocol, as we
* definitely want no additional memory allocation in this path
* ruling out using bounce buffers.
*
* For a transmission on USB to continue we must never send
* a package that is smaller than maxpacket. Hence the length of each
* scatterlist element except the last must be divisible by the
* Bulk maxpacket value.
* If the HC does not ensure that through SG,
* the upper layer must do that. We must assume nothing
* about the capabilities off the HC, so we use the most
* pessimistic requirement.
*/
maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
/*
* The protocol has no requirements on alignment in the strict sense.
* Controllers may or may not have alignment restrictions.

View file

@ -14,14 +14,31 @@
#include <linux/err.h>
#include <linux/fs.h>
static inline bool spacetab(char c) { return c == ' ' || c == '\t'; }
static inline char *next_non_spacetab(char *first, const char *last)
{
for (; first <= last; first++)
if (!spacetab(*first))
return first;
return NULL;
}
static inline char *next_terminator(char *first, const char *last)
{
for (; first <= last; first++)
if (spacetab(*first) || !*first)
return first;
return NULL;
}
static int load_script(struct linux_binprm *bprm)
{
const char *i_arg, *i_name;
char *cp;
char *cp, *buf_end;
struct file *file;
char interp[BINPRM_BUF_SIZE];
int retval;
/* Not ours to exec if we don't start with "#!". */
if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!'))
return -ENOEXEC;
@ -34,18 +51,40 @@ static int load_script(struct linux_binprm *bprm)
if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE)
return -ENOENT;
/*
* This section does the #! interpretation.
* Sorta complicated, but hopefully it will work. -TYT
*/
/* Release since we are not mapping a binary into memory. */
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
bprm->buf[BINPRM_BUF_SIZE - 1] = '\0';
if ((cp = strchr(bprm->buf, '\n')) == NULL)
cp = bprm->buf+BINPRM_BUF_SIZE-1;
/*
* This section handles parsing the #! line into separate
* interpreter path and argument strings. We must be careful
* because bprm->buf is not yet guaranteed to be NUL-terminated
* (though the buffer will have trailing NUL padding when the
* file size was smaller than the buffer size).
*
* We do not want to exec a truncated interpreter path, so either
* we find a newline (which indicates nothing is truncated), or
* we find a space/tab/NUL after the interpreter path (which
* itself may be preceded by spaces/tabs). Truncating the
* arguments is fine: the interpreter can re-read the script to
* parse them on its own.
*/
buf_end = bprm->buf + sizeof(bprm->buf) - 1;
cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n');
if (!cp) {
cp = next_non_spacetab(bprm->buf + 2, buf_end);
if (!cp)
return -ENOEXEC; /* Entire buf is spaces/tabs */
/*
* If there is no later space/tab/NUL we must assume the
* interpreter path is truncated.
*/
if (!next_terminator(cp, buf_end))
return -ENOEXEC;
cp = buf_end;
}
/* NUL-terminate the buffer and any trailing spaces/tabs. */
*cp = '\0';
while (cp > bprm->buf) {
cp--;
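A worked example of the three cases the new comment distinguishes, assuming the usual BINPRM_BUF_SIZE of 128 (contents illustrative):

/* Case 1: newline inside the buffer
 *	"#!/bin/sh -e\n"
 *	-> nothing is truncated; path and arguments both usable.
 *
 * Case 2: no newline, but a space/tab/NUL after the path
 *	"#!/usr/bin/interp <arguments running past 128 bytes...>"
 *	-> the path is complete; only arguments may be truncated,
 *	   which is fine since the interpreter re-reads the script.
 *
 * Case 3: no newline and no terminator after the path
 *	"#!/very/long/path/that/fills/the/whole/buffe"
 *	-> the path itself may be cut short; executing it could run
 *	   the wrong binary, so load_script() now returns -ENOEXEC.
 */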

View file

@ -130,10 +130,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{0, 0}
};
static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
{0, 0}
};
/*
* Convert a string containing text IPv4 or IPv6 address to binary form.
*

View file

@ -1673,6 +1673,19 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;
/* Flush dirty data/metadata before non-truncate SETATTR */
if (is_wb && S_ISREG(inode->i_mode) &&
attr->ia_valid &
(ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
ATTR_TIMES_SET)) {
err = write_inode_now(inode, true);
if (err)
return err;
fuse_set_nowrite(inode);
fuse_release_nowrite(inode);
}
if (is_truncate) {
fuse_set_nowrite(inode);
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);

View file

@ -201,7 +201,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
bool lock_inode = (file->f_flags & O_TRUNC) &&
bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
@ -209,16 +209,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
if (err)
return err;
if (lock_inode)
if (is_wb_truncate) {
mutex_lock(&inode->i_mutex);
fuse_set_nowrite(inode);
}
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
if (lock_inode)
if (is_wb_truncate) {
fuse_release_nowrite(inode);
mutex_unlock(&inode->i_mutex);
}
return err;
}

View file

@ -5255,6 +5255,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
}
status = task->tk_status;
if (setclientid.sc_cred) {
kfree(clp->cl_acceptor);
clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
put_rpccred(setclientid.sc_cred);
}

View file

@ -289,7 +289,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
if (inode_alloc)
mutex_lock(&inode_alloc->i_mutex);
if (o2info_coherent(&fi->ifi_req)) {
if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
status = ocfs2_inode_lock(inode_alloc, &bh, 0);
if (status < 0) {
mlog_errno(status);

View file

@ -1475,18 +1475,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
return loc->xl_ops->xlo_check_space(loc, xi);
}
static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
{
loc->xl_ops->xlo_add_entry(loc, name_hash);
loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
/*
* We can't leave the new entry's xe_name_offset at zero or
* add_namevalue() will go nuts. We set it to the size of our
* storage so that it can never be less than any other entry.
*/
loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
}
static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
struct ocfs2_xattr_info *xi)
{
@ -2118,29 +2106,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
if (rc)
goto out;
if (loc->xl_entry) {
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
orig_value_size = loc->xl_entry->xe_value_size;
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
if (rc)
goto out;
goto alloc_value;
}
if (!loc->xl_entry) {
rc = -EINVAL;
goto out;
}
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
ocfs2_xa_cleanup_value_truncate(loc,
"overwriting",
orig_clusters);
goto out;
}
if (ocfs2_xa_can_reuse_entry(loc, xi)) {
orig_value_size = loc->xl_entry->xe_value_size;
rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
if (rc)
goto out;
goto alloc_value;
}
if (!ocfs2_xattr_is_local(loc->xl_entry)) {
orig_clusters = ocfs2_xa_value_clusters(loc);
rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
if (rc) {
mlog_errno(rc);
ocfs2_xa_cleanup_value_truncate(loc,
"overwriting",
orig_clusters);
goto out;
}
ocfs2_xa_wipe_namevalue(loc);
} else
ocfs2_xa_add_entry(loc, name_hash);
}
ocfs2_xa_wipe_namevalue(loc);
/*
* If we get here, we have a blank entry. Fill it. We grow our

View file

@ -1584,7 +1584,7 @@ xfs_buftarg_isolate(
* zero. If the value is already zero, we need to reclaim the
* buffer, otherwise it gets another trip through the LRU.
*/
if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
spin_unlock(&bp->b_lock);
return LRU_ROTATE;
}
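The inverted test reads correctly once atomic_add_unless()'s return value is spelled out: it returns nonzero when it performed the addition (here, decremented a nonzero b_lru_ref) and zero when the counter already sat at the limit. Sketch of the intended flow:

if (atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
	/* b_lru_ref was > 0 and we decremented it: still referenced,
	 * so rotate the buffer for another trip around the LRU. */
	spin_unlock(&bp->b_lock);
	return LRU_ROTATE;
}
/* b_lru_ref already hit zero: fall through and reclaim the buffer. */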

View file

@ -270,6 +270,16 @@ static inline int usb_ep_enable(struct usb_ep *ep)
if (ep->enabled)
return 0;
/* UDC drivers can't handle endpoints with maxpacket size 0 */
if (usb_endpoint_maxp(ep->desc) == 0) {
/*
* We should log an error message here, but we can't call
* dev_err() because there's no way to find the gadget
* given only ep.
*/
return -EINVAL;
}
ret = ep->ops->enable(ep, ep->desc);
if (ret)
return ret;

View file

@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
/* Access to a connection */
int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);

View file

@ -289,6 +289,11 @@ static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
return q;
}
static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
{
return rcu_dereference_bh(qdisc->dev_queue->qdisc);
}
static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->qdisc_sleeping;

View file

@ -98,6 +98,8 @@ void sctp_addr_wq_mgmt(struct net *, struct sctp_sockaddr_entry *, int);
/*
* sctp/socket.c
*/
int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);

View file

@ -4894,6 +4894,7 @@ waitagain:
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
cpumask_clear(iter->started);
trace_seq_init(&iter->seq);
iter->pos = -1;
trace_event_read_lock();

View file

@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
rc = llc_conn_send_pdu(sk, skb);
skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;
@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
rc = llc_conn_send_pdu(sk, skb);
skb_get(skb);
llc_conn_send_pdu(sk, skb);
llc_conn_ac_inc_vs_by_1(sk, skb);
}
return rc;

View file

@ -30,7 +30,7 @@
#endif
static int llc_find_offset(int state, int ev_type);
static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
static void llc_conn_send_pdus(struct sock *sk);
static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
static int llc_exec_conn_trans_actions(struct sock *sk,
struct llc_conn_state_trans *trans,
@ -193,11 +193,11 @@ out_skb_put:
return rc;
}
int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
{
/* queue PDU to send to MAC layer */
skb_queue_tail(&sk->sk_write_queue, skb);
return llc_conn_send_pdus(sk, skb);
llc_conn_send_pdus(sk);
}
/**
@ -255,7 +255,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
llc_conn_send_pdus(sk, NULL);
llc_conn_send_pdus(sk);
out:;
}
@ -296,7 +296,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
if (howmany_resend > 0)
llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
/* any PDUs to re-send are queued up; start sending to MAC */
llc_conn_send_pdus(sk, NULL);
llc_conn_send_pdus(sk);
out:;
}
@ -340,16 +340,12 @@ out:
/**
* llc_conn_send_pdus - Sends queued PDUs
* @sk: active connection
* @hold_skb: the skb held by caller, or NULL if does not care
*
* Sends queued pdus to MAC layer for transmission. When @hold_skb is
* NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
* successfully, or 1 for failure.
* Sends queued pdus to MAC layer for transmission.
*/
static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
static void llc_conn_send_pdus(struct sock *sk)
{
struct sk_buff *skb;
int ret = 0;
while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@ -361,20 +357,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
if (!skb2)
break;
dev_queue_xmit(skb2);
} else {
bool is_target = skb == hold_skb;
int rc;
if (is_target)
skb_get(skb);
rc = dev_queue_xmit(skb);
if (is_target)
ret = rc;
skb = skb2;
}
dev_queue_xmit(skb);
}
return ret;
}
/**

View file

@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
if (likely(!rc)) {
skb_get(skb);
rc = dev_queue_xmit(skb);
}
return rc;
}
@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
if (likely(!rc)) {
skb_get(skb);
rc = dev_queue_xmit(skb);
}
return rc;
}
@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
if (likely(!rc)) {
skb_get(skb);
rc = dev_queue_xmit(skb);
}
return rc;
}

View file

@ -197,29 +197,22 @@ out:
* After executing actions of the event, upper layer will be indicated
* if needed(on receiving an UI frame). sk can be null for the
* datalink_proto case.
*
* This function always consumes a reference to the skb.
*/
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
/*
* We have to hold the skb, because llc_sap_next_state
* will kfree it in the sending path and we need to
* look at the skb->cb, where we encode llc_sap_state_ev.
*/
skb_get(skb);
ev->ind_cfm_flag = 0;
llc_sap_next_state(sap, skb);
if (ev->ind_cfm_flag == LLC_IND) {
if (skb->sk->sk_state == TCP_LISTEN)
kfree_skb(skb);
else {
llc_save_primitive(skb->sk, skb, ev->prim);
/* queue skb to the user. */
if (sock_queue_rcv_skb(skb->sk, skb))
kfree_skb(skb);
}
if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
llc_save_primitive(skb->sk, skb, ev->prim);
/* queue skb to the user. */
if (sock_queue_rcv_skb(skb->sk, skb) == 0)
return;
}
kfree_skb(skb);
}
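The common thread in the llc fixes: functions that queue or consume an skb (llc_conn_send_pdu(), the SAP action handlers, llc_sap_next_state()) now own one reference, so a caller that still needs the skb afterwards, to bump V(S) or to read the event encoded in skb->cb, must take its own reference first. Condensed sketch of the send-side pattern from the hunks above:

rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
if (likely(!rc)) {
	skb_get(skb);			/* keep a reference across the send */
	llc_conn_send_pdu(sk, skb);	/* consumes one reference */
	llc_conn_ac_inc_vs_by_1(sk, skb);	/* skb still valid here */
}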

View file

@ -464,7 +464,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
* skb will be queued.
*/
if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
struct Qdisc *rootq = qdisc_root(sch);
struct Qdisc *rootq = qdisc_root_bh(sch);
u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
q->duplicate = 0;

View file

@ -970,7 +970,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect,
.connect = sctp_inet_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = sctp_getname,

View file

@ -1012,7 +1012,7 @@ static const struct proto_ops inet_seqpacket_ops = {
.owner = THIS_MODULE,
.release = inet_release, /* Needs to be wrapped... */
.bind = inet_bind,
.connect = inet_dgram_connect,
.connect = sctp_inet_connect,
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname, /* Semantics are different. */

View file

@ -1072,7 +1072,7 @@ out:
*/
static int __sctp_connect(struct sock *sk,
struct sockaddr *kaddrs,
int addrs_size,
int addrs_size, int flags,
sctp_assoc_t *assoc_id)
{
struct net *net = sock_net(sk);
@ -1090,7 +1090,6 @@ static int __sctp_connect(struct sock *sk,
union sctp_addr *sa_addr = NULL;
void *addr_buf;
unsigned short port;
unsigned int f_flags = 0;
sp = sctp_sk(sk);
ep = sp->ep;
@ -1238,13 +1237,7 @@ static int __sctp_connect(struct sock *sk,
sp->pf->to_sk_daddr(sa_addr, sk);
sk->sk_err = 0;
/* in-kernel sockets don't generally have a file allocated to them
* if all they do is call sock_create_kern().
*/
if (sk->sk_socket->file)
f_flags = sk->sk_socket->file->f_flags;
timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if (assoc_id)
*assoc_id = asoc->assoc_id;
@ -1340,7 +1333,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
{
struct sockaddr *kaddrs;
gfp_t gfp = GFP_KERNEL;
int err = 0;
int err = 0, flags = 0;
pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
__func__, sk, addrs, addrs_size);
@ -1360,11 +1353,18 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
return -ENOMEM;
if (__copy_from_user(kaddrs, addrs, addrs_size)) {
err = -EFAULT;
} else {
err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
kfree(kaddrs);
return -EFAULT;
}
/* in-kernel sockets don't generally have a file allocated to them
* if all they do is call sock_create_kern().
*/
if (sk->sk_socket->file)
flags = sk->sk_socket->file->f_flags;
err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
kfree(kaddrs);
return err;
@ -3895,31 +3895,36 @@ out_nounlock:
* len: the size of the address.
*/
static int sctp_connect(struct sock *sk, struct sockaddr *addr,
int addr_len)
int addr_len, int flags)
{
int err = 0;
struct sctp_af *af;
int err = -EINVAL;
lock_sock(sk);
pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
addr, addr_len);
/* Validate addr_len before calling common connect/connectx routine. */
af = sctp_get_af_specific(addr->sa_family);
if (!af || addr_len < af->sockaddr_len) {
err = -EINVAL;
} else {
/* Pass correct addr len to common routine (so it knows there
* is only one address being passed.
*/
err = __sctp_connect(sk, addr, af->sockaddr_len, NULL);
}
if (af && addr_len >= af->sockaddr_len)
err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
release_sock(sk);
return err;
}
int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
if (uaddr->sa_family == AF_UNSPEC)
return -EOPNOTSUPP;
return sctp_connect(sock->sk, uaddr, addr_len, flags);
}
/* FIXME: Write comments. */
static int sctp_disconnect(struct sock *sk, int flags)
{
@ -7428,7 +7433,6 @@ struct proto sctp_prot = {
.name = "SCTP",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
@ -7467,7 +7471,6 @@ struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
.close = sctp_close,
.connect = sctp_connect,
.disconnect = sctp_disconnect,
.accept = sctp_accept,
.ioctl = sctp_ioctl,
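With the connect path reworked, O_NONBLOCK travels as an explicit parameter instead of being fished out of a struct file that in-kernel sockets may not have. The flow, sketched for a hypothetical kernel caller:

err = kernel_connect(sock, (struct sockaddr *)&daddr, sizeof(daddr),
		     O_NONBLOCK);
/*  -> sctp_inet_connect(sock, uaddr, addr_len, flags)
 *  -> sctp_connect(sock->sk, uaddr, addr_len, flags)
 *  -> __sctp_connect(sk, kaddrs, addrs_size, flags, NULL), where
 *	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
 */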

View file

@ -292,7 +292,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_MESH_ID_LEN },
[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_BINARY,
.len = ETH_ALEN },
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
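The mesh-path next-hop attribute carries a MAC address, so the policy entry now describes it as binary data capped at ETH_ALEN bytes rather than a 32-bit integer, letting netlink validation match what the consuming code assumes. Illustrative consumer (not the exact nl80211 code):

u8 next_hop[ETH_ALEN];

/* the handler treats the attribute as six bytes of station address */
nla_memcpy(next_hop, info->attrs[NL80211_ATTR_MPATH_NEXT_HOP], ETH_ALEN);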

View file

@ -72,8 +72,16 @@ scm_version()
printf -- '-svn%s' "`git svn find-rev $head`"
fi
# Check for uncommitted changes
if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
# Check for uncommitted changes.
# First, with git-status, but --no-optional-locks is only
# supported in git >= 2.14, so fall back to git-diff-index if
# it fails. Note that git-diff-index does not refresh the
# index, so it may give misleading results. See
# git-update-index(1), git-diff-index(1), and git-status(1).
if {
git --no-optional-locks status -uno --porcelain 2>/dev/null ||
git diff-index --name-only HEAD
} | grep -qvE '^(.. )?scripts/package'; then
printf '%s' -dirty
fi

View file

@ -253,8 +253,7 @@ end:
return err;
}
static unsigned int
map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
{
unsigned int sec, sections, ch, channels;
unsigned int pcm, midi, location;

View file

@ -340,8 +340,6 @@ static void azx_int_disable(struct hdac_bus *bus)
list_for_each_entry(azx_dev, &bus->stream_list, list)
snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
synchronize_irq(bus->irq);
/* disable SIE for all streams */
snd_hdac_chip_writeb(bus, INTCTL, 0);

View file

@ -1274,9 +1274,9 @@ static int azx_free(struct azx *chip)
}
if (bus->chip_init) {
azx_stop_chip(chip);
azx_clear_irq_pending(chip);
azx_stop_all_streams(chip);
azx_stop_chip(chip);
}
if (bus->irq >= 0)

View file

@ -1,4 +1,5 @@
#include "symbol.h"
#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
@ -702,6 +703,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
}
after->start = map->end;
after->pgoff += map->end - pos->start;
assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
__map_groups__insert(pos->groups, after);
if (verbose >= 2)
map__fprintf(after, fp);