This is the 4.4.57 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAljXlGkACgkQONu9yGCS
 aT6/mw/9G7QpBoLEwnQbw2NVeboOiM0E9iejUkwsZQzlWspREh43qW0x5Nwk9rxl
 y+OAgiYzF6z2hxV6hHNaswEYdIzOBkSjMq2Xbjmjrbj3H8sv5GWT8yD9Cxmaoerx
 oBJ21Pe7tMK5IQnThOLRef8ZVtCLKPlr789ifCzg7iuRUnzCdV2eyrthzgkfmt4y
 rSHjoSGji1RaC9O7/7DmBvQAosfzr/eSopHz0cbLWLS17OfJ+Xa7+6xb42uzENq6
 3mZUCyT0kg8Abz3e9E2wAmKyODkGnX7fPl97Mop5vwflrZTajWMqeCTi75SMIOgj
 TONSTi5NIASjS9AKB/UTphXrGEmQV/tU+GaUB3eYqsJQygFQQgllL2S+nLaSQ2u4
 LguWDltAfz0mY3/zv5bmf3C7LmpkBxJceaEAMYhsLmJsENsbPO1rRt3plSu9dNGv
 f1g3p4xktE2BZMbsKbMZ78CsCe5gYitx/nEzCqpQsqNasw/C99N/I24nAF7g5OOa
 Kwo9mY+hjamiqPdiII5rYiPnta/358xITLoLzemLbgjtfuLC5NGO3SppUZvW5DXW
 bmn1MwChSqdNRGLeOpdlQ7lrE4DFUtIzA78WHdj7jsJgUpJGFKyZSbhAhXPX3ryV
 Jqcngw/eSRtrkU6P7ZpZzFVUun98eLpIfbKgR/UMROjZIGmCrlA=
 =sriX
 -----END PGP SIGNATURE-----

Merge 4.4.57 into android-4.4

Changes in 4.4.57:
	usb: core: hub: hub_port_init lock controller instead of bus
	USB: don't free bandwidth_mutex too early
	crypto: ghash-clmulni - Fix load failure
	crypto: cryptd - Assign statesize properly
	crypto: mcryptd - Fix load failure
	cxlflash: Increase cmd_per_lun for better throughput
	ACPI / video: skip evaluating _DOD when it does not exist
	pinctrl: cherryview: Do not mask all interrupts in probe
	Drivers: hv: balloon: don't crash when memory is added in non-sorted order
	Drivers: hv: avoid vfree() on crash
	xen/qspinlock: Don't kick CPU if IRQ is not initialized
	KVM: PPC: Book3S PR: Fix illegal opcode emulation
	s390/pci: fix use after free in dma_init
	drm/amdgpu: add missing irq.h include
	tpm_tis: Use devm_free_irq not free_irq
	hv_netvsc: use skb_get_hash() instead of a homegrown implementation
	kernel/fork.c: allocate idle task for a CPU always on its local node
	give up on gcc ilog2() constant optimizations
	perf/core: Fix event inheritance on fork()
	cpufreq: Fix and clean up show_cpuinfo_cur_freq()
	powerpc/boot: Fix zImage TOC alignment
	md/raid1/10: fix potential deadlock
	target/pscsi: Fix TYPE_TAPE + TYPE_MEDIUM_CHANGER export
	scsi: lpfc: Add shutdown method for kexec
	scsi: libiscsi: add lock around task lists to fix list corruption regression
	target: Fix VERIFY_16 handling in sbc_parse_cdb
	isdn/gigaset: fix NULL-deref at probe
	gfs2: Avoid alignment hole in struct lm_lockname
	percpu: acquire pcpu_lock when updating pcpu_nr_empty_pop_pages
	ext4: fix fencepost in s_first_meta_bg validation
	Linux 4.4.57

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 373a68ca93 (Greg Kroah-Hartman, 2017-03-29 13:53:50 +02:00)
38 changed files with 192 additions and 179 deletions

Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 56
SUBLEVEL = 57
EXTRAVERSION =
NAME = Blurry Fish Butt

arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
. = ALIGN(256);
.got :
{
__toc_start = .;

arch/powerpc/kvm/emulate.c
@@ -302,7 +302,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
kvmppc_core_queue_program(vcpu, 0);
}
}

arch/s390/pci/pci_dma.c
@@ -455,7 +455,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
goto out_clean;
goto out;
}
/*
@@ -475,18 +475,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto out_reg;
goto free_dma_table;
}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
goto out_reg;
return 0;
goto free_bitmap;
out_reg:
return 0;
free_bitmap:
vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
free_dma_table:
dma_free_cpu_table(zdev->dma_table);
out_clean:
zdev->dma_table = NULL;
out:
return rc;
}
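
The relabeled error path above is the kernel's standard goto-unwind idiom: each label releases exactly what was acquired before the jump, in reverse order, so no path can free an object that was never allocated. A minimal sketch of the pattern, with hypothetical names rather than the s390 code:

```c
#include <stdlib.h>

/* Stand-in for a registration step that can fail (assumed helper). */
static int register_hw(void *table, void *bitmap)
{
	(void)table; (void)bitmap;
	return 0;
}

/* Goto-unwind: every failure jumps to the label that frees only what
 * was already allocated, in reverse order of acquisition. */
static int init_device(void)
{
	int rc = -1;
	void *table, *bitmap;

	table = malloc(64);
	if (!table)
		goto out;		/* nothing to undo yet */

	bitmap = malloc(32);
	if (!bitmap)
		goto free_table;	/* undo the first allocation only */

	rc = register_hw(table, bitmap);
	if (rc)
		goto free_bitmap;	/* undo both allocations */

	return 0;

free_bitmap:
	free(bitmap);
free_table:
	free(table);
out:
	return rc;
}
```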

arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -219,6 +219,29 @@ static int ghash_async_final(struct ahash_request *req)
}
}
static int ghash_async_import(struct ahash_request *req, const void *in)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
ghash_async_init(req);
memcpy(dctx, in, sizeof(*dctx));
return 0;
}
static int ghash_async_export(struct ahash_request *req, void *out)
{
struct ahash_request *cryptd_req = ahash_request_ctx(req);
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memcpy(out, dctx, sizeof(*dctx));
return 0;
}
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -288,8 +311,11 @@ static struct ahash_alg ghash_async_alg = {
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
.export = ghash_async_export,
.import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
.statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",

arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
int irq = per_cpu(lock_kicker_irq, cpu);
/* Don't kick if the target's kicker interrupt is not initialized. */
if (irq == -1)
return;
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}

crypto/cryptd.c
@@ -642,6 +642,7 @@ static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;

crypto/mcryptd.c
@@ -531,6 +531,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;

drivers/acpi/acpi_video.c
@@ -1211,6 +1211,9 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
union acpi_object *dod = NULL;
union acpi_object *obj;
if (!video->cap._DOD)
return AE_NOT_EXIST;
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));

drivers/char/tpm/tpm_tis.c
@@ -401,7 +401,7 @@ static void disable_interrupts(struct tpm_chip *chip)
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
free_irq(chip->vendor.irq, chip);
devm_free_irq(chip->pdev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}

drivers/cpufreq/cpufreq.c
@@ -688,9 +688,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
if (!cur_freq)
return sprintf(buf, "<unknown>");
return sprintf(buf, "%u\n", cur_freq);
if (cur_freq)
return sprintf(buf, "%u\n", cur_freq);
return sprintf(buf, "<unknown>\n");
}
/**

drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>

drivers/hv/hv.c
@@ -274,7 +274,7 @@ cleanup:
*
* This routine is called normally during driver unloading or exiting.
*/
void hv_cleanup(void)
void hv_cleanup(bool crash)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -284,7 +284,8 @@ void hv_cleanup(void)
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
vfree(hv_context.hypercall_page);
if (!crash)
vfree(hv_context.hypercall_page);
hv_context.hypercall_page = NULL;
}
@@ -304,7 +305,8 @@ void hv_cleanup(void)
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
vfree(hv_context.tsc_page);
if (!crash)
vfree(hv_context.tsc_page);
hv_context.tsc_page = NULL;
}
#endif

drivers/hv/hv_balloon.c
@@ -714,7 +714,7 @@ static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
if ((start_pfn >= has->end_pfn))
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
/*
* If the current hot add-request extends beyond
@@ -768,7 +768,7 @@ static unsigned long handle_pg_range(unsigned long pg_start,
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
if ((start_pfn >= has->end_pfn))
if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;

drivers/hv/hyperv_vmbus.h
@@ -581,7 +581,7 @@ struct hv_ring_buffer_debug_info {
extern int hv_init(void);
extern void hv_cleanup(void);
extern void hv_cleanup(bool crash);
extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,

drivers/hv/vmbus_drv.c
@@ -889,7 +889,7 @@ err_alloc:
bus_unregister(&hv_bus);
err_cleanup:
hv_cleanup();
hv_cleanup(false);
return ret;
}
@@ -1254,7 +1254,7 @@ static void hv_kexec_handler(void)
vmbus_initiate_unload();
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
hv_cleanup();
hv_cleanup(false);
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -1266,7 +1266,7 @@ static void hv_crash_handler(struct pt_regs *regs)
* for kdump.
*/
hv_synic_cleanup(NULL);
hv_cleanup();
hv_cleanup(true);
};
static int __init hv_acpi_init(void)
@@ -1330,7 +1330,7 @@ static void __exit vmbus_exit(void)
&hyperv_panic_block);
}
bus_unregister(&hv_bus);
hv_cleanup();
hv_cleanup(false);
for_each_online_cpu(cpu) {
tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);

drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
if (hostif->desc.bNumEndpoints < 1)
return -ENODEV;
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),

drivers/md/raid10.c
@@ -1477,7 +1477,25 @@ static void make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
/*
* If a bio is split, the first part of the bio will pass
* barrier but the bio is queued in current->bio_list (see
* generic_make_request). If there is a raise_barrier() called
* here, the second part of bio can't pass barrier. But since
* the first part of the bio isn't dispatched to underlying disks
* yet, the barrier is never released, hence raise_barrier will
* always wait. We have a deadlock.
* Note, this only happens in read path. For write path, the
* first part of bio is dispatched in a schedule() call
* (because of blk plug) or offloaded to raid10d.
* Quitting from the function immediately can change the bio
* order queued in bio_list and avoid the deadlock.
*/
__make_request(mddev, split);
if (split != bio && bio_data_dir(bio) == READ) {
generic_make_request(bio);
break;
}
} while (split != bio);
/* In case raid10d snuck in to freeze_array */

drivers/net/hyperv/netvsc_drv.c
@@ -197,65 +197,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
union sub_key {
u64 k;
struct {
u8 pad[3];
u8 kb;
u32 ka;
};
};
/* Toeplitz hash function
* data: network byte order
* return: host byte order
*/
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
union sub_key subk;
int k_next = 4;
u8 dt;
int i, j;
u32 ret = 0;
subk.k = 0;
subk.ka = ntohl(*(u32 *)key);
for (i = 0; i < dlen; i++) {
subk.kb = key[k_next];
k_next = (k_next + 1) % klen;
dt = ((u8 *)data)[i];
for (j = 0; j < 8; j++) {
if (dt & 0x80)
ret ^= subk.ka;
dt <<= 1;
subk.k <<= 1;
}
}
return ret;
}
static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
struct flow_keys flow;
int data_len;
if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
!(flow.basic.n_proto == htons(ETH_P_IP) ||
flow.basic.n_proto == htons(ETH_P_IPV6)))
return false;
if (flow.basic.ip_proto == IPPROTO_TCP)
data_len = 12;
else
data_len = 8;
*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
return true;
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -268,11 +209,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0;
if (netvsc_set_hash(&hash, skb)) {
q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
ndev->real_num_tx_queues;
skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
}
hash = skb_get_hash(skb);
q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
ndev->real_num_tx_queues;
return q_idx;
}
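
The comp_hash() routine deleted above was a hand-rolled Toeplitz hash over the dissected flow; skb_get_hash() replaces it with the flow hash the network stack already computes and caches on the skb. For reference, a standalone sketch of the Toeplitz computation the removed code performed (simplified byte handling, assumes klen > 4):

```c
#include <stdint.h>
#include <stddef.h>

/* Toeplitz hash: for every set bit of the input, XOR in the 32-bit
 * window of the key that starts at that bit position. */
static uint32_t toeplitz_hash(const uint8_t *key, size_t klen,
			      const uint8_t *data, size_t dlen)
{
	uint32_t ret = 0;
	/* 32-bit key window, big-endian, starting at key bit 0 */
	uint32_t window = ((uint32_t)key[0] << 24) | ((uint32_t)key[1] << 16) |
			  ((uint32_t)key[2] << 8)  |  (uint32_t)key[3];
	size_t next = 4;		/* next key byte to feed the window */
	uint8_t kb = key[next % klen];
	int bits = 0;

	for (size_t i = 0; i < dlen; i++) {
		uint8_t dt = data[i];
		for (int j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= window;
			dt <<= 1;
			/* slide the key window left by one bit */
			window = (window << 1) | ((kb & 0x80) ? 1 : 0);
			kb <<= 1;
			if (++bits == 8) {
				bits = 0;
				next = (next + 1) % klen;
				kb = key[next];
			}
		}
	}
	return ret;
}
```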

drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1466,12 +1466,11 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq)
offset += range->npins;
}
/* Mask and clear all interrupts */
chv_writel(0, pctrl->regs + CHV_INTMASK);
/* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
handle_simple_irq, IRQ_TYPE_NONE);
handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
goto fail;

drivers/scsi/cxlflash/common.h
@@ -34,7 +34,6 @@ extern const struct file_operations cxlflash_cxl_fops;
sectors
*/
#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
@@ -48,9 +47,12 @@ extern const struct file_operations cxlflash_cxl_fops;
index derivation
*/
#define CXLFLASH_MAX_CMDS 16
#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
/* RRQ for master issued cmds */
#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
static inline void check_sizes(void)
{
@@ -149,7 +151,7 @@ struct afu_cmd {
struct afu {
/* Stuff requiring alignment go first. */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/*
* Command & data for AFU commands.
*/

drivers/scsi/cxlflash/main.c
@@ -2305,7 +2305,7 @@ static struct scsi_host_template driver_template = {
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
.cmd_per_lun = 16,
.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
.this_id = -1,
.sg_tablesize = SG_NONE, /* No scatter gather support */

drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
if (!list_empty(&task->running))
spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&task->running)) {
pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
}
spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1535,19 +1547,24 @@ check_mgmt:
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to avoid starvation
*/
spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1577,12 +1595,15 @@ check_mgmt:
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2900,6 +2923,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
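
The regression this addresses came from the old single session lock being split into frwd_lock and back_lock without giving the mgmtqueue/cmdqueue/requeue lists a lock of their own; the new taskqueuelock restores the rule that every list_add/list_del on those lists, and every emptiness check, happens under one lock. The invariant in miniature (a generic sketch with a pthread mutex standing in for spinlock_t, not the iSCSI structures):

```c
#include <pthread.h>

/* Minimal doubly linked list guarded by one lock, mirroring the rule
 * the patch enforces: never touch a queued node without taskqueuelock. */
struct node { struct node *prev, *next; };

struct queue {
	pthread_mutex_t lock;	/* stand-in for spinlock_t taskqueuelock */
	struct node head;	/* circular list head */
};

static void queue_init(struct queue *q)
{
	pthread_mutex_init(&q->lock, NULL);
	q->head.prev = q->head.next = &q->head;
}

static void queue_add_tail(struct queue *q, struct node *n)
{
	pthread_mutex_lock(&q->lock);
	n->prev = q->head.prev;
	n->next = &q->head;
	q->head.prev->next = n;
	q->head.prev = n;
	pthread_mutex_unlock(&q->lock);
}

/* Like list_del_init(): unlink and point the node at itself, so a
 * later "is it queued?" test is a cheap self-reference check. */
static void queue_del_init(struct queue *q, struct node *n)
{
	pthread_mutex_lock(&q->lock);
	n->prev->next = n->next;
	n->next->prev = n->prev;
	n->prev = n->next = n;
	pthread_mutex_unlock(&q->lock);
}
```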

drivers/scsi/lpfc/lpfc_init.c
@@ -11387,6 +11387,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
.shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,

drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
return;
goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
out_free:
kfree(buf);
}
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
dev->dev_attrib.hw_block_size = sd->sector_size;
dev->dev_attrib.hw_block_size =
min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
if (sd->type == TYPE_TAPE)
if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
dev->dev_attrib.hw_block_size = sd->sector_size;
}
return 0;
}
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock held.
*/
static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static int pscsi_create_type_other(struct se_device *dev,
struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
int ret;
spin_unlock_irq(sh->host_lock);
ret = pscsi_add_device_to_list(dev, sd);
if (ret)
return ret;
pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return 0;
}
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
case TYPE_ROM:
ret = pscsi_create_type_rom(dev, sd);
break;
default:
ret = pscsi_create_type_other(dev, sd);
ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
scsi_device_put(sd);
scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1088,7 +1066,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
dump_stack();
return 0;
}

drivers/target/target_core_sbc.c
@@ -1096,9 +1096,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
case VERIFY_16:
size = 0;
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
if (cdb[0] == VERIFY) {
sectors = transport_get_sectors_10(cdb);
cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb);
cmd->t_task_lba = transport_lba_64(cdb);
}
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
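
The split above exists because VERIFY uses a 10-byte CDB (32-bit LBA, 16-bit transfer length) while VERIFY_16 uses a 16-byte CDB (64-bit LBA, 32-bit length), so the fields live at different offsets. A hedged sketch of the extraction per the standard SBC layouts (helper names are illustrative, not the target code's):

```c
#include <stdint.h>

/* Big-endian field helpers (illustrative equivalents of the kernel's
 * get_unaligned_be32/be64). */
static uint32_t be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint64_t be64(const uint8_t *p)
{
	return ((uint64_t)be32(p) << 32) | be32(p + 4);
}

/* VERIFY(10): LBA in bytes 2-5, transfer length in bytes 7-8.
 * VERIFY(16): LBA in bytes 2-9, transfer length in bytes 10-13. */
static void parse_verify(const uint8_t *cdb,
			 uint64_t *lba, uint32_t *sectors)
{
	if (cdb[0] == 0x2f) {			/* VERIFY(10) */
		*lba = be32(&cdb[2]);
		*sectors = ((uint32_t)cdb[7] << 8) | cdb[8];
	} else {				/* VERIFY(16), opcode 0x8f */
		*lba = be64(&cdb[2]);
		*sectors = be32(&cdb[10]);
	}
}
```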

drivers/usb/core/hcd.c
@@ -966,7 +966,7 @@ static void usb_bus_init (struct usb_bus *bus)
bus->bandwidth_allocated = 0;
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
mutex_init(&bus->usb_address0_mutex);
mutex_init(&bus->devnum_next_mutex);
INIT_LIST_HEAD (&bus->bus_list);
}
@@ -2497,6 +2497,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
return NULL;
}
if (primary_hcd == NULL) {
hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
GFP_KERNEL);
if (!hcd->address0_mutex) {
kfree(hcd);
dev_dbg(dev, "hcd address0 mutex alloc failed\n");
return NULL;
}
mutex_init(hcd->address0_mutex);
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
@@ -2508,6 +2516,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
dev_set_drvdata(dev, hcd);
} else {
mutex_lock(&usb_port_peer_mutex);
hcd->address0_mutex = primary_hcd->address0_mutex;
hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
hcd->primary_hcd = primary_hcd;
primary_hcd->primary_hcd = primary_hcd;
@@ -2564,24 +2573,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
* Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
* deallocated.
*
* Make sure to only deallocate the bandwidth_mutex when the primary HCD is
* freed. When hcd_release() is called for either hcd in a peer set
* invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
* block new peering attempts
* Make sure to deallocate the bandwidth_mutex only when the last HCD is
* freed. When hcd_release() is called for either hcd in a peer set,
* invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
*/
static void hcd_release(struct kref *kref)
{
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
if (usb_hcd_is_primary_hcd(hcd))
kfree(hcd->bandwidth_mutex);
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
peer->shared_hcd = NULL;
if (peer->primary_hcd == hcd)
peer->primary_hcd = NULL;
peer->primary_hcd = NULL;
} else {
kfree(hcd->address0_mutex);
kfree(hcd->bandwidth_mutex);
}
mutex_unlock(&usb_port_peer_mutex);
kfree(hcd);

drivers/usb/core/hub.c
@@ -1980,7 +1980,7 @@ static void choose_devnum(struct usb_device *udev)
struct usb_bus *bus = udev->bus;
/* be safe when more hub events are proceed in parallel */
mutex_lock(&bus->usb_address0_mutex);
mutex_lock(&bus->devnum_next_mutex);
if (udev->wusb) {
devnum = udev->portnum + 1;
BUG_ON(test_bit(devnum, bus->devmap.devicemap));
@@ -1998,7 +1998,7 @@ static void choose_devnum(struct usb_device *udev)
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
mutex_unlock(&bus->usb_address0_mutex);
mutex_unlock(&bus->devnum_next_mutex);
}
static void release_devnum(struct usb_device *udev)
@@ -4262,7 +4262,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
mutex_lock(&hdev->bus->usb_address0_mutex);
mutex_lock(hcd->address0_mutex);
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
@@ -4548,7 +4548,7 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
mutex_unlock(&hdev->bus->usb_address0_mutex);
mutex_unlock(hcd->address0_mutex);
return retval;
}
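
The theme of these USB hunks is that address0 serialization belongs to the controller rather than the bus: a primary HCD and its shared peer must take the same mutex, and that mutex has to outlive whichever of the pair is torn down first (the companion hcd.c change likewise stops bandwidth_mutex from being freed while a peer still points at it). A reduced sketch of that lifetime rule, with pthreads standing in for the kernel mutex and illustrative names:

```c
#include <pthread.h>
#include <stdlib.h>

/* The primary controller allocates the lock; a peer (e.g. the shared
 * USB 3.0 HCD) borrows the pointer; only the last one frees it. */
struct hcd {
	pthread_mutex_t *address0_mutex;	/* shared across the pair */
	struct hcd *peer;			/* counterpart, if any */
};

static struct hcd *hcd_create(struct hcd *primary)
{
	struct hcd *h = calloc(1, sizeof(*h));
	if (!h)
		return NULL;
	if (!primary) {
		h->address0_mutex = malloc(sizeof(*h->address0_mutex));
		if (!h->address0_mutex) {
			free(h);
			return NULL;
		}
		pthread_mutex_init(h->address0_mutex, NULL);
	} else {
		h->address0_mutex = primary->address0_mutex;	/* borrow */
		h->peer = primary;
		primary->peer = h;
	}
	return h;
}

static void hcd_release(struct hcd *h)
{
	if (h->peer) {
		h->peer->peer = NULL;	/* detach; peer still uses the lock */
	} else {
		pthread_mutex_destroy(h->address0_mutex);
		free(h->address0_mutex);	/* last one out frees it */
	}
	free(h);
}
```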

fs/ext4/super.c
@@ -3666,7 +3666,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
ext4_msg(sb, KERN_WARNING,
"first meta block group too large: %u "
"(group descriptor block count %u)",

fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
struct gfs2_sbd *ln_sbd;
u64 ln_number;
unsigned int ln_type;
};
} __packed __aligned(sizeof(int));
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
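
The __packed __aligned(sizeof(int)) annotation matters because lm_lockname is used as a raw-memory hash key; a compiler-inserted padding hole would contain indeterminate bytes, making logically identical names hash and compare as different. A small illustration of the hazard with a hypothetical two-field key:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct key_padded {
	uint64_t number;
	unsigned int type;
	/* 4 bytes of invisible tail padding on most 64-bit ABIs */
};

struct key_packed {
	uint64_t number;
	unsigned int type;
} __attribute__((packed, aligned(sizeof(int))));

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct key_padded), sizeof(struct key_packed));

	struct key_packed a, b;
	memset(&a, 0xAA, sizeof(a));	/* simulate dirty stack memory */
	memset(&b, 0x55, sizeof(b));
	a.number = b.number = 42;
	a.type = b.type = 1;
	/* With no padding, every byte is covered by a field, so a
	 * byte-wise comparison of equal keys is now reliable. */
	printf("packed equal: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}
```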

include/linux/log2.h
@@ -15,12 +15,6 @@
#include <linux/types.h>
#include <linux/bitops.h>
/*
* deal with unrepresentable constant logarithms
*/
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 1 ? ____ilog2_NaN() : \
(n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
(n) & (1ULL << 1) ? 1 : \
(n) & (1ULL << 0) ? 0 : \
____ilog2_NaN() \
) : \
1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
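
For context: ____ilog2_NaN() was deliberately left undefined so that a constant ilog2(0) failed at link time, but newer gcc releases stopped folding the conditional chain away and the phantom symbol leaked into real builds. The replacement simply folds any constant below 2 to 0. A runtime sketch of the resulting behavior (the kernel macro itself remains a compile-time constant fold):

```c
#include <stdio.h>

/* Runtime equivalent of the constant-folded branch: index of the
 * highest set bit, with the new "anything below 2 gives 0" rule. */
static unsigned int ilog2_sketch(unsigned long long n)
{
	unsigned int log = 0;

	if (n < 2)
		return 0;	/* was a link-time error via ____ilog2_NaN() */
	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	printf("%u %u %u\n", ilog2_sketch(1),	/* 0 */
	       ilog2_sketch(64),		/* 6 */
	       ilog2_sketch(100));		/* 6 */
	return 0;
}
```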

include/linux/usb.h
@@ -371,14 +371,13 @@ struct usb_bus {
int devnum_next; /* Next open device number in
* round-robin allocation */
struct mutex devnum_next_mutex; /* devnum_next mutex */
struct usb_devmap devmap; /* device address allocation map */
struct usb_device *root_hub; /* Root hub */
struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
struct list_head bus_list; /* list of busses */
struct mutex usb_address0_mutex; /* unaddressed device mutex */
int bandwidth_allocated; /* on this bus: how much of the time
* reserved for periodic (intr/iso)
* requests is used, on average?

include/linux/usb/hcd.h
@@ -180,6 +180,7 @@ struct usb_hcd {
* bandwidth_mutex should be dropped after a successful control message
* to the device, or resetting the bandwidth after a failed attempt.
*/
struct mutex *address0_mutex;
struct mutex *bandwidth_mutex;
struct usb_hcd *shared_hcd;
struct usb_hcd *primary_hcd;

include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
struct iscsi_task *task; /* xmit task in progress */
/* xmit */
spinlock_t taskqueuelock; /* protects the next three lists */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */

kernel/events/core.c
@@ -9238,7 +9238,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
goto out_unlock;
}
/*
@@ -9254,7 +9254,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
break;
goto out_unlock;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -9282,6 +9282,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
out_unlock:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);

kernel/fork.c
@@ -331,13 +331,14 @@ void set_task_stack_end_magic(struct task_struct *tsk)
*stackend = STACK_END_MAGIC; /* for overflow detection */
}
static struct task_struct *dup_task_struct(struct task_struct *orig)
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
struct thread_info *ti;
int node = tsk_fork_get_node(orig);
int err;
if (node == NUMA_NO_NODE)
node = tsk_fork_get_node(orig);
tsk = alloc_task_struct_node(node);
if (!tsk)
return NULL;
@@ -1271,7 +1272,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
int __user *child_tidptr,
struct pid *pid,
int trace,
unsigned long tls)
unsigned long tls,
int node)
{
int retval;
struct task_struct *p;
@@ -1324,7 +1326,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto fork_out;
retval = -ENOMEM;
p = dup_task_struct(current);
p = dup_task_struct(current, node);
if (!p)
goto fork_out;
@@ -1700,7 +1702,8 @@ static inline void init_idle_pids(struct pid_link *links)
struct task_struct *fork_idle(int cpu)
{
struct task_struct *task;
task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
cpu_to_node(cpu));
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
init_idle(task, cpu);
@@ -1745,7 +1748,7 @@ long _do_fork(unsigned long clone_flags,
}
p = copy_process(clone_flags, stack_start, stack_size,
child_tidptr, NULL, trace, tls);
child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
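
The new parameter lets fork_idle() pin the idle task's task_struct and stack to the target CPU's own memory node, while ordinary fork keeps the old behavior by passing NUMA_NO_NODE and falling back to tsk_fork_get_node(). The decision, reduced to a sketch with stub helpers of assumed semantics:

```c
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Illustrative stand-ins for the kernel helpers. */
static int tsk_fork_get_node_stub(void) { return 0; }	  /* policy default */
static int cpu_to_node_stub(int cpu)	{ return cpu / 4; } /* 4 CPUs/node */

/* NUMA_NO_NODE keeps the old policy-based placement; a concrete node
 * pins the allocation to that CPU's local memory. */
static int pick_node(int node)
{
	if (node == NUMA_NO_NODE)
		node = tsk_fork_get_node_stub();
	return node;
}

int main(void)
{
	printf("fork():       node %d\n", pick_node(NUMA_NO_NODE));
	printf("fork_idle(6): node %d\n", pick_node(cpu_to_node_stub(6)));
	return 0;
}
```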

mm/percpu.c
@@ -1012,8 +1012,11 @@ area_found:
mutex_unlock(&pcpu_alloc_mutex);
}
if (chunk != pcpu_reserved_chunk)
if (chunk != pcpu_reserved_chunk) {
spin_lock_irqsave(&pcpu_lock, flags);
pcpu_nr_empty_pop_pages -= occ_pages;
spin_unlock_irqrestore(&pcpu_lock, flags);
}
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
pcpu_schedule_balance_work();

tools/include/linux/log2.h
@@ -12,12 +12,6 @@
#ifndef _TOOLS_LINUX_LOG2_H
#define _TOOLS_LINUX_LOG2_H
/*
* deal with unrepresentable constant logarithms
*/
extern __attribute__((const, noreturn))
int ____ilog2_NaN(void);
/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
(n) < 1 ? ____ilog2_NaN() : \
(n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
(n) & (1ULL << 1) ? 1 : \
(n) & (1ULL << 0) ? 0 : \
____ilog2_NaN() \
) : \
1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \