This is the 4.4.39 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlhSyaQACgkQONu9yGCS
aT6Z1hAAyc8RAfmRq68Ygr0GECIpxB7dWaIDIJBp+vp0JwuG+QqC+7ivOcWBfn1i
R8Cy4ka/xEki0s9ZFWD7324X3pHb9nV+Ba6A6O67LiDZ4Yi8dnk/ihHsnxhvHmq3
4IgGpWeL352zeLIipIW7oWcLIwqa+iah3NkjAbxTFPpvfU4TxV+itk4qPO5OgLq9
WBKrvyzy+DWgU8miWaVa1V32EBDZnLu+pncZF38+BsUMiCgJF0cidGYfVDmErnvY
p917FEngW4UEm5Ku99N17Z9sjJzuEM8VA0CjJY6s0tI84rAho//mmQfhGvyT3Q7m
fE4h03Itd2LQSqGcgMN4QK2pqaV2/cbcZFHCDylDWM83gpFg29uYqzrBP7SsGoet
cNCgkA8lfGoekF4QLgIF4vimlgAVetdb8RM+Ycs3NK3WF/guGg0w91dDSxGsy+WU
LYiGSP8K5jtW++Y+Xznjm2k2lBBMWxD+qjK3PA26IszwLdxE6pSWGvkkijVNG7Y1
2IFgfUJdpH36yw5lKNCCBCgfXsk8xqDERbTq2p12NitiPmNkhspjECF7/QUrGw5n
RmC8GYrbVGtkVvQvYFFbZFsUpPCazCmCbb6gbQ+NtECS2m6/KdpbppkR8seagCbU
9hC97c8cDfZDEi2F8vGnFQAe96wMrMbn+lMLwsEfyFKPJQJuKwE=
=8ECX
-----END PGP SIGNATURE-----

Merge tag 'v4.4.39' into android-4.4.y

This is the 4.4.39 stable release

Change-Id: I36dd900bb57846dbbcd7b274774f1debef0f1f18
commit 9b133b6207
19 changed files with 127 additions and 59 deletions
Makefile (2 lines changed)

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 38
+SUBLEVEL = 39
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs)
  */
 #define	HZSCALE		(268435456 / (1000000 / HZ))
 
-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000));
+#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000))
 
 #endif /* defined(_M68K_DELAY_H) */
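The only functional change in the hunk above is dropping the stray semicolon from the ndelay() macro body. A minimal standalone sketch of why that matters (the __delay() stub and the simplified math below are illustrative, not the kernel's): with the semicolon, the expansion plus the caller's own ';' becomes two statements, so wrapping the macro in an if/else no longer compiles.

#include <stdio.h>

static void __delay(unsigned long loops) { (void)loops; }

/* Broken variant: the trailing ';' makes "ndelay(100);" expand into two
 * statements, so "if (...) ndelay(100); else ..." fails to build with
 * "'else' without a previous 'if'". */
/* #define ndelay(n) __delay((n) / 1000); */

/* Fixed variant, mirroring the patch: no trailing semicolon. */
#define ndelay(n) __delay((n) / 1000)

int main(void)
{
	int fast = 1;

	if (fast)
		ndelay(100);	/* the caller's ';' terminates the statement */
	else
		puts("no delay needed");

	return 0;
}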
@@ -65,9 +65,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 		unsigned long flags;				\
 		spin_lock_irqsave(&pa_tlb_lock, flags);		\
 		old_pte = *ptep;				\
-		set_pte(ptep, pteval);				\
 		if (pte_inserted(old_pte))			\
 			purge_tlb_entries(mm, addr);		\
+		set_pte(ptep, pteval);				\
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
 	} while (0)
 
@@ -478,8 +478,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 		spin_unlock_irqrestore(&pa_tlb_lock, flags);
 		return 0;
 	}
-	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
+	set_pte(ptep, pte_mkold(pte));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 	return 1;
 }
@@ -492,9 +492,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
 	spin_lock_irqsave(&pa_tlb_lock, flags);
 	old_pte = *ptep;
-	set_pte(ptep, __pte(0));
 	if (pte_inserted(old_pte))
 		purge_tlb_entries(mm, addr);
+	set_pte(ptep, __pte(0));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
 	return old_pte;
@@ -504,8 +504,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
 	unsigned long flags;
 	spin_lock_irqsave(&pa_tlb_lock, flags);
-	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
+	set_pte(ptep, pte_wrprotect(*ptep));
 	spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 
@@ -375,6 +375,15 @@ void __init parisc_setup_cache_timing(void)
 
 	/* calculate TLB flush threshold */
 
+	/* On SMP machines, skip the TLB measure of kernel text which
+	 * has been mapped as huge pages. */
+	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
+		threshold = max(cache_info.it_size, cache_info.dt_size);
+		threshold *= PAGE_SIZE;
+		threshold /= num_online_cpus();
+		goto set_tlb_threshold;
+	}
+
 	alltime = mfctl(16);
 	flush_tlb_all();
 	alltime = mfctl(16) - alltime;
@@ -393,6 +402,8 @@ void __init parisc_setup_cache_timing(void)
 		alltime, size, rangetime);
 
 	threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime);
+
+set_tlb_threshold:
 	if (threshold)
 		parisc_tlb_flush_threshold = threshold;
 	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
@@ -886,19 +886,10 @@ ENTRY(flush_dcache_page_asm)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
 	fdc,m		r31(%r28)
-	cmpb,COND(<<)		%r28, %r25,1b
+	cmpb,COND(<<)	%r28, %r25,1b
 	fdc,m		r31(%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		%r0(%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -973,17 +964,6 @@ ENTRY(flush_icache_page_asm)
 	fic,m		%r31(%sr4,%r28)
 
 	sync
-
-#ifdef CONFIG_PA20
-	pdtlb,l		%r0(%r28)
-	pitlb,l		%r0(%sr4,%r25)
-#else
-	tlb_lock	%r20,%r21,%r22
-	pdtlb		%r0(%r28)
-	pitlb		%r0(%sr4,%r25)
-	tlb_unlock	%r20,%r21,%r22
-#endif
-
 	bv		%r0(%r2)
 	nop
 	.exit
@@ -612,8 +612,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 
 	/* Clear frozen state */
 	rc = eeh_clear_pe_frozen_state(pe, false);
-	if (rc)
+	if (rc) {
+		pci_unlock_rescan_remove();
 		return rc;
+	}
 
 	/* Give the system 5 seconds to finish running the user-space
 	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
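The fix above is the classic "early return while holding a lock" bug: the error path bailed out of eeh_reset_device() without dropping the rescan/remove lock taken earlier in the function. A toy sketch of the pattern (the pthread mutex and function names below are illustrative, not the kernel's API):

#include <pthread.h>

static pthread_mutex_t rescan_lock = PTHREAD_MUTEX_INITIALIZER;

static int clear_frozen_state(void)
{
	return -1;	/* pretend the recovery step failed */
}

static int reset_device(void)
{
	int rc;

	pthread_mutex_lock(&rescan_lock);

	rc = clear_frozen_state();
	if (rc) {
		/* Without this unlock the mutex would stay held forever and
		 * the next reset_device() caller would block, which is the
		 * shape of the bug fixed in the hunk above. */
		pthread_mutex_unlock(&rescan_lock);
		return rc;
	}

	pthread_mutex_unlock(&rescan_lock);
	return 0;
}

int main(void)
{
	return reset_device() ? 1 : 0;
}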
@@ -67,7 +67,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int shift = 64 - x86_pmu.cntval_bits;
 	u64 prev_raw_count, new_raw_count;
 	int idx = hwc->idx;
-	s64 delta;
+	u64 delta;
 
 	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
@@ -3636,7 +3636,7 @@ __init int intel_pmu_init(void)
 
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
-		x86_pmu.max_period = x86_pmu.cntval_mask;
+		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
 		x86_pmu.perfctr = MSR_IA32_PMC0;
 		pr_cont("full-width counters, ");
 	}
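Both perf hunks touch the same arithmetic: x86_perf_event_update() shifts two raw counter reads up to the top of a 64-bit word, subtracts, and shifts back down, so a counter wrap still produces the right difference, and halving max_period keeps the programmed period inside that safe range. Making delta unsigned stops the final right shift from sign-extending a large intermediate value. A standalone sketch of the wrap-safe delta (CNTVAL_BITS and the sample values are illustrative, not taken from any specific PMU):

#include <stdint.h>
#include <stdio.h>

#define CNTVAL_BITS 48	/* pretend hardware counter width */

static uint64_t counter_delta(uint64_t prev, uint64_t now)
{
	int shift = 64 - CNTVAL_BITS;

	/* Both raw values live in the low 48 bits; doing the subtraction on
	 * the shifted values means a wrap past 2^48 still yields the correct
	 * unsigned difference after shifting back down. */
	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	uint64_t prev = 0xFFFFFFFFFFF0ULL;	/* just before the 48-bit wrap */
	uint64_t now  = 0x10ULL;		/* just after wrapping */

	/* Prints 32: 0x10 ticks up to the wrap plus 0x10 after it. */
	printf("delta = %llu\n", (unsigned long long)counter_delta(prev, now));
	return 0;
}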
@@ -33,6 +33,7 @@ obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
 $(obj)/rsaprivkey-asn1.o: $(obj)/rsaprivkey-asn1.c $(obj)/rsaprivkey-asn1.h
+$(obj)/rsa_helper.o: $(obj)/rsapubkey-asn1.h $(obj)/rsaprivkey-asn1.h
 clean-files += rsapubkey-asn1.c rsapubkey-asn1.h
 clean-files += rsaprivkey-asn1.c rsaprivkey-asn1.h
 
@@ -258,18 +258,22 @@ out_free_inst:
 	goto out;
 }
 
-static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
+static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
 					  u32 *mask)
 {
 	struct crypto_attr_type *algt;
 
 	algt = crypto_get_attr_type(tb);
 	if (IS_ERR(algt))
-		return;
-	if ((algt->type & CRYPTO_ALG_INTERNAL))
-		*type |= CRYPTO_ALG_INTERNAL;
-	if ((algt->mask & CRYPTO_ALG_INTERNAL))
-		*mask |= CRYPTO_ALG_INTERNAL;
+		return false;
+
+	*type |= algt->type & CRYPTO_ALG_INTERNAL;
+	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;
+
+	if (*type & *mask & CRYPTO_ALG_INTERNAL)
+		return true;
+	else
+		return false;
 }
 
 static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
@@ -498,7 +502,8 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
 	u32 mask = 0;
 	int err;
 
-	mcryptd_check_internal(tb, &type, &mask);
+	if (!mcryptd_check_internal(tb, &type, &mask))
+		return -EINVAL;
 
 	salg = shash_attr_alg(tb[1], type, mask);
 	if (IS_ERR(salg))
@@ -1378,8 +1378,14 @@ static ssize_t hot_remove_store(struct class *class,
 	return ret ? ret : count;
 }
 
+/*
+ * NOTE: hot_add attribute is not the usual read-only sysfs attribute. In a
+ * sense that reading from this file does alter the state of your system -- it
+ * creates a new un-initialized zram device and returns back this device's
+ * device_id (or an error code if it fails to create a new device).
+ */
 static struct class_attribute zram_control_class_attrs[] = {
-	__ATTR_RO(hot_add),
+	__ATTR(hot_add, 0400, hot_add_show, NULL),
 	__ATTR_WO(hot_remove),
 	__ATTR_NULL,
 };
@@ -872,23 +872,25 @@ lbl_free_candev:
 static void peak_usb_disconnect(struct usb_interface *intf)
 {
 	struct peak_usb_device *dev;
+	struct peak_usb_device *dev_prev_siblings;
 
 	/* unregister as many netdev devices as siblings */
-	for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) {
+	for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) {
 		struct net_device *netdev = dev->netdev;
 		char name[IFNAMSIZ];
 
+		dev_prev_siblings = dev->prev_siblings;
 		dev->state &= ~PCAN_USB_STATE_CONNECTED;
 		strncpy(name, netdev->name, IFNAMSIZ);
 
 		unregister_netdev(netdev);
-		free_candev(netdev);
 
 		kfree(dev->cmd_buf);
 		dev->next_siblings = NULL;
		if (dev->adapter->dev_free)
			dev->adapter->dev_free(dev);
 
+		free_candev(netdev);
 		dev_info(&intf->dev, "%s removed\n", name);
 	}
 
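The driver bug here is a use-after-free: the old for-loop update expression read dev->prev_siblings after dev->adapter->dev_free(dev) and free_candev() had already released the structure, and free_candev() also ran before the last users of netdev were done. A small sketch of the same iteration pattern on a hand-rolled list (the node type and field names below are illustrative): cache the next pointer before freeing the current element.

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *prev_siblings;	/* link to the next element to visit */
};

static void release_all(struct node *head)
{
	struct node *n, *n_prev_siblings;

	/* Advancing with n->prev_siblings after free(n) would read freed
	 * memory; grabbing the pointer first mirrors what the patch does
	 * with dev_prev_siblings. */
	for (n = head; n; n = n_prev_siblings) {
		n_prev_siblings = n->prev_siblings;
		printf("releasing node %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));

	if (!a || !b)
		return 1;

	b->id = 2;
	b->prev_siblings = NULL;
	a->id = 1;
	a->prev_siblings = b;

	release_all(a);
	return 0;
}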
@@ -131,22 +131,16 @@ enum {
 		{ .notifier_call = fn, .priority = pri };	\
 	__register_cpu_notifier(&fn##_nb);			\
 }
-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
-
-#ifdef CONFIG_HOTPLUG_CPU
+
 extern int register_cpu_notifier(struct notifier_block *nb);
 extern int __register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
 extern void __unregister_cpu_notifier(struct notifier_block *nb);
-#else
-
-#ifndef MODULE
-extern int register_cpu_notifier(struct notifier_block *nb);
-extern int __register_cpu_notifier(struct notifier_block *nb);
-#else
+
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
@@ -156,7 +150,6 @@ static inline int __register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
 }
-#endif
 
 static inline void unregister_cpu_notifier(struct notifier_block *nb)
 {
@@ -196,5 +196,6 @@ struct can_filter {
 };
 
 #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */
+#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */
 
 #endif /* !_UAPI_CAN_H */
@@ -232,8 +232,6 @@ static int cpu_notify(unsigned long val, void *v)
 	return __cpu_notify(val, v, -1, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 static void cpu_notify_nofail(unsigned long val, void *v)
 {
 	BUG_ON(cpu_notify(val, v));
@@ -255,6 +253,7 @@ void __unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(__unregister_cpu_notifier);
 
+#ifdef CONFIG_HOTPLUG_CPU
 /**
  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
  * @cpu: a CPU id
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0		CPU 1			CPU2
+	 * l->owner=T1
+	 *			rt_mutex_lock(l)
+	 *			lock(l->lock)
+	 *			l->owner = T1 | HAS_WAITERS;
+	 *			enqueue(T2)
+	 *			boost()
+	 *			  unlock(l->lock)
+	 *			block()
+	 *
+	 *						rt_mutex_lock(l)
+	 *						lock(l->lock)
+	 *						l->owner = T1 | HAS_WAITERS;
+	 *						enqueue(T3)
+	 *						boost()
+	 *						  unlock(l->lock)
+	 *						block()
+	 *			signal(->T2)		signal(->T3)
+	 *			lock(l->lock)
+	 *			dequeue(T2)
+	 *			deboost()
+	 *			  unlock(l->lock)
+	 *						lock(l->lock)
+	 *						dequeue(T3)
+	 *						 ==> wait list is empty
+	 *						deboost()
+	 *						 unlock(l->lock)
+	 *			lock(l->lock)
+	 *			fixup_rt_mutex_waiters()
+	 *			  if (wait_list_empty(l) {
+	 *			    l->owner = owner
+	 *			    owner = l->owner & ~HAS_WAITERS;
+	 *			      ==> l->owner = T1
+	 *			  }
+	 *						lock(l->lock)
+	 * rt_mutex_unlock(l)				fixup_rt_mutex_waiters()
+	 *						  if (wait_list_empty(l) {
+	 *						    owner = l->owner & ~HAS_WAITERS;
+	 *						    cmpxchg(l->owner, T1, NULL)
+	 *						     ===> Success (l->owner = NULL)
+	 *
+	 *						    l->owner = owner
+	 *						      ==> l->owner = T1
+	 *						  }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
@@ -2764,7 +2764,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 					     &tvlv_tt_data,
 					     &tt_change,
 					     &tt_len);
-	if (!tt_len)
+	if (!tt_len || !tvlv_len)
 		goto unlock;
 
 	/* Copy the last orig_node's OGM buffer */
@@ -2782,7 +2782,7 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
 					     &tvlv_tt_data,
 					     &tt_change,
 					     &tt_len);
-	if (!tt_len)
+	if (!tt_len || !tvlv_len)
 		goto out;
 
 	/* fill the rest of the tvlv with the real TT entries */
@@ -499,6 +499,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 		if (optlen % sizeof(struct can_filter) != 0)
 			return -EINVAL;
 
+		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
+			return -EINVAL;
+
 		count = optlen / sizeof(struct can_filter);
 
 		if (count > 1) {
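The new check works together with the CAN_RAW_FILTER_MAX definition added to the uapi header above: a setsockopt() caller can no longer turn an arbitrarily large optlen into an arbitrarily large kernel allocation. A userspace-style sketch of the same validation (malloc/memcpy stand in for the kernel's copy from user space; the helper name is illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct can_filter {
	unsigned int can_id;
	unsigned int can_mask;
};

#define CAN_RAW_FILTER_MAX 512	/* mirrors the new uapi constant */

static int set_filters(const void *optval, size_t optlen)
{
	struct can_filter *copy;

	if (optlen % sizeof(struct can_filter) != 0)
		return -EINVAL;

	/* Without this cap, optlen would translate directly into an
	 * equally large allocation below. */
	if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
		return -EINVAL;

	copy = malloc(optlen);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, optval, optlen);

	printf("installed %zu filters\n", optlen / sizeof(struct can_filter));
	free(copy);
	return 0;
}

int main(void)
{
	struct can_filter f = { .can_id = 0x123, .can_mask = 0x7FF };

	return set_filters(&f, sizeof(f)) ? 1 : 0;
}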