This is the 4.4.72 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllBIXAACgkQONu9yGCS
aT6T+w//VjXDZ+MddWJ4UeQDyIANYeFpa4tJNoqR3JsnT6yg1HODRZDR7aP5QJmN
GIoRWU/2Q2nmYbAO0c8RPxs07w2xtIZzTUn+H+i6sG7bRs5RbLM5AMg4W/A/X88L
V5c34kCvCf1HRfrdd4rXIZiibFnSZGqUv6o1YyQqCIvx15pyB6elMM714zt8uubk
iL4/WJ2M4SrmamHWA349ldEtPjQKpwpwdBcCn+M4awbimdc0pm8oZqNkAfwJ+vLO
HsuClO57I699ESU2Zt5bfEdVsW/gc7WiJOAr1Mrl2suToryrWfs2YT+sC/IQhkfC
gUsi9Cm/6YMu+tiP4o6aqYvTFoFplFErpEbC3mqAEvHGGHKhrgEDotYJ+FnvI3q7
Jaxix0B/Q/NIqsJPnqe5ONOCKFmW7rGR2e2j5+45GuiofioNVNF12HWfQkoItPOL
YeR2JB8K9aywzYM4gaJuy8ScJ1shN8TY1FKgZa5gBT2ym4pDDcQmxz7Jr7agREHe
F2sJ23zMU+o9guGA4Is2yqWCQ5yM+3kpPPISz+Pcgh8Q95o+ftCSyOeB2F5roW8I
EO22AlJPlQH0LWDQhOJ5ZuAVe+qB8EdrQqqdLbP4/oHp7MtlR5ge+idRuZc+AUsa
UoASccPsEwHyBErQmHoWNI4nPRciFrKliOqERmPLcuzewUwSatw=
=wXRR
-----END PGP SIGNATURE-----

Merge 4.4.72 into android-4.4

Changes in 4.4.72:
	bnx2x: Fix Multi-Cos
	ipv6: xfrm: Handle errors reported by xfrm6_find_1stfragopt()
	cxgb4: avoid enabling napi twice to the same queue
	tcp: disallow cwnd undo when switching congestion control
	vxlan: fix use-after-free on deletion
	ipv6: Fix leak in ipv6_gso_segment().
	net: ping: do not abuse udp_poll()
	net: ethoc: enable NAPI before poll may be scheduled
	net: bridge: start hello timer only if device is up
	sparc64: mm: fix copy_tsb to correctly copy huge page TSBs
	sparc: Machine description indices can vary
	sparc64: reset mm cpumask after wrap
	sparc64: combine activate_mm and switch_mm
	sparc64: redefine first version
	sparc64: add per-cpu mm of secondary contexts
	sparc64: new context wrap
	sparc64: delete old wrap code
	arch/sparc: support NR_CPUS = 4096
	serial: ifx6x60: fix use-after-free on module unload
	ptrace: Properly initialize ptracer_cred on fork
	KEYS: fix dereferencing NULL payload with nonzero length
	KEYS: fix freeing uninitialized memory in key_update()
	crypto: gcm - wait for crypto op not signal safe
	drm/amdgpu/ci: disable mclk switching for high refresh rates (v2)
	nfsd4: fix null dereference on replay
	nfsd: Fix up the "supattr_exclcreat" attributes
	kvm: async_pf: fix rcu_irq_enter() with irqs enabled
	KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
	arm: KVM: Allow unaligned accesses at HYP
	KVM: async_pf: avoid async pf injection when in guest mode
	dmaengine: usb-dmac: Fix DMAOR AE bit definition
	dmaengine: ep93xx: Always start from BASE0
	xen/privcmd: Support correctly 64KB page granularity when mapping memory
	xen-netfront: do not cast grant table reference to signed short
	xen-netfront: cast grant table reference first to type int
	ext4: fix SEEK_HOLE
	ext4: keep existing extra fields when inode expands
	ext4: fix fdatasync(2) after extent manipulation operations
	usb: gadget: f_mass_storage: Serialize wake and sleep execution
	usb: chipidea: udc: fix NULL pointer dereference if udc_start failed
	usb: chipidea: debug: check before accessing ci_role
	staging/lustre/lov: remove set_fs() call from lov_getstripe()
	iio: light: ltr501 Fix interchanged als/ps register field
	iio: proximity: as3935: fix AS3935_INT mask
	drivers: char: random: add get_random_long()
	random: properly align get_random_int_hash
	stackprotector: Increase the per-task stack canary's random range from 32 bits to 64 bits on 64-bit platforms
	cpufreq: cpufreq_register_driver() should return -ENODEV if init fails
	target: Re-add check to reject control WRITEs with overflow data
	drm/msm: Expose our reservation object when exporting a dmabuf.
	Input: elantech - add Fujitsu Lifebook E546/E557 to force crc_enabled
	cpuset: consider dying css as offline
	fs: add i_blocksize()
	ufs: restore proper tail allocation
	fix ufs_isblockset()
	ufs: restore maintaining ->i_blocks
	ufs: set correct ->s_maxbytes
	ufs_extend_tail(): fix the braino in calling conventions of ufs_new_fragments()
	ufs_getfrag_block(): we only grab ->truncate_mutex on block creation path
	cxl: Fix error path on bad ioctl
	btrfs: use correct types for page indices in btrfs_page_exists_in_range
	btrfs: fix memory leak in update_space_info failure path
	KVM: arm/arm64: Handle possible NULL stage2 pud when ageing pages
	scsi: qla2xxx: don't disable a not previously enabled PCI device
	powerpc/eeh: Avoid use after free in eeh_handle_special_event()
	powerpc/numa: Fix percpu allocations to be NUMA aware
	powerpc/hotplug-mem: Fix missing endian conversion of aa_index
	perf/core: Drop kernel samples even though :u is specified
	drm/vmwgfx: Handle vmalloc() failure in vmw_local_fifo_reserve()
	drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
	drm/vmwgfx: Make sure backup_handle is always valid
	drm/nouveau/tmr: fully separate alarm execution/pending lists
	ALSA: timer: Fix race between read and ioctl
	ALSA: timer: Fix missing queue indices reset at SNDRV_TIMER_IOCTL_SELECT
	ASoC: Fix use-after-free at card unregistration
	drivers: char: mem: Fix wraparound check to allow mappings up to the end
	tty: Drop krefs for interrupted tty lock
	serial: sh-sci: Fix panic when serial console and DMA are enabled
	net: better skb->sender_cpu and skb->napi_id cohabitation
	mm: consider memblock reservations for deferred memory initialization sizing
	NFS: Ensure we revalidate attributes before using execute_ok()
	NFSv4: Don't perform cached access checks before we've OPENed the file
	Make __xfs_xattr_put_listent properly report errors.
	arm64: hw_breakpoint: fix watchpoint matching for tagged pointers
	arm64: entry: improve data abort handling of tagged pointers
	RDMA/qib,hfi1: Fix MR reference count leak on write with immediate
	usercopy: Adjust tests to deal with SMAP/PAN
	arm64: armv8_deprecated: ensure extension of addr
	arm64: ensure extension of smp_store_release value
	Linux 4.4.72

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit e76c0faf11
128 changed files with 745 additions and 360 deletions
Makefile (2 changes)

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 71
+SUBLEVEL = 72
 EXTRAVERSION =
 NAME = Blurry Fish Butt
@ -110,7 +110,6 @@ __do_hyp_init:
|
|||
@ - Write permission implies XN: disabled
|
||||
@ - Instruction cache: enabled
|
||||
@ - Data/Unified cache: enabled
|
||||
@ - Memory alignment checks: enabled
|
||||
@ - MMU: enabled (this code must be run from an identity mapping)
|
||||
mrc p15, 4, r0, c1, c0, 0 @ HSCR
|
||||
ldr r2, =HSCTLR_MASK
|
||||
|
@ -118,8 +117,8 @@ __do_hyp_init:
|
|||
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
|
||||
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
|
||||
and r1, r1, r2
|
||||
ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
|
||||
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
|
||||
ARM( ldr r2, =(HSCTLR_M) )
|
||||
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
|
||||
orr r1, r1, r2
|
||||
orr r0, r0, r1
|
||||
isb
|
||||
|
|
|
@@ -869,6 +869,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 	pmd_t *pmd;

 	pud = stage2_get_pud(kvm, cache, addr);
+	if (!pud)
+		return NULL;
+
 	if (pud_none(*pud)) {
 		if (!cache)
 			return NULL;
arch/arm64/include/asm/asm-uaccess.h (new file, 13 lines)

@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+	.macro	clear_address_tag, dst, addr
+	tst	\addr, #(1 << 55)
+	bic	\dst, \addr, #(0xff << 56)
+	csel	\dst, \dst, \addr, eq
+	.endm
+
+#endif
@@ -44,23 +44,33 @@

 #define smp_store_release(p, v)						\
 do {									\
+	union { typeof(*p) __val; char __c[1]; } __u =			\
+		{ .__val = (__force typeof(*p)) (v) };			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("stlrb %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u8 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("stlrh %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u16 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("stlr %w1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u32 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("stlr %1, %0"				\
-				: "=Q" (*p) : "r" (v) : "memory");	\
+				: "=Q" (*p)				\
+				: "r" (*(__u64 *)__u.__c)		\
+				: "memory");				\
 		break;							\
 	}								\
 } while (0)
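The point of the added union is that assigning v to a typeof(*p) member first applies the usual C conversions, so the value reaching the inline-asm input register is already truncated or extended to the exact width being stored; handing v straight to the asm operand gives no such guarantee. A minimal standalone sketch of the same trick (arm64-only, invented names, not kernel code):

/* sketch: force an asm input operand to the width and value of *p */
#include <stdint.h>

void store_release_u64(uint64_t *p, int v)
{
	/* the union member assignment sign-extends v per C rules
	 * *before* the value is placed in a 64-bit register */
	union { uint64_t val; char c[1]; } u = { .val = (uint64_t)v };

	__asm__ volatile("stlr %1, %0"
			 : "=Q" (*p)
			 : "r" (*(uint64_t *)u.c)
			 : "memory");
}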
@@ -27,6 +27,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/bitops.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>

@@ -119,6 +120,13 @@ static inline void set_fs(mm_segment_t fs)
 	flag;								\
 })

+/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr)		sign_extend64(addr, 55)
+
 #define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs
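sign_extend64(addr, 55) replaces the top-byte tag with copies of bit 55, so a tagged user pointer (bit 55 clear) comes out with its tag byte zeroed while kernel addresses stay canonical. A standalone model of the same arithmetic (an assumed-equivalent expression, not the kernel helper itself):

#include <stdint.h>

/* sign-extend from bit 55: bits 63..56 become copies of bit 55 */
static inline uint64_t untag(uint64_t addr)
{
	return (uint64_t)(((int64_t)(addr << 8)) >> 8);
}

/* e.g. untag(0x5a00123456789abc) == 0x0000123456789abc:
 * the 0x5a tag in the top byte is discarded because bit 55 is 0 */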
@@ -299,7 +299,8 @@ do {							\
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
 	: "=&r" (res), "+r" (data), "=&r" (temp)		\
-	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
+	: "r" ((unsigned long)addr), "i" (-EAGAIN),		\
+	  "i" (-EFAULT)						\
 	: "memory");						\
 	uaccess_disable();					\
 } while (0)
@@ -32,6 +32,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>

 /*

@@ -433,12 +434,13 @@ el1_da:
 	/*
 	 * Data abort handling
 	 */
-	mrs	x0, far_el1
+	mrs	x3, far_el1
 	enable_dbg
 	// re-enable interrupts if they were enabled in the aborted context
 	tbnz	x23, #7, 1f			// PSR_I_BIT
 	enable_irq
 1:
+	clear_address_tag x0, x3
 	mov	x2, sp				// struct pt_regs
 	bl	do_mem_abort

@@ -599,7 +601,7 @@ el0_da:
 	// enable interrupts before calling the main handler
 	enable_dbg_and_irq
 	ct_user_exit
-	bic	x0, x26, #(0xff << 56)
+	clear_address_tag x0, x26
 	mov	x1, x25
 	mov	x2, sp
 	bl	do_mem_abort
@@ -35,6 +35,7 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
+#include <asm/uaccess.h>

 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);

+static inline int early_cpu_to_node(int cpu)
+{
+	int nid;
+
+	nid = numa_cpu_lookup_table[cpu];
+
+	/*
+	 * Fall back to node 0 if nid is unset (it should be, except bugs).
+	 * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+	 */
+	return (nid < 0) ? 0 : nid;
+}
 #else

+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}

 static inline int sysfs_add_device_to_node(struct device *dev, int nid)
@@ -655,7 +655,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
  */
 #define MAX_WAIT_FOR_RECOVERY 300

-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
 {
 	struct pci_bus *frozen_bus;
 	int rc = 0;

@@ -665,7 +665,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	if (!frozen_bus) {
 		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
 			__func__, pe->phb->global_number, pe->addr);
-		return;
+		return false;
 	}

 	eeh_pe_update_time_stamp(pe);

@@ -790,7 +790,7 @@ static void eeh_handle_normal_event(struct eeh_pe *pe)
 	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

-	return;
+	return false;

 excess_failures:
 	/*

@@ -831,7 +831,11 @@ perm_error:
 		pci_lock_rescan_remove();
 		pcibios_remove_pci_devices(frozen_bus);
 		pci_unlock_rescan_remove();
+
+		/* The passed PE should no longer be used */
+		return true;
 	}
+	return false;
 }

 static void eeh_handle_special_event(void)

@@ -897,7 +901,14 @@ static void eeh_handle_special_event(void)
 		 */
 		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
 		    rc == EEH_NEXT_ERR_FENCED_PHB) {
-			eeh_handle_normal_event(pe);
+			/*
+			 * eeh_handle_normal_event() can make the PE stale if it
+			 * determines that the PE cannot possibly be recovered.
+			 * Don't modify the PE state if that's the case.
+			 */
+			if (eeh_handle_normal_event(pe))
+				continue;
+
 			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
 		} else {
 			pci_lock_rescan_remove();
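The shape of the fix: the recovery routine now reports whether it tore the PE down, so the caller skips any further access to it. The same pattern in isolation (hypothetical names, a sketch rather than the kernel code):

#include <stdbool.h>

struct pe;                             /* opaque */
extern bool recover(struct pe *pe);    /* true if pe was freed/removed */
extern void clear_recovering(struct pe *pe);

static void handle_event(struct pe *pe)
{
	if (recover(pe))
		return;            /* pe is stale: do not touch it again */
	clear_recovering(pe);      /* safe only when recover() said false */
}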
@@ -751,7 +751,7 @@ void __init setup_arch(char **cmdline_p)

 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 					__pa(MAX_DMA_ADDRESS));
 }

@@ -762,7 +762,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)

 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
@@ -110,6 +110,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}

@@ -553,6 +554,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
@@ -182,9 +182,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64

 source kernel/Kconfig.hz
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)

-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx) \
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
@@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];

+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);

@@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();

+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;

@@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),

@@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */

 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;

 	u64			dev_no;
+	u64			id;

 	unsigned long		channel_id;
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;

-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);

 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}

-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
@@ -959,37 +959,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }

-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
@@ -470,13 +470,16 @@ __tsb_context_switch:
 	.type	copy_tsb,#function
 copy_tsb:	/* %o0=old_tsb_base, %o1=old_tsb_size
 		 * %o2=new_tsb_base, %o3=new_tsb_size
+		 * %o4=page_size_shift
 		 */
 	sethi	%uhi(TSB_PASS_BITS), %g7
 	srlx	%o3, 4, %o3
-	add	%o0, %o1, %g1	/* end of old tsb */
+	add	%o0, %o1, %o1	/* end of old tsb */
 	sllx	%g7, 32, %g7
 	sub	%o3, 1, %o3	/* %o3 == new tsb hash mask */

+	mov	%o4, %g1	/* page_size_shift */
+
661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word	661b

@@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 	/* This can definitely be computed faster... */
 	srlx	%o0, 4, %o5	/* Build index */
 	and	%o5, 511, %o5	/* Mask index */
-	sllx	%o5, PAGE_SHIFT, %o5	/* Put into vaddr position */
+	sllx	%o5, %g1, %o5	/* Put into vaddr position */
 	or	%o4, %o5, %o4	/* Full VADDR. */
-	srlx	%o4, PAGE_SHIFT, %o4	/* Shift down to create index */
+	srlx	%o4, %g1, %o4	/* Shift down to create index */
 	and	%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx	%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */

@@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

80:	add	%o0, 16, %o0
-	cmp	%o0, %g1
+	cmp	%o0, %o1
 	bne,pt	%xcc, 90b
 	 nop
@@ -50,7 +50,7 @@ tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:	BTRAP(0x44)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
@@ -284,13 +284,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}

 	vdev->dev.parent = parent;

@@ -333,27 +336,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }

+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);

-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;

-	return 0;
+	return 1;
 }

 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;

-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
 				vio_md_node_match);
+
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));

 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
@@ -656,10 +656,58 @@ EXPORT_SYMBOL(__flush_dcache_range);

 /* get_new_mmu_context() uses "cache + 1".  */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that are added into per_cpu_secondary_mm,
+	 * are going to go through get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Updated versions to current on those CPUs that had valid secondary
+	 * contexts
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will go into get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}

 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.

@@ -675,48 +723,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 	unsigned long ctx, new_ctx;
 	unsigned long orig_pgsz_bits;
-	int new_version;

 	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
 	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-	new_version = 0;
 	if (new_ctx >= (1 << CTX_NR_BITS)) {
 		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 		if (new_ctx >= ctx) {
-			int i;
-			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-				CTX_FIRST_VERSION;
-			if (new_ctx == 1)
-				new_ctx = CTX_FIRST_VERSION;
-
-			/* Don't call memset, for 16 entries that's just
-			 * plain silly...
-			 */
-			mmu_context_bmap[0] = 3;
-			mmu_context_bmap[1] = 0;
-			mmu_context_bmap[2] = 0;
-			mmu_context_bmap[3] = 0;
-			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-				mmu_context_bmap[i + 0] = 0;
-				mmu_context_bmap[i + 1] = 0;
-				mmu_context_bmap[i + 2] = 0;
-				mmu_context_bmap[i + 3] = 0;
-			}
-			new_version = 1;
-			goto out;
+			mmu_context_wrap();
+			goto retry;
 		}
 	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
 	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 	tlb_context_cache = new_ctx;
 	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 	spin_unlock(&ctx_alloc_lock);
-
-	if (unlikely(new_version))
-		smp_new_mmu_context_version();
 }

 static int numa_enabled = 1;
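The scheme above packs a context number in the low bits and a generation ("version") in the high bits; CTX_VALID() compares only the version against tlb_context_cache, so bumping the version in mmu_context_wrap() invalidates every context from older generations at once. A toy model of the same idea, with invented field widths (a sketch, not the kernel's layout):

#include <stdbool.h>

#define NR_BITS      13
#define NR_MASK      ((1UL << NR_BITS) - 1)
#define VERSION_MASK (~NR_MASK)

static unsigned long context_cache;  /* current version in the high bits */

static bool ctx_valid(unsigned long ctx)
{
	/* valid only while the stored generation matches the current one */
	return ((ctx ^ context_cache) & VERSION_MASK) == 0;
}

static void wrap(void)
{
	context_cache += NR_MASK + 1;  /* next generation: all old ctx stale */
}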
@@ -451,7 +451,8 @@ retry_tsb_alloc:
 		extern void copy_tsb(unsigned long old_tsb_base,
 				     unsigned long old_tsb_size,
 				     unsigned long new_tsb_base,
-				     unsigned long new_tsb_size);
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
 		unsigned long old_tsb_base = (unsigned long) old_tsb;
 		unsigned long new_tsb_base = (unsigned long) new_tsb;

@@ -459,7 +460,9 @@ retry_tsb_alloc:
 			old_tsb_base = __pa(old_tsb_base);
 			new_tsb_base = __pa(new_tsb_base);
 		}
-		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			 tsb_index == MM_TSB_BASE ?
+				PAGE_SHIFT : REAL_HPAGE_SHIFT);
 	}

 	mm->context.tsb_block[tsb_index].tsb = new_tsb;
@@ -971,11 +971,6 @@ xcall_capture:
 	wr	%g0, (1 << PIL_SMP_CAPTURE), %set_softint
 	retry

-	.globl	xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-	wr	%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-	retry
-
 #ifdef CONFIG_KGDB
 	.globl	xcall_kgdb_capture
 xcall_kgdb_capture:
@@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
 			 */
 			rcu_irq_exit();
 			native_safe_halt();
-			rcu_irq_enter();
 			local_irq_disable();
+			rcu_irq_enter();
 		}
 	}
 	if (!n.halted)
@@ -737,18 +737,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;

 	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }

 /* find an entry with matching function, matching index (if needed), and that
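The rewritten loop is a bounded circular scan: the old for loop started at j = i + 1 without reducing it modulo nent, so when i was the last entry the first iteration already indexed out of bounds. In the do/while every index is taken mod nent before use, and termination is guaranteed because entry i itself matches its own function. The same shape in isolation (standalone sketch):

/* scan a circular array for the next entry matching entry i */
static int next_matching(const int *func, int nent, int i)
{
	int j = i;

	do {
		j = (j + 1) % nent;        /* wraps; never exceeds nent - 1 */
	} while (func[j] != func[i]);      /* worst case: stops back at i */

	return j;
}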
@@ -3433,12 +3433,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 	return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }

-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 	if (unlikely(!lapic_in_kernel(vcpu) ||
 		     kvm_event_needs_reinjection(vcpu)))
 		return false;

+	if (is_guest_mode(vcpu))
+		return false;
+
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }

@@ -3454,7 +3457,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 	if (!async)
 		return false; /* *pfn has correct page already */

-	if (!prefault && can_do_async_pf(vcpu)) {
+	if (!prefault && kvm_can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(gva, gfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -74,6 +74,7 @@ enum {
 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);

 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -8245,8 +8245,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 		return true;
 	else
-		return !kvm_event_needs_reinjection(vcpu) &&
-			kvm_x86_ops->interrupt_allowed(vcpu);
+		return kvm_can_do_async_pf(vcpu);
 }

 void kvm_arch_start_assignment(struct kvm *kvm)
@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,

 	err = crypto_ablkcipher_encrypt(&data->req);
 	if (err == -EINPROGRESS || err == -EBUSY) {
-		err = wait_for_completion_interruptible(
-			&data->result.completion);
-		if (!err)
-			err = data->result.err;
+		wait_for_completion(&data->result.completion);
+		err = data->result.err;
 	}

 	if (err)
@@ -346,7 +346,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

 	/* It's illegal to wrap around the end of the physical address space. */
-	if (offset + (phys_addr_t)size < offset)
+	if (offset + (phys_addr_t)size - 1 < offset)
 		return -EINVAL;

 	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
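The old test rejected a legal mapping that ends exactly at the top of the physical address space, because there offset + size wraps to 0; comparing the address of the last byte instead keeps the overflow check while allowing that case. A standalone illustration:

#include <stdbool.h>
#include <stdint.h>

static bool wraps_old(uint64_t off, uint64_t size) { return off + size < off; }
static bool wraps_new(uint64_t off, uint64_t size) { return off + size - 1 < off; }

/* Mapping the final 4 KiB page: off = UINT64_MAX - 4095, size = 4096.
 * off + size == 0, so wraps_old() -> true (falsely rejected, the bug);
 * off + size - 1 == UINT64_MAX >= off, so wraps_new() -> false (allowed). */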
@@ -1798,13 +1798,15 @@ int random_int_secret_init(void)
 	return 0;
 }

+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+		__aligned(sizeof(unsigned long));
+
 /*
  * Get a random word for internal kernel use only. Similar to urandom but
  * with the goal of minimal entropy pool depletion. As a result, the random
  * value is not cryptographically secure but for several uses the cost of
  * depleting entropy is too high
  */
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
 unsigned int get_random_int(void)
 {
 	__u32 *hash;
@@ -2548,6 +2548,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
 	    list_empty(&cpufreq_policy_list)) {
 		/* if all ->init() calls failed, unregister */
+		ret = -ENODEV;
 		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
 			 driver_data->name);
 		goto err_if_unreg;
@@ -325,6 +325,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 		| M2P_CONTROL_ENABLE;
 	m2p_set_control(edmac, control);

+	edmac->buffer = 0;
+
 	return 0;
 }
@@ -117,7 +117,7 @@ struct usb_dmac {
 #define USB_DMASWR		0x0008
 #define USB_DMASWR_SWR		(1 << 0)
 #define USB_DMAOR		0x0060
-#define USB_DMAOR_AE		(1 << 2)
+#define USB_DMAOR_AE		(1 << 1)
 #define USB_DMAOR_DME		(1 << 0)

 #define USB_DMASAR		0x0000
@@ -893,6 +893,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (amdgpu_dpm_get_vrefresh(adev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
@@ -986,6 +986,7 @@ static struct drm_driver msm_driver = {
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
 	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
 	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
@@ -212,6 +212,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
 void *msm_gem_prime_vmap(struct drm_gem_object *obj);
 void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
 struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
 	if (!obj->import_attach)
 		msm_gem_put_pages(obj);
 }
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	return msm_obj->resv;
+}
@@ -4,6 +4,7 @@

 struct nvkm_alarm {
 	struct list_head head;
+	struct list_head exec;
 	u64 timestamp;
 	void (*func)(struct nvkm_alarm *);
 };
@@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 		/* Move to completed list.  We'll drop the lock before
 		 * executing the callback so it can reschedule itself.
 		 */
-		list_move_tail(&alarm->head, &exec);
+		list_del_init(&alarm->head);
+		list_add(&alarm->exec, &exec);
 	}

 	/* Shut down interrupt if no more pending alarms. */

@@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
 	spin_unlock_irqrestore(&tmr->lock, flags);

 	/* Execute completed callbacks. */
-	list_for_each_entry_safe(alarm, atemp, &exec, head) {
-		list_del_init(&alarm->head);
+	list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+		list_del(&alarm->exec);
 		alarm->func(alarm);
 	}
 }
@@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
 			return fifo_state->static_buffer;
 		else {
 			fifo_state->dynamic_buffer = vmalloc(bytes);
+			if (!fifo_state->dynamic_buffer)
+				goto out_err;
 			return fifo_state->dynamic_buffer;
 		}
 	}
@@ -1288,11 +1288,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 	int ret;
 	uint32_t size;
-	uint32_t backup_handle;
+	uint32_t backup_handle = 0;

 	if (req->multisample_count != 0)
 		return -EINVAL;

+	if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+		return -EINVAL;
+
 	if (unlikely(vmw_user_surface_size == 0))
 		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
 			128;

@@ -1328,12 +1331,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
 					     &res->backup,
 					     &user_srf->backup_base);
-		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
-		    res->backup_size) {
-			DRM_ERROR("Surface backup buffer is too small.\n");
-			vmw_dmabuf_unreference(&res->backup);
-			ret = -EINVAL;
-			goto out_unlock;
+		if (ret == 0) {
+			if (res->backup->base.num_pages * PAGE_SIZE <
+			    res->backup_size) {
+				DRM_ERROR("Surface backup buffer is too small.\n");
+				vmw_dmabuf_unreference(&res->backup);
+				ret = -EINVAL;
+				goto out_unlock;
+			} else {
+				backup_handle = req->buffer_handle;
+			}
 		}
 	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
 		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
@@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
 static const struct reg_field reg_field_it =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
 static const struct reg_field reg_field_als_intr =
-				REG_FIELD(LTR501_INTR, 0, 0);
+				REG_FIELD(LTR501_INTR, 1, 1);
 static const struct reg_field reg_field_ps_intr =
-				REG_FIELD(LTR501_INTR, 1, 1);
+				REG_FIELD(LTR501_INTR, 0, 0);
 static const struct reg_field reg_field_als_rate =
 				REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
 static const struct reg_field reg_field_ps_rate =
@@ -40,9 +40,9 @@
 #define AS3935_AFE_PWR_BIT	BIT(0)

 #define AS3935_INT		0x03
-#define AS3935_INT_MASK		0x07
+#define AS3935_INT_MASK		0x0f
 #define AS3935_EVENT_INT	BIT(3)
-#define AS3935_NOISE_INT	BIT(1)
+#define AS3935_NOISE_INT	BIT(0)

 #define AS3935_DATA		0x07
 #define AS3935_DATA_MASK	0x3F
@@ -2088,8 +2088,10 @@ send_last:
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			qib_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;
@@ -1122,8 +1122,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
 * Asus UX32VD       0x361f02        00, 15, 0e      clickpad
 * Avatar AVIU-145A2 0x361f00        ?               clickpad
 * Fujitsu LIFEBOOK E544  0x470f00   d0, 12, 09      2 hw buttons
+* Fujitsu LIFEBOOK E546  0x470f00   50, 12, 09      2 hw buttons
 * Fujitsu LIFEBOOK E547  0x470f00   50, 12, 09      2 hw buttons
 * Fujitsu LIFEBOOK E554  0x570f01   40, 14, 0c      2 hw buttons
+* Fujitsu LIFEBOOK E557  0x570f01   40, 14, 0c      2 hw buttons
 * Fujitsu T725      0x470f01        05, 12, 09      2 hw buttons
 * Fujitsu H730      0x570f00        c0, 14, 0c      3 hw buttons (**)
 * Gigabyte U2442    0x450f01        58, 17, 0c      2 hw buttons

@@ -1528,6 +1530,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+		},
+	},
 	{
 		/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
 		.matches = {

@@ -1549,6 +1558,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
 		},
 	},
+	{
+		/* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+		},
+	},
 	{
 		/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
 		.matches = {
@@ -158,11 +158,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,

 	/* Do this outside the status_mutex to avoid a circular dependency with
 	 * the locking in cxl_mmap_fault() */
-	if (copy_from_user(&work, uwork,
-			   sizeof(struct cxl_ioctl_start_work))) {
-		rc = -EFAULT;
-		goto out;
-	}
+	if (copy_from_user(&work, uwork, sizeof(work)))
+		return -EFAULT;

 	mutex_lock(&ctx->status_mutex);
 	if (ctx->status != OPENED) {
@@ -1949,7 +1949,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
 	}

 	/* select a non-FCoE queue */
-	return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+	return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
 }

 void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -2714,10 +2714,14 @@ static int cxgb_up(struct adapter *adap)
 		if (err)
 			goto irq_err;
 	}
+
+	mutex_lock(&uld_mutex);
 	enable_rx(adap);
 	t4_sge_start(adap);
 	t4_intr_enable(adap);
 	adap->flags |= FULL_INIT_DONE;
+	mutex_unlock(&uld_mutex);
+
 	notify_ulds(adap, CXGB4_STATE_UP);
 #if IS_ENABLED(CONFIG_IPV6)
 	update_clip(adap);
@@ -713,6 +713,8 @@ static int ethoc_open(struct net_device *dev)
 	if (ret)
 		return ret;

+	napi_enable(&priv->napi);
+
 	ethoc_init_ring(priv, dev->mem_start);
 	ethoc_reset(priv);

@@ -725,7 +727,6 @@ static int ethoc_open(struct net_device *dev)
 	}

 	phy_start(priv->phy);
-	napi_enable(&priv->napi);

 	if (netif_msg_ifup(priv)) {
 		dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
@@ -77,6 +77,8 @@ static const u8 all_zeros_mac[ETH_ALEN];

 static int vxlan_sock_add(struct vxlan_dev *vxlan);

+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
 /* per-network namespace private data for this module */
 struct vxlan_net {
 	struct list_head  vxlan_list;

@@ -1052,6 +1054,8 @@ static void __vxlan_sock_release(struct vxlan_sock *vs)

 static void vxlan_sock_release(struct vxlan_dev *vxlan)
 {
+	vxlan_vs_del_dev(vxlan);
+
 	__vxlan_sock_release(vxlan->vn4_sock);
 #if IS_ENABLED(CONFIG_IPV6)
 	__vxlan_sock_release(vxlan->vn6_sock);

@@ -2255,6 +2259,15 @@ static void vxlan_cleanup(unsigned long arg)
 	mod_timer(&vxlan->age_timer, next_timer);
 }

+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+	spin_lock(&vn->sock_lock);
+	hlist_del_init_rcu(&vxlan->hlist);
+	spin_unlock(&vn->sock_lock);
+}
+
 static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
 {
 	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);

@@ -3028,12 +3041,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
-	struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-
-	spin_lock(&vn->sock_lock);
-	if (!hlist_unhashed(&vxlan->hlist))
-		hlist_del_rcu(&vxlan->hlist);
-	spin_unlock(&vn->sock_lock);

 	gro_cells_destroy(&vxlan->gro_cells);
 	list_del(&vxlan->next);
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		queue->rx_skbs[id] = skb;

 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
-		BUG_ON((signed short)ref < 0);
+		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
 		queue->grant_rx_ref[id] = ref;

 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

@@ -437,7 +437,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
-	BUG_ON((signed short)ref < 0);
+	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);
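Grant references are 32-bit values, so the old (signed short) cast truncated them and any valid reference of 0x8000 or above looked negative. A standalone demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ref = 0x8001;              /* a perfectly valid reference */

	printf("%d\n", (short)ref < 0);     /* 1: truncated, falsely "failed" */
	printf("%d\n", (int)ref < 0);       /* 0: full width, correct */
	return 0;
}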
@@ -2311,10 +2311,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)

 	if (mem_only) {
 		if (pci_enable_device_mem(pdev))
-			goto probe_out;
+			return ret;
 	} else {
 		if (pci_enable_device(pdev))
-			goto probe_out;
+			return ret;
 	}

 	/* This may fail but that's ok */

@@ -2324,7 +2324,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!ha) {
 		ql_log_pci(ql_log_fatal, pdev, 0x0009,
 		    "Unable to allocate memory for ha.\n");
-		goto probe_out;
+		goto disable_device;
 	}
 	ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
 	    "Memory allocated for ha=%p.\n", ha);

@@ -2923,7 +2923,7 @@ iospace_config_failed:
 	kfree(ha);
 	ha = NULL;

-probe_out:
+disable_device:
 	pci_disable_device(pdev);
 	return ret;
 }
@@ -399,18 +399,10 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
 	struct lov_mds_md *lmmk = NULL;
 	int rc, lmm_size;
 	int lum_size;
-	mm_segment_t seg;

 	if (!lsm)
 		return -ENODATA;

-	/*
-	 * "Switch to kernel segment" to allow copying from kernel space by
-	 * copy_{to,from}_user().
-	 */
-	seg = get_fs();
-	set_fs(KERNEL_DS);
-
 	/* we only need the header part from user space to get lmm_magic and
 	 * lmm_stripe_count, (the header part is common to v1 and v3) */
 	lum_size = sizeof(struct lov_user_md_v1);

@@ -485,6 +477,5 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,

 	obd_free_diskmd(exp, &lmmk);
 out_set:
-	set_fs(seg);
 	return rc;
 }
@@ -1154,15 +1154,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 	if (cmd->unknown_data_length) {
 		cmd->data_length = size;
 	} else if (size != cmd->data_length) {
-		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cmd->t_task_cdb[0]);

-		if (cmd->data_direction == DMA_TO_DEVICE &&
-		    cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
-			pr_err("Rejecting underflow/overflow WRITE data\n");
-			return TCM_INVALID_CDB_FIELD;
+		if (cmd->data_direction == DMA_TO_DEVICE) {
+			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+				pr_err_ratelimited("Rejecting underflow/overflow"
+						   " for WRITE data CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
+			/*
+			 * Some fabric drivers like iscsi-target still expect to
+			 * always reject overflow writes.  Reject this case until
+			 * full fabric driver level support for overflow writes
+			 * is introduced tree-wide.
+			 */
+			if (size > cmd->data_length) {
+				pr_err_ratelimited("Rejecting overflow for"
+						   " WRITE control CDB\n");
+				return TCM_INVALID_CDB_FIELD;
+			}
 		}
 		/*
 		 * Reject READ_* or WRITE_* with overflow/underflow for
@@ -1378,9 +1378,9 @@ static struct spi_driver ifx_spi_driver = {
 static void __exit ifx_spi_exit(void)
 {
 	/* unregister */
+	spi_unregister_driver(&ifx_spi_driver);
 	tty_unregister_driver(tty_drv);
 	put_tty_driver(tty_drv);
-	spi_unregister_driver(&ifx_spi_driver);
 	unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
 }
@@ -1800,12 +1800,14 @@ static int sci_startup(struct uart_port *port)

 	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);

-	ret = sci_request_irq(s);
-	if (unlikely(ret < 0))
-		return ret;
-
 	sci_request_dma(port);

+	ret = sci_request_irq(s);
+	if (unlikely(ret < 0)) {
+		sci_free_dma(port);
+		return ret;
+	}
+
 	spin_lock_irqsave(&port->lock, flags);
 	sci_start_tx(port);
 	sci_start_rx(port);

@@ -1834,8 +1836,8 @@ static void sci_shutdown(struct uart_port *port)
 	}
 #endif

-	sci_free_dma(port);
 	sci_free_irq(s);
+	sci_free_dma(port);
 }

 static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
@@ -2070,13 +2070,12 @@ retry_open:
 	if (tty) {
 		mutex_unlock(&tty_mutex);
 		retval = tty_lock_interruptible(tty);
+		tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
 		if (retval) {
 			if (retval == -EINTR)
 				retval = -ERESTARTSYS;
 			goto err_unref;
 		}
-		/* safe to drop the kref from tty_driver_lookup_tty() */
-		tty_kref_put(tty);
 		retval = tty_reopen(tty);
 		if (retval < 0) {
 			tty_unlock(tty);
@@ -24,10 +24,15 @@ EXPORT_SYMBOL(tty_lock);

 int tty_lock_interruptible(struct tty_struct *tty)
 {
+	int ret;
+
 	if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
 		return -EIO;
 	tty_kref_get(tty);
-	return mutex_lock_interruptible(&tty->legacy_mutex);
+	ret = mutex_lock_interruptible(&tty->legacy_mutex);
+	if (ret)
+		tty_kref_put(tty);
+	return ret;
 }

 void __lockfunc tty_unlock(struct tty_struct *tty)
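The fix restores kref balance: take the reference, try the interruptible lock, and give the reference back if the lock attempt was interrupted; otherwise every signal leaks one count. The pattern in isolation (hypothetical names, a sketch only):

struct obj;
extern void obj_get(struct obj *o);
extern void obj_put(struct obj *o);
extern int lock_interruptible(struct obj *o);  /* 0 on success */

static int obj_lock(struct obj *o)
{
	int ret;

	obj_get(o);
	ret = lock_interruptible(o);
	if (ret)
		obj_put(o);  /* roll back the reference on failure */
	return ret;
}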
@@ -295,7 +295,8 @@ static int ci_role_show(struct seq_file *s, void *data)
 {
 	struct ci_hdrc *ci = s->private;

-	seq_printf(s, "%s\n", ci_role(ci)->name);
+	if (ci->role != CI_ROLE_END)
+		seq_printf(s, "%s\n", ci_role(ci)->name);

 	return 0;
 }
@@ -1982,6 +1982,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
 int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 {
 	struct ci_role_driver *rdrv;
+	int ret;

 	if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
 		return -ENXIO;

@@ -1994,7 +1995,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
 	rdrv->stop	= udc_id_switch_for_host;
 	rdrv->irq	= udc_irq;
 	rdrv->name	= "gadget";
-	ci->roles[CI_ROLE_GADGET] = rdrv;

-	return udc_start(ci);
+	ret = udc_start(ci);
+	if (!ret)
+		ci->roles[CI_ROLE_GADGET] = rdrv;
+
+	return ret;
 }
@@ -399,7 +399,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
 /* Caller must hold fsg->lock */
 static void wakeup_thread(struct fsg_common *common)
 {
-	smp_wmb();	/* ensure the write of bh->state is complete */
+	/*
+	 * Ensure the reading of thread_wakeup_needed
+	 * and the writing of bh->state are completed
+	 */
+	smp_mb();
 	/* Tell the main thread that something has happened */
 	common->thread_wakeup_needed = 1;
 	if (common->thread_task)

@@ -630,7 +634,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
 	}
 	__set_current_state(TASK_RUNNING);
 	common->thread_wakeup_needed = 0;
-	smp_rmb();	/* ensure the latest bh->state is visible */
+
+	/*
+	 * Ensure the writing of thread_wakeup_needed
+	 * and the reading of bh->state are completed
+	 */
+	smp_mb();
 	return rc;
 }
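The switch from smp_wmb()/smp_rmb() to smp_mb() matters because, per the new comments, each side pairs a store to one of the two variables with a load of the other, and only a full barrier orders a store against a later load; write and read fences only order stores against stores and loads against loads. A C11 sketch of the sleeper's side (the two variables are stand-ins for thread_wakeup_needed and bh->state):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int state;          /* stands in for bh->state */
static atomic_int wakeup_needed;  /* stands in for thread_wakeup_needed */

static int should_sleep(void)
{
	atomic_store_explicit(&wakeup_needed, 0, memory_order_relaxed);
	/* Full fence, like smp_mb(): a write-only fence would not stop
	 * the load below from being reordered before the store above. */
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&state, memory_order_relaxed) == 0;
}

int main(void)
{
	atomic_store(&state, 1);  /* pretend the waker already ran */
	printf("should_sleep = %d\n", should_sleep());
	return 0;
}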
@@ -335,8 +335,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 				st->global_error = 1;
 		}
 	}
-	st->va += PAGE_SIZE * nr;
-	st->index += nr;
+	st->va += XEN_PAGE_SIZE * nr;
+	st->index += nr / XEN_PFN_PER_PAGE;
 
 	return 0;
 }
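On arm64 with 64KB kernel pages Xen's ABI still works in 4KB frames, and nr here counts Xen frames, so the virtual address must advance by XEN_PAGE_SIZE per frame while the kernel-page index advances once per XEN_PFN_PER_PAGE frames. The arithmetic, assuming a 64KB kernel page against the fixed 4KB Xen page:

/* Granularity arithmetic behind the fix; 64KB kernel pages are an
 * assumed example configuration (XEN_PFN_PER_PAGE == 16). */
#include <stdio.h>

#define PAGE_SIZE        (64 * 1024)
#define XEN_PAGE_SIZE    (4 * 1024)
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)

int main(void)
{
	unsigned long va = 0, index = 0;
	int nr = 32;                      /* batch of 32 Xen frames */

	/* The old code did va += PAGE_SIZE * nr; index += nr; which
	 * advances 2 MiB of VA for only 128 KiB of actual mappings. */
	va    += (unsigned long)XEN_PAGE_SIZE * nr;
	index += nr / XEN_PFN_PER_PAGE;   /* 32 frames = 2 kernel pages */

	printf("va advance: %lu bytes, kernel-page index advance: %lu\n",
	       va, index);
	return 0;
}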
@@ -3854,6 +3854,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 				   info->space_info_kobj, "%s",
 				   alloc_name(found->flags));
 	if (ret) {
+		percpu_counter_destroy(&found->total_bytes_pinned);
 		kfree(found);
 		return ret;
 	}
@@ -2771,7 +2771,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 		if (!ret)
 			ret = btrfs_prealloc_file_range(inode, mode,
 					range->start,
-					range->len, 1 << inode->i_blkbits,
+					range->len, i_blocksize(inode),
 					offset + len, &alloc_hint);
 		list_del(&range->list);
 		kfree(range);
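This and many of the hunks that follow (buffer.c, ceph, ext4, jfs, mpage, nilfs, ocfs2, reiserfs, stat, udf, xfs) are mechanical conversions to the helper introduced by "fs: add i_blocksize()". The helper itself, added to include/linux/fs.h, is in substance a one-line readability wrapper over the existing shift:

/* The i_blocksize() helper the conversions below rely on: */
static inline unsigned int i_blocksize(const struct inode *node)
{
	return (1 << node->i_blkbits);
}

Besides readability, having a single helper gives later fixes one place to adjust if the representation of the block size ever changes.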
@@ -7318,8 +7318,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
 	int found = false;
 	void **pagep = NULL;
 	struct page *page = NULL;
-	int start_idx;
-	int end_idx;
+	unsigned long start_idx;
+	unsigned long end_idx;
 
 	start_idx = start >> PAGE_CACHE_SHIFT;
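The int to unsigned long change matters for large files: with 4 KiB pages a byte offset past 8 TiB yields a page index above INT_MAX. A standalone illustration of the truncation (a 64-bit build is assumed so that unsigned long is 64 bits, as in the kernel's pgoff_t):

/* Why page indices must be unsigned long, not int. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SHIFT 12   /* 4 KiB pages */

int main(void)
{
	int64_t start = 9LL << 40;                /* 9 TiB file offset */
	unsigned long idx = start >> PAGE_CACHE_SHIFT;
	int bad           = (int)(start >> PAGE_CACHE_SHIFT);

	printf("correct index: %lu\n", idx);      /* 2415919104 */
	printf("as int:        %d\n", bad);       /* wraps negative */
	return 0;
}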
fs/buffer.c | 12 ++++++------

@@ -2298,7 +2298,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
 			    loff_t pos, loff_t *bytes)
 {
 	struct inode *inode = mapping->host;
-	unsigned blocksize = 1 << inode->i_blkbits;
+	unsigned int blocksize = i_blocksize(inode);
 	struct page *page;
 	void *fsdata;
 	pgoff_t index, curidx;

@@ -2378,8 +2378,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
 			get_block_t *get_block, loff_t *bytes)
 {
 	struct inode *inode = mapping->host;
-	unsigned blocksize = 1 << inode->i_blkbits;
-	unsigned zerofrom;
+	unsigned int blocksize = i_blocksize(inode);
+	unsigned int zerofrom;
 	int err;
 
 	err = cont_expand_zero(file, mapping, pos, bytes);

@@ -2741,7 +2741,7 @@ int nobh_truncate_page(struct address_space *mapping,
 	struct buffer_head map_bh;
 	int err;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	length = offset & (blocksize - 1);
 
 	/* Block boundary? Nothing to do */

@@ -2819,7 +2819,7 @@ int block_truncate_page(struct address_space *mapping,
 	struct buffer_head *bh;
 	int err;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	length = offset & (blocksize - 1);
 
 	/* Block boundary? Nothing to do */

@@ -2931,7 +2931,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
 	struct inode *inode = mapping->host;
 	tmp.b_state = 0;
 	tmp.b_blocknr = 0;
-	tmp.b_size = 1 << inode->i_blkbits;
+	tmp.b_size = i_blocksize(inode);
 	get_block(inode, block, &tmp, 0);
 	return tmp.b_blocknr;
 }
@@ -697,7 +697,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	struct pagevec pvec;
 	int done = 0;
 	int rc = 0;
-	unsigned wsize = 1 << inode->i_blkbits;
+	unsigned int wsize = i_blocksize(inode);
 	struct ceph_osd_request *req = NULL;
 	int do_sync = 0;
 	loff_t snap_size, i_size;
@@ -575,7 +575,7 @@ static int dio_set_defer_completion(struct dio *dio)
 /*
  * Call into the fs to map some more disk blocks.  We record the current number
  * of available blocks at sdio->blocks_available.  These are in units of the
- * fs blocksize, (1 << inode->i_blkbits).
+ * fs blocksize, i_blocksize(inode).
  *
  * The fs is allowed to map lots of blocks at once.  If it wants to do that,
  * it uses the passed inode-relative block number as the file offset, as usual.
@@ -4902,6 +4902,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
 
 	/* Zero out partial block at the edges of the range */
 	ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 	if (file->f_flags & O_SYNC)
 		ext4_handle_sync(handle);

@@ -5597,6 +5599,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 		ext4_handle_sync(handle);
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);

@@ -5770,6 +5773,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
 	up_write(&EXT4_I(inode)->i_data_sem);
 	if (IS_SYNC(inode))
 		ext4_handle_sync(handle);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 
 out_stop:
 	ext4_journal_stop(handle);
@@ -463,47 +463,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
 		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
 					  (pgoff_t)num);
-		if (nr_pages == 0) {
-			if (whence == SEEK_DATA)
-				break;
-
-			BUG_ON(whence != SEEK_HOLE);
-			/*
-			 * If this is the first time to go into the loop and
-			 * offset is not beyond the end offset, it will be a
-			 * hole at this offset
-			 */
-			if (lastoff == startoff || lastoff < endoff)
-				found = 1;
+		if (nr_pages == 0)
 			break;
-		}
-
-		/*
-		 * If this is the first time to go into the loop and
-		 * offset is smaller than the first page offset, it will be a
-		 * hole at this offset.
-		 */
-		if (lastoff == startoff && whence == SEEK_HOLE &&
-		    lastoff < page_offset(pvec.pages[0])) {
-			found = 1;
-			break;
-		}
 
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 			struct buffer_head *bh, *head;
 
 			/*
-			 * If the current offset is not beyond the end of given
-			 * range, it will be a hole.
+			 * If current offset is smaller than the page offset,
+			 * there is a hole at this offset.
 			 */
-			if (lastoff < endoff && whence == SEEK_HOLE &&
-			    page->index > end) {
+			if (whence == SEEK_HOLE && lastoff < endoff &&
+			    lastoff < page_offset(pvec.pages[i])) {
 				found = 1;
 				*offset = lastoff;
 				goto out;
 			}
 
+			if (page->index > end)
+				goto out;
+
 			lock_page(page);
 
 			if (unlikely(page->mapping != inode->i_mapping)) {

@@ -543,20 +523,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 			unlock_page(page);
 		}
 
-		/*
-		 * The no. of pages is less than our desired, that would be a
-		 * hole in there.
-		 */
-		if (nr_pages < num && whence == SEEK_HOLE) {
-			found = 1;
-			*offset = lastoff;
+		/* The no. of pages is less than our desired, we are done. */
+		if (nr_pages < num)
 			break;
-		}
 
 		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
 
+	if (whence == SEEK_HOLE && lastoff < endoff) {
+		found = 1;
+		*offset = lastoff;
+	}
+
 out:
 	pagevec_release(&pvec);
 	return found;
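The rewritten loop implements the lseek(2) contract being fixed: SEEK_HOLE must return the first hole at or after the given offset, falling back to the end of file when the tail of the range is unwritten, and must not miss holes that sit before the first cached page. A userspace probe of that contract (hole reporting by the underlying filesystem is assumed, and the result may be rounded up to a block boundary):

/* Create a sparse file: one data byte, then a hole up to 1 MiB. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("sparse.bin", O_CREAT | O_RDWR | O_TRUNC, 0600);
	if (fd < 0)
		return 1;

	pwrite(fd, "x", 1, 0);               /* data at offset 0 */
	ftruncate(fd, 1 << 20);              /* hole up to 1 MiB */

	off_t hole = lseek(fd, 0, SEEK_HOLE);
	printf("first hole at %lld\n", (long long)hole);
	close(fd);
	return 0;
}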
@@ -2057,7 +2057,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
 {
 	struct inode *inode = mpd->inode;
 	int err;
-	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
 							>> inode->i_blkbits;
 
 	do {
@@ -3847,6 +3847,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 
 	inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
 	ext4_mark_inode_dirty(handle, inode);
+	if (ret >= 0)
+		ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
 	ext4_journal_stop(handle);
 out_dio:
@@ -5216,8 +5218,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
 	/* No extended attributes present */
 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-			new_extra_isize);
+		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+		       EXT4_I(inode)->i_extra_isize, 0,
+		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
 		return 0;
 	}
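The bug was zeroing new_extra_isize bytes from the start of the extra-isize area, wiping the fields already stored there; the fix zeroes only the bytes gained by the expansion, the half-open range [old size, new size). The arithmetic, in miniature:

/* Zero only the newly added tail, never the existing fields. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char extra[32] = "existing-fields";   /* already-used extra area */
	unsigned old_isize = 16, new_isize = 32;

	/* Buggy form: memset(extra, 0, new_isize) erases the fields. */
	memset(extra + old_isize, 0, new_isize - old_isize);

	printf("preserved: %s\n", extra);     /* "existing-fields" intact */
	return 0;
}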
@@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
 	if (PageUptodate(page))
 		return 0;
 
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, blocksize, 0);
 
@@ -758,7 +758,7 @@ static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
 			sb->s_blocksize - offset : toread;
 
 		tmp_bh.b_state = 0;
-		tmp_bh.b_size = 1 << inode->i_blkbits;
+		tmp_bh.b_size = i_blocksize(inode);
 		err = jfs_get_block(inode, blk, &tmp_bh, 0);
 		if (err)
 			return err;

@@ -798,7 +798,7 @@ static ssize_t jfs_quota_write(struct super_block *sb, int type,
 			sb->s_blocksize - offset : towrite;
 
 		tmp_bh.b_state = 0;
-		tmp_bh.b_size = 1 << inode->i_blkbits;
+		tmp_bh.b_size = i_blocksize(inode);
 		err = jfs_get_block(inode, blk, &tmp_bh, 1);
 		if (err)
 			goto out;
@@ -147,7 +147,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
 			SetPageUptodate(page);
 			return;
 		}
-		create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+		create_empty_buffers(page, i_blocksize(inode), 0);
 	}
 	head = page_buffers(page);
 	page_bh = head;
fs/nfs/dir.c | 21 +++++++++++++++++++--

@@ -2421,6 +2421,20 @@ int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags)
 }
 EXPORT_SYMBOL_GPL(nfs_may_open);
 
+static int nfs_execute_ok(struct inode *inode, int mask)
+{
+	struct nfs_server *server = NFS_SERVER(inode);
+	int ret;
+
+	if (mask & MAY_NOT_BLOCK)
+		ret = nfs_revalidate_inode_rcu(server, inode);
+	else
+		ret = nfs_revalidate_inode(server, inode);
+	if (ret == 0 && !execute_ok(inode))
+		ret = -EACCES;
+	return ret;
+}
+
 int nfs_permission(struct inode *inode, int mask)
 {
 	struct rpc_cred *cred;

@@ -2438,6 +2452,9 @@ int nfs_permission(struct inode *inode, int mask)
 	case S_IFLNK:
 		goto out;
 	case S_IFREG:
+		if ((mask & MAY_OPEN) &&
+		   nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
+			return 0;
 		break;
 	case S_IFDIR:
 		/*

@@ -2470,8 +2487,8 @@ force_lookup:
 		res = PTR_ERR(cred);
 	}
 out:
-	if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
-		res = -EACCES;
+	if (!res && (mask & MAY_EXEC))
+		res = nfs_execute_ok(inode, mask);
 
 	dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
 		inode->i_sb->s_id, inode->i_ino, mask, res);
@@ -50,7 +50,7 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
 {
 	struct nfsd4_layout_seg *seg = &args->lg_seg;
 	struct super_block *sb = inode->i_sb;
-	u32 block_size = (1 << inode->i_blkbits);
+	u32 block_size = i_blocksize(inode);
 	struct pnfs_block_extent *bex;
 	struct iomap iomap;
 	u32 device_generation = 0;

@@ -151,7 +151,7 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
 	int error;
 
 	nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
-			lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+			lcp->lc_up_len, &iomaps, i_blocksize(inode));
 	if (nr_iomaps < 0)
 		return nfserrno(nr_iomaps);
 
@@ -1690,6 +1690,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 			opdesc->op_get_currentstateid(cstate, &op->u);
 		op->status = opdesc->op_func(rqstp, cstate, &op->u);
 
+		/* Only from SEQUENCE */
+		if (cstate->status == nfserr_replay_cache) {
+			dprintk("%s NFS4.1 replay from cache\n", __func__);
+			status = op->status;
+			goto out;
+		}
 		if (!op->status) {
 			if (opdesc->op_set_currentstateid)
 				opdesc->op_set_currentstateid(cstate, &op->u);

@@ -1700,14 +1706,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
 			if (need_wrongsec_check(rqstp))
 				op->status = check_nfsd_access(current_fh->fh_export, rqstp);
 		}
-
 encode_op:
-		/* Only from SEQUENCE */
-		if (cstate->status == nfserr_replay_cache) {
-			dprintk("%s NFS4.1 replay from cache\n", __func__);
-			status = op->status;
-			goto out;
-		}
 		if (op->status == nfserr_replay_me) {
 			op->replay = &cstate->replay_owner->so_replay;
 			nfsd4_encode_replay(&resp->xdr, op);
@@ -2753,9 +2753,16 @@ out_acl:
 	}
 #endif /* CONFIG_NFSD_PNFS */
 	if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
-		status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
-						  NFSD_SUPPATTR_EXCLCREAT_WORD1,
-						  NFSD_SUPPATTR_EXCLCREAT_WORD2);
+		u32 supp[3];
+
+		supp[0] = nfsd_suppattrs0(minorversion);
+		supp[1] = nfsd_suppattrs1(minorversion);
+		supp[2] = nfsd_suppattrs2(minorversion);
+		supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+		supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+		supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+		status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
 		if (status)
 			goto out;
 	}
@@ -55,7 +55,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
 		brelse(bh);
 		BUG();
 	}
-	memset(bh->b_data, 0, 1 << inode->i_blkbits);
+	memset(bh->b_data, 0, i_blocksize(inode));
 	bh->b_bdev = inode->i_sb->s_bdev;
 	bh->b_blocknr = blocknr;
 	set_buffer_mapped(bh);
@@ -55,7 +55,7 @@ void nilfs_inode_add_blocks(struct inode *inode, int n)
 {
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
 
-	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+	inode_add_bytes(inode, i_blocksize(inode) * n);
 	if (root)
 		atomic64_add(n, &root->blocks_count);
 }

@@ -64,7 +64,7 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
 {
 	struct nilfs_root *root = NILFS_I(inode)->i_root;
 
-	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+	inode_sub_bytes(inode, i_blocksize(inode) * n);
 	if (root)
 		atomic64_sub(n, &root->blocks_count);
 }
@@ -60,7 +60,7 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 	set_buffer_mapped(bh);
 
 	kaddr = kmap_atomic(bh->b_page);
-	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
+	memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
 	if (init_block)
 		init_block(inode, bh, kaddr);
 	flush_dcache_page(bh->b_page);

@@ -503,7 +503,7 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
 	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
 
 	mi->mi_entry_size = entry_size;
-	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
+	mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
 	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
 }
 
@@ -719,7 +719,7 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
 
 		lock_page(page);
 		if (!page_has_buffers(page))
-			create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+			create_empty_buffers(page, i_blocksize(inode), 0);
 		unlock_page(page);
 
 		bh = head = page_buffers(page);
@@ -1103,7 +1103,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
 	int ret = 0;
 	struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
 	unsigned int block_end, block_start;
-	unsigned int bsize = 1 << inode->i_blkbits;
+	unsigned int bsize = i_blocksize(inode);
 
 	if (!page_has_buffers(page))
 		create_empty_buffers(page, bsize, 0);
@@ -808,7 +808,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 	/* We know that zero_from is block aligned */
 	for (block_start = zero_from; block_start < zero_to;
 	     block_start = block_end) {
-		block_end = block_start + (1 << inode->i_blkbits);
+		block_end = block_start + i_blocksize(inode);
 
 		/*
 		 * block_start is block-aligned.  Bump it by one to force
@@ -189,7 +189,7 @@ int reiserfs_commit_page(struct inode *inode, struct page *page,
 	int ret = 0;
 
 	th.t_trans_id = 0;
-	blocksize = 1 << inode->i_blkbits;
+	blocksize = i_blocksize(inode);
 
 	if (logit) {
 		reiserfs_write_lock(s);
@@ -524,7 +524,7 @@ static int reiserfs_get_blocks_direct_io(struct inode *inode,
 	 * referenced in convert_tail_for_hole() that may be called from
 	 * reiserfs_get_block()
 	 */
-	bh_result->b_size = (1 << inode->i_blkbits);
+	bh_result->b_size = i_blocksize(inode);
 
 	ret = reiserfs_get_block(inode, iblock, bh_result,
 				 create | GET_BLOCK_NO_DANGLE);
@@ -31,7 +31,7 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
 	stat->atime = inode->i_atime;
 	stat->mtime = inode->i_mtime;
 	stat->ctime = inode->i_ctime;
-	stat->blksize = (1 << inode->i_blkbits);
+	stat->blksize = i_blocksize(inode);
 	stat->blocks = inode->i_blocks;
 }
 
@@ -454,6 +454,7 @@ void __inode_add_bytes(struct inode *inode, loff_t bytes)
 		inode->i_bytes -= 512;
 	}
 }
+EXPORT_SYMBOL(__inode_add_bytes);
 
 void inode_add_bytes(struct inode *inode, loff_t bytes)
 {
@@ -1206,7 +1206,7 @@ int udf_setsize(struct inode *inode, loff_t newsize)
 {
 	int err;
 	struct udf_inode_info *iinfo;
-	int bsize = 1 << inode->i_blkbits;
+	int bsize = i_blocksize(inode);
 
 	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
 	      S_ISLNK(inode->i_mode)))
@@ -81,7 +81,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 			ufs_error (sb, "ufs_free_fragments",
 				   "bit already cleared for fragment %u", i);
 	}
 
+	inode_sub_bytes(inode, count << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 	uspi->cs_total.cs_nffree += count;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);

@@ -183,6 +184,7 @@ do_more:
 		ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
 	}
 	ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+	inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 		ufs_clusteracct (sb, ucpi, blkno, 1);
 

@@ -494,6 +496,20 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
 	return 0;
 }
 
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+	unsigned size = frags * i_blocksize(inode);
+	spin_lock(&inode->i_lock);
+	__inode_add_bytes(inode, size);
+	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+		__inode_sub_bytes(inode, size);
+		spin_unlock(&inode->i_lock);
+		return false;
+	}
+	spin_unlock(&inode->i_lock);
+	return true;
+}
+
 static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 			     unsigned oldcount, unsigned newcount)
 {

@@ -530,6 +546,9 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
 	for (i = oldcount; i < newcount; i++)
 		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
 			return 0;
+
+	if (!try_add_frags(inode, count))
+		return 0;
 	/*
 	 * Block can be extended
 	 */

@@ -647,6 +666,7 @@ cg_found:
 		ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
 	i = uspi->s_fpb - count;
 
+	inode_sub_bytes(inode, i << uspi->s_fshift);
 	fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
 	uspi->cs_total.cs_nffree += i;
 	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);

@@ -657,6 +677,8 @@ cg_found:
 	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
 	if (result == INVBLOCK)
 		return 0;
+	if (!try_add_frags(inode, count))
+		return 0;
 	for (i = 0; i < count; i++)
 		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
 

@@ -716,6 +738,8 @@ norot:
 		return INVBLOCK;
 	ucpi->c_rotor = result;
 gotit:
+	if (!try_add_frags(inode, uspi->s_fpb))
+		return 0;
 	blkno = ufs_fragstoblks(result);
 	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
 	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
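try_add_frags() is tentative accounting with rollback: add the bytes under i_lock, then refuse the allocation if ->i_blocks no longer fits in the 32 bits UFS can store on disk. The detection idiom, extracted into a standalone example:

/* After a tentative add, (u32)x != x detects that the 512-byte block
 * count would be truncated by the 32-bit on-disk field; if so, the
 * add is rolled back and the allocation refused. */
#include <stdint.h>
#include <stdio.h>

static int try_add(uint64_t *blocks, uint64_t add)
{
	*blocks += add;                       /* tentative */
	if ((uint32_t)*blocks != *blocks) {   /* would truncate? */
		*blocks -= add;               /* roll back */
		return 0;
	}
	return 1;
}

int main(void)
{
	uint64_t blocks = 0xFFFFFFF0ULL;      /* near the 2^32 limit */

	printf("add 8:  %s\n", try_add(&blocks, 8)  ? "ok" : "refused");
	printf("add 64: %s\n", try_add(&blocks, 64) ? "ok" : "refused");
	printf("blocks: %llu\n", (unsigned long long)blocks);
	return 0;
}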
@@ -235,7 +235,8 @@ ufs_extend_tail(struct inode *inode, u64 writes_to,
 
 	p = ufs_get_direct_data_ptr(uspi, ufsi, block);
 	tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
-				new_size, err, locked_page);
+				new_size - (lastfrag & uspi->s_fpbmask), err,
+				locked_page);
 	return tmp != 0;
 }
 
@@ -284,7 +285,7 @@ ufs_inode_getfrag(struct inode *inode, unsigned index,
 			goal += uspi->s_fpb;
 	}
 	tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
-				goal, uspi->s_fpb, err, locked_page);
+				goal, nfrags, err, locked_page);
 
 	if (!tmp) {
 		*err = -ENOSPC;
@@ -402,7 +403,9 @@ static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buff
 
 	if (!create) {
 		phys64 = ufs_frag_map(inode, offsets, depth);
-		goto out;
+		if (phys64)
+			map_bh(bh_result, sb, phys64 + frag);
+		return 0;
 	}
 
 	/* This code entered only while writing ....? */
@@ -746,6 +746,23 @@ static void ufs_put_super(struct super_block *sb)
 	return;
 }
 
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+	int bits = uspi->s_apbshift;
+	u64 res;
+
+	if (bits > 21)
+		res = ~0ULL;
+	else
+		res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+			(1LL << (3*bits));
+
+	if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+		return MAX_LFS_FILESIZE;
+	return res << uspi->s_bshift;
+}
+
 static int ufs_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct ufs_sb_info * sbi;

@@ -1212,6 +1229,7 @@ magic_found:
 			"fast symlink size (%u)\n", uspi->s_maxsymlinklen);
 		uspi->s_maxsymlinklen = maxsymlen;
 	}
+	sb->s_maxbytes = ufs_max_bytes(sb);
 	sb->s_max_links = UFS_LINK_MAX;
 
 	inode = ufs_iget(sb, UFS_ROOTINO);
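ufs_max_bytes() counts how many blocks one inode can address, UFS_NDADDR direct pointers plus one, two, and three levels of indirection with each level fanning out by 2^s_apbshift, and then scales by the block size. A worked instance; the 4 KiB block size and 1024 pointers per block are assumed example values, not taken from the patch:

/* Worked version of the ufs_max_bytes() arithmetic. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int UFS_NDADDR = 12;  /* direct pointers in the inode   */
	const int bits = 10;        /* s_apbshift: log2(addrs/block)  */
	const int bshift = 12;      /* s_bshift: log2(block size)     */

	uint64_t blocks = UFS_NDADDR + (1ULL << bits) +
			  (1ULL << (2 * bits)) + (1ULL << (3 * bits));
	uint64_t bytes = blocks << bshift;

	/* 12 + 2^10 + 2^20 + 2^30 blocks of 4 KiB is about 4 TiB. */
	printf("max file size: %llu bytes (~%llu GiB)\n",
	       (unsigned long long)bytes,
	       (unsigned long long)(bytes >> 30));
	return 0;
}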
@@ -473,15 +473,19 @@ static inline unsigned _ubh_find_last_zero_bit_(
 static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
 	struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
 {
+	u8 mask;
 	switch (uspi->s_fpb) {
 	case 8:
 		return (*ubh_get_addr (ubh, begin + block) == 0xff);
 	case 4:
-		return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+		mask = 0x0f << ((block & 0x01) << 2);
+		return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
 	case 2:
-		return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+		mask = 0x03 << ((block & 0x03) << 1);
+		return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
 	case 1:
-		return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+		mask = 0x01 << (block & 0x07);
+		return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
 	}
 	return 0;
 }
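The old predicates compared the whole byte against the mask, so a block only counted as "set" when every neighbouring block sharing that byte happened to be clear; the fixed version tests just the bits under the mask. The difference in two lines:

/* Equality with the mask vs. testing only the masked bits. */
#include <stdio.h>

int main(void)
{
	unsigned char byte = 0xff;  /* both half-byte groups are set */
	unsigned char mask = 0x0f;  /* the group we are asking about */

	printf("buggy (== mask): %d\n", byte == mask);           /* 0 */
	printf("fixed (& mask):  %d\n", (byte & mask) == mask);  /* 1 */
	return 0;
}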
@@ -288,7 +288,7 @@ xfs_map_blocks(
 {
 	struct xfs_inode	*ip = XFS_I(inode);
 	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			count = 1 << inode->i_blkbits;
+	ssize_t			count = i_blocksize(inode);
 	xfs_fileoff_t		offset_fsb, end_fsb;
 	int			error = 0;
 	int			bmapi_flags = XFS_BMAPI_ENTIRE;

@@ -921,7 +921,7 @@ xfs_aops_discard_page(
 			break;
 		}
 next_buffer:
-		offset += 1 << inode->i_blkbits;
+		offset += i_blocksize(inode);
 
 	} while ((bh = bh->b_this_page) != head);
 

@@ -1363,7 +1363,7 @@ xfs_map_trim_size(
 	    offset + mapping_size >= i_size_read(inode)) {
 		/* limit mapping to block that spans EOF */
 		mapping_size = roundup_64(i_size_read(inode) - offset,
-					  1 << inode->i_blkbits);
+					  i_blocksize(inode));
 	}
 	if (mapping_size > LONG_MAX)
 		mapping_size = LONG_MAX;

@@ -1395,7 +1395,7 @@ __xfs_get_blocks(
 		return -EIO;
 
 	offset = (xfs_off_t)iblock << inode->i_blkbits;
-	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+	ASSERT(bh_result->b_size >= i_blocksize(inode));
 	size = bh_result->b_size;
 
 	if (!create && direct && offset >= i_size_read(inode))

@@ -1968,7 +1968,7 @@ xfs_vm_set_page_dirty(
 			if (offset < end_offset)
 				set_buffer_dirty(bh);
 			bh = bh->b_this_page;
-			offset += 1 << inode->i_blkbits;
+			offset += i_blocksize(inode);
 		} while (bh != head);
 	}
 	/*
@@ -947,7 +947,7 @@ xfs_file_fallocate(
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+		unsigned int blksize_mask = i_blocksize(inode) - 1;
 
 		if (offset & blksize_mask || len & blksize_mask) {
 			error = -EINVAL;

@@ -969,7 +969,7 @@ xfs_file_fallocate(
 		if (error)
 			goto out_unlock;
 	} else if (mode & FALLOC_FL_INSERT_RANGE) {
-		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+		unsigned int blksize_mask = i_blocksize(inode) - 1;
 
 		new_size = i_size_read(inode) + len;
 		if (offset & blksize_mask || len & blksize_mask) {
@@ -180,6 +180,7 @@ xfs_xattr_put_listent(
 	arraytop = context->count + prefix_len + namelen + 1;
 	if (arraytop > context->firstu) {
 		context->count = -1;	/* insufficient space */
+		context->seen_enough = 1;
 		return 0;
 	}
 	offset = (char *)context->alist + context->count;
Some files were not shown because too many files have changed in this diff.