This is the 4.4.89 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlnLaLoACgkQONu9yGCS
 aT7hDw/+Ipx/xnjIUJFV/aqo8lTh3XqP/TjD5whoi+yYC8axLEZBLiOSLZceVjsG
 hi2mP22gKn1i7GLXNeWIZ+rMtVzAN+qNg7i8cjWNfFp1fA7cCfFaYvlV0LVrO2tK
 WnvvE8r5kQAKyQG8498ebEjianxwxHVERnNiE5/SDpCNj14DnwCJBTEYM0tEnuXZ
 /jBIIs4xvndVa0fFfUjuAzh65AefAT1BmgsPll4GnFMUFHh30smYdFla5LL0GNIq
 FQGFvIi8Q02disSMg9lFJVOlazc/HUREiFB1qy1DRtGMnS6/Q0HW0kCxeRi/7QEi
 +HN2rLxtbpnuD5P7W4lDJ5/cyCHMIv8SJ8OqUd8uxbTWz31P/QxbM7d35d+w3rq8
 dv3sQ6CMRnuIXGL5dFHh7zYqlzNS9PKjLmxzAw9grDf+nVsDxE4KUfJy00DSN1I1
 Bopi1kCD2nUMOiBrmxkIczN6OOvcGBHh6/TTB2WEKVHn42D0fjLnO66kJVJLMsBm
 vDdKJDDSGM/0HiUa5ydr6R0Ae7My3h5AJZRa5gn0kL/myatX/vsa0B2ZLpHlVipM
 GhODBsDFkI4k4yceONDZPJmhhVab1lewTMuIW5D2KRMsgpQqLmlOyL5gykfH0rTx
 FVnLSoMAHsgm6qVPwRS5BqK/UnXogfqjiB0iXzNNZnkiABWWoUQ=
 =Skkr
 -----END PGP SIGNATURE-----

Merge 4.4.89 into android-4.4

Changes in 4.4.89
	ipv6: accept 64k - 1 packet length in ip6_find_1stfragopt()
	ipv6: add rcu grace period before freeing fib6_node
	ipv6: fix sparse warning on rt6i_node
	qlge: avoid memcpy buffer overflow
	Revert "net: phy: Correctly process PHY_HALTED in phy_stop_machine()"
	tcp: initialize rcv_mss to TCP_MIN_MSS instead of 0
	Revert "net: use lib/percpu_counter API for fragmentation mem accounting"
	Revert "net: fix percpu memory leaks"
	gianfar: Fix Tx flow control deactivation
	ipv6: fix memory leak with multiple tables during netns destruction
	ipv6: fix typo in fib6_net_exit()
	f2fs: check hot_data for roll-forward recovery
	x86/fsgsbase/64: Report FSBASE and GSBASE correctly in core dumps
	md/raid5: release/flush io in raid5_do_work()
	nfsd: Fix general protection fault in release_lock_stateid()
	mm: prevent double decrease of nr_reserved_highatomic
	tty: improve tty_insert_flip_char() fast path
	tty: improve tty_insert_flip_char() slow path
	tty: fix __tty_insert_flip_char regression
	Input: i8042 - add Gigabyte P57 to the keyboard reset table
	MIPS: math-emu: <MAX|MAXA|MIN|MINA>.<D|S>: Fix quiet NaN propagation
	MIPS: math-emu: <MAX|MAXA|MIN|MINA>.<D|S>: Fix cases of both inputs zero
	MIPS: math-emu: <MAX|MIN>.<D|S>: Fix cases of both inputs negative
	MIPS: math-emu: <MAXA|MINA>.<D|S>: Fix cases of input values with opposite signs
	MIPS: math-emu: <MAXA|MINA>.<D|S>: Fix cases of both infinite inputs
	MIPS: math-emu: MINA.<D|S>: Fix some cases of infinity and zero inputs
	crypto: AF_ALG - remove SGL terminator indicator when chaining
	ext4: fix incorrect quotaoff if the quota feature is enabled
	ext4: fix quota inconsistency during orphan cleanup for read-only mounts
	powerpc: Fix DAR reporting when alignment handler faults
	block: Relax a check in blk_start_queue()
	md/bitmap: disable bitmap_resize for file-backed bitmaps.
	skd: Avoid that module unloading triggers a use-after-free
	skd: Submit requests to firmware before triggering the doorbell
	scsi: zfcp: fix queuecommand for scsi_eh commands when DIX enabled
	scsi: zfcp: add handling for FCP_RESID_OVER to the fcp ingress path
	scsi: zfcp: fix capping of unsuccessful GPN_FT SAN response trace records
	scsi: zfcp: fix passing fsf_req to SCSI trace on TMF to correlate with HBA
	scsi: zfcp: fix missing trace records for early returns in TMF eh handlers
	scsi: zfcp: fix payload with full FCP_RSP IU in SCSI trace records
	scsi: zfcp: trace HBA FSF response by default on dismiss or timedout late response
	scsi: zfcp: trace high part of "new" 64 bit SCSI LUN
	scsi: megaraid_sas: Check valid aen class range to avoid kernel panic
	scsi: megaraid_sas: Return pended IOCTLs with cmd_status MFI_STAT_WRONG_STATE in case adapter is dead
	scsi: storvsc: fix memory leak on ring buffer busy
	scsi: sg: remove 'save_scat_len'
	scsi: sg: use standard lists for sg_requests
	scsi: sg: off by one in sg_ioctl()
	scsi: sg: factor out sg_fill_request_table()
	scsi: sg: fixup infoleak when using SG_GET_REQUEST_TABLE
	scsi: qla2xxx: Fix an integer overflow in sysfs code
	ftrace: Fix selftest goto location on error
	tracing: Apply trace_clock changes to instance max buffer
	ARC: Re-enable MMU upon Machine Check exception
	PCI: shpchp: Enable bridge bus mastering if MSI is enabled
	media: v4l2-compat-ioctl32: Fix timespec conversion
	media: uvcvideo: Prevent heap overflow when accessing mapped controls
	bcache: initialize dirty stripes in flash_dev_run()
	bcache: Fix leak of bdev reference
	bcache: do not subtract sectors_to_gc for bypassed IO
	bcache: correct cache_dirty_target in __update_writeback_rate()
	bcache: Correct return value for sysfs attach errors
	bcache: fix for gc and write-back race
	bcache: fix bch_hprint crash and improve output
	ftrace: Fix memleak when unregistering dynamic ops when tracing disabled
	Linux 4.4.89

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit d68ba9f116 by Greg Kroah-Hartman, 2017-09-27 11:52:16 +02:00
58 changed files with 814 additions and 419 deletions

diff --git a/Makefile b/Makefile

@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 88
SUBLEVEL = 89
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S

@ -104,6 +104,12 @@ ENTRY(EV_MachineCheck)
lr r0, [efa]
mov r1, sp
; hardware auto-disables MMU, re-enable it to allow kernel vaddr
; access for say stack unwinding of modules for crash dumps
lr r3, [ARC_REG_PID]
or r3, r3, MMU_ENABLE
sr r3, [ARC_REG_PID]
lsr r3, r2, 8
bmsk r3, r3, 7
brne r3, ECR_C_MCHK_DUP_TLB, 1f

diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c

@ -885,9 +885,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
local_irq_save(flags);
/* re-enable the MMU */
write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
/* loop thru all sets of TLB */
for (set = 0; set < mmu->sets; set++) {

diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c

@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return x;
/* Compare exponent */
if (xe > ye)
return x;
else if (xe < ye)
return y;
/* Signs of inputs are equal, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return x;
else if (xe < ye)
return y;
} else {
/* Inputs are both negative */
if (xe > ye)
return y;
else if (xe < ye)
return x;
}
/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
return x;
return y;
}
union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754dp_inf(xs & ys);
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
return y;
/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return y;
return x;
else if (xm > ym)
return x;
else if (xs == 0)
return x;
return y;
}

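Note on the zero handling above: for MAX the sign of the result when both
inputs are zeros is (xs & ys), so -0 is returned only when both inputs are
-0; the MIN variants below use (xs | ys) instead. A minimal standalone C
model of that rule (illustrative only, not the kernel's ieee754dp types):

#include <stdio.h>

/* sign bit of the result for two zero inputs, given input sign
 * bits xs and ys (1 = negative) */
static int max_zero_sign(int xs, int ys) { return xs & ys; }
static int min_zero_sign(int xs, int ys) { return xs | ys; }

int main(void)
{
        /* MAX(+0, -0) = +0; only MAX(-0, -0) stays -0 */
        printf("%d %d\n", max_zero_sign(0, 1), max_zero_sign(1, 1));
        /* MIN(+0, -0) = -0; only MIN(+0, +0) stays +0 */
        printf("%d %d\n", min_zero_sign(0, 1), min_zero_sign(0, 0));
        return 0;
}
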
diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c

@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
else if (xs < ys)
return y;
/* Compare exponent */
if (xe > ye)
return y;
else if (xe < ye)
return x;
/* Signs of inputs are the same, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return y;
else if (xe < ye)
return x;
} else {
/* Inputs are both negative */
if (xe > ye)
return x;
else if (xe < ye)
return y;
}
/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
return y;
return x;
}
union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754dp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
/*
* Infinity and zero handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754dp_inf(xs | ys);
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
return y;
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
return y;
return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754dp_zero(1);
return ieee754dp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
DPDNORMX;
@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
return x;
/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return x;
else if (xm > ym)
return y;
else if (xs == 1)
return x;
return y;
}

diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c

@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
return ys ? x : y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return x;
/* Compare exponent */
if (xe > ye)
return x;
else if (xe < ye)
return y;
/* Signs of inputs are equal, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return x;
else if (xe < ye)
return y;
} else {
/* Inputs are both negative */
if (xe > ye)
return y;
else if (xe < ye)
return x;
}
/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return y;
return x;
return x;
return y;
}
union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754sp_inf(xs & ys);
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs & ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
return y;
/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return y;
return x;
else if (xm > ym)
return x;
else if (xs == 0)
return x;
return y;
}

diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c

@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
return ys ? y : x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
else if (xs < ys)
return y;
/* Compare exponent */
if (xe > ye)
return y;
else if (xe < ye)
return x;
/* Signs of inputs are the same, let's compare exponents */
if (xs == 0) {
/* Inputs are both positive */
if (xe > ye)
return y;
else if (xe < ye)
return x;
} else {
/* Inputs are both negative */
if (xe > ye)
return x;
else if (xe < ye)
return y;
}
/* Compare mantissa */
/* Signs and exponents of inputs are equal, let's compare mantissas */
if (xs == 0) {
/* Inputs are both positive, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
}
/* Inputs are both negative, with equal signs and exponents */
if (xm <= ym)
return x;
return y;
return y;
return x;
}
union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
return ieee754sp_nanxcpt(x);
/* numbers are preferred to NaNs */
/*
* Quiet NaN handling
*/
/*
* The case of both inputs quiet NaNs
*/
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
return x;
/*
* The cases of exactly one input quiet NaN (numbers
* are here preferred as returned values to NaNs)
*/
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
return x;
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
/*
* Infinity and zero handling
*/
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
return ieee754sp_inf(xs | ys);
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
return x;
return y;
case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
return y;
return x;
case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
if (xs == ys)
return x;
return ieee754sp_zero(1);
return ieee754sp_zero(xs | ys);
case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
SPDNORMX;
@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
return x;
/* Compare mantissa */
if (xm <= ym)
if (xm < ym)
return x;
else if (xm > ym)
return y;
else if (xs == 1)
return x;
return y;
}

diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c

@ -236,6 +236,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
#define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
#define __get_user_or_set_dar(_regs, _dest, _addr) \
({ \
int rc = 0; \
typeof(_addr) __addr = (_addr); \
if (__get_user_inatomic(_dest, __addr)) { \
_regs->dar = (unsigned long)__addr; \
rc = -EFAULT; \
} \
rc; \
})
#define __put_user_or_set_dar(_regs, _src, _addr) \
({ \
int rc = 0; \
typeof(_addr) __addr = (_addr); \
if (__put_user_inatomic(_src, __addr)) { \
_regs->dar = (unsigned long)__addr; \
rc = -EFAULT; \
} \
rc; \
})
static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int nb,
unsigned int flags, unsigned int instr,
@ -264,9 +286,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
} else {
unsigned long pc = regs->nip ^ (swiz & 4);
if (__get_user_inatomic(instr,
(unsigned int __user *)pc))
if (__get_user_or_set_dar(regs, instr,
(unsigned int __user *)pc))
return -EFAULT;
if (swiz == 0 && (flags & SW))
instr = cpu_to_le32(instr);
nb = (instr >> 11) & 0x1f;
@ -310,31 +333,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
((nb0 + 3) / 4) * sizeof(unsigned long));
for (i = 0; i < nb; ++i, ++p)
if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
if (__get_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p)))
if (__get_user_or_set_dar(regs,
REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}
} else {
for (i = 0; i < nb; ++i, ++p)
if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
if (nb0 > 0) {
rptr = &regs->gpr[0];
addr += nb;
for (i = 0; i < nb0; ++i, ++p)
if (__put_user_inatomic(REG_BYTE(rptr,
i ^ bswiz),
SWIZ_PTR(p)))
if (__put_user_or_set_dar(regs,
REG_BYTE(rptr, i ^ bswiz),
SWIZ_PTR(p)))
return -EFAULT;
}
}
@ -346,29 +369,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
* Only POWER6 has these instructions, and it does true little-endian,
* so we don't need the address swizzling.
*/
static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
unsigned int flags)
static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
unsigned int reg, unsigned int flags)
{
char *ptr0 = (char *) &current->thread.TS_FPR(reg);
char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
int i, ret, sw = 0;
int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: FRS/FRT must be even */
if (flags & SW)
sw = 7;
ret = 0;
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
ret |= __get_user(ptr0[i^sw], addr + i);
ret |= __get_user(ptr1[i^sw], addr + i + 8);
if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
} else {
ret |= __put_user(ptr0[i^sw], addr + i);
ret |= __put_user(ptr1[i^sw], addr + i + 8);
if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
}
}
if (ret)
return -EFAULT;
return 1; /* exception handled and fixed up */
}
@ -378,24 +404,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
{
char *ptr0 = (char *)&regs->gpr[reg];
char *ptr1 = (char *)&regs->gpr[reg+1];
int i, ret, sw = 0;
int i, sw = 0;
if (reg & 1)
return 0; /* invalid form: GPR must be even */
if (flags & SW)
sw = 7;
ret = 0;
for (i = 0; i < 8; ++i) {
if (!(flags & ST)) {
ret |= __get_user(ptr0[i^sw], addr + i);
ret |= __get_user(ptr1[i^sw], addr + i + 8);
if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
} else {
ret |= __put_user(ptr0[i^sw], addr + i);
ret |= __put_user(ptr1[i^sw], addr + i + 8);
if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
return -EFAULT;
if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
return -EFAULT;
}
}
if (ret)
return -EFAULT;
return 1; /* exception handled and fixed up */
}
#endif /* CONFIG_PPC64 */
@ -688,9 +717,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
for (j = 0; j < length; j += elsize) {
for (i = 0; i < elsize; ++i) {
if (flags & ST)
ret |= __put_user(ptr[i^sw], addr + i);
ret = __put_user_or_set_dar(regs, ptr[i^sw],
addr + i);
else
ret |= __get_user(ptr[i^sw], addr + i);
ret = __get_user_or_set_dar(regs, ptr[i^sw],
addr + i);
if (ret)
return ret;
}
ptr += elsize;
#ifdef __LITTLE_ENDIAN__
@ -740,7 +774,7 @@ int fix_alignment(struct pt_regs *regs)
unsigned int dsisr;
unsigned char __user *addr;
unsigned long p, swiz;
int ret, i;
int i;
union data {
u64 ll;
double dd;
@ -923,7 +957,7 @@ int fix_alignment(struct pt_regs *regs)
if (flags & F) {
/* Special case for 16-byte FP loads and stores */
PPC_WARN_ALIGNMENT(fp_pair, regs);
return emulate_fp_pair(addr, reg, flags);
return emulate_fp_pair(regs, addr, reg, flags);
} else {
#ifdef CONFIG_PPC64
/* Special case for 16-byte loads and stores */
@ -953,15 +987,12 @@ int fix_alignment(struct pt_regs *regs)
}
data.ll = 0;
ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
ret |= __get_user_inatomic(data.v[start + i],
SWIZ_PTR(p++));
if (unlikely(ret))
return -EFAULT;
if (__get_user_or_set_dar(regs, data.v[start + i],
SWIZ_PTR(p++)))
return -EFAULT;
} else if (flags & F) {
data.ll = current->thread.TS_FPR(reg);
@ -1031,15 +1062,13 @@ int fix_alignment(struct pt_regs *regs)
break;
}
ret = 0;
p = (unsigned long)addr;
for (i = 0; i < nb; i++)
ret |= __put_user_inatomic(data.v[start + i],
SWIZ_PTR(p++));
if (__put_user_or_set_dar(regs, data.v[start + i],
SWIZ_PTR(p++)))
return -EFAULT;
if (unlikely(ret))
return -EFAULT;
} else if (flags & F)
current->thread.TS_FPR(reg) = data.ll;
else

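The __get_user_or_set_dar()/__put_user_or_set_dar() helpers introduced
above share one idea: do the access, and on a fault record the offending
user address in regs->dar before failing, so the reported DAR matches the
address the emulated instruction actually touched. A minimal sketch of the
same statement-expression pattern outside the kernel (copy_one() is a
hypothetical stand-in for __get_user_inatomic()):

#include <errno.h>

int copy_one(unsigned char *dest, const unsigned char *addr); /* hypothetical */

#define get_or_record_dar(dest, addr, dar_out)          \
({                                                      \
        int __rc = 0;                                   \
        const unsigned char *__addr = (addr);           \
        if (copy_one(&(dest), __addr)) {                \
                (dar_out) = (unsigned long)__addr;      \
                __rc = -EFAULT;                         \
        }                                               \
        __rc;                                           \
})
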
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h

@ -204,6 +204,7 @@ void set_personality_ia32(bool);
#define ELF_CORE_COPY_REGS(pr_reg, regs) \
do { \
unsigned long base; \
unsigned v; \
(pr_reg)[0] = (regs)->r15; \
(pr_reg)[1] = (regs)->r14; \
@ -226,8 +227,8 @@ do { \
(pr_reg)[18] = (regs)->flags; \
(pr_reg)[19] = (regs)->sp; \
(pr_reg)[20] = (regs)->ss; \
(pr_reg)[21] = current->thread.fs; \
(pr_reg)[22] = current->thread.gs; \
rdmsrl(MSR_FS_BASE, base); (pr_reg)[21] = base; \
rdmsrl(MSR_KERNEL_GS_BASE, base); (pr_reg)[22] = base; \
asm("movl %%ds,%0" : "=r" (v)); (pr_reg)[23] = v; \
asm("movl %%es,%0" : "=r" (v)); (pr_reg)[24] = v; \
asm("movl %%fs,%0" : "=r" (v)); (pr_reg)[25] = v; \

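Why the MSR reads: on x86-64 the user-mode FS/GS base values are not held
in thread.fs/thread.gs at dump time; in particular, after the kernel-entry
swapgs the user's GS base sits in MSR_KERNEL_GS_BASE. A sketch of the two
new reads in isolation (same rdmsrl() calls as in the hunk, shown outside
the macro for readability):

unsigned long fsbase, gsbase;

rdmsrl(MSR_FS_BASE, fsbase);            /* user FS base */
rdmsrl(MSR_KERNEL_GS_BASE, gsbase);     /* user GS base, swapped out on entry */
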
diff --git a/block/blk-core.c b/block/blk-core.c

@ -235,7 +235,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
WARN_ON(!irqs_disabled());
WARN_ON(!in_interrupt() && !irqs_disabled());
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
__blk_run_queue(q);

diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c

@ -143,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
if (sg)
if (sg) {
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
}
list_add_tail(&sgl->list, &ctx->tsgl);
}

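The added sg_unmark_end() is the key fix: sg_init_table() terminates a
table at its last slot, and once that table is chained onward a stale end
marker would stop a scatterlist walk before it reaches the chain link. A
short sketch of the pattern, assuming <linux/scatterlist.h> and the
driver's MAX_SGL_ENTS bound:

/* prev: an earlier table, already initialized and filled */
struct scatterlist prev[MAX_SGL_ENTS + 1], next[MAX_SGL_ENTS + 1];

sg_init_table(next, MAX_SGL_ENTS + 1);
sg_chain(prev, MAX_SGL_ENTS + 1, next);    /* last slot of prev becomes a chain link */
sg_unmark_end(prev + (MAX_SGL_ENTS - 1));  /* clear the stale end marker on the last data entry */
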
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c

@ -2214,6 +2214,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
/* Make sure skd_msg_buf is written before the doorbell is triggered. */
smp_wmb();
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@ -2260,6 +2263,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
/* Make sure skd_msg_buf is written before the doorbell is triggered. */
smp_wmb();
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@ -4679,15 +4685,16 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
if (disk != NULL) {
struct request_queue *q = disk->queue;
if (disk && (disk->flags & GENHD_FL_UP))
del_gendisk(disk);
if (disk->flags & GENHD_FL_UP)
del_gendisk(disk);
if (q)
blk_cleanup_queue(q);
put_disk(disk);
if (skdev->queue) {
blk_cleanup_queue(skdev->queue);
skdev->queue = NULL;
disk->queue = NULL;
}
put_disk(disk);
skdev->disk = NULL;
}

diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h

@ -904,6 +904,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
},
},
{
/* Gigabyte P57 - Elantech touchpad */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
},
},
{
/* Schenker XMG C504 - Elantech touchpad */
.matches = {

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h

@ -333,6 +333,7 @@ struct cached_dev {
/* Limit number of writeback bios in flight */
struct semaphore in_flight;
struct task_struct *writeback_thread;
struct workqueue_struct *writeback_write_wq;
struct keybuf writeback_keys;

diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c

@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
if (op->bypass)
return bch_data_invalidate(cl);
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
wake_up_gc(op->c);
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c

@ -1023,7 +1023,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
}
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
bch_sectors_dirty_init(dc);
bch_sectors_dirty_init(&dc->disk);
atomic_set(&dc->has_dirty, 1);
atomic_inc(&dc->count);
bch_writeback_queue(dc);
@ -1056,6 +1056,8 @@ static void cached_dev_free(struct closure *cl)
cancel_delayed_work_sync(&dc->writeback_rate_update);
if (!IS_ERR_OR_NULL(dc->writeback_thread))
kthread_stop(dc->writeback_thread);
if (dc->writeback_write_wq)
destroy_workqueue(dc->writeback_write_wq);
mutex_lock(&bch_register_lock);
@ -1227,6 +1229,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
goto err;
bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
add_disk(d->disk);
@ -1959,6 +1962,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
if (!IS_ERR(bdev))
bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto out;
}

diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c

@ -191,7 +191,7 @@ STORE(__cached_dev)
{
struct cached_dev *dc = container_of(kobj, struct cached_dev,
disk.kobj);
unsigned v = size;
ssize_t v = size;
struct cache_set *c;
struct kobj_uevent_env *env;
@ -226,7 +226,7 @@ STORE(__cached_dev)
bch_cached_dev_run(dc);
if (attr == &sysfs_cache_mode) {
ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
v = bch_read_string_list(buf, bch_cache_modes + 1);
if (v < 0)
return v;

diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c

@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
/**
* bch_hprint() - formats @v to human readable string for sysfs.
*
* @v - signed 64 bit integer
* @buf - the (at least 8 byte) buffer to format the result into.
*
* Returns the number of bytes used by format.
*/
ssize_t bch_hprint(char *buf, int64_t v)
{
static const char units[] = "?kMGTPEZY";
char dec[4] = "";
int u, t = 0;
int u = 0, t;
for (u = 0; v >= 1024 || v <= -1024; u++) {
t = v & ~(~0 << 10);
v >>= 10;
}
uint64_t q;
if (!u)
return sprintf(buf, "%llu", v);
if (v < 0)
q = -v;
else
q = v;
if (v < 100 && v > -100)
snprintf(dec, sizeof(dec), ".%i", t / 100);
/* For as long as the number is more than 3 digits, but at least
* once, shift right / divide by 1024. Keep the remainder for
* a digit after the decimal point.
*/
do {
u++;
return sprintf(buf, "%lli%s%c", v, dec, units[u]);
t = q & ~(~0 << 10);
q >>= 10;
} while (q >= 1000);
if (v < 0)
/* '-', up to 3 digits, '.', 1 digit, 1 character, null;
* yields 8 bytes.
*/
return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
else
return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
}
ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],

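Worked examples for the rewritten bch_hprint() (worked by hand from the
new code, not part of the patch): v = 2048 gives q = 2, t = 0 and prints
"2.0k"; v = -1536 gives q = 1, t = 512 (512 * 10 / 1024 = 5) and prints
"-1.5k". A self-contained sketch of the same rounding:

#include <stdio.h>

int main(void)
{
        long long v = -1536;
        unsigned long long q = v < 0 ? -v : v, t = 0;
        int u = 0;

        do {    /* shift by 1024 until at most 3 digits remain */
                u++;
                t = q & 1023;
                q >>= 10;
        } while (q >= 1000);
        printf("%s%llu.%llu%c\n", v < 0 ? "-" : "", q, t * 10 / 1024,
               "?kMGTPEZY"[u]);  /* prints -1.5k */
        return 0;
}
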
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c

@ -21,7 +21,8 @@
static void __update_writeback_rate(struct cached_dev *dc)
{
struct cache_set *c = dc->disk.c;
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
bcache_flash_devs_sectors_dirty(c);
uint64_t cache_dirty_target =
div_u64(cache_sectors * dc->writeback_percent, 100);
@ -190,7 +191,7 @@ static void write_dirty(struct closure *cl)
closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty_finish, system_wq);
continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}
static void read_dirty_endio(struct bio *bio)
@ -210,7 +211,7 @@ static void read_dirty_submit(struct closure *cl)
closure_bio_submit(&io->bio, cl);
continue_at(cl, write_dirty, system_wq);
continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}
static void read_dirty(struct cached_dev *dc)
@ -488,17 +489,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
return MAP_CONTINUE;
}
void bch_sectors_dirty_init(struct cached_dev *dc)
void bch_sectors_dirty_init(struct bcache_device *d)
{
struct sectors_dirty_init op;
bch_btree_op_init(&op.op, -1);
op.inode = dc->disk.id;
op.inode = d->id;
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
}
void bch_cached_dev_writeback_init(struct cached_dev *dc)
@ -522,6 +523,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
WQ_MEM_RECLAIM, 0);
if (!dc->writeback_write_wq)
return -ENOMEM;
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))

diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h

@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
return ret;
}
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
uint64_t i, ret = 0;
mutex_lock(&bch_register_lock);
for (i = 0; i < c->nr_uuids; i++) {
struct bcache_device *d = c->devices[i];
if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
continue;
ret += bcache_dev_sectors_dirty(d);
}
mutex_unlock(&bch_register_lock);
return ret;
}
static inline unsigned offset_to_stripe(struct bcache_device *d,
uint64_t offset)
{
@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c

@ -1960,6 +1960,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
long pages;
struct bitmap_page *new_bp;
if (bitmap->storage.file && !init) {
pr_info("md: cannot resize file-based bitmap\n");
return -EINVAL;
}
if (chunksize == 0) {
/* If there is enough space, leave the chunk size unchanged,
* else increase by factor of two until there is enough space.

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c

@ -5822,6 +5822,8 @@ static void raid5_do_work(struct work_struct *work)
spin_unlock_irq(&conf->device_lock);
r5l_flush_stripe_to_raid(conf->log);
async_tx_issue_pending_all();
blk_finish_plug(&plug);

diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c

@ -2001,6 +2001,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
goto done;
}
/* Validate the user-provided bit-size and offset */
if (mapping->size > 32 ||
mapping->offset + mapping->size > ctrl->info.size * 8) {
ret = -EINVAL;
goto done;
}
list_for_each_entry(map, &ctrl->info.mappings, list) {
if (mapping->id == map->id) {
uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "

diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c

@ -773,7 +773,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
put_user(kp->pending, &up->pending) ||
put_user(kp->sequence, &up->sequence) ||
compat_put_timespec(&kp->timestamp, &up->timestamp) ||
put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
put_user(kp->id, &up->id) ||
copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
return -EFAULT;

diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c

@ -3676,7 +3676,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
u32 tx_flow_oldval = (tempval & MACCFG1_TX_FLOW);
u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);
if (phydev->duplex != priv->oldduplex) {
if (!(phydev->duplex))

diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c

@ -724,7 +724,7 @@ static void ql_build_coredump_seg_header(
seg_hdr->cookie = MPI_COREDUMP_COOKIE;
seg_hdr->segNum = seg_number;
seg_hdr->segSize = seg_size;
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
/*

diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c

@ -541,9 +541,6 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
/* Now we can run the state machine synchronously */
phy_state_machine(&phydev->state_queue.work);
}
/**

diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c

@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
if (rc) {
ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
} else {
pci_set_master(pdev);
}
rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,

diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c

@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
* Copyright IBM Corp. 2002, 2016
* Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
struct scatterlist *resp_entry = ct_els->resp;
struct fc_ct_hdr *resph;
struct fc_gpn_ft_resp *acc;
int max_entries, x, last = 0;
@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
return len; /* not GPN_FT response so do not cap */
acc = sg_virt(resp_entry);
/* cap all but accept CT responses to at least the CT header */
resph = (struct fc_ct_hdr *)acc;
if ((ct_els->status) ||
(resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
* to account for header as 1st pseudo "entry" */;
@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
rec->scsi_retries = sc->retries;
rec->scsi_allowed = sc->allowed;
rec->scsi_id = sc->device->id;
/* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
rec->scsi_lun = (u32)sc->device->lun;
rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
rec->host_scribble = (unsigned long)sc->host_scribble;
memcpy(rec->scsi_opcode, sc->cmnd,
@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
if (fsf) {
rec->fsf_req_id = fsf->req_id;
rec->pl_len = FCP_RESP_WITH_EXT;
fcp_rsp = (struct fcp_resp_with_ext *)
&(fsf->qtcb->bottom.io.fcp_rsp);
/* mandatory parts of FCP_RSP IU in this SCSI record */
memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
}
if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
(u16)ZFCP_DBF_PAY_MAX_REC);
zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
"fcp_sns", fsf->req_id);
rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
}
/* complete FCP_RSP IU in associated PAYload record
* but only if there are optional parts
*/
if (fcp_rsp->resp.fr_flags != 0)
zfcp_dbf_pl_write(
dbf, fcp_rsp,
/* at least one full PAY record
* but not beyond hardware response field
*/
min_t(u16, max_t(u16, rec->pl_len,
ZFCP_DBF_PAY_MAX_REC),
FSF_FCP_RSP_SIZE),
"fcp_riu", fsf->req_id);
}
debug_event(dbf->scsi, level, rec, sizeof(*rec));

diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h

@ -2,7 +2,7 @@
* zfcp device driver
* debug feature declarations
*
* Copyright IBM Corp. 2008, 2016
* Copyright IBM Corp. 2008, 2017
*/
#ifndef ZFCP_DBF_H
@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
* @id: unique number of recovery record type
* @tag: identifier string specifying the location of initiation
* @scsi_id: scsi device id
* @scsi_lun: scsi device logical unit number
* @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
* @scsi_result: scsi result
* @scsi_retries: current retry number of scsi request
* @scsi_allowed: allowed retries
@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
* @host_scribble: LLD specific data attached to SCSI request
* @pl_len: length of payload stored as zfcp_dbf_pay
* @fsf_rsp: response for fsf request
* @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
*/
struct zfcp_dbf_scsi {
u8 id;
@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
u64 host_scribble;
u16 pl_len;
struct fcp_resp_with_ext fcp_rsp;
u32 scsi_lun_64_hi;
} __packed;
/**
@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
{
struct fsf_qtcb *qtcb = req->qtcb;
if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
ZFCP_STATUS_FSFREQ_ERROR))) {
zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
} else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
(qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
* @flag: indicates type of reset (Target Reset, Logical Unit Reset)
*/
static inline
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
struct zfcp_fsf_req *fsf_req)
{
char tmp_tag[ZFCP_DBF_TAG_LEN];
@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
memcpy(tmp_tag, "lr_", 3);
memcpy(&tmp_tag[3], tag, 4);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
_zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
}
/**

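With the new scsi_lun_64_hi field, the full 64-bit LUN can be recombined
from a trace record; a one-line sketch using the struct zfcp_dbf_scsi
field names above:

u64 lun = ((u64)rec->scsi_lun_64_hi << 32) | rec->scsi_lun;
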
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h

@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
* Copyright IBM Corp. 2009
* Copyright IBM Corp. 2009, 2017
*/
#ifndef ZFCP_FC_H
@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
!(rsp_flags & FCP_SNS_LEN_VAL) &&
fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
} else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
/* FCP_DL was not sufficient for SCSI data length */
if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
set_host_byte(scsi, DID_ERROR);
}
}

diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c

@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
zfcp_dbf_san_res("fsscth2", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status) {
case FSF_GOOD:
zfcp_dbf_san_res("fsselh1", req);
send_els->status = 0;
zfcp_dbf_san_res("fsselh1", req);
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
zfcp_fsf_class_not_supp(req);
@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
if (scsi_prot_sg_count(scsi_cmnd)) {
if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
scsi_prot_sg_count(scsi_cmnd)) {
zfcp_qdio_set_data_div(qdio, &req->qdio_req,
scsi_prot_sg_count(scsi_cmnd));
retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,

diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c

@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
* Copyright IBM Corp. 2002, 2016
* Copyright IBM Corp. 2002, 2017
*/
#define KMSG_COMPONENT "zfcp"
@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret)
if (ret) {
zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
return ret;
}
if (!(atomic_read(&adapter->status) &
ZFCP_STATUS_COMMON_RUNNING)) {
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
return SUCCESS;
}
}
if (!fsf_req)
if (!fsf_req) {
zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
return FAILED;
}
wait_for_completion(&fsf_req->completion);
if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
retval = FAILED;
} else {
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
}

diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c

@ -1824,9 +1824,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
if (cmd_mfi->sync_cmd &&
cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
(cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
cmd_mfi->frame->hdr.cmd_status =
MFI_STAT_WRONG_STATE;
megasas_complete_cmd(instance,
cmd_mfi, DID_OK);
}
}
}
} else {
@ -5094,6 +5097,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
prev_aen.word =
le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
(curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
dev_info(&instance->pdev->dev,
"%s %d out of range class %d send by application\n",
__func__, __LINE__, curr_aen.members.class);
return 0;
}
/*
* A class whose enum value is smaller is inclusive of all
* higher values. If a PROGRESS (= -1) was previously

diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c

@ -404,6 +404,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (start > ha->optrom_size)
return -EINVAL;
if (size > ha->optrom_size - start)
size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
switch (val) {
@ -429,8 +431,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
ha->optrom_size - start : size;
ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
@ -503,8 +504,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
ha->optrom_region_start = start;
ha->optrom_region_size = start + size > ha->optrom_size ?
ha->optrom_size - start : size;
ha->optrom_region_size = start + size;
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);

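The early clamp closes an unsigned-wraparound hole: the old bound test
added start and size first, so a huge size could wrap and pass. Worked
example with 32-bit arithmetic and an assumed 2 MiB option ROM
(optrom_size = 0x200000), illustrative values only:

/* old test: start = 0x100000, size = 0xfff00000
 *   start + size == 0 (mod 2^32), not > 0x200000, so the huge
 *   size survived and sized the vmalloc() below
 * new test, no addition before clamping:
 *   size > ha->optrom_size - start   ->  0xfff00000 > 0x100000
 *   size = ha->optrom_size - start   ->  size = 0x100000
 */
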
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c

@ -133,7 +133,7 @@ struct sg_device; /* forward declarations */
struct sg_fd;
typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
struct sg_request *nextrp; /* NULL -> tail request (slist) */
struct list_head entry; /* list entry */
struct sg_fd *parentfp; /* NULL -> not in use */
Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
@ -157,8 +157,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
unsigned save_scat_len; /* original length of trunc. scat. element */
Sg_request *headrp; /* head of request slist, NULL->empty */
struct list_head rq_list; /* head of request list */
struct fasync_struct *async_qp; /* used by asynchronous notification */
Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
char low_dma; /* as in parent but possibly overridden to 1 */
@ -840,6 +839,39 @@ static int max_sectors_bytes(struct request_queue *q)
return max_sectors << 9;
}
static void
sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
{
Sg_request *srp;
int val;
unsigned int ms;
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if (val > SG_MAX_QUEUE)
break;
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned = srp->sg_io_owned;
rinfo[val].pack_id = srp->header.pack_id;
rinfo[val].usr_ptr = srp->header.usr_ptr;
val++;
}
}
static long
sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
{
@ -951,7 +983,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
return -EFAULT;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned)) {
read_unlock_irqrestore(&sfp->rq_list_lock,
iflags);
@ -964,7 +996,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return 0;
case SG_GET_NUM_WAITING:
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
val = 0;
list_for_each_entry(srp, &sfp->rq_list, entry) {
if ((1 == srp->done) && (!srp->sg_io_owned))
++val;
}
@ -1032,42 +1065,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -EFAULT;
else {
sg_req_info_t *rinfo;
unsigned int ms;
rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
GFP_KERNEL);
if (!rinfo)
return -ENOMEM;
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
++val, srp = srp ? srp->nextrp : srp) {
memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
if (srp) {
rinfo[val].req_state = srp->done + 1;
rinfo[val].problem =
srp->header.masked_status &
srp->header.host_status &
srp->header.driver_status;
if (srp->done)
rinfo[val].duration =
srp->header.duration;
else {
ms = jiffies_to_msecs(jiffies);
rinfo[val].duration =
(ms > srp->header.duration) ?
(ms - srp->header.duration) : 0;
}
rinfo[val].orphan = srp->orphan;
rinfo[val].sg_io_owned =
srp->sg_io_owned;
rinfo[val].pack_id =
srp->header.pack_id;
rinfo[val].usr_ptr =
srp->header.usr_ptr;
}
}
sg_fill_request_table(sfp, rinfo);
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
result = __copy_to_user(p, rinfo,
SZ_SG_REQ_INFO * SG_MAX_QUEUE);
result = result ? -EFAULT : 0;
kfree(rinfo);
@ -1173,7 +1179,7 @@ sg_poll(struct file *filp, poll_table * wait)
return POLLERR;
poll_wait(filp, &sfp->read_wait, wait);
read_lock_irqsave(&sfp->rq_list_lock, iflags);
for (srp = sfp->headrp; srp; srp = srp->nextrp) {
list_for_each_entry(srp, &sfp->rq_list, entry) {
/* if any read waiting, flag it */
if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
res = POLLIN | POLLRDNORM;
@ -2059,7 +2065,6 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
req_schp->pages = NULL;
req_schp->page_order = 0;
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
/* Called without mutex lock to avoid deadlock */
sfp->res_in_use = 0;
@ -2072,7 +2077,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
unsigned long iflags;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
for (resp = sfp->headrp; resp; resp = resp->nextrp) {
list_for_each_entry(resp, &sfp->rq_list, entry) {
/* look for requests that are ready + not SG_IO owned */
if ((1 == resp->done) && (!resp->sg_io_owned) &&
((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
@ -2090,70 +2095,45 @@ sg_add_request(Sg_fd * sfp)
{
int k;
unsigned long iflags;
Sg_request *resp;
Sg_request *rp = sfp->req_arr;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
resp = sfp->headrp;
if (!resp) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
resp = rp;
sfp->headrp = resp;
} else {
if (0 == sfp->cmd_q)
resp = NULL; /* command queuing disallowed */
else {
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k < SG_MAX_QUEUE) {
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
while (resp->nextrp)
resp = resp->nextrp;
resp->nextrp = rp;
resp = rp;
} else
resp = NULL;
if (!list_empty(&sfp->rq_list)) {
if (!sfp->cmd_q)
goto out_unlock;
for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
if (!rp->parentfp)
break;
}
if (k >= SG_MAX_QUEUE)
goto out_unlock;
}
if (resp) {
resp->nextrp = NULL;
resp->header.duration = jiffies_to_msecs(jiffies);
}
memset(rp, 0, sizeof (Sg_request));
rp->parentfp = sfp;
rp->header.duration = jiffies_to_msecs(jiffies);
list_add_tail(&rp->entry, &sfp->rq_list);
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return resp;
return rp;
out_unlock:
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return NULL;
}
/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
Sg_request *prev_rp;
Sg_request *rp;
unsigned long iflags;
int res = 0;
if ((!sfp) || (!srp) || (!sfp->headrp))
if (!sfp || !srp || list_empty(&sfp->rq_list))
return res;
write_lock_irqsave(&sfp->rq_list_lock, iflags);
prev_rp = sfp->headrp;
if (srp == prev_rp) {
sfp->headrp = prev_rp->nextrp;
prev_rp->parentfp = NULL;
if (!list_empty(&srp->entry)) {
list_del(&srp->entry);
srp->parentfp = NULL;
res = 1;
} else {
while ((rp = prev_rp->nextrp)) {
if (srp == rp) {
prev_rp->nextrp = rp->nextrp;
rp->parentfp = NULL;
res = 1;
break;
}
prev_rp = rp;
}
}
write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
return res;
@ -2172,7 +2152,7 @@ sg_add_sfp(Sg_device * sdp)
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
INIT_LIST_HEAD(&sfp->rq_list);
kref_init(&sfp->f_ref);
mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
@ -2213,10 +2193,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
{
struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
struct sg_device *sdp = sfp->parentdp;
Sg_request *srp;
/* Cleanup any responses which were never read(). */
while (sfp->headrp)
sg_finish_rem_req(sfp->headrp);
while (!list_empty(&sfp->rq_list)) {
srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
sg_finish_rem_req(srp);
}
if (sfp->reserve.bufflen > 0) {
SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
@@ -2619,7 +2602,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
int k, new_interface, blen, usg;
Sg_request *srp;
Sg_fd *fp;
const sg_io_hdr_t *hp;
@@ -2639,13 +2622,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
(int) fp->cmd_q, (int) fp->force_packid,
(int) fp->keep_orphan);
for (m = 0, srp = fp->headrp;
srp != NULL;
++m, srp = srp->nextrp) {
list_for_each_entry(srp, &fp->rq_list, entry) {
hp = &srp->header;
new_interface = (hp->interface_id == '\0') ? 0 : 1;
if (srp->res_used) {
if (new_interface &&
(SG_FLAG_MMAP_IO & hp->flags))
cp = " mmap>> ";
else
@@ -2676,7 +2657,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
(int) srp->data.cmd_opcode);
}
if (0 == m)
if (list_empty(&fp->rq_list))
seq_puts(s, " No requests active\n");
read_unlock(&fp->rq_list_lock);
}
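
The sg conversion above swaps the driver's hand-rolled headrp/nextrp chain for the kernel's intrusive list API (INIT_LIST_HEAD, list_add_tail, list_del, list_for_each_entry, list_empty). For readers unfamiliar with the pattern, here is a minimal user-space sketch of the same idea; the helpers below are illustrative stand-ins for <linux/list.h>, not the kernel implementation:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);	/* like list_del_init(): the node stays self-consistent */
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct request { int id; struct list_head entry; };

int main(void)
{
	struct list_head rq_list;
	struct request a = { .id = 1 }, b = { .id = 2 };
	struct list_head *pos;

	list_init(&rq_list);
	list_add_tail(&a.entry, &rq_list);	/* sg_add_request() analogue */
	list_add_tail(&b.entry, &rq_list);
	list_del(&a.entry);			/* sg_remove_request() analogue */

	if (!list_empty(&rq_list))
		for (pos = rq_list.next; pos != &rq_list; pos = pos->next)
			printf("id=%d\n",
			       container_of(pos, struct request, entry)->id);
	return 0;				/* prints only id=2 */
}

Because the link field lives inside the element, no per-node allocation is needed and unlinking is O(1) once you hold the element, which is what lets sg_remove_request() collapse to a list_del().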


@@ -1511,6 +1511,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
ret = storvsc_do_io(dev, cmd_request);
if (ret == -EAGAIN) {
if (payload_sz > sizeof(cmd_request->mpb))
kfree(payload);
/* no more space */
return SCSI_MLQUEUE_DEVICE_BUSY;
}
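
The two added lines make the -EAGAIN path of storvsc_queuecommand() mirror its allocation logic: the payload descriptor is embedded in the request unless it was too large and had to be allocated separately, so only that case may free it. A rough user-space sketch of the idiom, with all names invented:

#include <stdlib.h>
#include <string.h>

struct req { char inline_buf[64]; };

static int send_payload(struct req *r, size_t payload_sz)
{
	char *payload = r->inline_buf;

	if (payload_sz > sizeof(r->inline_buf)) {
		payload = malloc(payload_sz);	/* large: separate allocation */
		if (!payload)
			return -1;
	}
	memset(payload, 0, payload_sz);		/* build the request */

	/* pretend the transport reported "no more space" (-EAGAIN)... */
	if (payload_sz > sizeof(r->inline_buf))	/* ...so free only if heap-allocated */
		free(payload);
	return -1;				/* caller requeues later */
}

int main(void)
{
	struct req r;

	send_payload(&r, 16);	/* inline buffer: nothing to free */
	send_payload(&r, 4096);	/* heap buffer: freed on the busy path */
	return 0;
}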


@@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
* __tty_insert_flip_char - Add one character to the tty buffer
* @port: tty port
* @ch: character
* @flag: flag byte
*
* Queue a single byte to the tty buffering, with an optional flag.
* This is the slow path of tty_insert_flip_char.
*/
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
struct tty_buffer *tb;
int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
if (!__tty_buffer_request_room(port, 1, flags))
return 0;
tb = port->buf.tail;
if (~tb->flags & TTYB_NORMAL)
*flag_buf_ptr(tb, tb->used) = flag;
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
EXPORT_SYMBOL(__tty_insert_flip_char);
/**
* tty_schedule_flip - push characters to ldisc
* @port: tty port to push from
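
This out-of-line slow path pairs with the inline fast path of tty_insert_flip_char() (see the header hunk later in this merge): the common case, where the tail buffer already has room, stays inline, and only the buffer-growing case costs a function call. A compilable user-space sketch of that split, using an invented buffer type:

#include <stdlib.h>

struct flip_buf { unsigned char *data; size_t used, size; };

/* slow path: out of line, may allocate; returns chars stored (0 or 1) */
static int slow_insert(struct flip_buf *b, unsigned char ch)
{
	size_t nsize = b->size ? b->size * 2 : 64;
	unsigned char *ndata = realloc(b->data, nsize);

	if (!ndata)
		return 0;
	b->data = ndata;
	b->size = nsize;
	b->data[b->used++] = ch;
	return 1;
}

/* fast path: inline, touches only fields that are already hot */
static inline int fast_insert(struct flip_buf *b, unsigned char ch)
{
	if (b->used < b->size) {
		b->data[b->used++] = ch;	/* common case: no call */
		return 1;
	}
	return slow_insert(b, ch);		/* rare case: must grow */
}

int main(void)
{
	struct flip_buf b = { 0 };
	int i;

	for (i = 0; i < 1000; i++)
		if (!fast_insert(&b, 'x'))
			return 1;
	free(b.data);
	return 0;
}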


@@ -2204,6 +2204,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
unsigned int s_flags = sb->s_flags;
int nr_orphans = 0, nr_truncates = 0;
#ifdef CONFIG_QUOTA
int quota_update = 0;
int i;
#endif
if (!es->s_last_orphan) {
@@ -2242,14 +2243,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
sb->s_flags |= MS_ACTIVE;
/* Turn on quotas so that they are updated correctly */
/*
* Turn on quotas that were not enabled for read-only mounts if
* the filesystem has the quota feature, so that they are updated correctly.
*/
if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
int ret = ext4_enable_quotas(sb);
if (!ret)
quota_update = 1;
else
ext4_msg(sb, KERN_ERR,
"Cannot turn on quotas: error %d", ret);
}
/* Turn on journaled quotas used for old style */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (EXT4_SB(sb)->s_qf_names[i]) {
int ret = ext4_quota_on_mount(sb, i);
if (ret < 0)
if (!ret)
quota_update = 1;
else
ext4_msg(sb, KERN_ERR,
"Cannot turn on journaled "
"quota: error %d", ret);
"quota: type %d: error %d", i, ret);
}
}
#endif
@@ -2308,10 +2327,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
PLURAL(nr_truncates));
#ifdef CONFIG_QUOTA
/* Turn quotas off */
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
dquot_quota_off(sb, i);
/* Turn off quotas if they were enabled for orphan cleanup */
if (quota_update) {
for (i = 0; i < EXT4_MAXQUOTAS; i++) {
if (sb_dqopt(sb)->files[i])
dquot_quota_off(sb, i);
}
}
#endif
sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -5123,6 +5144,9 @@ static int ext4_enable_quotas(struct super_block *sb)
err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
DQUOT_USAGE_ENABLED);
if (err) {
for (type--; type >= 0; type--)
dquot_quota_off(sb, type);
ext4_warning(sb,
"Failed to enable quota tracking "
"(type=%d, err=%d). Please run "


@@ -289,7 +289,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
return 0;
/* Get the previous summary */
for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
struct curseg_info *curseg = CURSEG_I(sbi, i);
if (curseg->segno == segno) {
sum = curseg->sum_blk->entries[blkoff];


@@ -1145,9 +1145,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
lockdep_assert_held(&oo->oo_owner.so_client->cl_lock);
lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
list_del_init(&stp->st_locks);
nfs4_unhash_stid(&stp->st_stid);
@@ -1156,12 +1154,12 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);
struct nfs4_client *clp = stp->st_stid.sc_client;
bool unhashed;
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&clp->cl_lock);
unhashed = unhash_lock_stateid(stp);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
spin_unlock(&clp->cl_lock);
if (unhashed)
nfs4_put_stid(&stp->st_stid);
}


@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
unsigned char **chars, size_t size);
extern void tty_flip_buffer_push(struct tty_port *port);
void tty_schedule_flip(struct tty_port *port);
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
unsigned char ch, char flag)
@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
return tty_insert_flip_string_flags(port, &ch, &flag, 1);
return __tty_insert_flip_char(port, ch, flag);
}
static inline int tty_insert_flip_string(struct tty_port *port,


@@ -1,14 +1,9 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
#include <linux/percpu_counter.h>
struct netns_frags {
/* The percpu_counter "mem" need to be cacheline aligned.
* mem.count must not share cacheline with other writers
*/
struct percpu_counter mem ____cacheline_aligned_in_smp;
/* Keep atomic mem on separate cachelines in structs that include it */
atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
int timeout;
int high_thresh;
@@ -108,15 +103,10 @@ struct inet_frags {
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
static inline int inet_frags_init_net(struct netns_frags *nf)
static inline void inet_frags_init_net(struct netns_frags *nf)
{
return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
atomic_set(&nf->mem, 0);
}
static inline void inet_frags_uninit_net(struct netns_frags *nf)
{
percpu_counter_destroy(&nf->mem);
}
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
@@ -140,37 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
/* Memory Tracking Functions. */
/* The default percpu_counter batch size is not big enough to scale to
* fragmentation mem acct sizes.
* The mem size of a 64K fragment is approx:
* (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
*/
static unsigned int frag_percpu_counter_batch = 130000;
static inline int frag_mem_limit(struct netns_frags *nf)
{
return percpu_counter_read(&nf->mem);
return atomic_read(&nf->mem);
}
static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
{
__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
atomic_sub(i, &nf->mem);
}
static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
{
__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
atomic_add(i, &nf->mem);
}
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
static inline int sum_frag_mem_limit(struct netns_frags *nf)
{
unsigned int res;
local_bh_disable();
res = percpu_counter_sum_positive(&nf->mem);
local_bh_enable();
return res;
return atomic_read(&nf->mem);
}
/* RFC 3168 support :
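
This header hunk is the heart of the two reverts: fragment memory accounting goes back from a batched percpu_counter to one exact atomic_t, which needs no percpu_counter_destroy() teardown (compare the inet_frags_exit_net() hunk below) at the price of contention on a single cache line. A user-space sketch of the resulting interface, with C11 atomics standing in for atomic_t:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int frag_mem;	/* stand-in for the new netns_frags.mem */

static void add_frag_mem_limit(int i) { atomic_fetch_add(&frag_mem, i); }
static void sub_frag_mem_limit(int i) { atomic_fetch_sub(&frag_mem, i); }
static int frag_mem_limit(void) { return atomic_load(&frag_mem); }

int main(void)
{
	add_frag_mem_limit(2944);	/* truesize of one fragment, per the
					 * comment removed above */
	add_frag_mem_limit(2944);
	sub_frag_mem_limit(2944);
	printf("%d\n", frag_mem_limit());	/* prints 2944, always exact */
	return 0;
}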


@@ -68,6 +68,7 @@ struct fib6_node {
__u16 fn_flags;
int fn_sernum;
struct rt6_info *rr_ptr;
struct rcu_head rcu;
};
#ifndef CONFIG_IPV6_SUBTREES
@@ -102,7 +103,7 @@ struct rt6_info {
* the same cache line.
*/
struct fib6_table *rt6i_table;
struct fib6_node *rt6i_node;
struct fib6_node __rcu *rt6i_node;
struct in6_addr rt6i_gateway;
@@ -165,13 +166,40 @@ static inline void rt6_update_expires(struct rt6_info *rt0, int timeout)
rt0->rt6i_flags |= RTF_EXPIRES;
}
/* Function to safely get fn->sernum for the passed-in rt
* and store the result in the passed-in cookie.
* Return true if the cookie was read safely,
* false if not.
*/
static inline bool rt6_get_cookie_safe(const struct rt6_info *rt,
u32 *cookie)
{
struct fib6_node *fn;
bool status = false;
rcu_read_lock();
fn = rcu_dereference(rt->rt6i_node);
if (fn) {
*cookie = fn->fn_sernum;
status = true;
}
rcu_read_unlock();
return status;
}
static inline u32 rt6_get_cookie(const struct rt6_info *rt)
{
u32 cookie = 0;
if (rt->rt6i_flags & RTF_PCPU ||
(unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
rt = (struct rt6_info *)(rt->dst.from);
return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
rt6_get_cookie_safe(rt, &cookie);
return cookie;
}
static inline void ip6_rt_put(struct rt6_info *rt)
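
rt6_get_cookie_safe() loads rt6i_node once under rcu_read_lock() and NULL-checks the result, where the old rt6_get_cookie() dereferenced rt->rt6i_node twice and could race with a free. The sketch below imitates that load-once-then-check discipline in user space; C11 release/acquire atomics stand in for rcu_assign_pointer()/rcu_dereference(), and real RCU additionally defers the free until readers finish (the call_rcu() changes further down):

#include <stdatomic.h>

struct fib6_node_sketch { int fn_sernum; };

static _Atomic(struct fib6_node_sketch *) rt6i_node;	/* starts NULL */

/* writer: fully initialise the node, then publish the pointer */
static void publish(struct fib6_node_sketch *fn)
{
	atomic_store_explicit(&rt6i_node, fn, memory_order_release);
}

/* reader: load the pointer once, NULL-check, then use only that copy,
 * mirroring rt6_get_cookie_safe() above */
static int get_cookie_safe(unsigned int *cookie)
{
	struct fib6_node_sketch *fn =
		atomic_load_explicit(&rt6i_node, memory_order_acquire);

	if (!fn)
		return 0;
	*cookie = fn->fn_sernum;
	return 1;
}

int main(void)
{
	static struct fib6_node_sketch fn = { .fn_sernum = 42 };
	unsigned int cookie = 0;

	publish(&fn);
	return (get_cookie_safe(&cookie) && cookie == 42) ? 0 : 1;
}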


@@ -2667,13 +2667,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (!command || !ftrace_enabled) {
/*
* If these are control ops, they still need their
* per_cpu field freed. Since, function tracing is
* If these are dynamic or control ops, they still
* need their data freed. Since function tracing is
* not currently active, we can just free them
* without synchronizing all CPUs.
*/
if (ops->flags & FTRACE_OPS_FL_CONTROL)
control_ops_free(ops);
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL))
goto free_ops;
return 0;
}
@@ -2728,6 +2729,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
schedule_on_each_cpu(ftrace_sync);
free_ops:
arch_ftrace_trampoline_free(ops);
if (ops->flags & FTRACE_OPS_FL_CONTROL)


@@ -5327,7 +5327,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
if (tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
tracing_reset_online_cpus(&tr->max_buffer);
#endif


@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
goto out_free;
if (cnt > 1) {
if (trace_selftest_test_global_cnt == 0)
goto out;
goto out_free;
}
if (trace_selftest_test_dyn_cnt == 0)
goto out_free;


@@ -1760,13 +1760,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
struct page, lru);
/*
* It should never happen but changes to locking could
* inadvertently allow a per-cpu drain to add pages
* to MIGRATE_HIGHATOMIC while unreserving so be safe
* and watch for underflows.
* In the page freeing path, the migratetype change is racy, so
* we can encounter several free pages in a pageblock
* in this loop although we changed the pageblock type
* from highatomic to ac->migratetype. So we should
* adjust the count only once.
*/
zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
zone->nr_reserved_highatomic);
if (get_pageblock_migratetype(page) ==
MIGRATE_HIGHATOMIC) {
/*
* It should never happen but changes to
* locking could inadvertently allow a per-cpu
* drain to add pages to MIGRATE_HIGHATOMIC
* while unreserving so be safe and watch for
* underflows.
*/
zone->nr_reserved_highatomic -= min(
pageblock_nr_pages,
zone->nr_reserved_highatomic);
}
/*
* Convert to ac->migratetype and avoid the normal


@@ -580,19 +580,14 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&ieee802154_lowpan->frags);
if (res)
return res;
res = lowpan_frags_ns_sysctl_register(net);
if (res)
inet_frags_uninit_net(&ieee802154_lowpan->frags);
return res;
inet_frags_init_net(&ieee802154_lowpan->frags);
return lowpan_frags_ns_sysctl_register(net);
}
static void __net_exit lowpan_frags_exit_net(struct net *net)


@@ -234,10 +234,8 @@ evict_again:
cond_resched();
if (read_seqretry(&f->rnd_seqlock, seq) ||
percpu_counter_sum(&nf->mem))
sum_frag_mem_limit(nf))
goto evict_again;
percpu_counter_destroy(&nf->mem);
}
EXPORT_SYMBOL(inet_frags_exit_net);


@@ -840,8 +840,6 @@ static void __init ip4_frags_ctl_register(void)
static int __net_init ipv4_frags_init_net(struct net *net)
{
int res;
/* Fragment cache limits.
*
* The fragment memory accounting code (tries to) account for
@@ -865,13 +863,9 @@ static int __net_init ipv4_frags_init_net(struct net *net)
*/
net->ipv4.frags.timeout = IP_FRAG_TIME;
res = inet_frags_init_net(&net->ipv4.frags);
if (res)
return res;
res = ip4_frags_ns_ctl_register(net);
if (res)
inet_frags_uninit_net(&net->ipv4.frags);
return res;
inet_frags_init_net(&net->ipv4.frags);
return ip4_frags_ns_ctl_register(net);
}
static void __net_exit ipv4_frags_exit_net(struct net *net)


@@ -2260,6 +2260,10 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
/* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-zero
* issue in __tcp_select_window()
*/
icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);


@@ -5213,7 +5213,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
* our DAD process, so we don't need
* to do it again
*/
if (!(ifp->rt->rt6i_node))
if (!rcu_access_pointer(ifp->rt->rt6i_node))
ip6_ins_rt(ifp->rt);
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);


@@ -150,11 +150,23 @@ static struct fib6_node *node_alloc(void)
return fn;
}
static void node_free(struct fib6_node *fn)
static void node_free_immediate(struct fib6_node *fn)
{
kmem_cache_free(fib6_node_kmem, fn);
}
static void node_free_rcu(struct rcu_head *head)
{
struct fib6_node *fn = container_of(head, struct fib6_node, rcu);
kmem_cache_free(fib6_node_kmem, fn);
}
static void node_free(struct fib6_node *fn)
{
call_rcu(&fn->rcu, node_free_rcu);
}
static void rt6_rcu_free(struct rt6_info *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
@@ -191,6 +203,12 @@ static void rt6_release(struct rt6_info *rt)
}
}
static void fib6_free_table(struct fib6_table *table)
{
inetpeer_invalidate_tree(&table->tb6_peers);
kfree(table);
}
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
@@ -588,9 +606,9 @@ insert_above:
if (!in || !ln) {
if (in)
node_free(in);
node_free_immediate(in);
if (ln)
node_free(ln);
node_free_immediate(ln);
return ERR_PTR(-ENOMEM);
}
@@ -857,7 +875,7 @@ add:
rt->dst.rt6_next = iter;
*ins = rt;
rt->rt6i_node = fn;
rcu_assign_pointer(rt->rt6i_node, fn);
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, 0);
info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
@@ -882,7 +900,7 @@ add:
return err;
*ins = rt;
rt->rt6i_node = fn;
rcu_assign_pointer(rt->rt6i_node, fn);
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
@@ -1015,7 +1033,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
root, and then (in failure) stale node
in main tree.
*/
node_free(sfn);
node_free_immediate(sfn);
err = PTR_ERR(sn);
goto failure;
}
@@ -1442,8 +1460,9 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
int fib6_del(struct rt6_info *rt, struct nl_info *info)
{
struct fib6_node *fn = rcu_dereference_protected(rt->rt6i_node,
lockdep_is_held(&rt->rt6i_table->tb6_lock));
struct net *net = info->nl_net;
struct fib6_node *fn = rt->rt6i_node;
struct rt6_info **rtp;
#if RT6_DEBUG >= 2
@@ -1632,7 +1651,9 @@ static int fib6_clean_node(struct fib6_walker *w)
if (res) {
#if RT6_DEBUG >= 2
pr_debug("%s: del failed: rt=%p@%p err=%d\n",
__func__, rt, rt->rt6i_node, res);
__func__, rt,
rcu_access_pointer(rt->rt6i_node),
res);
#endif
continue;
}
@@ -1870,15 +1891,22 @@ out_timer:
static void fib6_net_exit(struct net *net)
{
unsigned int i;
rt6_ifdown(net, NULL);
del_timer_sync(&net->ipv6.ip6_fib_timer);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
kfree(net->ipv6.fib6_local_tbl);
#endif
inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
kfree(net->ipv6.fib6_main_tbl);
for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv6.fib_table_hash[i];
struct hlist_node *tmp;
struct fib6_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb6_hlist) {
hlist_del(&tb->tb6_hlist);
fib6_free_table(tb);
}
}
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
}
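
The rewritten fib6_net_exit() walks every bucket of fib_table_hash with hlist_for_each_entry_safe(), which caches the next pointer so the current table can be freed mid-walk; it also reaches tables other than the main and local ones, which is what fixes the leak with multiple tables. A minimal user-space sketch of the iterate-and-free idiom on a plain singly linked list:

#include <stdlib.h>

struct tb { struct tb *next; };

static void free_all(struct tb **head)
{
	struct tb *t = *head, *tmp;

	while (t) {
		tmp = t->next;	/* save the successor first, as the */
		free(t);	/* _safe iterator does, then free */
		t = tmp;
	}
	*head = NULL;
}

int main(void)
{
	struct tb *head = NULL;
	int i;

	for (i = 0; i < 4; i++) {
		struct tb *t = malloc(sizeof(*t));

		if (!t)
			return 1;
		t->next = head;
		head = t;
	}
	free_all(&head);
	return 0;
}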


@@ -649,18 +649,12 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
int res;
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&net->nf_frag.frags);
if (res)
return res;
res = nf_ct_frag6_sysctl_register(net);
if (res)
inet_frags_uninit_net(&net->nf_frag.frags);
return res;
inet_frags_init_net(&net->nf_frag.frags);
return nf_ct_frag6_sysctl_register(net);
}
static void nf_ct_net_exit(struct net *net)


@@ -86,7 +86,6 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
unsigned int len;
switch (**nexthdr) {
@@ -112,10 +111,9 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
len = ipv6_optlen(exthdr);
if (len + offset >= IPV6_MAXPLEN)
offset += ipv6_optlen(exthdr);
if (offset > IPV6_MAXPLEN)
return -EINVAL;
offset += len;
*nexthdr = &exthdr->nexthdr;
}


@@ -708,19 +708,13 @@ static void ip6_frags_sysctl_unregister(void)
static int __net_init ipv6_frags_init_net(struct net *net)
{
int res;
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
res = inet_frags_init_net(&net->ipv6.frags);
if (res)
return res;
res = ip6_frags_ns_sysctl_register(net);
if (res)
inet_frags_uninit_net(&net->ipv6.frags);
return res;
inet_frags_init_net(&net->ipv6.frags);
return ip6_frags_ns_sysctl_register(net);
}
static void __net_exit ipv6_frags_exit_net(struct net *net)


@@ -1244,7 +1244,9 @@ static void rt6_dst_from_metrics_check(struct rt6_info *rt)
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
u32 rt_cookie;
if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
return NULL;
if (rt6_check_expired(rt))
@@ -1312,8 +1314,14 @@ static void ip6_link_failure(struct sk_buff *skb)
if (rt->rt6i_flags & RTF_CACHE) {
dst_hold(&rt->dst);
ip6_del_rt(rt);
} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
rt->rt6i_node->fn_sernum = -1;
} else {
struct fib6_node *fn;
rcu_read_lock();
fn = rcu_dereference(rt->rt6i_node);
if (fn && (rt->rt6i_flags & RTF_DEFAULT))
fn->fn_sernum = -1;
rcu_read_unlock();
}
}
}
@@ -1330,7 +1338,8 @@ static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
return !(rt->rt6i_flags & RTF_CACHE) &&
(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
(rt->rt6i_flags & RTF_PCPU ||
rcu_access_pointer(rt->rt6i_node));
}
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,