This is the 4.4.43 stable release
-----BEGIN PGP SIGNATURE-----
iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlh7bhIACgkQONu9yGCS
aT5KKRAAw7baMz//gshbaXZuZZHJjqB+rBekdnzgBMBo4P2OJwiuFi7N27dRxiaO
6uFAB5BUlFoc16AExAnmQJIiWB8lWeAt8S20RBLaiGGQ0iPTr4W7bsVH4Tk3zEaF
gjCt3Tv8kzbno64lWk02xDilkxFO09y3ZtiMVkleUDpI1DRm5iAF11j+C42OG1Ox
U1QPsjCoWJyZ9Ta7SEyoQsuJcU32Wl0IW1VAroqfYAJJF5yLOxGoJQfWsiyvwEjQ
VQg+Yd2LlJkHjuOp4lSAaYjNrCjvV91KwcwOocyI2iw69vyyCQpbKeg50wA1+jBO
2+b0WKTIYSA6EruAivIj0646UqnzzpUGf9DfeH2NIApO7PvTGWaIWk5uvheOf3Vz
yVviVGYdedtMXixdzHVXgRVZQThlhLe2D5bvYB0bFInDrY8LlMZJVwjrbJuVQaUy
u0eguKvOIXSsUwtDOLCEKKh7bH1605JXVm0yUAYRmTPbRjs8LQHu0kPpS70L5tYI
MaftvgPFyLev88cDns+VjnJxm1cOHrSRyLigM4ArCrZdNs8EKPScFeV3bKcR2Gwi
u05MdpwagOMSFqKdPFhiGYjjcpAeieeAOkmMro9C1KvIRhVt83cAlbP6L9R0PYSK
n/wfpvrcbDKl0vcAPVscw1iM590WbRPGGrqlDGv+ak4cjsCb8ro=
=kCbR
-----END PGP SIGNATURE-----

Merge tag 'v4.4.43' into android-4.4.y

This is the 4.4.43 stable release
commit f103e3b0d8
191 changed files with 1736 additions and 678 deletions

@@ -1991,6 +1991,7 @@ registers, find a list below:
   PPC   | KVM_REG_PPC_TM_VSCR   | 32
   PPC   | KVM_REG_PPC_TM_DSCR   | 64
   PPC   | KVM_REG_PPC_TM_TAR    | 64
+  PPC   | KVM_REG_PPC_TM_XER    | 64

   MIPS  | KVM_REG_MIPS_R0       | 64
   ...
Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 40
+SUBLEVEL = 43
 EXTRAVERSION =
 NAME = Blurry Fish Butt

@@ -85,6 +85,10 @@ void flush_anon_page(struct vm_area_struct *vma,
  */
 #define PG_dc_clean	PG_arch_1

+#define CACHE_COLORS_NUM	4
+#define CACHE_COLORS_MSK	(CACHE_COLORS_NUM - 1)
+#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
+
 /*
  * Simple wrapper over config option
  * Bootup code ensures that hardware matches kernel configuration
@@ -94,8 +98,6 @@ static inline int cache_is_vipt_aliasing(void)
 	return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
 }

-#define CACHE_COLOR(addr)	(((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
-
 /*
  * checks if two addresses (after page aligning) index into same cache set
  */

@@ -960,11 +960,16 @@ void arc_cache_init(void)
 	/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
 	if (is_isa_arcompact()) {
 		int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
+		int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

-		if (dc->alias && !handled)
-			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
-		else if (!dc->alias && handled)
+		if (dc->alias) {
+			if (!handled)
+				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+			if (CACHE_COLORS_NUM != num_colors)
+				panic("CACHE_COLORS_NUM not optimized for config\n");
+		} else if (!dc->alias && handled) {
 			panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+		}
 	}
 }

@@ -1023,7 +1023,7 @@
 		mstp7_clks: mstp7_clks@e615014c {
 			compatible = "renesas,r8a7794-mstp-clocks", "renesas,cpg-mstp-clocks";
 			reg = <0 0xe615014c 0 4>, <0 0xe61501c4 0 4>;
-			clocks = <&mp_clk>, <&mp_clk>,
+			clocks = <&mp_clk>, <&hp_clk>,
				 <&zs_clk>, <&p_clk>, <&p_clk>, <&zs_clk>,
				 <&zs_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>;
			#clock-cells = <1>;

@@ -87,8 +87,13 @@ static int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;

+#ifndef CONFIG_CPU_BIG_ENDIAN
 		rko[0] = ror32(ce_aes_sub(rki[kwords - 1]), 8);
 		rko[0] = rko[0] ^ rki[0] ^ rcon[i];
+#else
+		rko[0] = rol32(ce_aes_sub(rki[kwords - 1]), 8);
+		rko[0] = rko[0] ^ rki[0] ^ (rcon[i] << 24);
+#endif
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];

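The expandkey hunk above (and its arm64 twin further down) picks ror32 with rcon[i] on little-endian and rol32 with (rcon[i] << 24) on big-endian. A standalone sketch of that first-word computation in plain C — illustrative only, not the kernel code; aes_sub() here is a stand-in for the crypto-extension byte substitution the driver performs:

#include <stdint.h>

static uint32_t ror32(uint32_t w, unsigned int s) { return (w >> s) | (w << (32 - s)); }
static uint32_t rol32(uint32_t w, unsigned int s) { return (w << s) | (w >> (32 - s)); }

/*
 * First word of the next AES round key. On little-endian the round-key
 * words hold the key bytes in LE order, so the rotated S-box output is
 * XORed with rcon directly; on big-endian the same bytes sit in the
 * opposite word order, so the rotation direction flips and rcon has to
 * land in the high byte instead.
 */
static uint32_t next_rk_word(uint32_t prev_last, uint32_t prev_first,
                             uint32_t rcon, int big_endian,
                             uint32_t (*aes_sub)(uint32_t))
{
	if (!big_endian)
		return ror32(aes_sub(prev_last), 8) ^ rcon ^ prev_first;
	return rol32(aes_sub(prev_last), 8) ^ (rcon << 24) ^ prev_first;
}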
@@ -298,6 +298,16 @@ static struct clk emac_clk = {
 	.gpsc		= 1,
 };

+/*
+ * In order to avoid adding the emac_clk to the clock lookup table twice (and
+ * screwing up the linked list in the process) create a separate clock for
+ * mdio inheriting the rate from emac_clk.
+ */
+static struct clk mdio_clk = {
+	.name		= "mdio",
+	.parent		= &emac_clk,
+};
+
 static struct clk mcasp_clk = {
 	.name		= "mcasp",
 	.parent		= &pll0_sysclk2,
@@ -462,7 +472,7 @@ static struct clk_lookup da850_clks[] = {
 	CLK(NULL,		"arm",		&arm_clk),
 	CLK(NULL,		"rmii",		&rmii_clk),
 	CLK("davinci_emac.1",	NULL,		&emac_clk),
-	CLK("davinci_mdio.0",	"fck",		&emac_clk),
+	CLK("davinci_mdio.0",	"fck",		&mdio_clk),
 	CLK("davinci-mcasp.0",	NULL,		&mcasp_clk),
 	CLK("da8xx_lcdc.0",	"fck",		&lcdc_clk),
 	CLK("da830-mmc.0",	NULL,		&mmcsd0_clk),

@@ -243,10 +243,9 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
 		save_state = 1;
 		break;
 	case PWRDM_POWER_RET:
-		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE)) {
+		if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
 			save_state = 0;
-			break;
-		}
+		break;
 	default:
 		/*
 		 * CPUx CSWR is invalid hardware state. Also CPUx OSWR

@@ -59,7 +59,7 @@ void __iomem *zynq_scu_base;
 static void __init zynq_memory_init(void)
 {
 	if (!__pa(PAGE_OFFSET))
-		memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+		memblock_reserve(__pa(PAGE_OFFSET), 0x80000);
 }

 static struct platform_device zynq_cpuidle_device = {

@@ -9,6 +9,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 	.text
 	.arch	armv8-a+crypto
@@ -19,7 +20,7 @@
  */
ENTRY(ce_aes_ccm_auth_data)
 	ldr	w8, [x3]			/* leftover from prev round? */
-	ld1	{v0.2d}, [x0]			/* load mac */
+	ld1	{v0.16b}, [x0]			/* load mac */
 	cbz	w8, 1f
 	sub	w8, w8, #16
 	eor	v1.16b, v1.16b, v1.16b
@@ -31,7 +32,7 @@ ENTRY(ce_aes_ccm_auth_data)
 	beq	8f				/* out of input? */
 	cbnz	w8, 0b
 	eor	v0.16b, v0.16b, v1.16b
-1:	ld1	{v3.2d}, [x4]			/* load first round key */
+1:	ld1	{v3.16b}, [x4]			/* load first round key */
 	prfm	pldl1strm, [x1]
 	cmp	w5, #12				/* which key size? */
 	add	x6, x4, #16
@@ -41,17 +42,17 @@ ENTRY(ce_aes_ccm_auth_data)
 	mov	v5.16b, v3.16b
 	b	4f
2:	mov	v4.16b, v3.16b
-	ld1	{v5.2d}, [x6], #16		/* load 2nd round key */
+	ld1	{v5.16b}, [x6], #16		/* load 2nd round key */
3:	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
-4:	ld1	{v3.2d}, [x6], #16		/* load next round key */
+4:	ld1	{v3.16b}, [x6], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
-5:	ld1	{v4.2d}, [x6], #16		/* load next round key */
+5:	ld1	{v4.16b}, [x6], #16		/* load next round key */
 	subs	w7, w7, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
-	ld1	{v5.2d}, [x6], #16		/* load next round key */
+	ld1	{v5.16b}, [x6], #16		/* load next round key */
 	bpl	3b
 	aese	v0.16b, v4.16b
 	subs	w2, w2, #16			/* last data? */
@@ -60,7 +61,7 @@ ENTRY(ce_aes_ccm_auth_data)
 	ld1	{v1.16b}, [x1], #16		/* load next input block */
 	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
 	bne	1b
-6:	st1	{v0.2d}, [x0]			/* store mac */
+6:	st1	{v0.16b}, [x0]			/* store mac */
 	beq	10f
 	adds	w2, w2, #16
 	beq	10f
@@ -79,7 +80,7 @@ ENTRY(ce_aes_ccm_auth_data)
 	adds	w7, w7, #1
 	bne	9b
 	eor	v0.16b, v0.16b, v1.16b
-	st1	{v0.2d}, [x0]
+	st1	{v0.16b}, [x0]
10:	str	w8, [x3]
 	ret
ENDPROC(ce_aes_ccm_auth_data)
@@ -89,27 +90,27 @@ ENDPROC(ce_aes_ccm_auth_data)
 	 *			 u32 rounds);
 	 */
ENTRY(ce_aes_ccm_final)
-	ld1	{v3.2d}, [x2], #16		/* load first round key */
-	ld1	{v0.2d}, [x0]			/* load mac */
+	ld1	{v3.16b}, [x2], #16		/* load first round key */
+	ld1	{v0.16b}, [x0]			/* load mac */
 	cmp	w3, #12				/* which key size? */
 	sub	w3, w3, #2			/* modified # of rounds */
-	ld1	{v1.2d}, [x1]			/* load 1st ctriv */
+	ld1	{v1.16b}, [x1]			/* load 1st ctriv */
 	bmi	0f
 	bne	3f
 	mov	v5.16b, v3.16b
 	b	2f
0:	mov	v4.16b, v3.16b
-1:	ld1	{v5.2d}, [x2], #16		/* load next round key */
+1:	ld1	{v5.16b}, [x2], #16		/* load next round key */
 	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
-2:	ld1	{v3.2d}, [x2], #16		/* load next round key */
+2:	ld1	{v3.16b}, [x2], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
-3:	ld1	{v4.2d}, [x2], #16		/* load next round key */
+3:	ld1	{v4.16b}, [x2], #16		/* load next round key */
 	subs	w3, w3, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
@@ -120,47 +121,47 @@ ENTRY(ce_aes_ccm_final)
 	aese	v1.16b, v4.16b
 	/* final round key cancels out */
 	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
-	st1	{v0.2d}, [x0]			/* store result */
+	st1	{v0.16b}, [x0]			/* store result */
 	ret
ENDPROC(ce_aes_ccm_final)

 	.macro	aes_ccm_do_crypt,enc
 	ldr	x8, [x6, #8]			/* load lower ctr */
-	ld1	{v0.2d}, [x5]			/* load mac */
-	rev	x8, x8				/* keep swabbed ctr in reg */
+	ld1	{v0.16b}, [x5]			/* load mac */
+CPU_LE(	rev	x8, x8			)	/* keep swabbed ctr in reg */
0:	/* outer loop */
-	ld1	{v1.1d}, [x6]			/* load upper ctr */
+	ld1	{v1.8b}, [x6]			/* load upper ctr */
 	prfm	pldl1strm, [x1]
 	add	x8, x8, #1
 	rev	x9, x8
 	cmp	w4, #12				/* which key size? */
 	sub	w7, w4, #2			/* get modified # of rounds */
 	ins	v1.d[1], x9			/* no carry in lower ctr */
-	ld1	{v3.2d}, [x3]			/* load first round key */
+	ld1	{v3.16b}, [x3]			/* load first round key */
 	add	x10, x3, #16
 	bmi	1f
 	bne	4f
 	mov	v5.16b, v3.16b
 	b	3f
1:	mov	v4.16b, v3.16b
-	ld1	{v5.2d}, [x10], #16		/* load 2nd round key */
+	ld1	{v5.16b}, [x10], #16		/* load 2nd round key */
2:	/* inner loop: 3 rounds, 2x interleaved */
 	aese	v0.16b, v4.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v4.16b
 	aesmc	v1.16b, v1.16b
-3:	ld1	{v3.2d}, [x10], #16		/* load next round key */
+3:	ld1	{v3.16b}, [x10], #16		/* load next round key */
 	aese	v0.16b, v5.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v5.16b
 	aesmc	v1.16b, v1.16b
-4:	ld1	{v4.2d}, [x10], #16		/* load next round key */
+4:	ld1	{v4.16b}, [x10], #16		/* load next round key */
 	subs	w7, w7, #3
 	aese	v0.16b, v3.16b
 	aesmc	v0.16b, v0.16b
 	aese	v1.16b, v3.16b
 	aesmc	v1.16b, v1.16b
-	ld1	{v5.2d}, [x10], #16		/* load next round key */
+	ld1	{v5.16b}, [x10], #16		/* load next round key */
 	bpl	2b
 	aese	v0.16b, v4.16b
 	aese	v1.16b, v4.16b
@@ -177,14 +178,14 @@ ENDPROC(ce_aes_ccm_final)
 	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
 	st1	{v1.16b}, [x0], #16		/* write output block */
 	bne	0b
-	rev	x8, x8
-	st1	{v0.2d}, [x5]			/* store mac */
+CPU_LE(	rev	x8, x8			)
+	st1	{v0.16b}, [x5]			/* store mac */
 	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
5:	ret

6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
 	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
-	st1	{v0.2d}, [x5]			/* store mac */
+	st1	{v0.16b}, [x5]			/* store mac */
 	add	w2, w2, #16			/* process partial tail block */
7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
 	umov	w6, v1.b[0]			/* get top crypted ctr byte */

@@ -47,24 +47,24 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	kernel_neon_begin_partial(4);

 	__asm__("	ld1	{v0.16b}, %[in]		;"
-		"	ld1	{v1.2d}, [%[key]], #16	;"
+		"	ld1	{v1.16b}, [%[key]], #16	;"
 		"	cmp	%w[rounds], #10		;"
 		"	bmi	0f			;"
 		"	bne	3f			;"
 		"	mov	v3.16b, v1.16b		;"
 		"	b	2f			;"
 		"0:	mov	v2.16b, v1.16b		;"
-		"	ld1	{v3.2d}, [%[key]], #16	;"
+		"	ld1	{v3.16b}, [%[key]], #16	;"
 		"1:	aese	v0.16b, v2.16b		;"
 		"	aesmc	v0.16b, v0.16b		;"
-		"2:	ld1	{v1.2d}, [%[key]], #16	;"
+		"2:	ld1	{v1.16b}, [%[key]], #16	;"
 		"	aese	v0.16b, v3.16b		;"
 		"	aesmc	v0.16b, v0.16b		;"
-		"3:	ld1	{v2.2d}, [%[key]], #16	;"
+		"3:	ld1	{v2.16b}, [%[key]], #16	;"
 		"	subs	%w[rounds], %w[rounds], #3 ;"
 		"	aese	v0.16b, v1.16b		;"
 		"	aesmc	v0.16b, v0.16b		;"
-		"	ld1	{v3.2d}, [%[key]], #16	;"
+		"	ld1	{v3.16b}, [%[key]], #16	;"
 		"	bpl	1b			;"
 		"	aese	v0.16b, v2.16b		;"
 		"	eor	v0.16b, v0.16b, v3.16b	;"
@@ -92,24 +92,24 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 	kernel_neon_begin_partial(4);

 	__asm__("	ld1	{v0.16b}, %[in]		;"
-		"	ld1	{v1.2d}, [%[key]], #16	;"
+		"	ld1	{v1.16b}, [%[key]], #16	;"
 		"	cmp	%w[rounds], #10		;"
 		"	bmi	0f			;"
 		"	bne	3f			;"
 		"	mov	v3.16b, v1.16b		;"
 		"	b	2f			;"
 		"0:	mov	v2.16b, v1.16b		;"
-		"	ld1	{v3.2d}, [%[key]], #16	;"
+		"	ld1	{v3.16b}, [%[key]], #16	;"
 		"1:	aesd	v0.16b, v2.16b		;"
 		"	aesimc	v0.16b, v0.16b		;"
-		"2:	ld1	{v1.2d}, [%[key]], #16	;"
+		"2:	ld1	{v1.16b}, [%[key]], #16	;"
 		"	aesd	v0.16b, v3.16b		;"
 		"	aesimc	v0.16b, v0.16b		;"
-		"3:	ld1	{v2.2d}, [%[key]], #16	;"
+		"3:	ld1	{v2.16b}, [%[key]], #16	;"
 		"	subs	%w[rounds], %w[rounds], #3 ;"
 		"	aesd	v0.16b, v1.16b		;"
 		"	aesimc	v0.16b, v0.16b		;"
-		"	ld1	{v3.2d}, [%[key]], #16	;"
+		"	ld1	{v3.16b}, [%[key]], #16	;"
 		"	bpl	1b			;"
 		"	aesd	v0.16b, v2.16b		;"
 		"	eor	v0.16b, v0.16b, v3.16b	;"
@@ -173,7 +173,12 @@ int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
 		u32 *rki = ctx->key_enc + (i * kwords);
 		u32 *rko = rki + kwords;

+#ifndef CONFIG_CPU_BIG_ENDIAN
 		rko[0] = ror32(aes_sub(rki[kwords - 1]), 8) ^ rcon[i] ^ rki[0];
+#else
+		rko[0] = rol32(aes_sub(rki[kwords - 1]), 8) ^ (rcon[i] << 24) ^
+			 rki[0];
+#endif
 		rko[1] = rko[0] ^ rki[1];
 		rko[2] = rko[1] ^ rki[2];
 		rko[3] = rko[2] ^ rki[3];

@@ -10,6 +10,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 #define AES_ENTRY(func)		ENTRY(ce_ ## func)
 #define AES_ENDPROC(func)	ENDPROC(ce_ ## func)

@@ -386,7 +386,8 @@ AES_ENDPROC(aes_ctr_encrypt)
 	.endm

.Lxts_mul_x:
-	.word		1, 0, 0x87, 0
+CPU_LE(	.quad	1, 0x87 )
+CPU_BE(	.quad	0x87, 1 )

AES_ENTRY(aes_xts_encrypt)
 	FRAME_PUSH

@@ -9,6 +9,7 @@
  */

 #include <linux/linkage.h>
+#include <asm/assembler.h>

 #define AES_ENTRY(func)		ENTRY(neon_ ## func)
 #define AES_ENDPROC(func)	ENDPROC(neon_ ## func)
@@ -83,13 +84,13 @@
 	.endm

 	.macro		do_block, enc, in, rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
1111:	eor		\in\().16b, \in\().16b, v15.16b	/* ^round key */
 	tbl		\in\().16b, {\in\().16b}, v13.16b	/* ShiftRows */
 	sub_bytes	\in
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -229,7 +230,7 @@
 	.endm

 	.macro		do_block_2x, enc, in0, in1 rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
@@ -237,7 +238,7 @@
 	sub_bytes_2x	\in0, \in1
 	tbl		\in0\().16b, {\in0\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -254,7 +255,7 @@
 	.endm

 	.macro		do_block_4x, enc, in0, in1, in2, in3, rounds, rk, rkp, i
-	ld1		{v15.16b}, [\rk]
+	ld1		{v15.4s}, [\rk]
 	add		\rkp, \rk, #16
 	mov		\i, \rounds
1111:	eor		\in0\().16b, \in0\().16b, v15.16b	/* ^round key */
@@ -266,7 +267,7 @@
 	tbl		\in1\().16b, {\in1\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in2\().16b, {\in2\().16b}, v13.16b	/* ShiftRows */
 	tbl		\in3\().16b, {\in3\().16b}, v13.16b	/* ShiftRows */
-	ld1		{v15.16b}, [\rkp], #16
+	ld1		{v15.4s}, [\rkp], #16
 	subs		\i, \i, #1
 	beq		2222f
 	.if		\enc == 1
@@ -306,12 +307,16 @@
 	.text
 	.align		4
.LForward_ShiftRows:
-	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3
-	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb
+CPU_LE(	.byte		0x0, 0x5, 0xa, 0xf, 0x4, 0x9, 0xe, 0x3	)
+CPU_LE(	.byte		0x8, 0xd, 0x2, 0x7, 0xc, 0x1, 0x6, 0xb	)
+CPU_BE(	.byte		0xb, 0x6, 0x1, 0xc, 0x7, 0x2, 0xd, 0x8	)
+CPU_BE(	.byte		0x3, 0xe, 0x9, 0x4, 0xf, 0xa, 0x5, 0x0	)

.LReverse_ShiftRows:
-	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb
-	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3
+CPU_LE(	.byte		0x0, 0xd, 0xa, 0x7, 0x4, 0x1, 0xe, 0xb	)
+CPU_LE(	.byte		0x8, 0x5, 0x2, 0xf, 0xc, 0x9, 0x6, 0x3	)
+CPU_BE(	.byte		0x3, 0x6, 0x9, 0xc, 0xf, 0x2, 0x5, 0x8	)
+CPU_BE(	.byte		0xb, 0xe, 0x1, 0x4, 0x7, 0xa, 0xd, 0x0	)

.LForward_Sbox:
 	.byte		0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5

@@ -29,8 +29,8 @@
 	 *			   struct ghash_key const *k, const char *head)
 	 */
ENTRY(pmull_ghash_update)
-	ld1		{SHASH.16b}, [x3]
-	ld1		{XL.16b}, [x1]
+	ld1		{SHASH.2d}, [x3]
+	ld1		{XL.2d}, [x1]
 	movi		MASK.16b, #0xe1
 	ext		SHASH2.16b, SHASH.16b, SHASH.16b, #8
 	shl		MASK.2d, MASK.2d, #57
@@ -74,6 +74,6 @@ CPU_LE(	rev64		T1.16b, T1.16b	)

 	cbnz		w0, 0b

-	st1		{XL.16b}, [x1]
+	st1		{XL.2d}, [x1]
 	ret
ENDPROC(pmull_ghash_update)

@@ -78,7 +78,7 @@ ENTRY(sha1_ce_transform)
 	ld1r		{k3.4s}, [x6]

 	/* load state */
-	ldr		dga, [x0]
+	ld1		{dgav.4s}, [x0]
 	ldr		dgb, [x0, #16]

 	/* load sha1_ce_state::finalize */
@@ -144,7 +144,7 @@ CPU_LE(	rev32		v11.16b, v11.16b	)
 	b		1b

 	/* store new state */
-3:	str		dga, [x0]
+3:	st1		{dgav.4s}, [x0]
 	str		dgb, [x0, #16]
 	ret
ENDPROC(sha1_ce_transform)

@@ -85,7 +85,7 @@ ENTRY(sha2_ce_transform)
 	ld1		{v12.4s-v15.4s}, [x8]

 	/* load state */
-	ldp		dga, dgb, [x0]
+	ld1		{dgav.4s, dgbv.4s}, [x0]

 	/* load sha256_ce_state::finalize */
 	ldr		w4, [x0, #:lo12:sha256_ce_offsetof_finalize]
@@ -148,6 +148,6 @@ CPU_LE(	rev32		v19.16b, v19.16b	)
 	b		1b

 	/* store new state */
-3:	stp		dga, dgb, [x0]
+3:	st1		{dgav.4s, dgbv.4s}, [x0]
 	ret
ENDPROC(sha2_ce_transform)

@@ -10,6 +10,9 @@

 asflags-y += $(LINUXINCLUDE)
 ccflags-y += -O2 $(LINUXINCLUDE)

+ifdef CONFIG_ETRAX_AXISFLASHMAP
+
 arch-$(CONFIG_ETRAX_ARCH_V10) = v10
 arch-$(CONFIG_ETRAX_ARCH_V32) = v32

@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE
 	$(call if_changed,objcopy)
 	cp -p $(obj)/rescue.bin $(objtree)

+else
+$(obj)/rescue.bin:
+
+endif
+
 $(obj)/testrescue.bin: $(obj)/testrescue.o
 	$(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin
 	# Pad it to 784 bytes

@@ -324,8 +324,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
#endif

 	/* Invalidate the icache for these ranges */
-	local_flush_icache_range((unsigned long)gebase,
-				(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+	flush_icache_range((unsigned long)gebase,
+			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

 	/*
 	 * Allocate comm page for guest kernel, a TLB will be reserved for

@@ -57,11 +57,6 @@ __system_reset_overlay:
 	bctr

1:
-	/* Save the value at addr zero for a null pointer write check later. */
-
-	li	r4, 0
-	lwz	r3, 0(r4)
-
 	/* Primary delays then goes to _zimage_start in wrapper. */

 	or	31, 31, 31 /* db16cyc */

@@ -119,13 +119,12 @@ void ps3_copy_vectors(void)
 	flush_cache((void *)0x100, 512);
 }

-void platform_init(unsigned long null_check)
+void platform_init(void)
 {
 	const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */
 	void *chosen;
 	unsigned long ft_addr;
 	u64 rm_size;
-	unsigned long val;

 	console_ops.write = ps3_console_write;
 	platform_ops.exit = ps3_exit;
@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check)

 	printf(" flat tree at 0x%lx\n\r", ft_addr);

-	val = *(unsigned long *)0;
-
-	if (val != null_check)
-		printf("null check failed: %lx != %lx\n\r", val, null_check);
-
 	((kernel_entry_t)0)(ft_addr, 0, NULL);

 	ps3_exit();

@@ -545,6 +545,7 @@ struct kvm_vcpu_arch {
 	u64 tfiar;

 	u32 cr_tm;
+	u64 xer_tm;
 	u64 lr_tm;
 	u64 ctr_tm;
 	u64 amr_tm;

@@ -587,6 +587,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+#define KVM_REG_PPC_TM_XER	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x6a)

/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */

@@ -584,6 +584,7 @@ int main(void)
 	DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr));
 	DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm));
 	DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm));
+	DEFINE(VCPU_XER_TM, offsetof(struct kvm_vcpu, arch.xer_tm));
 	DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm));
 	DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm));
 	DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm));

@@ -44,7 +44,7 @@
 	std	r0,0(r1);					\
 	ptesync;						\
 	ld	r0,0(r1);					\
-1:	cmp	cr0,r0,r0;					\
+1:	cmpd	cr0,r0,r0;					\
 	bne	1b;						\
 	IDLE_INST;						\
 	b	.

@@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache)
 	lis	r3, KERNELBASE@h
 	iccci	0,r3
#endif
-#elif CONFIG_FSL_BOOKE
+#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
 	mfspr	r3,SPRN_L1CSR0
 	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC

@@ -1186,6 +1186,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		*val = get_reg_val(id, vcpu->arch.cr_tm);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		*val = get_reg_val(id, vcpu->arch.xer_tm);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		*val = get_reg_val(id, vcpu->arch.lr_tm);
 		break;
@@ -1393,6 +1396,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_TM_CR:
 		vcpu->arch.cr_tm = set_reg_val(id, *val);
 		break;
+	case KVM_REG_PPC_TM_XER:
+		vcpu->arch.xer_tm = set_reg_val(id, *val);
+		break;
 	case KVM_REG_PPC_TM_LR:
 		vcpu->arch.lr_tm = set_reg_val(id, *val);
 		break;

@@ -653,6 +653,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
				HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
+			/* Don't lose R/C bit updates done by hardware */
+			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}

@@ -2514,11 +2514,13 @@ kvmppc_save_tm:
 	mfctr	r7
 	mfspr	r8, SPRN_AMR
 	mfspr	r10, SPRN_TAR
+	mfxer	r11
 	std	r5, VCPU_LR_TM(r9)
 	stw	r6, VCPU_CR_TM(r9)
 	std	r7, VCPU_CTR_TM(r9)
 	std	r8, VCPU_AMR_TM(r9)
 	std	r10, VCPU_TAR_TM(r9)
+	std	r11, VCPU_XER_TM(r9)

 	/* Restore r12 as trap number. */
 	lwz	r12, VCPU_TRAP(r9)
@@ -2611,11 +2613,13 @@ kvmppc_restore_tm:
 	ld	r7, VCPU_CTR_TM(r4)
 	ld	r8, VCPU_AMR_TM(r4)
 	ld	r9, VCPU_TAR_TM(r4)
+	ld	r10, VCPU_XER_TM(r4)
 	mtlr	r5
 	mtcr	r6
 	mtctr	r7
 	mtspr	SPRN_AMR, r8
 	mtspr	SPRN_TAR, r9
+	mtxer	r10

 	/*
 	 * Load up PPR and DSCR values but don't put them in the actual SPRs

@@ -565,8 +565,10 @@ static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
 		prng_data->prngws.byte_counter += n;
 		prng_data->prngws.reseed_counter += n;

-		if (copy_to_user(ubuf, prng_data->buf, chunk))
-			return -EFAULT;
+		if (copy_to_user(ubuf, prng_data->buf, chunk)) {
+			ret = -EFAULT;
+			break;
+		}

 		nbytes -= chunk;
 		ret += chunk;

@@ -766,8 +766,8 @@ ftrace_graph_call:
 	jmp ftrace_stub
#endif

-.globl ftrace_stub
-ftrace_stub:
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
 	ret
END(ftrace_caller)

@@ -1247,10 +1247,10 @@ static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
 	return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
 }

-static inline bool is_exception(u32 intr_info)
+static inline bool is_nmi(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
-		== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
+		== (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK);
 }

 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
@@ -5234,7 +5234,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 	if (is_machine_check(intr_info))
 		return handle_machine_check(vcpu);

-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
+	if (is_nmi(intr_info))
 		return 1;  /* already handled by vmx_vcpu_run() */

 	if (is_no_device(intr_info)) {
@@ -7722,7 +7722,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)

 	switch (exit_reason) {
 	case EXIT_REASON_EXCEPTION_NMI:
-		if (!is_exception(intr_info))
+		if (is_nmi(intr_info))
 			return false;
 		else if (is_page_fault(intr_info))
 			return enable_ept;
@@ -8329,8 +8329,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 		kvm_machine_check();

 	/* We need to handle NMIs before interrupts are enabled */
-	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
+	if (is_nmi(exit_intr_info)) {
 		kvm_before_handle_nmi(&vmx->vcpu);
 		asm("int $2");
 		kvm_after_handle_nmi(&vmx->vcpu);

@@ -2949,6 +2949,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }

+static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
+
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
					      struct kvm_vcpu_events *events)
 {
@@ -2981,10 +2983,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 		vcpu->arch.apic->sipi_vector = events->sipi_vector;

 	if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
+		u32 hflags = vcpu->arch.hflags;
 		if (events->smi.smm)
-			vcpu->arch.hflags |= HF_SMM_MASK;
+			hflags |= HF_SMM_MASK;
 		else
-			vcpu->arch.hflags &= ~HF_SMM_MASK;
+			hflags &= ~HF_SMM_MASK;
+		kvm_set_hflags(vcpu, hflags);
+
 		vcpu->arch.smi_pending = events->smi.pending;
 		if (events->smi.smm_inside_nmi)
 			vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;

@@ -655,6 +655,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)

 	dprintk("%s: write %Zd bytes\n", bd->name, count);

+	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
+		return -EINVAL;
+
 	bsg_set_block(bd, file);

 	bytes_written = 0;

@@ -271,6 +271,26 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
		DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro12,1"),
		},
	},
+	{
+	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1123661 */
+	 .callback = video_detect_force_native,
+	 .ident = "Dell XPS 17 L702X",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
+		},
+	},
+	{
+	 /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
+	 /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
+	 .callback = video_detect_force_native,
+	 .ident = "HP Pavilion dv6",
+	 .matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
+		},
+	},

	{ },
};

@@ -942,13 +942,14 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
 		timeout = MAX_JIFFY_OFFSET;
 	}

-	retval = wait_for_completion_interruptible_timeout(&buf->completion,
+	timeout = wait_for_completion_interruptible_timeout(&buf->completion,
			timeout);
-	if (retval == -ERESTARTSYS || !retval) {
+	if (timeout == -ERESTARTSYS || !timeout) {
+		retval = timeout;
 		mutex_lock(&fw_lock);
 		fw_load_abort(fw_priv);
 		mutex_unlock(&fw_lock);
-	} else if (retval > 0) {
+	} else if (timeout > 0) {
 		retval = 0;
 	}

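The firmware-loader fix above keeps the result of wait_for_completion_interruptible_timeout() in the long it came from instead of funnelling it through an int. A minimal illustration of the truncation problem (demo code, not the kernel's; the value is a plausible large jiffies count on a 64-bit build):

#include <stdio.h>

int main(void)
{
	/* remaining jiffies from a very long wait (> INT_MAX) */
	long remaining = 0x7fffffffffffL;
	int truncated = (int)remaining;	/* what an int result variable would see */

	/* a successful wait can masquerade as zero or a negative error code */
	printf("long=%ld int=%d\n", remaining, truncated);
	return 0;
}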
@@ -20,14 +20,22 @@ static inline void pm_runtime_early_init(struct device *dev)
extern void pm_runtime_init(struct device *dev);
extern void pm_runtime_remove(struct device *dev);

+#define WAKE_IRQ_DEDICATED_ALLOCATED	BIT(0)
+#define WAKE_IRQ_DEDICATED_MANAGED	BIT(1)
+#define WAKE_IRQ_DEDICATED_MASK		(WAKE_IRQ_DEDICATED_ALLOCATED | \
+					 WAKE_IRQ_DEDICATED_MANAGED)
+
struct wake_irq {
	struct device *dev;
+	unsigned int status;
	int irq;
-	bool dedicated_irq:1;
};

extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
+extern void dev_pm_enable_wake_irq_check(struct device *dev,
+					 bool can_change_status);
+extern void dev_pm_disable_wake_irq_check(struct device *dev);

#ifdef CONFIG_PM_SLEEP

@@ -102,6 +110,15 @@ static inline void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
}

+static inline void dev_pm_enable_wake_irq_check(struct device *dev,
+						bool can_change_status)
+{
+}
+
+static inline void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+}
+
#endif

#ifdef CONFIG_PM_SLEEP

@@ -515,7 +515,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)

 	callback = RPM_GET_CALLBACK(dev, runtime_suspend);

-	dev_pm_enable_wake_irq(dev);
+	dev_pm_enable_wake_irq_check(dev, true);
 	retval = rpm_callback(callback, dev);
 	if (retval)
 		goto fail;
@@ -554,7 +554,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	return retval;

 fail:
-	dev_pm_disable_wake_irq(dev);
+	dev_pm_disable_wake_irq_check(dev);
 	__update_runtime_status(dev, RPM_ACTIVE);
 	dev->power.deferred_resume = false;
 	wake_up_all(&dev->power.wait_queue);
@@ -737,12 +737,12 @@ static int rpm_resume(struct device *dev, int rpmflags)

 	callback = RPM_GET_CALLBACK(dev, runtime_resume);

-	dev_pm_disable_wake_irq(dev);
+	dev_pm_disable_wake_irq_check(dev);
 	retval = rpm_callback(callback, dev);
 	if (retval) {
 		__update_runtime_status(dev, RPM_SUSPENDED);
 		pm_runtime_cancel_pending(dev);
-		dev_pm_enable_wake_irq(dev);
+		dev_pm_enable_wake_irq_check(dev, false);
 	} else {
 no_callback:
 		__update_runtime_status(dev, RPM_ACTIVE);

@@ -110,8 +110,10 @@ void dev_pm_clear_wake_irq(struct device *dev)
 	dev->power.wakeirq = NULL;
 	spin_unlock_irqrestore(&dev->power.lock, flags);

-	if (wirq->dedicated_irq)
+	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
 		free_irq(wirq->irq, wirq);
+		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
+	}
 	kfree(wirq);
 }
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
@@ -179,7 +181,6 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)

 	wirq->dev = dev;
 	wirq->irq = irq;
-	wirq->dedicated_irq = true;
 	irq_set_status_flags(irq, IRQ_NOAUTOEN);

 	/*
@@ -195,6 +196,8 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
 	if (err)
 		goto err_free_irq;

+	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+
 	return err;

err_free_irq:
@@ -210,9 +213,9 @@ EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
- * Called from the bus code or the device driver for
- * runtime_suspend() to enable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_resume() to override the PM runtime core managed wake-up
+ * interrupt handling to enable the wake-up interrupt.
 *
 * Note that for runtime_suspend()) the wake-up interrupts
 * should be unconditionally enabled unlike for suspend()
@@ -222,7 +225,7 @@ void dev_pm_enable_wake_irq(struct device *dev)
 {
 	struct wake_irq *wirq = dev->power.wakeirq;

-	if (wirq && wirq->dedicated_irq)
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
 		enable_irq(wirq->irq);
 }
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
@@ -231,19 +234,72 @@ EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
- * Called from the bus code or the device driver for
- * runtime_resume() to disable the wake-up interrupt while
- * the device is running.
+ * Optionally called from the bus code or the device driver for
+ * runtime_suspend() to override the PM runtime core managed wake-up
+ * interrupt handling to disable the wake-up interrupt.
 */
void dev_pm_disable_wake_irq(struct device *dev)
{
 	struct wake_irq *wirq = dev->power.wakeirq;

-	if (wirq && wirq->dedicated_irq)
+	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
 		disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);

+/**
+ * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
+ * @dev: Device
+ * @can_change_status: Can change wake-up interrupt status
+ *
+ * Enables wakeirq conditionally. We need to enable wake-up interrupt
+ * lazily on the first rpm_suspend(). This is needed as the consumer device
+ * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
+ * otherwise try to disable already disabled wakeirq. The wake-up interrupt
+ * starts disabled with IRQ_NOAUTOEN set.
+ *
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ * Caller must hold &dev->power.lock to change wirq->status
+ */
+void dev_pm_enable_wake_irq_check(struct device *dev,
+				  bool can_change_status)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
+		goto enable;
+	} else if (can_change_status) {
+		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
+		goto enable;
+	}
+
+	return;
+
+enable:
+	enable_irq(wirq->irq);
+}
+
+/**
+ * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
+ * @dev: Device
+ *
+ * Disables wake-up interrupt conditionally based on status.
+ * Should be only called from rpm_suspend() and rpm_resume() path.
+ */
+void dev_pm_disable_wake_irq_check(struct device *dev)
+{
+	struct wake_irq *wirq = dev->power.wakeirq;
+
+	if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
+		return;
+
+	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
+		disable_irq_nosync(wirq->irq);
+}
+
/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt

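A compressed model of the wakeirq state handling these hunks introduce (illustrative C only; the real code is the drivers/base/power diff above). The dedicated IRQ starts disabled via IRQ_NOAUTOEN, so the first rpm_suspend() is the only caller allowed to promote it to the managed state:

#define WAKE_IRQ_DEDICATED_ALLOCATED	(1U << 0)
#define WAKE_IRQ_DEDICATED_MANAGED	(1U << 1)

struct wirq_model {
	unsigned int status;	/* mirrors wake_irq::status */
	int enabled;		/* stands in for enable_irq()/disable_irq_nosync() */
};

static void model_enable_check(struct wirq_model *w, int can_change_status)
{
	if (w->status & WAKE_IRQ_DEDICATED_MANAGED) {
		w->enabled = 1;		/* already managed: just enable */
	} else if (can_change_status) {
		w->status |= WAKE_IRQ_DEDICATED_MANAGED;
		w->enabled = 1;		/* first suspend: promote, then enable */
	}				/* resume path must not change status */
}

static void model_disable_check(struct wirq_model *w)
{
	if (w->status & WAKE_IRQ_DEDICATED_MANAGED)
		w->enabled = 0;
}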
@@ -1082,7 +1082,9 @@ static void bcm2835_pll_divider_off(struct clk_hw *hw)
 	cprman_write(cprman, data->cm_reg,
 		     (cprman_read(cprman, data->cm_reg) &
 		      ~data->load_mask) | data->hold_mask);
-	cprman_write(cprman, data->a2w_reg, A2W_PLL_CHANNEL_DISABLE);
+	cprman_write(cprman, data->a2w_reg,
+		     cprman_read(cprman, data->a2w_reg) |
+		     A2W_PLL_CHANNEL_DISABLE);
 	spin_unlock(&cprman->regs_lock);
 }

@@ -247,7 +247,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw)
 	if (ret < 0) {
 		dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
 			ret);
-		return true;
+		return false;
 	}

 	return (ret & WM831X_CLKOUT_ENA) != 0;

@@ -157,10 +157,8 @@ static void __init _mx31_clocks_init(unsigned long fref)
 	}
 }

-int __init mx31_clocks_init(void)
+int __init mx31_clocks_init(unsigned long fref)
 {
-	u32 fref = 26000000; /* default */
-
 	_mx31_clocks_init(fref);

 	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");

@@ -223,7 +223,8 @@ static int ast_get_dram_info(struct drm_device *dev)
 	ast_write32(ast, 0x10000, 0xfc600309);

 	do {
-		;
+		if (pci_channel_offline(dev->pdev))
+			return -EIO;
 	} while (ast_read32(ast, 0x10000) != 0x01);
 	data = ast_read32(ast, 0x10004);

@@ -429,7 +430,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
 	ast_detect_chip(dev, &need_post);

 	if (ast->chip != AST1180) {
-		ast_get_dram_info(dev);
+		ret = ast_get_dram_info(dev);
+		if (ret)
+			goto out_free;
 		ast->vram_size = ast_get_vram_info(dev);
 		DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size);
 	}

@@ -484,6 +484,9 @@ static const struct file_operations psb_gem_fops = {
 	.open = drm_open,
 	.release = drm_release,
 	.unlocked_ioctl = psb_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = drm_compat_ioctl,
+#endif
 	.mmap = drm_gem_mmap,
 	.poll = drm_poll,
 	.read = drm_read,

@@ -333,6 +333,9 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
 	if (bios->major_version < 5 && bios->data[0x48] & 0x4)
 		return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;

+	if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
+		return nvif_rd32(device, 0x001800) & 0x0000000f;
+	else
 	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
 	else

@@ -1833,7 +1833,7 @@ nvf1_chipset = {
 	.fb = gk104_fb_new,
 	.fuse = gf100_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gf119_i2c_new,
+	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
 	.imem = nv50_instmem_new,
 	.ltc = gk104_ltc_new,
@@ -1941,7 +1941,7 @@ nv117_chipset = {
 	.fb = gm107_fb_new,
 	.fuse = gm107_fuse_new,
 	.gpio = gk104_gpio_new,
-	.i2c = gf119_i2c_new,
+	.i2c = gk104_i2c_new,
 	.ibus = gk104_ibus_new,
 	.imem = nv50_instmem_new,
 	.ltc = gm107_ltc_new,

@@ -59,6 +59,7 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	struct nvkm_gpuobj *inst = chan->base.inst;
 	int ret = 0;

+	mutex_lock(&subdev->mutex);
 	nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
 		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
@@ -66,10 +67,12 @@ gf100_fifo_gpfifo_engine_fini(struct nvkm_fifo_chan *base,
 	) < 0) {
 		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
 			   chan->base.chid, chan->base.object.client->name);
-		ret = -EBUSY;
-		if (suspend)
-			return ret;
+		ret = -ETIMEDOUT;
 	}
+	mutex_unlock(&subdev->mutex);
+
+	if (ret && suspend)
+		return ret;

 	if (offset) {
 		nvkm_kmap(inst);

@@ -39,7 +39,9 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
 	struct nvkm_device *device = subdev->device;
 	struct nvkm_client *client = chan->base.object.client;
+	int ret = 0;

+	mutex_lock(&subdev->mutex);
 	nvkm_wr32(device, 0x002634, chan->base.chid);
 	if (nvkm_msec(device, 2000,
 		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
@@ -47,10 +49,10 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
 	) < 0) {
 		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
 			   chan->base.chid, client->name);
-		return -EBUSY;
+		ret = -ETIMEDOUT;
 	}
-
-	return 0;
+	mutex_unlock(&subdev->mutex);
+	return ret;
 }

 static u32

@@ -12,6 +12,7 @@ struct nvbios_source {
 	bool rw;
 	bool ignore_checksum;
 	bool no_pcir;
+	bool require_checksum;
};

int nvbios_extend(struct nvkm_bios *, u32 length);

@@ -86,9 +86,12 @@ shadow_image(struct nvkm_bios *bios, int idx, u32 offset, struct shadow *mthd)
 	    nvbios_checksum(&bios->data[image.base], image.size)) {
 		nvkm_debug(subdev, "%08x: checksum failed\n",
 			   image.base);
-		if (mthd->func->rw)
+		if (!mthd->func->require_checksum) {
+			if (mthd->func->rw)
+				score += 1;
 			score += 1;
-		score += 1;
-	} else {
+		} else
+			return 0;
+	} else {
 		score += 3;
 	}

@@ -99,6 +99,7 @@ nvbios_acpi_fast = {
 	.init = acpi_init,
 	.read = acpi_read_fast,
 	.rw = false,
+	.require_checksum = true,
};

const struct nvbios_source

@@ -47,8 +47,10 @@ nvkm_ltc_tags_clear(struct nvkm_ltc *ltc, u32 first, u32 count)

 	BUG_ON((first > limit) || (limit >= ltc->num_tags));

+	mutex_lock(&ltc->subdev.mutex);
 	ltc->func->cbc_clear(ltc, first, limit);
 	ltc->func->cbc_wait(ltc);
+	mutex_unlock(&ltc->subdev.mutex);
 }

int

@@ -90,6 +90,9 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_device *rdev = crtc->dev->dev_private;

+	if (radeon_crtc->cursor_out_of_bounds)
+		return;
+
 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
 		       upper_32_bits(radeon_crtc->cursor_addr));
@@ -143,21 +146,25 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 	int xorigin = 0, yorigin = 0;
 	int w = radeon_crtc->cursor_width;

+	radeon_crtc->cursor_x = x;
+	radeon_crtc->cursor_y = y;
+
 	if (ASIC_IS_AVIVO(rdev)) {
 		/* avivo cursor are offset into the total surface */
 		x += crtc->x;
 		y += crtc->y;
 	}
-	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

-	if (x < 0) {
+	if (x < 0)
 		xorigin = min(-x, radeon_crtc->max_cursor_width - 1);
-		x = 0;
-	}
-	if (y < 0) {
+	if (y < 0)
 		yorigin = min(-y, radeon_crtc->max_cursor_height - 1);
-		y = 0;
-	}
+
+	if (!ASIC_IS_AVIVO(rdev)) {
+		x += crtc->x;
+		y += crtc->y;
+	}
+	DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);

 	/* fixed on DCE6 and newer */
 	if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE6(rdev)) {
@@ -180,27 +187,31 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 		if (i > 1) {
 			int cursor_end, frame_end;

-			cursor_end = x - xorigin + w;
+			cursor_end = x + w;
 			frame_end = crtc->x + crtc->mode.crtc_hdisplay;
 			if (cursor_end >= frame_end) {
 				w = w - (cursor_end - frame_end);
 				if (!(frame_end & 0x7f))
 					w--;
-			} else {
-				if (!(cursor_end & 0x7f))
-					w--;
+			} else if (cursor_end <= 0) {
+				goto out_of_bounds;
+			} else if (!(cursor_end & 0x7f)) {
+				w--;
 			}
 			if (w <= 0) {
-				w = 1;
-				cursor_end = x - xorigin + w;
-				if (!(cursor_end & 0x7f)) {
-					x--;
-					WARN_ON_ONCE(x < 0);
-				}
+				goto out_of_bounds;
 			}
 		}
 	}

+	if (x <= (crtc->x - w) || y <= (crtc->y - radeon_crtc->cursor_height) ||
+	    x >= (crtc->x + crtc->mode.crtc_hdisplay) ||
+	    y >= (crtc->y + crtc->mode.crtc_vdisplay))
+		goto out_of_bounds;
+
 	x += xorigin;
 	y += yorigin;

 	if (ASIC_IS_DCE4(rdev)) {
 		WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
 		WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -212,6 +223,9 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 		WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset,
 		       ((w - 1) << 16) | (radeon_crtc->cursor_height - 1));
 	} else {
+		x -= crtc->x;
+		y -= crtc->y;
+
 		if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
 			y *= 2;

@@ -229,10 +243,20 @@ static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
 			      yorigin * 256);
 	}

-	radeon_crtc->cursor_x = x;
-	radeon_crtc->cursor_y = y;
+	if (radeon_crtc->cursor_out_of_bounds) {
+		radeon_crtc->cursor_out_of_bounds = false;
+		if (radeon_crtc->cursor_bo)
+			radeon_show_cursor(crtc);
+	}

 	return 0;
+
+out_of_bounds:
+	if (!radeon_crtc->cursor_out_of_bounds) {
+		radeon_hide_cursor(crtc);
+		radeon_crtc->cursor_out_of_bounds = true;
+	}
+	return 0;
 }

int radeon_crtc_cursor_move(struct drm_crtc *crtc,
@@ -297,22 +321,23 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
 		return ret;
 	}

-	radeon_crtc->cursor_width = width;
-	radeon_crtc->cursor_height = height;
-
 	radeon_lock_cursor(crtc, true);

-	if (hot_x != radeon_crtc->cursor_hot_x ||
+	if (width != radeon_crtc->cursor_width ||
+	    height != radeon_crtc->cursor_height ||
+	    hot_x != radeon_crtc->cursor_hot_x ||
 	    hot_y != radeon_crtc->cursor_hot_y) {
 		int x, y;

 		x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
 		y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;

-		radeon_cursor_move_locked(crtc, x, y);
-
+		radeon_crtc->cursor_width = width;
+		radeon_crtc->cursor_height = height;
 		radeon_crtc->cursor_hot_x = hot_x;
 		radeon_crtc->cursor_hot_y = hot_y;
+
+		radeon_cursor_move_locked(crtc, x, y);
 	}

 	radeon_show_cursor(crtc);

@@ -330,6 +330,7 @@ struct radeon_crtc {
 	u16 lut_r[256], lut_g[256], lut_b[256];
 	bool enabled;
 	bool can_tile;
+	bool cursor_out_of_bounds;
 	uint32_t crtc_offset;
 	struct drm_gem_object *cursor_bo;
 	uint64_t cursor_addr;

@@ -3026,6 +3026,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 		    (rdev->pdev->revision == 0x80) ||
 		    (rdev->pdev->revision == 0x81) ||
 		    (rdev->pdev->revision == 0x83) ||
+		    (rdev->pdev->revision == 0x87) ||
 		    (rdev->pdev->device == 0x6604) ||
 		    (rdev->pdev->device == 0x6605)) {
 			max_sclk = 75000;

@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 	if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
 		return rdesc;

+	if (*rsize < 4)
+		return rdesc;
+
 	for (i = 0; i < *rsize - 4; i++)
 		if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
 			rdesc[i] = 0x19;

@@ -188,8 +188,8 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
 	       !data->valid) {

 		for (i = 0; i < TEMP_IDX_LEN; i++)
-			data->temp[i] = i2c_smbus_read_byte_data(client,
-				temp_reg[i]);
+			data->temp[i] = (int8_t)i2c_smbus_read_byte_data(
+				client, temp_reg[i]);

 		data->stat1 = i2c_smbus_read_byte_data(client,
 			AMC6821_REG_STAT1);

@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 	if (res)
 		return res;

-	val = (val * 10 / 625) * 8;
+	val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8;

 	mutex_lock(&data->update_lock);
 	data->temp[attr->index] = val;

@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 clk_freq, u16 p,
 * Convert fan RPM value from sysfs into count value for fan controller
 * register (FAN_SET_CNT).
 */
-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p,
+static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p,
					 u8 clk_div, u8 gear_mult)
{
-	if (!rpm)	/* to stop the fan, set cnt to 255 */
+	unsigned long f1 = clk_freq * 30 * gear_mult;
+	unsigned long f2 = p * clk_div;
+
+	if (!rpm)	/* to stop the fan, set cnt to 255 */
		return 0xff;

-	return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)),
-			 0, 255);
+	rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2);
+	return DIV_ROUND_CLOSEST(f1, rpm * f2);
}

/* helper to grab and cache data, at most one time per second */

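The g762 change replaces a clamp on the final count with a clamp on the RPM input. A standalone version of the arithmetic (illustrative C, not the driver): clamping rpm to [f1 / (255 * f2), ULONG_MAX / f2] keeps the rounded quotient within the 8-bit register and makes rpm * f2 overflow-free:

#include <limits.h>

#define DIV_ROUND_CLOSEST_UL(x, d)	(((x) + (d) / 2) / (d))

static unsigned char rpm_to_cnt(unsigned long rpm, unsigned long clk_freq,
				unsigned long p, unsigned long clk_div,
				unsigned long gear_mult)
{
	unsigned long f1 = clk_freq * 30 * gear_mult;
	unsigned long f2 = p * clk_div;

	if (!rpm)			/* cnt 0xff stops the fan */
		return 0xff;

	if (rpm < f1 / (255 * f2))	/* cap the resulting count near 255 */
		rpm = f1 / (255 * f2);
	if (rpm > ULONG_MAX / f2)	/* keep rpm * f2 from overflowing */
		rpm = ULONG_MAX / f2;

	return (unsigned char)DIV_ROUND_CLOSEST_UL(f1, rpm * f2);
}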
@@ -259,13 +259,15 @@ static int nct7802_read_fan_min(struct nct7802_data *data, u8 reg_fan_low,
 		ret = 0;
 	else if (ret)
 		ret = DIV_ROUND_CLOSEST(1350000U, ret);
+	else
+		ret = 1350000U;
abort:
 	mutex_unlock(&data->access_lock);
 	return ret;
 }

 static int nct7802_write_fan_min(struct nct7802_data *data, u8 reg_fan_low,
-				 u8 reg_fan_high, unsigned int limit)
+				 u8 reg_fan_high, unsigned long limit)
 {
 	int err;

@@ -326,8 +328,8 @@ static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
 	int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
 	int err;

-	voltage = clamp_val(voltage, 0, 0x3ff * nct7802_vmul[nr]);
 	voltage = DIV_ROUND_CLOSEST(voltage, nct7802_vmul[nr]);
+	voltage = clamp_val(voltage, 0, 0x3ff);

 	mutex_lock(&data->access_lock);
 	err = regmap_write(data->regmap,
@@ -402,7 +404,7 @@ static ssize_t store_temp(struct device *dev, struct device_attribute *attr,
 	if (err < 0)
 		return err;

-	val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127);
+	val = DIV_ROUND_CLOSEST(clamp_val(val, -128000, 127000), 1000);

 	err = regmap_write(data->regmap, nr, val & 0xff);
 	return err ? : count;

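Both nct7802 conversions above move the clamp so it happens in the unit the value arrives in, before scaling. A small sketch of why the order matters (illustrative, not the driver code): clamping after the division lets an already-wrapped or out-of-range input land on a wrong but in-range register value, while clamping first bounds the input in its native millidegree unit:

/* millidegrees C -> signed 8-bit register value: clamp first, then scale */
static signed char temp_mc_to_reg(long mc)
{
	mc = mc < -128000 ? -128000 : (mc > 127000 ? 127000 : mc);
	/* round to the nearest whole degree */
	return (signed char)((mc + (mc < 0 ? -500 : 500)) / 1000);
}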
@@ -272,6 +272,7 @@ static const struct of_device_id scpi_of_match[] = {
	{.compatible = "arm,scpi-sensors"},
	{},
};
+MODULE_DEVICE_TABLE(of, scpi_of_match);

static struct platform_driver scpi_hwmon_platdrv = {
	.driver = {

@@ -1745,7 +1745,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
-			    IB_MGMT_MAX_METHODS)
+			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];

@@ -517,8 +517,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;
-		ib_find_pkey(group->port->dev->device, group->port->port_num,
-			     be16_to_cpu(rec->pkey), &pkey_index);
+
+		if (ib_find_pkey(group->port->dev->device,
+				 group->port->port_num, be16_to_cpu(rec->pkey),
+				 &pkey_index))
+			pkey_index = MCAST_INVALID_PKEY_INDEX;

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&

@@ -563,8 +563,11 @@ void ipoib_mcast_join_task(struct work_struct *work)
	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		return;

-	if (ib_query_port(priv->ca, priv->port, &port_attr) ||
-	    port_attr.state != IB_PORT_ACTIVE) {
+	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
+		ipoib_dbg(priv, "ib_query_port() failed\n");
+		return;
+	}
+	if (port_attr.state != IB_PORT_ACTIVE) {
		ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
			  port_attr.state);
		return;

@@ -592,7 +592,6 @@ static int drv260x_probe(struct i2c_client *client,
	}

	haptics->input_dev->name = "drv260x:haptics";
-	haptics->input_dev->dev.parent = client->dev.parent;
	haptics->input_dev->close = drv260x_close;
	input_set_drvdata(haptics->input_dev, haptics);
	input_set_capability(haptics->input_dev, EV_FF, FF_RUMBLE);

@@ -926,7 +926,7 @@ again:
	next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE;
	left      = (head - next_tail) % CMD_BUFFER_SIZE;

-	if (left <= 2) {
+	if (left <= 0x20) {
		struct iommu_cmd sync_cmd;
		volatile u64 sem = 0;
		int ret;

@@ -809,8 +809,10 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
		goto out_free_domain;

	group = iommu_group_get(&pdev->dev);
-	if (!group)
+	if (!group) {
+		ret = -EINVAL;
		goto out_free_domain;
+	}

	ret = iommu_attach_group(dev_state->domain, group);
	if (ret != 0)

@@ -1993,6 +1993,25 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_present(context))
 		goto out_unlock;
 
+	/*
+	 * For kdump cases, old valid entries may be cached due to the
+	 * in-flight DMA and copied pgtable, but there is no unmapping
+	 * behaviour for them, thus we need an explicit cache flush for
+	 * the newly-mapped device. For kdump, at this point, the device
+	 * is supposed to finish reset at its driver probe stage, so no
+	 * in-flight DMA will exist, and we don't need to worry anymore
+	 * hereafter.
+	 */
+	if (context_copied(context)) {
+		u16 did_old = context_domain_id(context);
+
+		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+			iommu->flush.flush_context(iommu, did_old,
+						   (((u16)bus) << 8) | devfn,
+						   DMA_CCMD_MASK_NOBIT,
+						   DMA_CCMD_DEVICE_INVL);
+	}
+
 	pgd = domain->pgd;
 
 	context_clear_entry(context);

@@ -5020,6 +5039,25 @@ static void intel_iommu_remove_device(struct device *dev)
 }
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
+#define MAX_NR_PASID_BITS (20)
+static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
+{
+	/*
+	 * Convert ecap_pss to extend context entry pts encoding, also
+	 * respect the soft pasid_max value set by the iommu.
+	 * - number of PASID bits = ecap_pss + 1
+	 * - number of PASID table entries = 2^(pts + 5)
+	 * Therefore, pts = ecap_pss - 4
+	 * e.g. KBL ecap_pss = 0x13, PASID has 20 bits, pts = 15
+	 */
+	if (ecap_pss(iommu->ecap) < 5)
+		return 0;
+
+	/* pasid_max is encoded as actual number of entries not the bits */
+	return find_first_bit((unsigned long *)&iommu->pasid_max,
+			MAX_NR_PASID_BITS) - 5;
+}
+
 int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev)
 {
 	struct device_domain_info *info;

@@ -5052,7 +5090,9 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
 
 	if (!(ctx_lo & CONTEXT_PASIDE)) {
 		context[1].hi = (u64)virt_to_phys(iommu->pasid_state_table);
-		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) | ecap_pss(iommu->ecap);
+		context[1].lo = (u64)virt_to_phys(iommu->pasid_table) |
+			intel_iommu_get_pts(iommu);
+
 		wmb();
 		/* CONTEXT_TT_MULTI_LEVEL and CONTEXT_TT_DEV_IOTLB are both
 		 * extended to permit requests-with-PASID if the PASIDE bit
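The comment in intel_iommu_get_pts() above encodes a small piece of arithmetic: hardware reports ecap_pss such that the PASID width is pss + 1 bits, while the context-entry field counts table entries as 2^(pts + 5), hence pts = pss - 4. A standalone check of that arithmetic (plain C; the example value is the KBL one from the comment):

```c
#include <stdio.h>

int main(void)
{
	unsigned int pss = 0x13;			/* reported ecap_pss */
	unsigned int pasid_bits = pss + 1;		/* 20-bit PASIDs */
	unsigned int pts = pss - 4;			/* context-entry encoding: 15 */
	unsigned long long entries = 1ULL << (pts + 5);

	/* 2^(15 + 5) == 2^20 entries, one per 20-bit PASID */
	printf("bits=%u pts=%u entries=%llu\n", pasid_bits, pts, entries);
	return 0;
}
```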
@@ -216,6 +216,31 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
 	return 0;
 }
 
+static void bcm7038_l1_cpu_offline(struct irq_data *d)
+{
+	struct cpumask *mask = irq_data_get_affinity_mask(d);
+	int cpu = smp_processor_id();
+	cpumask_t new_affinity;
+
+	/* This CPU was not on the affinity mask */
+	if (!cpumask_test_cpu(cpu, mask))
+		return;
+
+	if (cpumask_weight(mask) > 1) {
+		/*
+		 * Multiple CPU affinity, remove this CPU from the affinity
+		 * mask
+		 */
+		cpumask_copy(&new_affinity, mask);
+		cpumask_clear_cpu(cpu, &new_affinity);
+	} else {
+		/* Only CPU, put on the lowest online CPU */
+		cpumask_clear(&new_affinity);
+		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+	}
+	irq_set_affinity_locked(d, &new_affinity, false);
+}
+
 static int __init bcm7038_l1_init_one(struct device_node *dn,
 				      unsigned int idx,
 				      struct bcm7038_l1_chip *intc)
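The new irq_cpu_offline hook implements a simple policy: drop the dying CPU from a multi-CPU affinity mask, or fall back to the first online CPU when it was the only target. A hedged userspace model of that decision, using a plain bitmask in place of cpumask_t (gcc/clang builtins assumed; all names invented):

```c
#include <stdio.h>

/* Toy model: bit n set means CPU n is in the mask. */
static unsigned long fix_affinity(unsigned long mask, int dying_cpu,
				  unsigned long online)
{
	if (!(mask & (1UL << dying_cpu)))
		return mask;				/* dying CPU not targeted */
	if (__builtin_popcountl(mask) > 1)
		return mask & ~(1UL << dying_cpu);	/* just remove it */
	return online & -online;			/* lowest online CPU */
}

int main(void)
{
	/* CPUs 1 and 3 targeted, CPU 3 goes down -> only CPU 1 left */
	printf("%#lx\n", fix_affinity(0xaUL, 3, 0xfUL));
	/* CPU 3 was the only target -> fall back to CPU 0 */
	printf("%#lx\n", fix_affinity(0x8UL, 3, 0xfUL));
	return 0;
}
```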
@@ -267,6 +292,7 @@ static struct irq_chip bcm7038_l1_irq_chip = {
 	.irq_mask		= bcm7038_l1_mask,
 	.irq_unmask		= bcm7038_l1_unmask,
 	.irq_set_affinity	= bcm7038_l1_set_affinity,
+	.irq_cpu_offline	= bcm7038_l1_cpu_offline,
 };
 
 static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
@@ -762,8 +762,10 @@ static int __init ser_gigaset_init(void)
 	driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
 				    GIGASET_MODULENAME, GIGASET_DEVNAME,
 				    &ops, THIS_MODULE);
-	if (!driver)
+	if (!driver) {
+		rc = -ENOMEM;
 		goto error;
+	}
 
 	rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
 	if (rc != 0) {
@@ -6771,7 +6771,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 		/* need to ensure recovery thread has run */
 		wait_event_interruptible_timeout(mddev->sb_wait,
 						 !test_bit(MD_RECOVERY_NEEDED,
-							   &mddev->flags),
+							   &mddev->recovery),
 						 msecs_to_jiffies(5000));
 	if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) {
 		/* Need to flush page cache, and ensure no-one else opens

@@ -6980,6 +6980,15 @@ static int run(struct mddev *mddev)
 			stripe = (stripe | (stripe-1)) + 1;
 		mddev->queue->limits.discard_alignment = stripe;
 		mddev->queue->limits.discard_granularity = stripe;
+
+		/*
+		 * We use 16-bit counter of active stripes in bi_phys_segments
+		 * (minus one for over-loaded initialization)
+		 */
+		blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS);
+		blk_queue_max_discard_sectors(mddev->queue,
+					      0xfffe * STRIPE_SECTORS);
+
 		/*
 		 * unaligned part of discard request will be ignored, so can't
 		 * guarantee discard_zeroes_data
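The raid5 hunk caps request size because the per-bio stripe count must fit a 16-bit counter, with one value reserved for initialization, hence 0xfffe stripes. The cap in sectors is just 0xfffe * STRIPE_SECTORS; a quick arithmetic check (with 4 KiB pages, STRIPE_SECTORS is 8 in this kernel):

```c
#include <stdio.h>

int main(void)
{
	unsigned int stripe_sectors = 8;	/* 4096-byte stripe / 512-byte sector */
	unsigned int max_stripes = 0xfffe;	/* 16-bit counter minus reserved value */
	unsigned long max_sectors = (unsigned long)max_stripes * stripe_sectors;

	/* 524272 sectors, i.e. just under 256 MiB per request */
	printf("%lu sectors (%lu MiB)\n", max_sectors,
	       max_sectors * 512 / (1024 * 1024));
	return 0;
}
```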
@@ -2168,11 +2168,12 @@ static int dvb_register(struct cx23885_tsport *port)
 		}
 		port->i2c_client_tuner = client_tuner;
 		break;
-	case CX23885_BOARD_HAUPPAUGE_HVR5525:
-		switch (port->nr) {
+	case CX23885_BOARD_HAUPPAUGE_HVR5525: {
 		struct m88rs6000t_config m88rs6000t_config;
 		struct a8293_platform_data a8293_pdata = {};
 
+		switch (port->nr) {
+
 		/* port b - satellite */
 		case 1:
 			/* attach frontend */

@@ -2267,6 +2268,7 @@ static int dvb_register(struct cx23885_tsport *port)
 			break;
 		}
 		break;
+	}
 	default:
 		printk(KERN_INFO "%s: The frontend of your DVB/ATSC card "
 			" isn't supported yet\n",
@@ -286,7 +286,10 @@ static inline u32 solo_reg_read(struct solo_dev *solo_dev, int reg)
 static inline void solo_reg_write(struct solo_dev *solo_dev, int reg,
 				  u32 data)
 {
+	u16 val;
+
 	writel(data, solo_dev->reg_base + reg);
+	pci_read_config_word(solo_dev->pdev, PCI_STATUS, &val);
 }
 
 static inline void solo_irq_on(struct solo_dev *dev, u32 mask)
@@ -399,7 +399,7 @@ bool mei_cldev_enabled(struct mei_cl_device *cldev)
 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
 
 /**
- * mei_cldev_enable_device - enable me client device
+ * mei_cldev_enable - enable me client device
  *     create connection with me client
  *
  * @cldev: me client device

@@ -698,7 +698,7 @@ void mei_host_client_init(struct work_struct *work)
 
 	pm_runtime_mark_last_busy(dev->dev);
 	dev_dbg(dev->dev, "rpm: autosuspend\n");
-	pm_runtime_autosuspend(dev->dev);
+	pm_request_autosuspend(dev->dev);
 }
 
 /**
@@ -791,7 +791,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
 	struct mmc_async_req *cur_areq = &test_areq[0].areq;
 	struct mmc_async_req *other_areq = &test_areq[1].areq;
 	int i;
-	int ret;
+	int ret = RESULT_OK;
 
 	test_areq[0].test = test;
 	test_areq[1].test = test;
@@ -2040,7 +2040,27 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
 			ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
 			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
 
+			sdhci_do_reset(host, SDHCI_RESET_CMD);
+			sdhci_do_reset(host, SDHCI_RESET_DATA);
+
 			err = -EIO;
+
+			if (cmd.opcode != MMC_SEND_TUNING_BLOCK_HS200)
+				goto out;
+
+			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+
+			spin_unlock_irqrestore(&host->lock, flags);
+
+			memset(&cmd, 0, sizeof(cmd));
+			cmd.opcode = MMC_STOP_TRANSMISSION;
+			cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+			cmd.busy_timeout = 50;
+			mmc_wait_for_cmd(mmc, &cmd, 0);
+
+			spin_lock_irqsave(&host->lock, flags);
+
 			goto out;
 		}
 
@@ -772,6 +772,17 @@ struct mvpp2_rx_desc {
 	u32 reserved8;
 };
 
+struct mvpp2_txq_pcpu_buf {
+	/* Transmitted SKB */
+	struct sk_buff *skb;
+
+	/* Physical address of transmitted buffer */
+	dma_addr_t phys;
+
+	/* Size transmitted */
+	size_t size;
+};
+
 /* Per-CPU Tx queue control */
 struct mvpp2_txq_pcpu {
 	int cpu;

@@ -787,11 +798,8 @@ struct mvpp2_txq_pcpu {
 	/* Number of Tx DMA descriptors reserved for each CPU */
 	int reserved_num;
 
-	/* Array of transmitted skb */
-	struct sk_buff **tx_skb;
-
-	/* Array of transmitted buffers' physical addresses */
-	dma_addr_t *tx_buffs;
+	/* Infos about transmitted buffers */
+	struct mvpp2_txq_pcpu_buf *buffs;
 
 	/* Index of last TX DMA descriptor that was inserted */
 	int txq_put_index;

@@ -981,10 +989,11 @@ static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
 			      struct sk_buff *skb,
 			      struct mvpp2_tx_desc *tx_desc)
 {
-	txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
-	if (skb)
-		txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
-						 tx_desc->buf_phys_addr;
+	struct mvpp2_txq_pcpu_buf *tx_buf =
+		txq_pcpu->buffs + txq_pcpu->txq_put_index;
+	tx_buf->skb = skb;
+	tx_buf->size = tx_desc->data_size;
+	tx_buf->phys = tx_desc->buf_phys_addr;
 	txq_pcpu->txq_put_index++;
 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
 		txq_pcpu->txq_put_index = 0;
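mvpp2_txq_inc_put() above shows the usual ring-buffer convention: write at txq_put_index, then advance and wrap to zero at the end of the array. A minimal generic version of that put-side logic (userspace C, names invented):

```c
#include <stdio.h>

struct ring {
	int put_index;
	int size;
	int slots[4];
};

static void ring_put(struct ring *r, int val)
{
	r->slots[r->put_index] = val;	/* fill the current slot */
	r->put_index++;			/* then advance and wrap */
	if (r->put_index == r->size)
		r->put_index = 0;
}

int main(void)
{
	struct ring r = { .put_index = 0, .size = 4 };

	for (int i = 0; i < 6; i++)
		ring_put(&r, i);
	printf("put_index=%d\n", r.put_index);	/* 6 puts on size 4 -> index 2 */
	return 0;
}
```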
@@ -4403,17 +4412,16 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
 	int i;
 
 	for (i = 0; i < num; i++) {
-		dma_addr_t buf_phys_addr =
-				    txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
-		struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+		struct mvpp2_txq_pcpu_buf *tx_buf =
+			txq_pcpu->buffs + txq_pcpu->txq_get_index;
 
 		mvpp2_txq_inc_get(txq_pcpu);
 
-		dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-		if (!skb)
+		dma_unmap_single(port->dev->dev.parent, tx_buf->phys,
+				 tx_buf->size, DMA_TO_DEVICE);
+		if (!tx_buf->skb)
 			continue;
-		dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(tx_buf->skb);
 	}
 }
 

@@ -4664,15 +4672,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
 		txq_pcpu->size = txq->size;
-		txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
-					   sizeof(*txq_pcpu->tx_skb),
-					   GFP_KERNEL);
-		if (!txq_pcpu->tx_skb)
-			goto error;
-
-		txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
-					     sizeof(dma_addr_t), GFP_KERNEL);
-		if (!txq_pcpu->tx_buffs)
+		txq_pcpu->buffs = kmalloc(txq_pcpu->size *
+					  sizeof(struct mvpp2_txq_pcpu_buf),
+					  GFP_KERNEL);
+		if (!txq_pcpu->buffs)
 			goto error;
 
 		txq_pcpu->count = 0;

@@ -4686,8 +4689,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
 error:
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->tx_skb);
-		kfree(txq_pcpu->tx_buffs);
+		kfree(txq_pcpu->buffs);
 	}
 
 	dma_free_coherent(port->dev->dev.parent,

@@ -4706,8 +4708,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
 
 	for_each_present_cpu(cpu) {
 		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-		kfree(txq_pcpu->tx_skb);
-		kfree(txq_pcpu->tx_buffs);
+		kfree(txq_pcpu->buffs);
 	}
 
 	if (txq->descs)
@@ -432,6 +432,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
 		 to_fw_pkey_sz(128));
 
+	/* Check log_max_qp from HCA caps to set in current profile */
+	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+			       profile[prof_sel].log_max_qp,
+			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+	}
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
 			 prof->log_max_qp);
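The mlx5 hunk clamps a requested profile value to what the HCA actually supports before programming it, warning when it does so. The shape of that guard, as a hedged sketch (userspace C; function and parameter names are invented):

```c
#include <stdio.h>

static unsigned int clamp_log_max_qp(unsigned int requested,
				     unsigned int hca_limit)
{
	if (hca_limit < requested) {
		/* mirror the driver: warn, then fall back to the cap */
		fprintf(stderr, "log_max_qp %u exceeds HCA limit, using %u\n",
			requested, hca_limit);
		return hca_limit;
	}
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_log_max_qp(18, 17));	/* clamped to 17 */
	return 0;
}
```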
@@ -505,7 +512,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	struct mlx5_priv *priv = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {

@@ -513,7 +519,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 		return -ENOMEM;
 	}
 
-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
 	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-#ifdef CONFIG_PCI_MSI
-
 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
 	struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;

@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
 	if (vdev->config.intr_type == MSI_X)
 		pci_disable_msix(vdev->pdev);
 }
-#endif
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-#ifdef CONFIG_PCI_MSI
-	if (vdev->config.intr_type == MSI_X) {
+	if (IS_ENABLED(CONFIG_PCI_MSI) &&
+	    vdev->config.intr_type == MSI_X) {
 		vxge_rem_msix_isr(vdev);
-	} else
-#endif
-	if (vdev->config.intr_type == INTA) {
+	} else if (vdev->config.intr_type == INTA) {
 		synchronize_irq(vdev->pdev->irq);
 		free_irq(vdev->pdev->irq, vdev);
 	}

@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
 static int vxge_add_isr(struct vxgedev *vdev)
 {
 	int ret = 0;
-#ifdef CONFIG_PCI_MSI
 	int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
 	int pci_fun = PCI_FUNC(vdev->pdev->devfn);
 
-	if (vdev->config.intr_type == MSI_X)
+	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
 		ret = vxge_enable_msix(vdev);
 
 	if (ret) {

@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
 		vdev->config.intr_type = INTA;
 	}
 
-	if (vdev->config.intr_type == MSI_X) {
+	if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
 		for (intr_idx = 0;
 		     intr_idx < (vdev->no_of_vpath *
 			VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {

@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
 			vdev->vxge_entries[intr_cnt].in_use = 1;
 			vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
 		}
-INTA_MODE:
-#endif
 
+INTA_MODE:
 	if (vdev->config.intr_type == INTA) {
 		snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
 			 "%s:vxge:INTA", vdev->ndev->name);

@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
 	if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
 		max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
 
-#ifndef CONFIG_PCI_MSI
-	vxge_debug_init(VXGE_ERR,
-		"%s: This Kernel does not support "
-		"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
-	*intr_type = INTA;
-#endif
+	if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+		vxge_debug_init(VXGE_ERR,
+			"%s: This Kernel does not support "
+			"MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+		*intr_type = INTA;
+	}
 
 	/* Configure whether MSI-X or IRQL. */
 	switch (*intr_type) {
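The vxge changes replace #ifdef CONFIG_PCI_MSI blocks with IS_ENABLED(CONFIG_PCI_MSI) in ordinary if statements, so both branches stay visible to the compiler and the dead one is removed by constant folding. A simplified, self-contained imitation of the trick (the real macro lives in <linux/kconfig.h> and also handles =m; this stand-in assumes the option is defined to 1):

```c
#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(). */
#define CONFIG_DEMO_MSI 1
#define IS_ENABLED(opt) (opt)

int main(void)
{
	/* Both arms are parsed and type-checked, unlike code hidden
	 * behind #ifdef; the untaken arm is folded away. */
	if (IS_ENABLED(CONFIG_DEMO_MSI))
		printf("MSI-X path compiled in\n");
	else
		printf("INTA fallback\n");
	return 0;
}
```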
@@ -2939,12 +2939,6 @@ int stmmac_dvr_probe(struct device *device,
 	spin_lock_init(&priv->lock);
 	spin_lock_init(&priv->tx_lock);
 
-	ret = register_netdev(ndev);
-	if (ret) {
-		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
-		goto error_netdev_register;
-	}
-
 	/* If a specific clk_csr value is passed from the platform
 	 * this means that the CSR Clock Range selection cannot be
 	 * changed at run-time and it is fixed. Viceversa the driver'll try to

@@ -2969,11 +2963,21 @@ int stmmac_dvr_probe(struct device *device,
 		}
 	}
 
-	return 0;
+	ret = register_netdev(ndev);
+	if (ret) {
+		netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
+			   __func__, ret);
+		goto error_netdev_register;
+	}
+
+	return ret;
 
-error_mdio_register:
-	unregister_netdev(ndev);
 error_netdev_register:
+	if (priv->pcs != STMMAC_PCS_RGMII &&
+	    priv->pcs != STMMAC_PCS_TBI &&
+	    priv->pcs != STMMAC_PCS_RTBI)
+		stmmac_mdio_unregister(ndev);
+error_mdio_register:
 	netif_napi_del(&priv->napi);
 error_hw_init:
 	clk_disable_unprepare(priv->pclk);
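Moving register_netdev() to the end also reorders the error labels: each goto target undoes exactly the steps completed so far, in reverse. The general pattern the rewritten error path follows, sketched with stub setup functions (all names invented):

```c
#include <stdio.h>

static int step_a(void) { puts("a up");   return 0; }
static int step_b(void) { puts("b up");   return 0; }
static int step_c(void) { puts("c fail"); return -1; }	/* last step fails */
static void undo_b(void) { puts("b down"); }
static void undo_a(void) { puts("a down"); }

int main(void)
{
	int ret;

	if ((ret = step_a()))
		goto err_a;
	if ((ret = step_b()))
		goto err_b;
	if ((ret = step_c()))	/* like register_netdev() going last */
		goto err_b;	/* unwind everything done before it */
	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
	return ret;
}
```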
@@ -549,7 +549,8 @@ fatal_error:
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	int queue, len;
+	int queue;
+	unsigned int len;
 	struct cpmac_desc *desc;
 	struct cpmac_priv *priv = netdev_priv(dev);
 

@@ -559,7 +560,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(skb_padto(skb, ETH_ZLEN)))
 		return NETDEV_TX_OK;
 
-	len = max(skb->len, ETH_ZLEN);
+	len = max_t(unsigned int, skb->len, ETH_ZLEN);
 	queue = skb_get_queue_mapping(skb);
 	netif_stop_subqueue(dev, queue);
 
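The cpmac change is a type fix: with len declared int, max(skb->len, ETH_ZLEN) mixes an unsigned u32 with a signed constant, which the kernel's type-checked max() rejects, and which naive comparisons can get wrong. max_t(unsigned int, ...) forces a single comparison type. The underlying pitfall in plain C:

```c
#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 4000000000u;	/* large unsigned length */
	int eth_zlen = 60;
	int bad = -1;

	/* Mixing signedness converts the signed value to unsigned: */
	if (bad < eth_zlen && (unsigned int)bad > skb_len)
		printf("-1 became %u when compared unsigned\n",
		       (unsigned int)bad);

	/* Explicitly picking the comparison type, like max_t(): */
	unsigned int len = skb_len > (unsigned int)eth_zlen
				? skb_len : (unsigned int)eth_zlen;
	printf("len=%u\n", len);
	return 0;
}
```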
@@ -40,6 +40,8 @@
 
 #include "hyperv_net.h"
 
+/* Restrict GSO size to account for NVGRE */
+#define NETVSC_GSO_MAX_SIZE	62768
+
 #define RING_SIZE_MIN 64
 static int ring_size = 128;

@@ -852,6 +854,7 @@ static int netvsc_set_channels(struct net_device *net,
 		}
 		goto recover;
 	}
+	netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
 
 out:
 	netvsc_open(net);
@@ -3446,39 +3446,87 @@ static bool delay_autosuspend(struct r8152 *tp)
 	return false;
 }
 
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_rumtime_suspend(struct r8152 *tp)
 {
-	struct r8152 *tp = usb_get_intfdata(intf);
 	struct net_device *netdev = tp->netdev;
 	int ret = 0;
 
-	mutex_lock(&tp->control);
+	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+		u32 rcr = 0;
 
-	if (PMSG_IS_AUTO(message)) {
-		if (netif_running(netdev) && delay_autosuspend(tp)) {
+		if (delay_autosuspend(tp)) {
 			ret = -EBUSY;
 			goto out1;
 		}
 
-		set_bit(SELECTIVE_SUSPEND, &tp->flags);
-	} else {
-		netif_device_detach(netdev);
+		if (netif_carrier_ok(netdev)) {
+			u32 ocp_data;
+
+			rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+			ocp_data = rcr & ~RCR_ACPT_ALL;
+			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+			rxdy_gated_en(tp, true);
+			ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+						 PLA_OOB_CTRL);
+			if (!(ocp_data & RXFIFO_EMPTY)) {
+				rxdy_gated_en(tp, false);
+				ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+				ret = -EBUSY;
+				goto out1;
+			}
+		}
+
+		clear_bit(WORK_ENABLE, &tp->flags);
+		usb_kill_urb(tp->intr_urb);
+
+		rtl_runtime_suspend_enable(tp, true);
+
+		if (netif_carrier_ok(netdev)) {
+			napi_disable(&tp->napi);
+			rtl_stop_rx(tp);
+			rxdy_gated_en(tp, false);
+			ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+			napi_enable(&tp->napi);
+		}
 	}
 
+	set_bit(SELECTIVE_SUSPEND, &tp->flags);
+
+out1:
+	return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+	struct net_device *netdev = tp->netdev;
+	int ret = 0;
+
+	netif_device_detach(netdev);
+
 	if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
 		clear_bit(WORK_ENABLE, &tp->flags);
 		usb_kill_urb(tp->intr_urb);
 		napi_disable(&tp->napi);
-		if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-			rtl_stop_rx(tp);
-			rtl_runtime_suspend_enable(tp, true);
-		} else {
-			cancel_delayed_work_sync(&tp->schedule);
-			tp->rtl_ops.down(tp);
-		}
+		cancel_delayed_work_sync(&tp->schedule);
+		tp->rtl_ops.down(tp);
 		napi_enable(&tp->napi);
 	}
-out1:
+
+	return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct r8152 *tp = usb_get_intfdata(intf);
+	int ret;
+
+	mutex_lock(&tp->control);
+
+	if (PMSG_IS_AUTO(message))
+		ret = rtl8152_rumtime_suspend(tp);
+	else
+		ret = rtl8152_system_suspend(tp);
+
 	mutex_unlock(&tp->control);
 
 	return ret;
@@ -301,7 +301,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
 		.flowi4_tos = RT_TOS(ip4h->tos),
 		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
 				FLOWI_FLAG_SKIP_NH_OIF,
+		.flowi4_proto = ip4h->protocol,
 		.daddr = ip4h->daddr,
+		.saddr = ip4h->saddr,
 	};
 
 	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))

@@ -410,6 +412,8 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
 	struct in6_addr *nexthop;
 	int ret;
 
+	nf_reset(skb);
+
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
 

@@ -521,6 +525,8 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 	u32 nexthop;
 	int ret = -EINVAL;
 
+	nf_reset(skb);
+
 	/* Be paranoid, rather than too clever. */
 	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
 		struct sk_buff *skb2;

@@ -919,6 +925,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
 		return -EINVAL;
 
 	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+	if (vrf->tb_id == RT_TABLE_UNSPEC)
+		return -EINVAL;
 
 	dev->priv_flags |= IFF_L3MDEV_MASTER;
 
@@ -338,7 +338,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file,
 		} else {
 			res = -EINVAL;
 		}
-	} else if (strncmp("background", buf, 9) == 0) {
+	} else if (strncmp("background", buf, 10) == 0) {
 		res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
 	} else if (strncmp("manual", buf, 6) == 0) {
 		res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
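The ath10k fix is a classic strncmp length bug: "background" has 10 characters, so comparing only 9 bytes also accepts any input that merely starts with "backgroun". Demonstrably:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *buf = "backgroun";	/* truncated, not a valid command */

	/* 9 bytes: "backgroun" matches, so the bad input is accepted */
	printf("n=9:  %s\n",
	       strncmp("background", buf, 9) == 0 ? "match" : "no match");
	/* 10 bytes: the final 'd' vs '\0' difference is seen */
	printf("n=10: %s\n",
	       strncmp("background", buf, 10) == 0 ? "match" : "no match");
	return 0;
}
```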
@@ -27,7 +27,6 @@ static const struct pci_device_id ath_pci_id_table[] = {
 	{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI   */
 	{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
 	{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI   */
-	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
 
 #ifdef CONFIG_ATH9K_PCOEM
 	/* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */

@@ -38,7 +37,7 @@ static const struct pci_device_id ath_pci_id_table[] = {
 	  .driver_data = ATH9K_PCI_LED_ACT_HI },
 #endif
 
-	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+	{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI   */
 
 #ifdef CONFIG_ATH9K_PCOEM
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,

@@ -86,7 +85,11 @@ static const struct pci_device_id ath_pci_id_table[] = {
 			 0x10CF, /* Fujitsu */
 			 0x1536),
 	  .driver_data = ATH9K_PCI_D3_L1_WAR },
+#endif
 
+	{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+#ifdef CONFIG_ATH9K_PCOEM
 	/* AR9285 card for Asus */
 	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
 			 0x002B,
@@ -1303,12 +1303,13 @@ EXPORT_SYMBOL_GPL(rtl_action_proc);
 
 static void setup_arp_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc)
 {
+	struct ieee80211_hw *hw = rtlpriv->hw;
+
 	rtlpriv->ra.is_special_data = true;
 	if (rtlpriv->cfg->ops->get_btc_status())
 		rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
 					rtlpriv, 1);
-	rtlpriv->enter_ps = false;
-	schedule_work(&rtlpriv->works.lps_change_work);
+	rtl_lps_leave(hw);
 	ppsc->last_delaylps_stamp_jiffies = jiffies;
 }
 

@@ -1381,8 +1382,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
 
 	if (is_tx) {
 		rtlpriv->ra.is_special_data = true;
-		rtlpriv->enter_ps = false;
-		schedule_work(&rtlpriv->works.lps_change_work);
+		rtl_lps_leave(hw);
 		ppsc->last_delaylps_stamp_jiffies = jiffies;
 	}
 

@@ -1153,10 +1153,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
 		} else {
 			mstatus = RT_MEDIA_DISCONNECT;
 
-			if (mac->link_state == MAC80211_LINKED) {
-				rtlpriv->enter_ps = false;
-				schedule_work(&rtlpriv->works.lps_change_work);
-			}
+			if (mac->link_state == MAC80211_LINKED)
+				rtl_lps_leave(hw);
 			if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
 				rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
 			mac->link_state = MAC80211_NOLINK;

@@ -1432,8 +1430,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
 	}
 
 	if (mac->link_state == MAC80211_LINKED) {
-		rtlpriv->enter_ps = false;
-		schedule_work(&rtlpriv->works.lps_change_work);
+		rtl_lps_leave(hw);
 		mac->link_state = MAC80211_LINKED_SCANNING;
 	} else {
 		rtl_ips_nic_on(hw);
@@ -664,11 +664,9 @@ tx_status_ok:
 	}
 
 	if (((rtlpriv->link_info.num_rx_inperiod +
-	      rtlpriv->link_info.num_tx_inperiod) > 8) ||
-	      (rtlpriv->link_info.num_rx_inperiod > 2)) {
-		rtlpriv->enter_ps = false;
-		schedule_work(&rtlpriv->works.lps_change_work);
-	}
+	    rtlpriv->link_info.num_tx_inperiod) > 8) ||
+	    (rtlpriv->link_info.num_rx_inperiod > 2))
+		rtl_lps_leave(hw);
 }
 
 static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,

@@ -919,10 +917,8 @@ new_trx_end:
 		}
 		if (((rtlpriv->link_info.num_rx_inperiod +
 		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
-		      (rtlpriv->link_info.num_rx_inperiod > 2)) {
-			rtlpriv->enter_ps = false;
-			schedule_work(&rtlpriv->works.lps_change_work);
-		}
+		    (rtlpriv->link_info.num_rx_inperiod > 2))
+			rtl_lps_leave(hw);
 		skb = new_skb;
 no_new:
 		if (rtlpriv->use_new_trx_flow) {
@@ -414,8 +414,8 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
 	}
 }
 
-/*Enter the leisure power save mode.*/
-void rtl_lps_enter(struct ieee80211_hw *hw)
+/* Interrupt safe routine to enter the leisure power save mode.*/
+static void rtl_lps_enter_core(struct ieee80211_hw *hw)
 {
 	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

@@ -455,10 +455,9 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
 
 	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
 }
-EXPORT_SYMBOL(rtl_lps_enter);
 
-/*Leave the leisure power save mode.*/
-void rtl_lps_leave(struct ieee80211_hw *hw)
+/* Interrupt safe routine to leave the leisure power save mode.*/
+static void rtl_lps_leave_core(struct ieee80211_hw *hw)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

@@ -488,7 +487,6 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
 	}
 	spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
 }
-EXPORT_SYMBOL(rtl_lps_leave);
 
 /* For sw LPS*/
 void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)

@@ -681,12 +679,34 @@ void rtl_lps_change_work_callback(struct work_struct *work)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
 	if (rtlpriv->enter_ps)
-		rtl_lps_enter(hw);
+		rtl_lps_enter_core(hw);
 	else
-		rtl_lps_leave(hw);
+		rtl_lps_leave_core(hw);
 }
 EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
 
+void rtl_lps_enter(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (!in_interrupt())
+		return rtl_lps_enter_core(hw);
+	rtlpriv->enter_ps = true;
+	schedule_work(&rtlpriv->works.lps_change_work);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_enter);
+
+void rtl_lps_leave(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+	if (!in_interrupt())
+		return rtl_lps_leave_core(hw);
+	rtlpriv->enter_ps = false;
+	schedule_work(&rtlpriv->works.lps_change_work);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_leave);
+
 void rtl_swlps_wq_callback(void *data)
 {
 	struct rtl_works *rtlworks = container_of_dwork_rtl(data,
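The rtlwifi rework keeps one rule in one place: callers in atomic context must not run the LPS work synchronously, so rtl_lps_enter()/rtl_lps_leave() now check in_interrupt() and either call the _core routine directly or punt to the workqueue. The control-flow shape, modeled in userspace (stand-in variables replace the kernel primitives):

```c
#include <stdio.h>
#include <stdbool.h>

static bool in_atomic_ctx;	/* stand-in for in_interrupt() */
static bool deferred_enter_ps;	/* stand-in for rtlpriv->enter_ps */

static void lps_enter_core(void) { puts("enter LPS (may sleep)"); }

static void lps_enter(void)
{
	if (!in_atomic_ctx) {
		lps_enter_core();	/* safe to do the work right here */
		return;
	}
	/* atomic context: record intent and let the work item do it */
	deferred_enter_ps = true;
	puts("scheduled lps_change_work");
}

int main(void)
{
	in_atomic_ctx = false; lps_enter();
	in_atomic_ctx = true;  lps_enter();
	printf("deferred=%d\n", deferred_enter_ps);
	return 0;
}
```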
@@ -258,8 +258,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
 
 static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn)
 {
-	if (vio_find_node(dn))
+	struct vio_dev *vio_dev;
+
+	vio_dev = vio_find_node(dn);
+	if (vio_dev) {
+		put_device(&vio_dev->dev);
 		return -EINVAL;
+	}
 
 	if (!vio_register_device_node(dn)) {
 		printk(KERN_ERR

@@ -335,6 +340,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn)
 		return -EINVAL;
 
 	vio_unregister_device(vio_dev);
+
+	put_device(&vio_dev->dev);
+
 	return 0;
 }
 
@@ -2043,6 +2043,10 @@ bool pci_dev_run_wake(struct pci_dev *dev)
 	if (!dev->pme_support)
 		return false;
 
+	/* PME-capable in principle, but not from the intended sleep state */
+	if (!pci_pme_capable(dev, pci_target_state(dev)))
+		return false;
+
 	while (bus->parent) {
 		struct pci_dev *bridge = bus->self;
 
@@ -126,6 +126,15 @@ static const struct dmi_system_id asus_quirks[] = {
 		},
 		.driver_data = &quirk_asus_wapf4,
 	},
+	{
+		.callback = dmi_matched,
+		.ident = "ASUSTeK COMPUTER INC. X45U",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "X45U"),
+		},
+		.driver_data = &quirk_asus_wapf4,
+	},
 	{
 		.callback = dmi_matched,
 		.ident = "ASUSTeK COMPUTER INC. X456UA",
@@ -47,7 +47,8 @@ static struct regulator_desc vmmc_regulator = {
 	.volt_table = stw481x_vmmc_voltages,
 	.enable_time = 200, /* FIXME: look this up */
 	.enable_reg = STW_CONF1,
-	.enable_mask = STW_CONF1_PDN_VMMC,
+	.enable_mask = STW_CONF1_PDN_VMMC | STW_CONF1_MMC_LS_STATUS,
+	.enable_val = STW_CONF1_PDN_VMMC,
 	.vsel_reg = STW_CONF1,
 	.vsel_mask = STW_CONF1_VMMC_MASK,
 };
@@ -872,7 +872,7 @@ static int __init vmlogrdr_init(void)
 		goto cleanup;
 
 	for (i=0; i < MAXMINOR; ++i ) {
-		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
+		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		if (!sys_ser[i].buffer) {
 			rc = -ENOMEM;
 			break;
@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
 
 
 /**
- * zfcp_dbf_rec_run - trace event related to running recovery
+ * zfcp_dbf_rec_run_lvl - trace event related to running recovery
+ * @level: trace level to be used for event
  * @tag: identifier for event
  * @erp: erp_action running
  */
-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
 {
 	struct zfcp_dbf *dbf = erp->adapter->dbf;
 	struct zfcp_dbf_rec *rec = &dbf->rec_buf;

@@ -319,10 +320,20 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
 	else
 		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);
 
-	debug_event(dbf->rec, 1, rec, sizeof(*rec));
+	debug_event(dbf->rec, level, rec, sizeof(*rec));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
+ * zfcp_dbf_rec_run - trace event related to running recovery
+ * @tag: identifier for event
+ * @erp: erp_action running
+ */
+void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp)
+{
+	zfcp_dbf_rec_run_lvl(1, tag, erp);
+}
+
+/**
  * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery
  * @tag: identifier for event
@@ -2,7 +2,7 @@
  * zfcp device driver
  * debug feature declarations
  *
- * Copyright IBM Corp. 2008, 2015
+ * Copyright IBM Corp. 2008, 2016
  */
 
 #ifndef ZFCP_DBF_H

@@ -283,6 +283,30 @@ struct zfcp_dbf {
 	struct zfcp_dbf_scsi		scsi_buf;
 };
 
+/**
+ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default
+ * @req: request that has been completed
+ *
+ * Returns true if FCP response with only benign residual under count.
+ */
+static inline
+bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req)
+{
+	struct fsf_qtcb *qtcb = req->qtcb;
+	u32 fsf_stat = qtcb->header.fsf_status;
+	struct fcp_resp *fcp_rsp;
+	u8 rsp_flags, fr_status;
+
+	if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND)
+		return false; /* not an FCP response */
+	fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp;
+	rsp_flags = fcp_rsp->fr_flags;
+	fr_status = fcp_rsp->fr_status;
+	return (fsf_stat == FSF_FCP_RSP_AVAILABLE) &&
+	       (rsp_flags == FCP_RESID_UNDER) &&
+	       (fr_status == SAM_STAT_GOOD);
+}
+
 static inline
 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req)
 {

@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
 		zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
 
 	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req);
+		zfcp_dbf_hba_fsf_resp("fs_ferr",
+				      zfcp_dbf_hba_fsf_resp_suppress(req)
+				      ? 5 : 1, req);
 
 	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
 		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
 	_zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
 }
 
+/**
+ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset.
+ * @scmnd: SCSI command that was NULLified.
+ * @fsf_req: request that owned @scmnd.
+ */
+static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd,
+					  struct zfcp_fsf_req *fsf_req)
+{
+	_zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req);
+}
+
 #endif /* ZFCP_DBF_H */
@@ -3,7 +3,7 @@
  *
  * Error Recovery Procedures (ERP).
  *
- * Copyright IBM Corp. 2002, 2015
+ * Copyright IBM Corp. 2002, 2016
  */
 
 #define KMSG_COMPONENT "zfcp"

@@ -1204,6 +1204,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	}
 }
 
+/**
+ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
+ * @port: zfcp_port whose fc_rport we should try to unblock
+ */
+static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
+{
+	unsigned long flags;
+	struct zfcp_adapter *adapter = port->adapter;
+	int port_status;
+	struct Scsi_Host *shost = adapter->scsi_host;
+	struct scsi_device *sdev;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	port_status = atomic_read(&port->status);
+	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
+			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
+		/* new ERP of severity >= port triggered elsewhere meanwhile or
+		 * local link down (adapter erp_failed but not clear unblock)
+		 */
+		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+		return;
+	}
+	spin_lock(shost->host_lock);
+	__shost_for_each_device(sdev, shost) {
+		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
+		int lun_status;
+
+		if (zsdev->port != port)
+			continue;
+		/* LUN under port of interest */
+		lun_status = atomic_read(&zsdev->status);
+		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
+			continue; /* unblock rport despite failed LUNs */
+		/* LUN recovery not given up yet [maybe follow-up pending] */
+		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
+		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
+			/* LUN blocked:
+			 * not yet unblocked [LUN recovery pending]
+			 * or meanwhile blocked [new LUN recovery triggered]
+			 */
+			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
+			spin_unlock(shost->host_lock);
+			write_unlock_irqrestore(&adapter->erp_lock, flags);
+			return;
+		}
+	}
+	/* now port has no child or all children have completed recovery,
+	 * and no ERP of severity >= port was meanwhile triggered elsewhere
+	 */
+	zfcp_scsi_schedule_rport_register(port);
+	spin_unlock(shost->host_lock);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
 	struct zfcp_adapter *adapter = act->adapter;

@@ -1214,6 +1270,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 	case ZFCP_ERP_ACTION_REOPEN_LUN:
 		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
 			scsi_device_put(sdev);
+		zfcp_erp_try_rport_unblock(port);
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:

@@ -1224,7 +1281,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 		 */
 		if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
 			if (result == ZFCP_ERP_SUCCEEDED)
-				zfcp_scsi_schedule_rport_register(port);
+				zfcp_erp_try_rport_unblock(port);
 		/* fall through */
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 		put_device(&port->dev);
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue