Merge branch 'android-4.4@9796ea8' into branch 'msm-4.4'

* refs/heads/9796ea8:
  Linux 4.4.67
  dm ioctl: prevent stack leak in dm ioctl call
  nfsd: stricter decoding of write-like NFSv2/v3 ops
  nfsd4: minor NFSv2/v3 write decoding cleanup
  ext4/fscrypto: avoid RCU lookup in d_revalidate
  ext4 crypto: use dget_parent() in ext4_d_revalidate()
  ext4 crypto: revalidate dentry after adding or removing the key
  ext4: require encryption feature for EXT4_IOC_SET_ENCRYPTION_POLICY
  IB/ehca: fix maybe-uninitialized warnings
  IB/qib: rename BITS_PER_PAGE to RVT_BITS_PER_PAGE
  netlink: Allow direct reclaim for fallback allocation
  8250_pci: Fix potential use-after-free in error path
  scsi: cxlflash: Improve EEH recovery time
  scsi: cxlflash: Fix to avoid EEH and host reset collisions
  scsi: cxlflash: Scan host only after the port is ready for I/O
  net: tg3: avoid uninitialized variable warning
  mtd: avoid stack overflow in MTD CFI code
  drbd: avoid redefinition of BITS_PER_PAGE
  ALSA: ppc/awacs: shut up maybe-uninitialized warning
  ASoC: intel: Fix PM and non-atomic crash in bytcr drivers
  Handle mismatched open calls
  timerfd: Protect the might cancel mechanism proper
  ANDROID: android-base.cfg: remove USB_OTG_WAKELOCK
  ANDROID: android-base.cfg: remove defunct options
  ANDROID: arm64: suspend: Restore the UAO state
  ANDROID: usb: gadget: f_audio_source: disable the CPU C-states upon playback
  ANDROID: usb: gadget: f_mtp: Set 0xFFFFFFFF in mtp header ContainerLength field

Conflicts:
	drivers/usb/gadget/function/f_mtp.c
	fs/ext4/crypto.c

Change-Id: I408054176798ba49f0b62fb46a3b0d59060d108e
Signed-off-by: Blagovest Kolenichev <bkolenichev@codeaurora.org>
commit 7e061af6db
32 changed files with 248 additions and 74 deletions
Makefile (2 changes)

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 66
+SUBLEVEL = 67
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -29,7 +29,6 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
-CONFIG_INET6_DIAG_DESTROY=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
@@ -72,7 +71,6 @@ CONFIG_MODVERSIONS=y
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
-CONFIG_NETFILTER_TPROXY=y
 CONFIG_NETFILTER_XT_MATCH_COMMENT=y
 CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@@ -173,5 +171,4 @@ CONFIG_USB_CONFIGFS_F_MTP=y
 CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_GADGET=y
-CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
@@ -18,6 +18,9 @@
 #ifndef __ASM_EXEC_H
 #define __ASM_EXEC_H
 
+#include <linux/sched.h>
+
 extern unsigned long arch_align_stack(unsigned long sp);
+void uao_thread_switch(struct task_struct *next);
 
 #endif /* __ASM_EXEC_H */
@@ -49,6 +49,7 @@
 #include <asm/alternative.h>
 #include <asm/compat.h>
 #include <asm/cacheflush.h>
+#include <asm/exec.h>
 #include <asm/fpsimd.h>
 #include <asm/mmu_context.h>
 #include <asm/processor.h>
@@ -383,7 +384,7 @@ static void tls_thread_switch(struct task_struct *next)
 }
 
 /* Restore the UAO state depending on next's addr_limit */
-static void uao_thread_switch(struct task_struct *next)
+void uao_thread_switch(struct task_struct *next)
 {
 	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
 		if (task_thread_info(next)->addr_limit == KERNEL_DS)
@@ -5,6 +5,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
+#include <asm/exec.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
@@ -95,6 +96,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 		 */
 		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
 				CONFIG_ARM64_PAN));
+		uao_thread_switch(current);
 
 		/*
 		 * Restore HW breakpoint registers to sane values
@@ -479,8 +479,14 @@ void drbd_bm_cleanup(struct drbd_device *device)
  * this masks out the remaining bits.
  * Returns the number of bits cleared.
  */
+#ifndef BITS_PER_PAGE
 #define BITS_PER_PAGE		(1UL << (PAGE_SHIFT + 3))
 #define BITS_PER_PAGE_MASK	(BITS_PER_PAGE - 1)
+#else
+# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
+#  error "ambiguous BITS_PER_PAGE"
+# endif
+#endif
 #define BITS_PER_LONG_MASK	(BITS_PER_LONG - 1)
 static int bm_clear_surplus(struct drbd_bitmap *b)
 {
@@ -41,13 +41,13 @@
 
 #include "qib.h"
 
-#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
+#define RVT_BITS_PER_PAGE	(PAGE_SIZE*BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE_MASK	(RVT_BITS_PER_PAGE-1)
 
 static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
 			      struct qpn_map *map, unsigned off)
 {
-	return (map - qpt->map) * BITS_PER_PAGE + off;
+	return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
 }
 
 static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
@@ -59,7 +59,7 @@ static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
 		if (((off & qpt->mask) >> 1) >= n)
 			off = (off | qpt->mask) + 2;
 	} else
-		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+		off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
 	return off;
 }
 
@@ -147,8 +147,8 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 		qpn = 2;
 	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
 		qpn = (qpn | qpt->mask) + 2;
-	offset = qpn & BITS_PER_PAGE_MASK;
-	map = &qpt->map[qpn / BITS_PER_PAGE];
+	offset = qpn & RVT_BITS_PER_PAGE_MASK;
+	map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
@@ -173,7 +173,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
 			 * We just need to be sure we don't loop
 			 * forever.
 			 */
-		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+		} while (offset < RVT_BITS_PER_PAGE && qpn < QPN_MAX);
 		/*
 		 * In order to keep the number of pages allocated to a
 		 * minimum, we scan the all existing pages before increasing
@@ -204,9 +204,9 @@ static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
 {
 	struct qpn_map *map;
 
-	map = qpt->map + qpn / BITS_PER_PAGE;
+	map = qpt->map + qpn / RVT_BITS_PER_PAGE;
 	if (map->page)
-		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+		clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
 }
 
 static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
@@ -1843,7 +1843,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
 	if (r)
 		goto out;
 
-	param->data_size = sizeof(*param);
+	param->data_size = offsetof(struct dm_ioctl, data);
 	r = fn(param, input_param_size);
 
 	if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
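The dm-ioctl hunk above is the stack-leak fix from the merge: `data_size` now covers only the fixed `struct dm_ioctl` header (via `offsetof()`) instead of `sizeof(*param)`, so the copy back to user space can never include uninitialized bytes between the last named field and the end of the structure. A minimal user-space sketch of the idea, using a hypothetical `demo_ioctl` struct rather than the real `struct dm_ioctl`:

```c
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for an ioctl header followed by scratch space. */
struct demo_ioctl {
	unsigned int version;
	unsigned int data_size;	/* number of bytes valid to copy to user space */
	char data[7];		/* trailing space that may stay uninitialized */
};

int main(void)
{
	/*
	 * sizeof() includes the trailing array and any padding;
	 * offsetof() stops at the end of the named header fields,
	 * which is all the code actually filled in.
	 */
	printf("sizeof   = %zu bytes\n", sizeof(struct demo_ioctl));
	printf("offsetof = %zu bytes (safe amount to copy back)\n",
	       offsetof(struct demo_ioctl, data));
	return 0;
}
```

Reporting the smaller `offsetof()` value is what keeps stale stack contents out of the buffer returned to user space.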
@@ -111,6 +111,7 @@ config MTD_MAP_BANK_WIDTH_16
 
 config MTD_MAP_BANK_WIDTH_32
 	bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+	select MTD_COMPLEX_MAPPINGS if HAS_IOMEM
 	default n
 	help
 	  If you wish to support CFI devices on a physical bus which is
@@ -12031,7 +12031,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
 	int ret;
 	u32 offset, len, b_offset, odd_len;
 	u8 *buf;
-	__be32 start, end;
+	__be32 start = 0, end;
 
 	if (tg3_flag(tp, NO_NVRAM) ||
 	    eeprom->magic != TG3_EEPROM_MAGIC)
@@ -996,6 +996,8 @@ static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 	do {
 		msleep(delay_us / 1000);
 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+		if (status == U64_MAX)
+			nretry /= 2;
 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
 		 nretry--);
 
@@ -1027,6 +1029,8 @@ static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
 	do {
 		msleep(delay_us / 1000);
 		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+		if (status == U64_MAX)
+			nretry /= 2;
 	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
 		 nretry--);
 
@@ -1137,7 +1141,7 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
 	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
-	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
+	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
 	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
 	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
 	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
@@ -1145,7 +1149,7 @@ static const struct asyc_intr_info ainfo[] = {
 	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
 	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
 	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
-	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
+	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
 	{0x0, "", 0, 0}		/* terminator */
 };
 
@@ -1962,6 +1966,11 @@ retry:
  * cxlflash_eh_host_reset_handler() - reset the host adapter
  * @scp:	SCSI command from stack identifying host.
  *
+ * Following a reset, the state is evaluated again in case an EEH occurred
+ * during the reset. In such a scenario, the host reset will either yield
+ * until the EEH recovery is complete or return success or failure based
+ * upon the current device state.
+ *
  * Return:
  *	SUCCESS as defined in scsi/scsi.h
  *	FAILED as defined in scsi/scsi.h
@@ -1993,7 +2002,8 @@ static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
 		} else
 			cfg->state = STATE_NORMAL;
 		wake_up_all(&cfg->reset_waitq);
-		break;
+		ssleep(1);
+		/* fall through */
 	case STATE_RESET:
 		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
 		if (cfg->state == STATE_NORMAL)
@@ -2534,6 +2544,9 @@ static void drain_ioctls(struct cxlflash_cfg *cfg)
  * @pdev:	PCI device struct.
  * @state:	PCI channel state.
  *
+ * When an EEH occurs during an active reset, wait until the reset is
+ * complete and then take action based upon the device state.
+ *
  * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
  */
 static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
@@ -2547,6 +2560,10 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
 
 	switch (state) {
 	case pci_channel_io_frozen:
+		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+		if (cfg->state == STATE_FAILTERM)
+			return PCI_ERS_RESULT_DISCONNECT;
+
 		cfg->state = STATE_RESET;
 		scsi_block_requests(cfg->host);
 		drain_ioctls(cfg);
@@ -1921,7 +1921,7 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
 				  u64 *kpage)
 {
 	int ret = 0;
-	u64 pgaddr, prev_pgaddr;
+	u64 pgaddr, prev_pgaddr = 0;
 	u32 j = 0;
 	int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
 	int nr_kpages = kpages_per_hwpage;
@@ -2417,6 +2417,7 @@ static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
 		ehca_err(&shca->ib_device, "kpage alloc failed");
 		return -ENOMEM;
 	}
+	hret = H_SUCCESS;
 	for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
 		if (!ehca_bmap_valid(ehca_bmap->top[top]))
 			continue;
@@ -5850,17 +5850,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
 static void serial8250_io_resume(struct pci_dev *dev)
 {
 	struct serial_private *priv = pci_get_drvdata(dev);
-	const struct pciserial_board *board;
+	struct serial_private *new;
 
 	if (!priv)
 		return;
 
-	board = priv->board;
-	kfree(priv);
-	priv = pciserial_init_ports(dev, board);
-
-	if (!IS_ERR(priv)) {
-		pci_set_drvdata(dev, priv);
+	new = pciserial_init_ports(dev, priv->board);
+	if (!IS_ERR(new)) {
+		pci_set_drvdata(dev, new);
+		kfree(priv);
 	}
 }
 
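The serial8250_io_resume() hunk above follows a general ordering rule for error paths: build the replacement object first, and only free the old one once the replacement is known to be good, so a failed re-initialization never leaves a freed pointer in use. A small sketch of that ordering with hypothetical `make_state`/`swap_state` helpers (not the driver's API):

```c
#include <stdlib.h>
#include <string.h>

struct state { char name[32]; };

/* Hypothetical allocator that may fail. */
static struct state *make_state(const char *name)
{
	struct state *s = calloc(1, sizeof(*s));

	if (s)
		strncpy(s->name, name, sizeof(s->name) - 1);
	return s;
}

/* Replace *slot only after the new object exists; never free first. */
static int swap_state(struct state **slot, const char *name)
{
	struct state *new = make_state(name);

	if (!new)
		return -1;	/* *slot is untouched and still valid */
	free(*slot);		/* safe: the replacement already exists */
	*slot = new;
	return 0;
}
```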
@@ -17,6 +17,7 @@
 #include <linux/device.h>
 #include <linux/usb/audio.h>
 #include <linux/wait.h>
+#include <linux/pm_qos.h>
 #include <sound/core.h>
 #include <sound/initval.h>
 #include <sound/pcm.h>
@@ -268,6 +269,8 @@ struct audio_dev {
 	/* number of frames sent since start_time */
 	s64 frames_sent;
 	struct audio_source_config *config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
 };
 
 static inline struct audio_dev *func_to_audio(struct usb_function *f)
@@ -740,6 +743,10 @@ static int audio_pcm_open(struct snd_pcm_substream *substream)
 	runtime->hw.channels_max = 2;
 
 	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	pm_qos_add_request(&audio->pm_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+
 	return 0;
 }
 
@@ -749,6 +756,10 @@ static int audio_pcm_close(struct snd_pcm_substream *substream)
 	unsigned long flags;
 
 	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	pm_qos_remove_request(&audio->pm_qos);
+
 	audio->substream = NULL;
 	spin_unlock_irqrestore(&audio->lock, flags);
 
@@ -47,6 +47,7 @@
 #define MTP_BULK_BUFFER_SIZE       16384
 #define INTR_BUFFER_SIZE           28
 #define MAX_INST_NAME_LEN          40
+#define MTP_MAX_FILE_SIZE          0xFFFFFFFFL
 
 /* String IDs */
 #define INTERFACE_STRING_INDEX	0
@@ -868,14 +869,11 @@ static void send_file_work(struct work_struct *data)
 			/* prepend MTP data header */
 			header = (struct mtp_data_header *)req->buf;
 			/*
-			 * Set length as 0xffffffff, if it is greater than
-			 * 0xffffffff. Otherwise host will throw error, if file
-			 * size greater than 0xffffffff being transferred.
-			 */
-			if (count > 0xffffffffLL)
-				header->length = 0xffffffff;
-			else
-				header->length = __cpu_to_le32(count);
+			 * set file size with header according to
+			 * MTP Specification v1.0
+			 */
+			header->length = (count > MTP_MAX_FILE_SIZE) ?
+				MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
 			header->type = __cpu_to_le16(2); /* data packet */
 			header->command = __cpu_to_le16(dev->xfer_command);
 			header->transaction_id =
@@ -227,6 +227,7 @@ struct smb_version_operations {
 	/* verify the message */
 	int (*check_message)(char *, unsigned int);
 	bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+	int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
 	void (*downgrade_oplock)(struct TCP_Server_Info *,
 					struct cifsInodeInfo *, bool);
 	/* process transaction2 response */
@@ -1289,12 +1290,19 @@ struct mid_q_entry {
 	void *callback_data;	  /* general purpose pointer for callback */
 	void *resp_buf;		/* pointer to received SMB header */
 	int mid_state;	/* wish this were enum but can not pass to wait_event */
+	unsigned int mid_flags;
 	__le16 command;		/* smb command code */
 	bool large_buf:1;	/* if valid response, is pointer to large buf */
 	bool multiRsp:1;	/* multiple trans2 responses for one request  */
 	bool multiEnd:1;	/* both received */
 };
 
+struct close_cancelled_open {
+	struct cifs_fid         fid;
+	struct cifs_tcon        *tcon;
+	struct work_struct      work;
+};
+
 /*	Make code in transport.c a little cleaner by moving
 	update of optional stats into function below */
 #ifdef CONFIG_CIFS_STATS2
@@ -1426,6 +1434,9 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define   MID_RESPONSE_MALFORMED 0x10
 #define   MID_SHUTDOWN		 0x20
 
+/* Flags */
+#define   MID_WAIT_CANCELLED	 1 /* Cancelled while waiting for response */
+
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
 #define   CIFS_SMALL_BUFFER     1
@@ -1424,6 +1424,8 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 
 	length = discard_remaining_data(server);
 	dequeue_mid(mid, rdata->result);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -1538,6 +1540,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
 		return cifs_readv_discard(server, mid);
 
 	dequeue_mid(mid, false);
+	mid->resp_buf = server->smallbuf;
+	server->smallbuf = NULL;
 	return length;
 }
 
@@ -924,10 +924,19 @@ cifs_demultiplex_thread(void *p)
 
 		server->lstrp = jiffies;
 		if (mid_entry != NULL) {
+			if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+			     mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+					server->ops->handle_cancelled_mid)
+				server->ops->handle_cancelled_mid(
+							mid_entry->resp_buf,
+							server);
+
 			if (!mid_entry->multiRsp || mid_entry->multiEnd)
 				mid_entry->callback(mid_entry);
-		} else if (!server->ops->is_oplock_break ||
-			   !server->ops->is_oplock_break(buf, server)) {
+		} else if (server->ops->is_oplock_break &&
+			   server->ops->is_oplock_break(buf, server)) {
+			cifs_dbg(FYI, "Received oplock break\n");
+		} else {
 			cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
 				 atomic_read(&midCount));
 			cifs_dump_mem("Received Data is: ", buf,
@@ -630,3 +630,47 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
 	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
 	return false;
 }
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+	struct close_cancelled_open *cancelled = container_of(work,
+					struct close_cancelled_open, work);
+
+	cifs_dbg(VFS, "Close unmatched open\n");
+
+	SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+		   cancelled->fid.volatile_fid);
+	cifs_put_tcon(cancelled->tcon);
+	kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+	struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
+	struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+	struct cifs_tcon *tcon;
+	struct close_cancelled_open *cancelled;
+
+	if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
+		return 0;
+
+	cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+	if (!cancelled)
+		return -ENOMEM;
+
+	tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
+	if (!tcon) {
+		kfree(cancelled);
+		return -ENOENT;
+	}
+
+	cancelled->fid.persistent_fid = rsp->PersistentFileId;
+	cancelled->fid.volatile_fid = rsp->VolatileFileId;
+	cancelled->tcon = tcon;
+	INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+	queue_work(cifsiod_wq, &cancelled->work);
+
+	return 0;
+}
@@ -1511,6 +1511,7 @@ struct smb_version_operations smb20_operations = {
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1589,6 +1590,7 @@ struct smb_version_operations smb21_operations = {
 	.clear_stats = smb2_clear_stats,
 	.print_stats = smb2_print_stats,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1670,6 +1672,7 @@ struct smb_version_operations smb30_operations = {
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -1757,6 +1760,7 @@ struct smb_version_operations smb311_operations = {
 	.print_stats = smb2_print_stats,
 	.dump_share_caps = smb2_dump_share_caps,
 	.is_oplock_break = smb2_is_valid_oplock_break,
+	.handle_cancelled_mid = smb2_handle_cancelled_mid,
 	.downgrade_oplock = smb2_downgrade_oplock,
 	.need_neg = smb2_need_neg,
 	.negotiate = smb2_negotiate,
@@ -47,6 +47,10 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
 				      struct smb_rqst *rqst);
 extern struct mid_q_entry *smb2_setup_async_request(
 			struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+					   __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+						__u64 ses_id, __u32 tid);
 extern int smb2_calc_signature(struct smb_rqst *rqst,
 				struct TCP_Server_Info *server);
 extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -157,6 +161,9 @@ extern int SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
 			     const u64 persistent_fid, const u64 volatile_fid,
 			     const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+				     struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
 extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 			 u64 persistent_file_id, u64 volatile_file_id,
 			 struct kstatfs *FSData);
@@ -115,22 +115,68 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
 }
 
 static struct cifs_ses *
-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
 {
 	struct cifs_ses *ses;
 
-	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-		if (ses->Suid != smb2hdr->SessionId)
+		if (ses->Suid != ses_id)
 			continue;
-		spin_unlock(&cifs_tcp_ses_lock);
 		return ses;
 	}
-	spin_unlock(&cifs_tcp_ses_lock);
 
 	return NULL;
 }
 
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+	struct cifs_ses *ses;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+	struct cifs_tcon *tcon;
+
+	list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+		if (tcon->tid != tid)
+			continue;
+		++tcon->tc_count;
+		return tcon;
+	}
+
+	return NULL;
+}
+
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+	struct cifs_ses *ses;
+	struct cifs_tcon *tcon;
+
+	spin_lock(&cifs_tcp_ses_lock);
+	ses = smb2_find_smb_ses_unlocked(server, ses_id);
+	if (!ses) {
+		spin_unlock(&cifs_tcp_ses_lock);
+		return NULL;
+	}
+	tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+	spin_unlock(&cifs_tcp_ses_lock);
+
+	return tcon;
+}
+
 int
 smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
@@ -143,7 +189,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
@@ -314,7 +360,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 	struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
 	struct cifs_ses *ses;
 
-	ses = smb2_find_smb_ses(smb2_pdu, server);
+	ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
 	if (!ses) {
 		cifs_dbg(VFS, "%s: Could not find session\n", __func__);
 		return 0;
@@ -786,9 +786,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
 	rc = wait_for_response(ses->server, midQ);
 	if (rc != 0) {
+		cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
 		send_cancel(ses->server, buf, midQ);
 		spin_lock(&GlobalMid_Lock);
 		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+			midQ->mid_flags |= MID_WAIT_CANCELLED;
 			midQ->callback = DeleteMidQEntry;
 			spin_unlock(&GlobalMid_Lock);
 			cifs_small_buf_release(buf);
@@ -497,11 +497,6 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
 		return 0;
 	}
 	ci = EXT4_I(d_inode(dir))->i_crypt_info;
-	if (ci && ci->ci_keyring_key &&
-	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
-					  (1 << KEY_FLAG_REVOKED) |
-					  (1 << KEY_FLAG_DEAD))))
-		ci = NULL;
 
 	/* this should eventually be an flag in d_flags */
 	cached_with_key = dentry->d_fsdata != NULL;
@@ -626,6 +626,9 @@ resizefs_out:
 		struct ext4_encryption_policy policy;
 		int err = 0;
 
+		if (!ext4_has_feature_encrypt(sb))
+			return -EOPNOTSUPP;
+
 		if (copy_from_user(&policy,
 				   (struct ext4_encryption_policy __user *)arg,
 				   sizeof(policy))) {
@@ -358,6 +358,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 {
 	unsigned int len, v, hdr, dlen;
 	u32 max_blocksize = svc_max_payload(rqstp);
+	struct kvec *head = rqstp->rq_arg.head;
 
 	p = decode_fh(p, &args->fh);
 	if (!p)
@@ -367,6 +368,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 	args->count = ntohl(*p++);
 	args->stable = ntohl(*p++);
 	len = args->len = ntohl(*p++);
+	if ((void *)p > head->iov_base + head->iov_len)
+		return 0;
 	/*
 	 * The count must equal the amount of data passed.
 	 */
@@ -377,9 +380,8 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 	 * Check to make sure that we got the right number of
 	 * bytes.
 	 */
-	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		- hdr;
+	hdr = (void*)p - head->iov_base;
+	dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
 	/*
 	 * Round the length of the data which was specified up to
 	 * the next multiple of XDR units and then compare that
@@ -396,7 +398,7 @@ nfs3svc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 		len = args->len = max_blocksize;
 	}
 	rqstp->rq_vec[0].iov_base = (void*)p;
-	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+	rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
 	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +473,8 @@ nfs3svc_decode_symlinkargs(struct svc_rqst *rqstp, __be32 *p,
 	/* first copy and check from the first page */
 	old = (char*)p;
 	vec = &rqstp->rq_arg.head[0];
+	if ((void *)old > vec->iov_base + vec->iov_len)
+		return 0;
 	avail = vec->iov_len - (old - (char*)vec->iov_base);
 	while (len && avail && *old) {
 		*new++ = *old++;
@@ -280,6 +280,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 					struct nfsd_writeargs *args)
 {
 	unsigned int len, hdr, dlen;
+	struct kvec *head = rqstp->rq_arg.head;
 	int v;
 
 	p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 	 * Check to make sure that we got the right number of
 	 * bytes.
 	 */
-	hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
-	dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
-		- hdr;
+	hdr = (void*)p - head->iov_base;
+	if (hdr > head->iov_len)
+		return 0;
+	dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
 
 	/*
 	 * Round the length of the data which was specified up to
@@ -316,7 +318,7 @@ nfssvc_decode_writeargs(struct svc_rqst *rqstp, __be32 *p,
 		return 0;
 
 	rqstp->rq_vec[0].iov_base = (void*)p;
-	rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+	rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
 	v = 0;
 	while (len > rqstp->rq_vec[v].iov_len) {
 		len -= rqstp->rq_vec[v].iov_len;
fs/timerfd.c (17 changes)

@@ -40,6 +40,7 @@ struct timerfd_ctx {
 	short unsigned settime_flags;	/* to show in fdinfo */
 	struct rcu_head rcu;
 	struct list_head clist;
+	spinlock_t cancel_lock;
 	bool might_cancel;
 };
 
@@ -113,7 +114,7 @@ void timerfd_clock_was_set(void)
 	rcu_read_unlock();
 }
 
-static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+static void __timerfd_remove_cancel(struct timerfd_ctx *ctx)
 {
 	if (ctx->might_cancel) {
 		ctx->might_cancel = false;
@@ -123,6 +124,13 @@ static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
 	}
 }
 
+static void timerfd_remove_cancel(struct timerfd_ctx *ctx)
+{
+	spin_lock(&ctx->cancel_lock);
+	__timerfd_remove_cancel(ctx);
+	spin_unlock(&ctx->cancel_lock);
+}
+
 static bool timerfd_canceled(struct timerfd_ctx *ctx)
 {
 	if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX)
@@ -133,6 +141,7 @@ static bool timerfd_canceled(struct timerfd_ctx *ctx)
 
 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 {
+	spin_lock(&ctx->cancel_lock);
 	if ((ctx->clockid == CLOCK_REALTIME ||
 	     ctx->clockid == CLOCK_REALTIME_ALARM ||
 	     ctx->clockid == CLOCK_POWEROFF_ALARM) &&
@@ -143,9 +152,10 @@ static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)
 			list_add_rcu(&ctx->clist, &cancel_list);
 			spin_unlock(&cancel_lock);
 		}
-	} else if (ctx->might_cancel) {
-		timerfd_remove_cancel(ctx);
+	} else {
+		__timerfd_remove_cancel(ctx);
 	}
+	spin_unlock(&ctx->cancel_lock);
 }
 
 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
@@ -398,6 +408,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
 		return -ENOMEM;
 
 	init_waitqueue_head(&ctx->wqh);
+	spin_lock_init(&ctx->cancel_lock);
 	ctx->clockid = clockid;
 
 	if (isalarm(ctx)) {
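For context on what the fs/timerfd.c hunks above serialize: `might_cancel` backs the TFD_TIMER_CANCEL_ON_SET behaviour, where a blocked read() on the timer fd fails with ECANCELED when the realtime clock is changed; the new per-context `cancel_lock` keeps concurrent timerfd_settime() callers from racing on that state. A short user-space sketch of the cancel-on-set mechanism (illustration only, not part of the patch):

```c
#include <sys/timerfd.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its;
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_REALTIME, 0);

	if (fd < 0)
		return 1;

	/* Absolute timer that is cancelled if someone sets the clock. */
	memset(&its, 0, sizeof(its));
	its.it_value.tv_sec = time(NULL) + 60;
	if (timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET,
			    &its, NULL) < 0)
		return 1;

	/* Blocks until expiry, or fails with ECANCELED on a clock change. */
	if (read(fd, &expirations, sizeof(expirations)) < 0 &&
	    errno == ECANCELED)
		printf("timer cancelled: CLOCK_REALTIME was set\n");

	close(fd);
	return 0;
}
```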
@@ -122,18 +122,13 @@
 #endif
 
 #ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-#  undef map_bankwidth
-#  define map_bankwidth(map) ((map)->bankwidth)
-#  undef map_bankwidth_is_large
-#  define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-#  undef map_words
-#  define map_words(map) map_calc_words(map)
-# else
-#  define map_bankwidth(map) 32
-#  define map_bankwidth_is_large(map) (1)
-#  define map_words(map) map_calc_words(map)
-# endif
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
 #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
 #undef MAX_MAP_BANKWIDTH
 #define MAX_MAP_BANKWIDTH 32
@@ -2087,7 +2087,7 @@ static int netlink_dump(struct sock *sk)
 	if (!skb) {
 		alloc_size = alloc_min_size;
 		skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-					(GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
+					GFP_KERNEL);
 	}
 	if (!skb)
 		goto errout_skb;
@@ -991,6 +991,7 @@ snd_pmac_awacs_init(struct snd_pmac *chip)
 		if (err < 0)
 			return err;
 	}
+	master_vol = NULL;
 	if (pm7500)
 		err = build_mixers(chip,
 				   ARRAY_SIZE(snd_pmac_awacs_mixers_pmac7500),
@@ -139,7 +139,7 @@ static struct snd_soc_dai_link byt_dailink[] = {
 		.codec_dai_name = "snd-soc-dummy-dai",
 		.codec_name = "snd-soc-dummy",
 		.platform_name = "sst-mfld-platform",
-		.ignore_suspend = 1,
+		.nonatomic = true,
 		.dynamic = 1,
 		.dpcm_playback = 1,
 		.dpcm_capture = 1,
@@ -166,6 +166,7 @@ static struct snd_soc_dai_link byt_dailink[] = {
 						| SND_SOC_DAIFMT_CBS_CFS,
 		.be_hw_params_fixup = byt_codec_fixup,
 		.ignore_suspend = 1,
+		.nonatomic = true,
 		.dpcm_playback = 1,
 		.dpcm_capture = 1,
 		.ops = &byt_be_ssp2_ops,