USB: gadget: Implement COMPAT_IOCTL for ioctls
32-bit userspace calling into a 64-bit kernel causes different ioctl codes to be generated, because the ioctl numbers are built with sizeof() on the mtp and accessory structures, and those sizes differ between 32-bit and 64-bit compilation. As a result, a 64-bit kernel can never match the ioctl command issued by 32-bit userspace. Implement compat_ioctl to handle this execution environment.

Change-Id: I26cc10986e28a28eab6f3c65f28f4d2b808112d9
Signed-off-by: Sujeet Kumar <ksujeet@codeaurora.org>
Signed-off-by: Hemant Kumar <hemantk@codeaurora.org>
parent 78ece44dc6
commit ff1259b38e

3 changed files with 196 additions and 77 deletions
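For context on the failure mode: the MTP ioctl numbers are built with _IOW(), which encodes sizeof() of the argument structure into the command value. struct mtp_event holds a size_t and a pointer, so its size, and therefore the generated ioctl number, differs between 32-bit and 64-bit builds. The sketch below only illustrates that encoding and is not part of the patch; demo_event is a hypothetical stand-in with an mtp_event-like layout.

/* Illustration only: _IOW() folds the structure size into the ioctl number,
 * so 32-bit and 64-bit builds of the same macro disagree. demo_event is
 * hypothetical; it mirrors the size_t/pointer layout of struct mtp_event.
 */
#include <stdio.h>
#include <linux/ioctl.h>

struct demo_event {
	size_t length;	/* 4 bytes on 32-bit, 8 bytes on 64-bit */
	void *data;	/* 4 bytes on 32-bit, 8 bytes on 64-bit */
};

int main(void)
{
	/* Build with -m32 and -m64 and compare: the size field encoded by
	 * _IOW() differs, so the resulting command values differ as well. */
	printf("sizeof(struct demo_event) = %zu\n", sizeof(struct demo_event));
	printf("_IOW('M', 3, struct demo_event) = %#x\n",
	       (unsigned int)_IOW('M', 3, struct demo_event));
	return 0;
}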
@@ -818,6 +818,9 @@ static const struct file_operations acc_fops = {
 	.read = acc_read,
 	.write = acc_write,
 	.unlocked_ioctl = acc_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = acc_ioctl,
+#endif
 	.open = acc_open,
 	.release = acc_release,
 };

@@ -999,95 +999,71 @@ static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
 	return ret;
 }
 
-static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
+	struct mtp_file_range *mfr)
 {
 	struct mtp_dev *dev = fp->private_data;
 	struct file *filp = NULL;
+	struct work_struct *work;
 	int ret = -EINVAL;
 
 	if (mtp_lock(&dev->ioctl_excl))
 		return -EBUSY;
 
-	switch (code) {
-	case MTP_SEND_FILE:
-	case MTP_RECEIVE_FILE:
-	case MTP_SEND_FILE_WITH_HEADER:
-	{
-		struct mtp_file_range	mfr;
-		struct work_struct *work;
-
-		spin_lock_irq(&dev->lock);
-		if (dev->state == STATE_CANCELED) {
-			/* report cancelation to userspace */
-			dev->state = STATE_READY;
-			spin_unlock_irq(&dev->lock);
-			ret = -ECANCELED;
-			goto out;
-		}
-		if (dev->state == STATE_OFFLINE) {
-			spin_unlock_irq(&dev->lock);
-			ret = -ENODEV;
-			goto out;
-		}
-		dev->state = STATE_BUSY;
-		spin_unlock_irq(&dev->lock);
-
-		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
-			ret = -EFAULT;
-			goto fail;
-		}
-		/* hold a reference to the file while we are working with it */
-		filp = fget(mfr.fd);
-		if (!filp) {
-			ret = -EBADF;
-			goto fail;
-		}
-
-		/* write the parameters */
-		dev->xfer_file = filp;
-		dev->xfer_file_offset = mfr.offset;
-		dev->xfer_file_length = mfr.length;
-		smp_wmb();
-
-		if (code == MTP_SEND_FILE_WITH_HEADER) {
-			work = &dev->send_file_work;
-			dev->xfer_send_header = 1;
-			dev->xfer_command = mfr.command;
-			dev->xfer_transaction_id = mfr.transaction_id;
-		} else if (code == MTP_SEND_FILE) {
-			work = &dev->send_file_work;
-			dev->xfer_send_header = 0;
-		} else {
-			work = &dev->receive_file_work;
-		}
-
-		/* We do the file transfer on a work queue so it will run
-		 * in kernel context, which is necessary for vfs_read and
-		 * vfs_write to use our buffers in the kernel address space.
-		 */
-		queue_work(dev->wq, work);
-		/* wait for operation to complete */
-		flush_workqueue(dev->wq);
-		fput(filp);
-
-		/* read the result */
-		smp_rmb();
-		ret = dev->xfer_result;
-		break;
-	}
-	case MTP_SEND_EVENT:
-	{
-		struct mtp_event	event;
-		/* return here so we don't change dev->state below,
-		 * which would interfere with bulk transfer state.
-		 */
-		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
-			ret = -EFAULT;
-		else
-			ret = mtp_send_event(dev, &event);
-		goto out;
-	}
-	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancellation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		ret = -ECANCELED;
+		goto out;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		ret = -ENODEV;
+		goto out;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* hold a reference to the file while we are working with it */
+	filp = fget(mfr->fd);
+	if (!filp) {
+		ret = -EBADF;
+		goto fail;
+	}
+
+	/* write the parameters */
+	dev->xfer_file = filp;
+	dev->xfer_file_offset = mfr->offset;
+	dev->xfer_file_length = mfr->length;
+	/* make sure write is done before parameters are read */
+	smp_wmb();
+
+	if (code == MTP_SEND_FILE_WITH_HEADER) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 1;
+		dev->xfer_command = mfr->command;
+		dev->xfer_transaction_id = mfr->transaction_id;
+	} else if (code == MTP_SEND_FILE) {
+		work = &dev->send_file_work;
+		dev->xfer_send_header = 0;
+	} else {
+		work = &dev->receive_file_work;
+	}
+
+	/* We do the file transfer on a work queue so it will run
+	 * in kernel context, which is necessary for vfs_read and
+	 * vfs_write to use our buffers in the kernel address space.
+	 */
+	queue_work(dev->wq, work);
+	/* wait for operation to complete */
+	flush_workqueue(dev->wq);
+	fput(filp);
+
+	/* read the result */
+	smp_rmb();
+	ret = dev->xfer_result;
 
 fail:
 	spin_lock_irq(&dev->lock);

@@ -1102,6 +1078,113 @@ out:
 	return ret;
 }
 
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct mtp_event	event;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		ret = mtp_send_receive_ioctl(fp, code, &mfr);
+		break;
+	case MTP_SEND_EVENT:
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+		break;
+	default:
+		DBG(dev->cdev, "unknown ioctl code: %d\n", code);
+	}
+fail:
+	return ret;
+}
+
+/*
+ * 32 bit userspace calling into 64 bit kernel. handle ioctl code
+ * and userspace pointer
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned code,
+	unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct mtp_file_range	mfr;
+	struct __compat_mtp_file_range	cmfr;
+	struct mtp_event	event;
+	struct __compat_mtp_event	cevent;
+	unsigned cmd;
+	bool send_file = false;
+	int ret = -EINVAL;
+
+	switch (code) {
+	case COMPAT_MTP_SEND_FILE:
+		cmd = MTP_SEND_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_RECEIVE_FILE:
+		cmd = MTP_RECEIVE_FILE;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+		cmd = MTP_SEND_FILE_WITH_HEADER;
+		send_file = true;
+		break;
+	case COMPAT_MTP_SEND_EVENT:
+		cmd = MTP_SEND_EVENT;
+		break;
+	default:
+		DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+		ret = -ENOIOCTLCMD;
+		goto fail;
+	}
+
+	if (send_file) {
+		if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		mfr.fd = cmfr.fd;
+		mfr.offset = cmfr.offset;
+		mfr.length = cmfr.length;
+		mfr.command = cmfr.command;
+		mfr.transaction_id = cmfr.transaction_id;
+		ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+	} else {
+		if (mtp_lock(&dev->ioctl_excl))
+			return -EBUSY;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&cevent, (void __user *)value,
+			sizeof(cevent))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		event.length = cevent.length;
+		event.data = compat_ptr(cevent.data);
+		ret = mtp_send_event(dev, &event);
+		mtp_unlock(&dev->ioctl_excl);
+	}
+fail:
+	return ret;
+}
+#endif
+
 static int mtp_open(struct inode *ip, struct file *fp)
 {
 	printk(KERN_INFO "mtp_open\n");

@@ -1130,6 +1213,9 @@ static const struct file_operations mtp_fops = {
 	.read = mtp_read,
 	.write = mtp_write,
 	.unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_mtp_ioctl,
+#endif
 	.open = mtp_open,
 	.release = mtp_release,
 };

@@ -19,5 +19,35 @@
 #define __LINUX_USB_F_MTP_H
 
 #include <uapi/linux/usb/f_mtp.h>
+#include <linux/ioctl.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+struct __compat_mtp_file_range {
+	compat_int_t	fd;
+	compat_loff_t	offset;
+	int64_t		length;
+	uint16_t	command;
+	uint32_t	transaction_id;
+};
+
+struct __compat_mtp_event {
+	compat_size_t	length;
+	compat_caddr_t	data;
+};
+
+#define COMPAT_MTP_SEND_FILE              _IOW('M', 0, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_RECEIVE_FILE           _IOW('M', 1, \
+						struct __compat_mtp_file_range)
+#define COMPAT_MTP_SEND_EVENT             _IOW('M', 3, \
+						struct __compat_mtp_event)
+#define COMPAT_MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, \
+						struct __compat_mtp_file_range)
+#endif
+#endif
 
 #endif /* __LINUX_USB_F_MTP_H */
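On the userspace side nothing changes with this patch: a 32-bit MTP daemon keeps using the regular uapi macros, and because its own 32-bit sizeof() feeds into _IOW(), the command value it sends is what the 64-bit kernel now recognizes as COMPAT_MTP_SEND_EVENT and routes through compat_mtp_ioctl(). Below is a minimal caller sketch, assuming the usual /dev/mtp_usb node exposed by the MTP function driver; the node name and the event payload are assumptions for illustration, not part of the patch.

/* Sketch of a 32-bit caller exercising the compat path; illustration only. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>	/* struct mtp_event, MTP_SEND_EVENT */

int main(void)
{
	unsigned char payload[4] = { 0 };	/* placeholder event data */
	struct mtp_event ev = {
		.length = sizeof(payload),
		.data = payload,
	};
	int fd = open("/dev/mtp_usb", O_RDWR);	/* typical node name; may differ */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Compiled as 32-bit, this command value matches COMPAT_MTP_SEND_EVENT
	 * in a 64-bit kernel, so the request lands in compat_mtp_ioctl(). */
	if (ioctl(fd, MTP_SEND_EVENT, &ev) < 0)
		perror("MTP_SEND_EVENT");
	close(fd);
	return 0;
}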