usb: deoneplusify usb stack

The OnePlus-specific USB stack changes cause freezes and crashes on cheeseburger
when using MTP or when toggling Android USB debugging on/off.

Change-Id: If08cc8a2662122b24c3fd0fcc5d421bb6a84d777
This commit is contained in:
codeworkx 2018-12-29 19:41:40 +01:00 committed by Daniel Hillenbrand
parent d3a2b7ab76
commit 8a4c483457
5 changed files with 57 additions and 623 deletions

View file

@ -4268,9 +4268,14 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev)
if (hub)
connect_type = hub->ports[udev->portnum - 1]->connect_type;
if(udev->bos == NULL)
if (udev->bos == NULL)
return;
if ((udev->bos->ext_cap->bmAttributes & cpu_to_le32(USB_BESL_SUPPORT)) ||
connect_type == USB_PORT_CONNECT_TYPE_HARD_WIRED) {
udev->usb2_hw_lpm_allowed = 1;
usb_set_usb2_hardware_lpm(udev, 1);
}
}
static int hub_enable_device(struct usb_device *udev)

View file

@ -800,9 +800,10 @@ const struct file_operations dwc3_ep_trb_list_fops = {
.llseek = seq_lseek,
.release = single_release,
};
static unsigned int ep_addr_rxdbg_mask = 0xFF;
static unsigned int ep_addr_rxdbg_mask = 1;
module_param(ep_addr_rxdbg_mask, uint, S_IRUGO | S_IWUSR);
static unsigned int ep_addr_txdbg_mask = 0xFF;
static unsigned int ep_addr_txdbg_mask = 1;
module_param(ep_addr_txdbg_mask, uint, S_IRUGO | S_IWUSR);
/* Maximum debug message length */

View file

@ -34,8 +34,6 @@
#include <linux/mmu_context.h>
#include <linux/poll.h>
#include <linux/eventfd.h>
#include <linux/pm_qos.h>
#include <linux/hrtimer.h>
#include "u_fs.h"
#include "u_f.h"
@ -46,15 +44,6 @@
#define NUM_PAGES 10 /* # of pages for ipc logging */
#define PM_QOS_REQUEST_SIZE 0xF000 /* > 4096*/
#define ADB_QOS_TIMEOUT 500000
#define ADB_PULL_PUSH_TIMEOUT 1000
static struct pm_qos_request adb_little_cpu_qos;
static struct hrtimer ffs_op_timer;
static bool lpm_flg = true;
static bool ffs_op_flg = true;
static void *ffs_ipc_log;
#define ffs_log(fmt, ...) do { \
if (ffs_ipc_log) \
@ -625,20 +614,15 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
if (ret < 0)
return ret;
if (unlikely(ffs->state == FFS_CLOSING)) {
pr_err("FFS_CLOSING!\n");
if (unlikely(ffs->state == FFS_CLOSING))
return -EBUSY;
}
smp_mb__before_atomic();
if (atomic_read(&ffs->opened)) {
pr_err("ep0 is already opened!\n");
if (atomic_read(&ffs->opened))
return -EBUSY;
}
file->private_data = ffs;
ffs_data_opened(ffs);
pr_info("ep0_open success!\n");
return 0;
}
@ -1107,27 +1091,6 @@ error:
return ret;
}
static enum hrtimer_restart ffs_op_timeout(struct hrtimer *timer)
{
static int cnt;
/* wait 5s to close */
if (!ffs_op_flg)
cnt = cnt + 1;
if (cnt > 5) {
pr_info("ffs_op_timeout, close lpm_disable\n");
msm_cpuidle_set_sleep_disable(false);
cnt = 0;
lpm_flg = false;
return HRTIMER_NORESTART;
}
hrtimer_start(&ffs_op_timer,
ms_to_ktime(ADB_PULL_PUSH_TIMEOUT),
HRTIMER_MODE_REL);
return HRTIMER_RESTART;
}
static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
@ -1197,8 +1160,6 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
bool adb_write_flag = false;
ENTER();
@ -1220,30 +1181,10 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kiocb->private = p;
if (p->aio) {
if (p->aio)
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
} else {
if ((strcmp(epfile->name, "ep1") == 0)
|| (strcmp(epfile->name, "ep2") == 0))
adb_write_flag = true;
if ((p->data.count & PM_QOS_REQUEST_SIZE) && adb_write_flag) {
if (!lpm_flg) {
msm_cpuidle_set_sleep_disable(true);
hrtimer_start(&ffs_op_timer,
ms_to_ktime(ADB_PULL_PUSH_TIMEOUT),
HRTIMER_MODE_REL);
}
lpm_flg = true;
ffs_op_flg = true;
pm_qos_update_request_timeout(&adb_little_cpu_qos,
(MAX_CPUFREQ - 4), ADB_QOS_TIMEOUT);
}
}
res = ffs_epfile_io(kiocb->ki_filp, p);
if (ffs_op_flg)
ffs_op_flg = false;
if (res == -EIOCBQUEUED)
return res;
if (p->aio)
@ -1260,8 +1201,6 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
bool adb_read_flag = false;
ENTER();
@ -1292,31 +1231,10 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
kiocb->private = p;
if (p->aio) {
if (p->aio)
kiocb_set_cancel_fn(kiocb, ffs_aio_cancel);
} else {
if ((strcmp(epfile->name, "ep1") == 0)
|| (strcmp(epfile->name, "ep2") == 0))
adb_read_flag = true;
if ((p->data.count & PM_QOS_REQUEST_SIZE)
&& adb_read_flag) {
if (!lpm_flg) {
msm_cpuidle_set_sleep_disable(true);
hrtimer_start(&ffs_op_timer,
ms_to_ktime(ADB_PULL_PUSH_TIMEOUT),
HRTIMER_MODE_REL);
}
lpm_flg = true;
ffs_op_flg = true;
pm_qos_update_request_timeout(&adb_little_cpu_qos,
(MAX_CPUFREQ - 4), ADB_QOS_TIMEOUT);
}
}
res = ffs_epfile_io(kiocb->ki_filp, p);
if (ffs_op_flg)
ffs_op_flg = false;
if (res == -EIOCBQUEUED)
return res;
@ -2065,9 +1983,6 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ffs->epfiles = epfiles;
pm_qos_add_request(&adb_little_cpu_qos, PM_QOS_C0_CPUFREQ_MIN,
MIN_CPUFREQ);
ffs_log("exit: eps_count %u state %d setup_state %d flag %lu",
count, ffs->state, ffs->setup_state, ffs->flags);
@ -2093,7 +2008,6 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
}
kfree(epfiles);
pm_qos_remove_request(&adb_little_cpu_qos);
ffs_log("exit");
}
@ -3514,10 +3428,6 @@ static int ffs_func_bind(struct usb_configuration *c,
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
lpm_flg = false;
ffs_op_flg = false;
hrtimer_init(&ffs_op_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ffs_op_timer.function = ffs_op_timeout;
ffs_log("exit: ret %d", ret);
return ret;
@ -4078,11 +3988,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
hrtimer_cancel(&ffs_op_timer);
if (lpm_flg)
msm_cpuidle_set_sleep_disable(false);
lpm_flg = false;
ffs_op_flg = false;
ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
ffs->setup_state, ffs->flags);

View file

@ -1231,248 +1231,6 @@ static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
return 8;
}
/*2016/09/21, CD-ROM and VID customized
*add for cdrom support MAC OSX
*/
/*
 * Convert a logical block address to MSF (minute/second/frame) form.
 * Writes three bytes into @buf: minutes, seconds, frames.
 * CD addressing places LBA 0 at MSF 00:02:00, hence the 150-frame
 * (2-second) pregap offset; there are 75 frames per second.
 */
static void _lba_to_msf(u8 *buf, int lba)
{
	int frames = lba + 150;

	buf[0] = frames / (75 * 60);	/* minutes */
	buf[1] = (frames / 75) % 60;	/* seconds */
	buf[2] = frames % 75;		/* frames  */
}
/*
 * Build a "raw" (format 2) READ TOC response describing the emulated
 * CD-ROM as a single session containing one data track.  This format is
 * requested by Mac OS X hosts, which is why it was added to the mass
 * storage CD-ROM emulation.
 *
 * The response consists of three session lead-in descriptors (point 0xa0:
 * first track, point 0xa1: last track, point 0xa2: lead-out start) followed
 * by one descriptor for the data track itself; each descriptor is 11 bytes.
 * Track start addresses are reported in MSF or LBA form depending on the
 * MSF bit (bit 1) of CDB byte 1.
 *
 * Returns the total number of bytes written into bh->buf.
 */
static int _read_toc_raw(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
int msf = common->cmnd[1] & 0x02; /* MSF bit of the READ TOC CDB */
u8 *buf = (u8 *) bh->buf;
u8 *q;
int len;
/* Bytes 0-1 hold the TOC data length and are filled in at the end. */
q = buf + 2;
*q++ = 1; /* first session */
*q++ = 1; /* last session */
/* Lead-in descriptor, point 0xa0: carries the first track number. */
*q++ = 1; /* session number */
*q++ = 0x14; /* data track */
*q++ = 0; /* track number */
*q++ = 0xa0; /* lead-in */
*q++ = 0; /* min */
*q++ = 0; /* sec */
*q++ = 0; /* frame */
*q++ = 0;
*q++ = 1; /* first track */
*q++ = 0x00; /* disk type */
*q++ = 0x00;
/* Lead-in descriptor, point 0xa1: carries the last track number. */
*q++ = 1; /* session number */
*q++ = 0x14; /* data track */
*q++ = 0; /* track number */
*q++ = 0xa1;
*q++ = 0; /* min */
*q++ = 0; /* sec */
*q++ = 0; /* frame */
*q++ = 0;
*q++ = 1; /* last track */
*q++ = 0x00;
*q++ = 0x00;
/* Lead-in descriptor, point 0xa2: lead-out starts right after the
 * last sector of the medium, so its address is num_sectors. */
*q++ = 1; /* session number */
*q++ = 0x14; /* data track */
*q++ = 0; /* track number */
*q++ = 0xa2; /* lead-out */
*q++ = 0; /* min */
*q++ = 0; /* sec */
*q++ = 0; /* frame */
if (msf) {
*q++ = 0; /* reserved */
_lba_to_msf(q, curlun->num_sectors);
q += 3;
} else {
put_unaligned_be32(curlun->num_sectors, q);
q += 4;
}
/* The single data track (point 1), starting at LBA 0. */
*q++ = 1; /* session number */
*q++ = 0x14; /* ADR, control */
*q++ = 0; /* track number */
*q++ = 1; /* point */
*q++ = 0; /* min */
*q++ = 0; /* sec */
*q++ = 0; /* frame */
if (msf) {
*q++ = 0;
_lba_to_msf(q, 0);
q += 3;
} else {
*q++ = 0;
*q++ = 0;
*q++ = 0;
*q++ = 0;
}
/* The TOC data length field excludes its own two bytes. */
len = q - buf;
put_unaligned_be16(len - 2, buf);
return len;
}
/*
 * Wrap a 2048-byte mode 1 data payload (already sitting at buf + 16)
 * into a raw 2352-byte CD sector in place: 12-byte sync pattern,
 * 4-byte header (MSF address + mode), payload, then 288 trailing
 * bytes which would hold EDC/ECC but are only zero-filled here.
 */
static void cd_data_to_raw(u8 *buf, int lba)
{
	u8 *header = buf + 12;
	u8 *tail = buf + 12 + 4 + 2048;

	/* Sync pattern: 00, ten bytes of ff, 00. */
	buf[0] = 0x00;
	memset(buf + 1, 0xff, 10);
	buf[11] = 0x00;

	/* Header: sector address in MSF form, then the mode byte. */
	_lba_to_msf(header, lba);
	header[3] = 0x01; /* mode 1 data */

	/* XXX: EDC/ECC not computed, just zeroed. */
	memset(tail, 0, 288);
}
/*
 * Handle the READ CD (0xBE) command for the emulated CD-ROM.
 *
 * CDB byte 9 selects the transfer type: when its top five bits are all
 * set ((transfer_request & 0xf8) == 0xf8) the host wants a full raw
 * 2352-byte sector, so the 2048-byte payload is read into bh->buf at
 * offset 16 and later framed by cd_data_to_raw(); otherwise the user
 * data is streamed straight from the backing file in FSG_BUFLEN-sized
 * chunks.
 *
 * Returns 0 when no sectors were requested, a negative errno on error,
 * or -EIO meaning "no default reply" (the data buffers have been filled
 * and queued by this function).
 */
static int do_read_cd(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
u32 lba;
struct fsg_buffhd *bh;
int rc;
u32 amount_left;
loff_t file_offset, file_offset_tmp;
unsigned int amount;
unsigned int partial_page;
ssize_t nread;
u32 nb_sectors, transfer_request;
/* Sector count is a 24-bit big-endian field in CDB bytes 6-8. */
nb_sectors = (common->cmnd[6] << 16) |
(common->cmnd[7] << 8) | common->cmnd[8];
lba = get_unaligned_be32(&common->cmnd[2]);
if (nb_sectors == 0)
return 0;
if (lba >= curlun->num_sectors) {
curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
return -EINVAL;
}
transfer_request = common->cmnd[9];
/* NOTE(review): the raw path derives the file offset with << 11
 * (2048-byte sectors) while the normal path uses << 9; confirm the
 * backing file's block size matches each case. */
if ((transfer_request & 0xf8) == 0xf8) {
file_offset = ((loff_t) lba) << 11;
/* read all data - 2352 byte */
amount_left = 2352;
} else {
file_offset = ((loff_t) lba) << 9;
/* Carry out the file reads */
amount_left = common->data_size_from_cmnd;
}
if (unlikely(amount_left == 0))
return -EIO; /* No default reply */
for (;;) {
/* Figure out how much we need to read:
 * Try to read the remaining amount.
 * But don't read more than the buffer size.
 * And don't try to read past the end of the file.
 * Finally, if we're not at a page boundary, don't read past
 * the next page.
 * If this means reading 0 then we were asked to read past
 * the end of file.
 */
amount = min(amount_left, FSG_BUFLEN);
amount = min((loff_t) amount,
curlun->file_length - file_offset);
partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
if (partial_page > 0)
amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
partial_page);
/* Wait for the next buffer to become available */
bh = common->next_buffhd_to_fill;
while (bh->state != BUF_STATE_EMPTY) {
rc = sleep_thread(common, true);
if (rc)
return rc;
}
/* If we were asked to read past the end of file,
 * end with an empty buffer.
 */
if (amount == 0) {
curlun->sense_data =
SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
curlun->sense_data_info = file_offset >> 9;
curlun->info_valid = 1;
bh->inreq->length = 0;
bh->state = BUF_STATE_FULL;
break;
}
/* Perform the read; raw sectors leave 16 bytes of headroom for
 * the sync + header that cd_data_to_raw() fills in afterwards. */
file_offset_tmp = file_offset;
if ((transfer_request & 0xf8) == 0xf8) {
nread = vfs_read(curlun->filp,
((char __user *)bh->buf)+16,
amount, &file_offset_tmp);
} else {
nread = vfs_read(curlun->filp,
(char __user *)bh->buf,
amount, &file_offset_tmp);
}
VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
(unsigned long long) file_offset,
(int) nread);
if (signal_pending(current))
return -EINTR;
if (nread < 0) {
LDBG(curlun, "error in file read: %d\n",
(int) nread);
nread = 0;
} else if (nread < amount) {
LDBG(curlun, "partial file read: %d/%u\n",
(int) nread, amount);
nread -= (nread & 511); /* Round down to a block */
}
file_offset += nread;
amount_left -= nread;
common->residue -= nread;
bh->inreq->length = nread;
bh->state = BUF_STATE_FULL;
/* If an error occurred, report it and its position */
if (nread < amount) {
curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
curlun->sense_data_info = file_offset >> 9;
curlun->info_valid = 1;
break;
}
if (amount_left == 0)
break; /* No more left to read */
/* Send this buffer and go read some more */
if (!start_in_transfer(common, bh))
/* Don't know what to do if common->fsg is NULL */
return -EIO;
common->next_buffhd_to_fill = bh->next;
}
/* NOTE(review): raw framing is applied only to the final buffer
 * (bh after the loop) using the command's starting lba -- confirm a
 * raw request can never span more than one buffer, as 2352 bytes
 * fits in a single FSG_BUFLEN chunk only if FSG_BUFLEN >= 2352. */
if ((transfer_request & 0xf8) == 0xf8)
cd_data_to_raw(bh->buf, lba);
return -EIO; /* No default reply */
}
/*end add for cdrom support MAC OSX*/
static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
@ -1480,25 +1238,12 @@ static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
int start_track = common->cmnd[6];
u8 *buf = (u8 *)bh->buf;
/*2016/09/21, CD-ROM and VID customized
*add for cdrom support MAC OSX
*/
int format = (common->cmnd[9] & 0xC0) >> 6;
/*end add for cdrom support MAC OSX*/
if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
start_track > 1) {
curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
return -EINVAL;
}
/*2016/09/21, CD-ROM and VID customized
*add for cdrom support MAC OSX
*/
if (format == 2)
return _read_toc_raw(common, bh);
/*end add for cdrom support MAC OSX*/
memset(buf, 0, 20);
buf[1] = (20-2); /* TOC data length */
buf[2] = 1; /* First track number */
@ -2244,30 +1989,12 @@ static int do_scsi_command(struct fsg_common *common)
goto unknown_cmnd;
common->data_size_from_cmnd =
get_unaligned_be16(&common->cmnd[7]);
/*2016/09/21, CD-ROM and VID customized
*add for cdrom support MAC OSX
*/
reply = check_command(common, 10, DATA_DIR_TO_HOST,
(0xf<<6) | (1<<1), 1,
(7<<6) | (1<<1), 1,
"READ TOC");
if (reply == 0)
reply = do_read_toc(common, bh);
break;
/*2016/09/21, CD-ROM and VID customized
*add for cdrom support MAC OSX
*/
case READ_CD:
common->data_size_from_cmnd = ((common->cmnd[6] << 16)
| (common->cmnd[7] << 8)
| (common->cmnd[8])) << 9;
reply = check_command(common, 12, DATA_DIR_TO_HOST,
(0xf<<2) | (7<<7), 1,
"READ CD");
if (reply == 0)
reply = do_read_cd(common);
break;
/*end add for cdrom support MAC OSX*/
case READ_FORMAT_CAPACITIES:
common->data_size_from_cmnd =
@ -2541,7 +2268,6 @@ reset:
if (common->fsg) {
fsg = common->fsg;
pr_err("%s:disable endpoints here\n", __func__);
for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
@ -2555,6 +2281,18 @@ reset:
}
}
/* Disable the endpoints */
if (fsg->bulk_in_enabled) {
usb_ep_disable(fsg->bulk_in);
fsg->bulk_in_enabled = 0;
}
if (fsg->bulk_out_enabled) {
usb_ep_disable(fsg->bulk_out);
fsg->bulk_out_enabled = 0;
}
/* allow usb LPM after eps are disabled */
usb_gadget_autopm_put_async(common->gadget);
common->fsg = NULL;
wake_up(&common->fsg_wait);
}
@ -2566,6 +2304,28 @@ reset:
common->fsg = new_fsg;
fsg = common->fsg;
/* Enable the endpoints */
rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
if (rc)
goto reset;
rc = usb_ep_enable(fsg->bulk_in);
if (rc)
goto reset;
fsg->bulk_in->driver_data = common;
fsg->bulk_in_enabled = 1;
rc = config_ep_by_speed(common->gadget, &(fsg->function),
fsg->bulk_out);
if (rc)
goto reset;
rc = usb_ep_enable(fsg->bulk_out);
if (rc)
goto reset;
fsg->bulk_out->driver_data = common;
fsg->bulk_out_enabled = 1;
common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Allocate the requests */
for (i = 0; i < common->fsg_num_buffers; ++i) {
struct fsg_buffhd *bh = &common->buffhds[i];
@ -2581,8 +2341,7 @@ reset:
bh->inreq->complete = bulk_in_complete;
bh->outreq->complete = bulk_out_complete;
}
pr_err("%s:increment pm_usage count num_buffers=%d\n", __func__
, common->fsg_num_buffers);
common->running = 1;
for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
if (common->luns[i])
@ -2597,84 +2356,20 @@ reset:
static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct fsg_dev *fsg = fsg_from_func(f);
int rc;
fsg->common->new_fsg = fsg;
pr_err("%s:increment pm_usage counter\n", __func__);
if (!fsg->common || !fsg->common->gadget)
pr_err("%s: NULL here\n", __func__);
/* prevents usb LPM until thread runs to completion */
usb_gadget_autopm_get_async(fsg->common->gadget);
/* Enable the endpoints */
rc = config_ep_by_speed(fsg->common->gadget, &(fsg->function),
fsg->bulk_in);
if (rc)
goto err_exit;
rc = usb_ep_enable(fsg->bulk_in);
if (rc)
goto err_exit;
fsg->bulk_in->driver_data = fsg->common;
fsg->bulk_in_enabled = 1;
rc = config_ep_by_speed(fsg->common->gadget, &(fsg->function),
fsg->bulk_out);
if (rc)
goto reset_bulk_int;
rc = usb_ep_enable(fsg->bulk_out);
if (rc)
goto reset_bulk_int;
fsg->bulk_out->driver_data = fsg->common;
fsg->bulk_out_enabled = 1;
fsg->common->bulk_out_maxpacket =
usb_endpoint_maxp(fsg->bulk_out->desc);
clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
return USB_GADGET_DELAYED_STATUS;
reset_bulk_int:
usb_ep_disable(fsg->bulk_in);
fsg->bulk_in->driver_data = NULL;
fsg->bulk_in_enabled = 0;
err_exit:
return rc;
}
static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
pr_err("%s:disable endpoints\n", __func__);
/* Disable the endpoints */
if (fsg->bulk_in_enabled) {
usb_ep_disable(fsg->bulk_in);
fsg->bulk_in->driver_data = NULL;
fsg->bulk_in_enabled = 0;
}
if (fsg->bulk_out_enabled) {
usb_ep_disable(fsg->bulk_out);
fsg->bulk_out->driver_data = NULL;
fsg->bulk_out_enabled = 0;
}
pr_err("%s:eps are disabled\n", __func__);
pr_err("%s:disabled endpoints\n", __func__);
fsg->common->new_fsg = NULL;
pr_err("%s:cur_state=%d\n", __func__, fsg->common->state);
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
/* allow usb LPM after eps are disabled */
usb_gadget_autopm_put_async(fsg->common->gadget);
pr_err("%s:\n", __func__);
}
@ -2688,7 +2383,6 @@ static void handle_exception(struct fsg_common *common)
struct fsg_lun *curlun;
unsigned int exception_req_tag;
pr_err("%s:current_state= %d\n", __func__, common->state);
/*
* Clear the existing signals. Anything but SIGUSR1 is converted
* into a high-priority EXIT exception.
@ -2708,8 +2402,6 @@ static void handle_exception(struct fsg_common *common)
if (likely(common->fsg)) {
for (i = 0; i < common->fsg_num_buffers; ++i) {
bh = &common->buffhds[i];
pr_err("%s:dequeue reqs %d inreq_busy=%d outreq_busy=%d\n",
__func__, i, bh->inreq_busy, bh->outreq_busy);
if (bh->inreq_busy)
usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
if (bh->outreq_busy)
@ -2769,7 +2461,6 @@ static void handle_exception(struct fsg_common *common)
}
spin_unlock_irq(&common->lock);
pr_err("%s:cur_state2=%d\n", __func__, old_state);
/* Carry out any extra actions required for the exception */
switch (old_state) {
case FSG_STATE_ABORT_BULK_OUT:
@ -2811,17 +2502,14 @@ static void handle_exception(struct fsg_common *common)
break;
case FSG_STATE_CONFIG_CHANGE:
pr_err("%s:status change disable/enable ep\n", __func__);
do_set_interface(common, common->new_fsg);
if (common->new_fsg) {
pr_err("%s:setup continue call\n", __func__);
/*
* make sure delayed_status flag updated when set_alt
* returned.
*/
msleep(200);
usb_composite_setup_continue(common->cdev);
pr_err("%s:setup continue call done\n", __func__);
}
break;
@ -3295,7 +2983,6 @@ int fsg_common_create_luns(struct fsg_common *common, struct fsg_config *cfg)
pr_info("Number of LUNs=%d\n", cfg->nluns);
pr_err("%s:done\n", __func__);
return 0;
fail:
@ -3318,9 +3005,6 @@ void fsg_common_set_inquiry_string(struct fsg_common *common, const char *vn,
? "File-CD Gadget"
: "File-Stor Gadget"),
i);
/*Anderson@, 2016/09/21, CD-ROM and VID customized*/
snprintf(common->inquiry_string, sizeof(common->inquiry_string), "%s",
"OnePlus Device Driver");
}
EXPORT_SYMBOL_GPL(fsg_common_set_inquiry_string);
@ -3364,7 +3048,7 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
unsigned max_burst;
int ret;
struct fsg_opts *opts;
pr_err("%s:\n", __func__);
/* Don't allow to bind if we don't have at least one LUN */
ret = _fsg_common_get_max_lun(common);
if (ret < 0) {
@ -3437,7 +3121,6 @@ static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
fsg_ss_function);
if (ret)
goto autoconf_fail;
pr_err("%s: done\n", __func__);
return 0;
@ -3461,7 +3144,6 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
struct fsg_common *common = fsg->common;
DBG(fsg, "unbind\n");
pr_err("%s:current_state= %d\n", __func__, common->state);
if (fsg->common->fsg == fsg) {
fsg->common->new_fsg = NULL;
raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
@ -3470,7 +3152,6 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
}
usb_free_all_descriptors(&fsg->function);
pr_err("%s: done\n", __func__);
}
static inline struct fsg_lun_opts *to_fsg_lun_opts(struct config_item *item)
@ -3821,8 +3502,6 @@ static struct usb_function_instance *fsg_alloc_inst(void)
memset(&config, 0, sizeof(config));
config.removable = true;
config.cdrom = true;
config.ro = true;
rc = fsg_common_create_lun(opts->common, &config, 0, "lun.0",
(const char **)&opts->func_inst.group.cg_item.ci_name);
if (rc)

View file

@ -39,43 +39,12 @@
#include <linux/usb/f_mtp.h>
#include <linux/configfs.h>
#include <linux/usb/composite.h>
#include <linux/pm_qos.h>
#include "configfs.h"
#define MTP_RX_BUFFER_INIT_SIZE 1048576
#define MTP_TX_BUFFER_INIT_SIZE 1048576
#define MTP_BULK_BUFFER_SIZE 16384
/* Currently tx and rx buffer len is 1048576, inter buffer len is 28.
*Tx buffer counts is 8, Rx is 2 and intr is 5.
*In order avoid MTP can't work issue, use fixed memory.
* 0xAC400000(0xffffffc02c400000) ------
* | TX | 0x100000 * 8
* 0xACC00000(0xffffffc02cc00000) ------
* | RX | 0x100000 * 2
* 0xACE00000(0xffffffc02ce00000) ------
* | INTR | 0x40(alignment) * 5
*
*------
*/
/*Anderson@, 2016/12/09, Add fix memory for MTP*/
/*0xAC400000, 0xAC500000, 0xAC600000,
*0xAC700000, 0xAC800000, 0xAC900000,
*0xACA00000, 0xACB00000
*/
#define MTP_TX_BUFFER_BASE 0xAC400000
/*0xACC00000, 0xACD00000*/
#define MTP_RX_BUFFER_BASE 0xACC00000
/*0xACE00000, 0xACE00040, 0xACE00080, 0xACE000C0, 0xACE00100*/
#define MTP_INTR_BUFFER_BASE 0xACE00000
static int mtpBufferOffset;
static bool useFixAddr;
enum buf_type {
TX_BUFFER = 0,
RX_BUFFER,
INTR_BUFFER,
};
#define INTR_BUFFER_SIZE 28
#define MAX_INST_NAME_LEN 40
#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL
@ -110,11 +79,6 @@ enum buf_type {
#define DRIVER_NAME "mtp"
#define MAX_ITERATION 100
/* values for qos requests */
#define FILE_LENGTH (10 * 1024 * 1024)
#define PM_QOS_TIMEOUT 3000000
static bool mtp_receive_flag;
unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
module_param(mtp_rx_req_len, uint, S_IRUGO | S_IWUSR);
@ -126,11 +90,6 @@ unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
module_param(mtp_tx_reqs, uint, S_IRUGO | S_IWUSR);
static const char mtp_shortname[] = DRIVER_NAME "_usb";
static struct pm_qos_request little_cpu_mtp_freq;
static struct pm_qos_request devfreq_mtp_request;
static struct pm_qos_request big_cpu_mtp_freq;
static struct delayed_work cpu_freq_qos_work;
static struct workqueue_struct *cpu_freq_qos_queue;
struct mtp_dev {
struct usb_function function;
@ -451,9 +410,8 @@ static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
{
return container_of(f, struct mtp_dev, function);
}
/*2016/12/09, Add fix memory for MTP*/
static struct usb_request *mtp_request_new(struct usb_ep *ep,
int buffer_size, enum buf_type type)
static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
{
struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
@ -461,44 +419,19 @@ static struct usb_request *mtp_request_new(struct usb_ep *ep,
return NULL;
/* now allocate buffers for the requests */
/*2016/12/09, Add fix memory for MTP*/
if (useFixAddr == true) {
if (type == TX_BUFFER)
req->buf = __va(MTP_TX_BUFFER_BASE + mtpBufferOffset);
else if (type == RX_BUFFER)
req->buf = __va(MTP_RX_BUFFER_BASE + mtpBufferOffset);
else
req->buf = __va(MTP_INTR_BUFFER_BASE + mtpBufferOffset);
} else
req->buf = kmalloc(buffer_size, GFP_KERNEL);
memset(req->buf, 0, buffer_size);
req->buf = kmalloc(buffer_size, GFP_KERNEL);
if (!req->buf) {
usb_ep_free_request(ep, req);
return NULL;
}
/*2016/12/09, Add fix memory for MTP*/
if (useFixAddr == true) {
if (buffer_size == INTR_BUFFER_SIZE)
mtpBufferOffset += 0x40; /*alignment*/
else
mtpBufferOffset += buffer_size;
}
return req;
}
static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
{
if (req) {
/*2016/12/09, Add fix memory for MTP*/
if (useFixAddr == true) {
req->buf = NULL;
mtpBufferOffset = 0;
} else
kfree(req->buf);
kfree(req->buf);
usb_ep_free_request(ep, req);
}
}
@ -622,19 +555,9 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
dev->ep_intr = ep;
retry_tx_alloc:
/*2016/12/09, Add fix memory for MTP*/
if (mtp_tx_req_len == MTP_TX_BUFFER_INIT_SIZE
&& mtp_rx_req_len == MTP_RX_BUFFER_INIT_SIZE
&& mtp_tx_reqs == MTP_TX_REQ_MAX)
useFixAddr = true;
else
useFixAddr = false;
pr_info("useFixAddr:%s\n", useFixAddr?"true":"false");
mtpBufferOffset = 0;
/* now allocate requests for our endpoints */
for (i = 0; i < mtp_tx_reqs; i++) {
/*2016/12/09, Add fix memory for MTP*/
req = mtp_request_new(dev->ep_in, mtp_tx_req_len, TX_BUFFER);
req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
if (!req) {
if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
goto fail;
@ -658,11 +581,8 @@ retry_tx_alloc:
mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
retry_rx_alloc:
/*2016/12/09, Add fix memory for MTP*/
mtpBufferOffset = 0;
for (i = 0; i < RX_REQ_MAX; i++) {
/*2016/12/09, Add fix memory for MTP*/
req = mtp_request_new(dev->ep_out, mtp_rx_req_len, RX_BUFFER);
req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
if (!req) {
if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
goto fail;
@ -674,19 +594,13 @@ retry_rx_alloc:
req->complete = mtp_complete_out;
dev->rx_req[i] = req;
}
/*2016/12/09, Add fix memory for MTP*/
mtpBufferOffset = 0;
for (i = 0; i < INTR_REQ_MAX; i++) {
/*2016/12/09, Add fix memory for MTP*/
req = mtp_request_new(dev->ep_intr,
INTR_BUFFER_SIZE, INTR_BUFFER);
req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
if (!req)
goto fail;
req->complete = mtp_complete_intr;
mtp_req_put(dev, &dev->intr_idle, req);
}
/*2016/12/09, Add fix memory for MTP*/
mtpBufferOffset = 0;
return 0;
@ -911,19 +825,6 @@ static void send_file_work(struct work_struct *data)
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_file_length >= FILE_LENGTH) {
pm_qos_update_request(&devfreq_mtp_request, MAX_CPUFREQ);
pm_qos_update_request(&little_cpu_mtp_freq, MAX_CPUFREQ);
pm_qos_update_request(&big_cpu_mtp_freq, MAX_CPUFREQ - 1);
} else {
pm_qos_update_request_timeout(&devfreq_mtp_request,
MAX_CPUFREQ, PM_QOS_TIMEOUT);
pm_qos_update_request_timeout(&little_cpu_mtp_freq,
MAX_CPUFREQ, PM_QOS_TIMEOUT);
pm_qos_update_request_timeout(&big_cpu_mtp_freq,
MAX_CPUFREQ-1, PM_QOS_TIMEOUT);
}
if (dev->xfer_send_header) {
hdr_size = sizeof(struct mtp_data_header);
count += hdr_size;
@ -1012,12 +913,6 @@ static void send_file_work(struct work_struct *data)
if (req)
mtp_req_put(dev, &dev->tx_idle, req);
if (dev->xfer_file_length >= FILE_LENGTH) {
pm_qos_update_request(&devfreq_mtp_request, MIN_CPUFREQ);
pm_qos_update_request(&little_cpu_mtp_freq, MIN_CPUFREQ);
pm_qos_update_request(&big_cpu_mtp_freq, MIN_CPUFREQ);
}
DBG(cdev, "send_file_work returning %d state:%d\n", r, dev->state);
/* write the result */
dev->xfer_result = r;
@ -1048,11 +943,6 @@ static void receive_file_work(struct work_struct *data)
if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
count, dev->ep_out->maxpacket);
if (delayed_work_pending(&cpu_freq_qos_work))
cancel_delayed_work(&cpu_freq_qos_work);
pm_qos_update_request(&devfreq_mtp_request, MAX_CPUFREQ);
pm_qos_update_request(&little_cpu_mtp_freq, MAX_CPUFREQ);
pm_qos_update_request(&big_cpu_mtp_freq, MAX_CPUFREQ - 1);
while (count > 0 || write_req) {
if (count > 0) {
@ -1157,20 +1047,12 @@ static void receive_file_work(struct work_struct *data)
}
}
queue_delayed_work(cpu_freq_qos_queue, &cpu_freq_qos_work, msecs_to_jiffies(1000)*3);
DBG(cdev, "receive_file_work returning %d\n", r);
/* write the result */
dev->xfer_result = r;
smp_wmb();
}
static void update_qos_request(struct work_struct *data)
{
pm_qos_update_request(&devfreq_mtp_request, MIN_CPUFREQ);
pm_qos_update_request(&little_cpu_mtp_freq, MIN_CPUFREQ);
pm_qos_update_request(&big_cpu_mtp_freq, MIN_CPUFREQ);
}
static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
{
struct usb_request *req = NULL;
@ -1255,11 +1137,6 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
dev->xfer_send_header = 0;
} else {
work = &dev->receive_file_work;
pm_qos_update_request(&devfreq_mtp_request, MAX_CPUFREQ);
pm_qos_update_request(&little_cpu_mtp_freq, MAX_CPUFREQ);
pm_qos_update_request(&big_cpu_mtp_freq, MAX_CPUFREQ - 1);
msm_cpuidle_set_sleep_disable(true);
mtp_receive_flag = true;
}
/* We do the file transfer on a work queue so it will run
@ -1269,16 +1146,6 @@ static long mtp_send_receive_ioctl(struct file *fp, unsigned code,
queue_work(dev->wq, work);
/* wait for operation to complete */
flush_workqueue(dev->wq);
if (mtp_receive_flag) {
mtp_receive_flag = false;
pm_qos_update_request_timeout(&devfreq_mtp_request,
MAX_CPUFREQ, PM_QOS_TIMEOUT);
pm_qos_update_request_timeout(&little_cpu_mtp_freq,
MAX_CPUFREQ, PM_QOS_TIMEOUT);
pm_qos_update_request_timeout(&big_cpu_mtp_freq,
MAX_CPUFREQ-1, PM_QOS_TIMEOUT);
msm_cpuidle_set_sleep_disable(false);
}
fput(filp);
/* read the result */
@ -1425,11 +1292,6 @@ static int mtp_release(struct inode *ip, struct file *fp)
{
printk(KERN_INFO "mtp_release\n");
if (mtp_receive_flag) {
mtp_receive_flag = false;
msm_cpuidle_set_sleep_disable(false);
}
mtp_unlock(&_mtp_dev->open_excl);
return 0;
}
@ -1867,12 +1729,6 @@ static int __mtp_setup(struct mtp_instance *fi_mtp)
INIT_WORK(&dev->send_file_work, send_file_work);
INIT_WORK(&dev->receive_file_work, receive_file_work);
cpu_freq_qos_queue = create_singlethread_workqueue("f_mtp_qos");
INIT_DELAYED_WORK(&cpu_freq_qos_work, update_qos_request);
pm_qos_add_request(&devfreq_mtp_request, PM_QOS_DEVFREQ_MIN, MIN_CPUFREQ);
pm_qos_add_request(&little_cpu_mtp_freq, PM_QOS_C0_CPUFREQ_MIN, MIN_CPUFREQ);
pm_qos_add_request(&big_cpu_mtp_freq, PM_QOS_C1_CPUFREQ_MIN, MIN_CPUFREQ);
_mtp_dev = dev;
ret = misc_register(&mtp_device);
@ -1883,10 +1739,6 @@ static int __mtp_setup(struct mtp_instance *fi_mtp)
return 0;
err2:
pm_qos_remove_request(&big_cpu_mtp_freq);
pm_qos_remove_request(&little_cpu_mtp_freq);
pm_qos_remove_request(&devfreq_mtp_request);
destroy_workqueue(cpu_freq_qos_queue);
destroy_workqueue(dev->wq);
err1:
_mtp_dev = NULL;
@ -1909,10 +1761,6 @@ static void mtp_cleanup(void)
return;
mtp_debugfs_remove();
pm_qos_remove_request(&big_cpu_mtp_freq);
pm_qos_remove_request(&little_cpu_mtp_freq);
pm_qos_remove_request(&devfreq_mtp_request);
destroy_workqueue(cpu_freq_qos_queue);
misc_deregister(&mtp_device);
destroy_workqueue(dev->wq);
_mtp_dev = NULL;
@ -2031,10 +1879,6 @@ static int mtp_ctrlreq_configfs(struct usb_function *f,
static void mtp_free(struct usb_function *f)
{
/*NO-OP: no function specific resource allocation in mtp_alloc*/
struct mtp_instance *fi_mtp;
fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
fi_mtp->func_inst.f = NULL;
}
struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,