mhi: core: add support for MHI host managing firmware upload

Add support for the MHI host to directly upload firmware
to compatible devices using the MHI BHI/BHIe protocol.

CRs-Fixed: 1095436
Change-Id: Iff7043f1f9afc4824edeaeccc46ed427ce7ee291
Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
Author: Sujeev Dias <sdias@codeaurora.org>
Date:   2016-11-21 14:25:42 -08:00
Parent: 2583f4c5d6
Commit: 6f370e6a8b

8 changed files with 668 additions and 168 deletions

---- changed file 1 of 8 ----

@@ -5,56 +5,140 @@ Modem Host Interface protocol. The bindings referred to below, enable
 the correct configuration of the interface and required sideband
 signals.
 
-Required properties:
-  - compatible: should be "qcom,mhi"
-  - qcom,pci-dev_id: device id reported by modem
-  - qcom,pci-domain: pci root complex device connected to
-  - qcom,pci-bus: pci bus device connected to
-  - qcom,pci-slot: pci slot device connected to
-  - Refer to "Documentation/devicetree/bindings/esoc/esoc_client.txt" for
-    below properties:
-	- esoc-names
-	- esoc-0
-  - Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
-    below optional properties:
-	- qcom,msm-bus,name
-	- qcom,msm-bus,num-cases
-	- qcom,msm-bus,num-paths
-	- qcom,msm-bus,vectors-KBps
-  - mhi-chan-cfg-#: mhi channel configuration parameters for platform
-    defined as below <A B C D>:
-	A = chan number
-	B = maximum descriptors
-	C = event ring associated with channel
-	D = flags defined by mhi_macros.h GET_CHAN_PROPS
-  - mhi-event-cfg-#: mhi event ring configuration parameters for platform
-    defined as below <A B C D E>:
-	A = maximum event descriptors
-	B = MSI associated with event
-	C = interrupt moderation (see MHI specification)
-	D = Associated channel
-	E = flags defined by mhi_macros.h GET_EV_PROPS
-  - mhi-event-rings: number of event rings supported by platform
-  - qcom,mhi-address-window: range of the MHI device addressing window
+==============
+Node Structure
+==============
+
+Main node properties:
+
+- compatible
+	Usage: required
+	Value type: <string>
+	Definition: "qcom,mhi"
+
+- qcom,pci-dev_id
+	Usage: required
+	Value type: <u32>
+	Definition: Device id reported by modem
+
+- qcom,pci-domain
+	Usage: required
+	Value type: <u32>
+	Definition: PCIe root complex the device is connected to
+
+- qcom,pci-bus
+	Usage: required
+	Value type: <u32>
+	Definition: PCIe bus the device is connected to
+
+- qcom,pci-slot
+	Usage: required
+	Value type: <u32>
+	Definition: PCIe slot (dev_id/function) the device is connected to
+
+- esoc-names
+	Usage: optional
+	Value type: <string>
+	Definition: esoc name for the device
+
+- esoc-0
+	Usage: required if "esoc-names" is defined
+	Value type: phandle
+	Definition: A phandle pointing to the esoc node.
+
+- qcom,msm-bus,name
+	Usage: required if MHI is bus master
+	Value type: string
+	Definition: string representing the client name
+
+- qcom,msm-bus,num-cases
+	Usage: required if MHI is bus master
+	Value type: <u32>
+	Definition: Number of use cases MHI supports. Must be set to 2.
+
+- qcom,msm-bus,num-paths
+	Usage: required if MHI is bus master
+	Value type: <u32>
+	Definition: Total number of master-slave pairs. Must be set to 1.
+
+- qcom,msm-bus,vectors-KBps
+	Usage: required if MHI is bus master
+	Value type: Array of <u32>
+	Definition: Array of tuples which define the bus bandwidth requirements.
+		Each tuple is of length 4, values are master-id, slave-id,
+		arbitrated bandwidth in KBps, and instantaneous bandwidth in
+		KBps.
+
+- mhi-chan-cfg-#
+	Usage: required
+	Value type: Array of <u32>
+	Definition: mhi channel configuration parameters for the platform,
+		defined as <A B C D>:
+		A = channel number
+		B = maximum descriptors
+		C = event ring associated with the channel
+		D = flags defined by mhi_macros.h GET_CHAN_PROPS
+
+- mhi-event-rings
+	Usage: required
+	Value type: <u32>
+	Definition: Number of event rings the device supports
+
+- mhi-event-cfg-#
+	Usage: required
+	Value type: Array of <u32>
+	Definition: mhi event ring configuration parameters for the platform,
+		defined as <A B C D E>:
+		A = maximum event descriptors
+		B = MSI associated with the event ring
+		C = interrupt moderation (see MHI specification)
+		D = associated channel
+		E = flags defined by mhi_macros.h GET_EV_PROPS
+
+- qcom,mhi-address-window
+	Usage: required
+	Value type: Array of <u64>
+	Definition: Starting and ending DDR addresses the device can access.
+
+- qcom,mhi-manage-boot
+	Usage: optional
+	Value type: bool
+	Definition: Determines whether the MHI host manages firmware download
+		to the device.
+
+- qcom,mhi-fw-image
+	Usage: required if the MHI host manages the firmware download process
+	Value type: string
+	Definition: firmware image name
+
+- qcom,mhi-max-sbl
+	Usage: required if the MHI host manages the firmware download process
+	Value type: <u32>
+	Definition: Maximum SBL image size, in bytes, the device supports.
+
+- qcom,mhi-sg-size
+	Usage: required if the MHI host manages the firmware download process
+	Value type: <u32>
+	Definition: Size in bytes of each scatter-gather segment.
+
+========
 Example:
+========
+
 	mhi: qcom,mhi {
 		compatible = "qcom,mhi";
 		qcom,pci-dev_id = <0x0301>;
 		qcom,pci-domain = <2>;
 		qcom,pci-bus = <4>;
 		qcom,pci-slot = <0>;
 		qcom,mhi-address-window = <0x0 0x80000000 0x0 0xbfffffff>;
 		esoc-names = "mdm";
 		esoc-0 = <&mdm1>;
 		qcom,msm-bus,name = "mhi";
 		qcom,msm-bus,num-cases = <2>;
 		qcom,msm-bus,num-paths = <1>;
 		qcom,msm-bus,vectors-KBps =
 			<100 512 0 0>,
 			<100 512 1200000000 1200000000>;
 		mhi-event-rings = <1>;
 		mhi-chan-cfg-102 = <0x66 0x80 0x5 0x62>;
 		mhi-event-cfg-0 = <0x80 0x0 0x0 0x0 0x11>;
 	};
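
For reference, a node that opts into host-managed firmware download carries
the boot properties documented above. A minimal illustrative fragment (the
image name and sizes are placeholders, not values from this patch):

	mhi: qcom,mhi {
		/* ... properties as in the example above ... */

		/* host-managed firmware download; illustrative values */
		qcom,mhi-manage-boot;
		qcom,mhi-fw-image = "sbl1.mbn";	/* placeholder image name */
		qcom,mhi-max-sbl = <0x100000>;	/* max SBL image: 1 MiB */
		qcom,mhi-sg-size = <0x80000>;	/* 512 KiB per segment */
	};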

---- changed file 2 of 8 ----

@@ -57,15 +57,47 @@ struct pcie_core_info {
 	bool pci_master;
 };
 
+struct firmware_info {
+	const char *fw_image;
+	size_t max_sbl_len;
+	size_t segment_size;
+};
+
+struct bhie_mem_info {
+	void *pre_aligned;
+	void *aligned;
+	size_t alloc_size;
+	size_t size;
+	phys_addr_t phys_addr;
+	dma_addr_t dma_handle;
+};
+
+struct bhie_vec_table {
+	struct scatterlist *sg_list;
+	struct bhie_mem_info *bhie_mem_info;
+	struct bhi_vec_entry *bhi_vec_entry;
+	unsigned segment_count;
+	u32 sequence; /* sequence to indicate new xfer */
+};
+
 struct bhi_ctxt_t {
 	void __iomem *bhi_base;
+	void *unaligned_image_loc;
+	dma_addr_t dma_handle;
+	size_t alloc_size;
 	void *image_loc;
 	dma_addr_t phy_image_loc;
 	size_t image_size;
-	void *unaligned_image_loc;
 	dev_t bhi_dev;
 	struct cdev cdev;
 	struct device *dev;
+	u32 alignment;
+	u32 poll_timeout;
+	/* BHI/E vector table */
+	bool manage_boot; /* fw download done by MHI host */
+	struct work_struct fw_load_work;
+	struct firmware_info firmware_info;
+	struct bhie_vec_table fw_table;
 };
 
 enum MHI_CHAN_DIR {
@@ -344,25 +376,27 @@ enum MHI_INIT_ERROR_STAGE {
 };
 
 enum STATE_TRANSITION {
-	STATE_TRANSITION_RESET = 0x0,
-	STATE_TRANSITION_READY = 0x1,
-	STATE_TRANSITION_M0 = 0x2,
-	STATE_TRANSITION_M1 = 0x3,
-	STATE_TRANSITION_M2 = 0x4,
-	STATE_TRANSITION_M3 = 0x5,
-	STATE_TRANSITION_BHI = 0x6,
-	STATE_TRANSITION_SBL = 0x7,
-	STATE_TRANSITION_AMSS = 0x8,
-	STATE_TRANSITION_LINK_DOWN = 0x9,
-	STATE_TRANSITION_WAKE = 0xA,
-	STATE_TRANSITION_SYS_ERR = 0xFF,
-	STATE_TRANSITION_reserved = 0x80000000
+	STATE_TRANSITION_RESET = MHI_STATE_RESET,
+	STATE_TRANSITION_READY = MHI_STATE_READY,
+	STATE_TRANSITION_M0 = MHI_STATE_M0,
+	STATE_TRANSITION_M1 = MHI_STATE_M1,
+	STATE_TRANSITION_M2 = MHI_STATE_M2,
+	STATE_TRANSITION_M3 = MHI_STATE_M3,
+	STATE_TRANSITION_BHI,
+	STATE_TRANSITION_SBL,
+	STATE_TRANSITION_AMSS,
+	STATE_TRANSITION_LINK_DOWN,
+	STATE_TRANSITION_WAKE,
+	STATE_TRANSITION_BHIE,
+	STATE_TRANSITION_SYS_ERR,
+	STATE_TRANSITION_MAX
 };
 
 enum MHI_EXEC_ENV {
 	MHI_EXEC_ENV_PBL = 0x0,
 	MHI_EXEC_ENV_SBL = 0x1,
 	MHI_EXEC_ENV_AMSS = 0x2,
+	MHI_EXEC_ENV_BHIE = 0x3,
 	MHI_EXEC_ENV_reserved = 0x80000000
 };
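
The BHI/E transfer described by bhie_vec_table splits the image into
fixed-size DMA segments plus one extra segment that holds the vector table
itself. A standalone sketch of that arithmetic, assuming a 1 MiB image and
256 KiB segments (sizes are illustrative, not from the patch):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* mirrors struct bhi_vec_entry: one {addr, size} per data segment */
	struct vec_entry { unsigned long long phys_addr, size; };

	int main(void)
	{
		size_t fw_size  = 1024 * 1024;	/* firmware image (example) */
		size_t seg_size = 256 * 1024;	/* qcom,mhi-sg-size (example) */

		/* one additional segment holds the vector table itself */
		int segments = DIV_ROUND_UP(fw_size, seg_size) + 1;

		/* the table has one entry per data segment */
		size_t table_bytes = sizeof(struct vec_entry) * (segments - 1);

		printf("segments=%d (%d data + 1 table), table=%zu bytes\n",
		       segments, segments - 1, table_bytes);
		return 0;
	}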

---- changed file 3 of 8 ----

@@ -10,6 +10,7 @@
  * GNU General Public License for more details.
  */
 
+#include <linux/firmware.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
@@ -32,79 +33,198 @@ static int bhi_open(struct inode *mhi_inode, struct file *file_handle)
 	return 0;
 }
 
-static ssize_t bhi_write(struct file *file,
-		const char __user *buf,
-		size_t count, loff_t *offp)
+static int bhi_alloc_bhie_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+			       size_t size,
+			       struct bhie_vec_table *vec_table)
 {
-	int ret_val = 0;
-	u32 pcie_word_val = 0;
-	u32 i = 0;
-	struct mhi_device_ctxt *mhi_dev_ctxt = file->private_data;
 	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
-	size_t amount_copied = 0;
-	uintptr_t align_len = 0x1000;
-	u32 tx_db_val = 0;
-	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
-	const long bhi_timeout_ms = 1000;
-	long timeout;
-
-	if (buf == NULL || 0 == count)
-		return -EIO;
-
-	if (count > BHI_MAX_IMAGE_SIZE)
-		return -ENOMEM;
-
-	timeout = wait_event_interruptible_timeout(
-			*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
-			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
-			msecs_to_jiffies(bhi_timeout_ms));
-	if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
-		return -EIO;
+	struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+	const u32 align = bhi_ctxt->alignment - 1;
+	size_t seg_size = bhi_ctxt->firmware_info.segment_size;
+	/* We need one additional entry for Vector Table */
+	int segments = DIV_ROUND_UP(size, seg_size) + 1;
+	int i;
+	struct scatterlist *sg_list;
+	struct bhie_mem_info *bhie_mem_info, *info;
 
 	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-		"Entered. User Image size 0x%zx\n", count);
+		"Total size:%lu total_seg:%d seg_size:%lu\n",
+		size, segments, seg_size);
 
-	bhi_ctxt->unaligned_image_loc = kmalloc(count + (align_len - 1),
-						GFP_KERNEL);
+	sg_list = kcalloc(segments, sizeof(*sg_list), GFP_KERNEL);
+	if (!sg_list)
+		return -ENOMEM;
+
+	bhie_mem_info = kcalloc(segments, sizeof(*bhie_mem_info), GFP_KERNEL);
+	if (!bhie_mem_info)
+		goto alloc_bhi_mem_info_error;
+
+	/* Allocate buffers for bhi/e vector table */
+	for (i = 0; i < segments; i++) {
+		size_t size = seg_size;
+
+		/* Last entry is for the vector table */
+		if (i == segments - 1)
+			size = sizeof(struct bhi_vec_entry) * i;
+		info = &bhie_mem_info[i];
+		info->size = size;
+		info->alloc_size = info->size + align;
+		info->pre_aligned =
+			dma_alloc_coherent(dev, info->alloc_size,
+					   &info->dma_handle, GFP_KERNEL);
+		if (!info->pre_aligned)
+			goto alloc_dma_error;
+
+		info->phys_addr = (info->dma_handle + align) & ~align;
+		info->aligned = info->pre_aligned +
+			(info->phys_addr - info->dma_handle);
+		mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"Seg:%d unaligned Img: 0x%llx aligned:0x%llx\n",
+			i, info->dma_handle, info->phys_addr);
+	}
+
+	sg_init_table(sg_list, segments);
+	sg_set_buf(sg_list, info->aligned, info->size);
+	sg_dma_address(sg_list) = info->phys_addr;
+	sg_dma_len(sg_list) = info->size;
+
+	vec_table->sg_list = sg_list;
+	vec_table->bhie_mem_info = bhie_mem_info;
+	vec_table->bhi_vec_entry = info->aligned;
+	vec_table->segment_count = segments;
+
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+		"BHI/E table successfully allocated\n");
+	return 0;
+
+alloc_dma_error:
+	for (i = i - 1; i >= 0; i--)
+		dma_free_coherent(dev,
+				  bhie_mem_info[i].alloc_size,
+				  bhie_mem_info[i].pre_aligned,
+				  bhie_mem_info[i].dma_handle);
+	kfree(bhie_mem_info);
+alloc_bhi_mem_info_error:
+	kfree(sg_list);
+	return -ENOMEM;
+}
+
+static int bhi_alloc_pbl_xfer(struct mhi_device_ctxt *mhi_dev_ctxt,
+			      size_t size)
+{
+	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+	const u32 align_len = bhi_ctxt->alignment;
+	size_t alloc_size = size + (align_len - 1);
+	struct device *dev = &mhi_dev_ctxt->plat_dev->dev;
+
+	bhi_ctxt->unaligned_image_loc =
+		dma_alloc_coherent(dev, alloc_size, &bhi_ctxt->dma_handle,
+				   GFP_KERNEL);
 	if (bhi_ctxt->unaligned_image_loc == NULL)
 		return -ENOMEM;
-	bhi_ctxt->image_loc =
-		(void *)((uintptr_t)bhi_ctxt->unaligned_image_loc +
-		 (align_len - (((uintptr_t)bhi_ctxt->unaligned_image_loc) %
-			       align_len)));
 
-	bhi_ctxt->image_size = count;
-
-	if (0 != copy_from_user(bhi_ctxt->image_loc, buf, count)) {
-		ret_val = -ENOMEM;
-		goto bhi_copy_error;
-	}
-	amount_copied = count;
-	/* Flush the writes, in anticipation for a device read */
-	wmb();
-
-	bhi_ctxt->phy_image_loc = dma_map_single(
-			&mhi_dev_ctxt->plat_dev->dev,
-			bhi_ctxt->image_loc,
-			bhi_ctxt->image_size,
-			DMA_TO_DEVICE);
-
-	if (dma_mapping_error(NULL, bhi_ctxt->phy_image_loc)) {
-		ret_val = -EIO;
-		goto bhi_copy_error;
-	}
+	bhi_ctxt->alloc_size = alloc_size;
+	bhi_ctxt->phy_image_loc = (bhi_ctxt->dma_handle + (align_len - 1)) &
+		~(align_len - 1);
+	bhi_ctxt->image_loc = bhi_ctxt->unaligned_image_loc +
+		(bhi_ctxt->phy_image_loc - bhi_ctxt->dma_handle);
+	bhi_ctxt->image_size = size;
 
 	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
-		"Mapped image to DMA addr 0x%llx:\n", bhi_ctxt->phy_image_loc);
+		"alloc_size:%lu image_size:%lu unal_addr:0x%llx al_addr:0x%llx\n",
+		bhi_ctxt->alloc_size, bhi_ctxt->image_size,
+		bhi_ctxt->dma_handle, bhi_ctxt->phy_image_loc);
 
-	bhi_ctxt->image_size = count;
+	return 0;
+}
+
+/* Load firmware via bhie protocol */
+static int bhi_load_bhie_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+	struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+	const struct bhie_mem_info *bhie_mem_info =
+		&fw_table->bhie_mem_info[fw_table->segment_count - 1];
+	u32 val;
+	const u32 tx_sequence = fw_table->sequence++;
+	unsigned long timeout;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+
+	/* Program TX/RX Vector table */
+	read_lock_bh(pm_xfer_lock);
+	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+		read_unlock_bh(pm_xfer_lock);
+		return -EIO;
+	}
+	val = HIGH_WORD(bhie_mem_info->phys_addr);
+	mhi_reg_write(mhi_dev_ctxt,
+		      bhi_ctxt->bhi_base,
+		      BHIE_TXVECADDR_HIGH_OFFS,
+		      val);
+	val = LOW_WORD(bhie_mem_info->phys_addr);
+	mhi_reg_write(mhi_dev_ctxt,
+		      bhi_ctxt->bhi_base,
+		      BHIE_TXVECADDR_LOW_OFFS,
+		      val);
+	val = (u32)bhie_mem_info->size;
+	mhi_reg_write(mhi_dev_ctxt,
+		      bhi_ctxt->bhi_base,
+		      BHIE_TXVECSIZE_OFFS,
+		      val);
+
+	/* Ring DB to begin Xfer */
+	mhi_reg_write_field(mhi_dev_ctxt,
+			    bhi_ctxt->bhi_base,
+			    BHIE_TXVECDB_OFFS,
+			    BHIE_TXVECDB_SEQNUM_BMSK,
+			    BHIE_TXVECDB_SEQNUM_SHFT,
+			    tx_sequence);
+	read_unlock_bh(pm_xfer_lock);
+
+	timeout = jiffies + msecs_to_jiffies(bhi_ctxt->poll_timeout);
+	while (time_before(jiffies, timeout)) {
+		u32 current_seq, status;
+
+		read_lock_bh(pm_xfer_lock);
+		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+			read_unlock_bh(pm_xfer_lock);
+			return -EIO;
+		}
+		val = mhi_reg_read(bhi_ctxt->bhi_base, BHIE_TXVECSTATUS_OFFS);
+		read_unlock_bh(pm_xfer_lock);
+		mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"TXVEC_STATUS:0x%x\n", val);
+		current_seq = (val & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
+			BHIE_TXVECSTATUS_SEQNUM_SHFT;
+		status = (val & BHIE_TXVECSTATUS_STATUS_BMSK) >>
+			BHIE_TXVECSTATUS_STATUS_SHFT;
+		if ((status == BHIE_TXVECSTATUS_STATUS_XFER_COMPL) &&
+		    (current_seq == tx_sequence)) {
+			mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+				"Image transfer complete\n");
+			return 0;
+		}
+		msleep(BHI_POLL_SLEEP_TIME_MS);
+	}
+
+	mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+		"Error xfering image via BHIE\n");
+	return -EIO;
+}
+
+static int bhi_load_firmware(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+	u32 pcie_word_val = 0;
+	u32 tx_db_val = 0;
+	unsigned long timeout;
+	rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
 
 	/* Write the image size */
 	read_lock_bh(pm_xfer_lock);
 	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
 		read_unlock_bh(pm_xfer_lock);
-		goto bhi_copy_error;
+		return -EIO;
 	}
 	pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
 	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
@@ -128,16 +248,15 @@ static ssize_t bhi_write(struct file *file,
 	pcie_word_val = mhi_reg_read(bhi_ctxt->bhi_base, BHI_IMGTXDB);
 	mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
 			    BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
-	mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
 	read_unlock_bh(pm_xfer_lock);
 
-	for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
+	timeout = jiffies + msecs_to_jiffies(bhi_ctxt->poll_timeout);
+	while (time_before(jiffies, timeout)) {
 		u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
 
 		read_lock_bh(pm_xfer_lock);
 		if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
 			read_unlock_bh(pm_xfer_lock);
-			goto bhi_copy_error;
+			return -EIO;
 		}
 		err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
 		errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
@@ -148,34 +267,83 @@ static ssize_t bhi_write(struct file *file,
 					    BHI_STATUS_MASK,
 					    BHI_STATUS_SHIFT);
 		read_unlock_bh(pm_xfer_lock);
-		mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
-			"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
-			tx_db_val, err, errdbg1, errdbg2, errdbg3);
-		if (BHI_STATUS_SUCCESS != tx_db_val)
-			mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
-				"Incorrect BHI status: %d retry: %d\n",
-				tx_db_val, i);
-		else
+		mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+			"%s 0x%x %s:0x%x %s:0x%x %s:0x%x %s:0x%x\n",
+			"BHI STATUS", tx_db_val,
+			"err", err,
+			"errdbg1", errdbg1,
+			"errdbg2", errdbg2,
+			"errdbg3", errdbg3);
+		if (tx_db_val == BHI_STATUS_SUCCESS)
 			break;
-		usleep_range(20000, 25000);
+		mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "retrying...\n");
+		msleep(BHI_POLL_SLEEP_TIME_MS);
 	}
 
-	dma_unmap_single(&mhi_dev_ctxt->plat_dev->dev,
-			 bhi_ctxt->phy_image_loc,
-			 bhi_ctxt->image_size, DMA_TO_DEVICE);
-
-	kfree(bhi_ctxt->unaligned_image_loc);
+	return (tx_db_val == BHI_STATUS_SUCCESS) ? 0 : -EIO;
+}
+
+static ssize_t bhi_write(struct file *file,
+			 const char __user *buf,
+			 size_t count, loff_t *offp)
+{
+	int ret_val = 0;
+	struct mhi_device_ctxt *mhi_dev_ctxt = file->private_data;
+	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+	long timeout;
+
+	if (buf == NULL || 0 == count)
+		return -EIO;
+
+	if (count > BHI_MAX_IMAGE_SIZE)
+		return -ENOMEM;
+
+	ret_val = bhi_alloc_pbl_xfer(mhi_dev_ctxt, count);
+	if (ret_val)
+		return -ENOMEM;
+
+	if (copy_from_user(bhi_ctxt->image_loc, buf, count)) {
+		ret_val = -ENOMEM;
+		goto bhi_copy_error;
+	}
+
+	timeout = wait_event_interruptible_timeout(
+			*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+			mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+			msecs_to_jiffies(bhi_ctxt->poll_timeout));
+	if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI) {
+		ret_val = -EIO;
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Timed out waiting for BHI\n");
+		goto bhi_copy_error;
+	}
+
+	ret_val = bhi_load_firmware(mhi_dev_ctxt);
+	if (ret_val) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Failed to load bhi image\n");
+	}
+	dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+			  bhi_ctxt->alloc_size,
+			  bhi_ctxt->unaligned_image_loc,
+			  bhi_ctxt->dma_handle);
 
+	/* Regardless of failure set to RESET state */
 	ret_val = mhi_init_state_transition(mhi_dev_ctxt,
 					    STATE_TRANSITION_RESET);
 	if (ret_val) {
-		mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
 			"Failed to start state change event\n");
 	}
-	return amount_copied;
+	return count;
 
 bhi_copy_error:
-	kfree(bhi_ctxt->unaligned_image_loc);
-	return amount_copied;
+	dma_free_coherent(&mhi_dev_ctxt->plat_dev->dev,
+			  bhi_ctxt->alloc_size,
+			  bhi_ctxt->unaligned_image_loc,
+			  bhi_ctxt->dma_handle);
+
+	return ret_val;
 }
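
When the host does not manage boot, this path is still driven from userspace
through the bhi character device: read the image, then push it in a single
write(2). A hypothetical userspace sketch (the device node path and image
name are illustrative only; the driver derives the real node name from the
PCIe topology):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		const char *node = "/dev/bhi_0002_04.00.00"; /* placeholder */
		FILE *img;
		long len;
		char *buf;
		int fd;

		img = fopen("sbl.mbn", "rb");	/* placeholder image name */
		if (!img)
			return 1;
		fseek(img, 0, SEEK_END);
		len = ftell(img);
		rewind(img);

		buf = malloc(len);
		if (!buf || fread(buf, 1, len, img) != (size_t)len)
			return 1;
		fclose(img);

		fd = open(node, O_WRONLY);
		if (fd < 0)
			return 1;
		/* bhi_write() copies the image, waits for the BHI state,
		 * then rings the image-transfer doorbell; a short write
		 * signals failure. */
		if (write(fd, buf, len) != len)
			return 1;
		close(fd);
		free(buf);
		return 0;
	}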
@@ -183,16 +351,14 @@ static const struct file_operations bhi_fops = {
 	.open = bhi_open,
 };
 
-int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
+int bhi_expose_dev_bhi(struct mhi_device_ctxt *mhi_dev_ctxt)
 {
+	int ret_val;
 	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
 	const struct pcie_core_info *core = &mhi_dev_ctxt->core;
-	int ret_val = 0;
-	int r;
 	char node_name[32];
 
-	if (bhi_ctxt->bhi_base == NULL)
-		return -EIO;
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Creating dev node\n");
 
 	ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
 	if (IS_ERR_VALUE(ret_val)) {
@@ -214,12 +380,130 @@ int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
 	if (IS_ERR(bhi_ctxt->dev)) {
 		mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
 			"Failed to add bhi cdev\n");
-		r = PTR_RET(bhi_ctxt->dev);
+		ret_val = PTR_RET(bhi_ctxt->dev);
 		goto err_dev_create;
 	}
 	return 0;
 
 err_dev_create:
 	cdev_del(&bhi_ctxt->cdev);
 	unregister_chrdev_region(MAJOR(bhi_ctxt->bhi_dev), 1);
-	return r;
+	return ret_val;
+}
+
+void bhi_firmware_download(struct work_struct *work)
+{
+	struct mhi_device_ctxt *mhi_dev_ctxt;
+	struct bhi_ctxt_t *bhi_ctxt;
+	int ret;
+	long timeout;
+
+	mhi_dev_ctxt = container_of(work, struct mhi_device_ctxt,
+				    bhi_ctxt.fw_load_work);
+	bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Enter\n");
+
+	wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+				 mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+
+	ret = bhi_load_firmware(mhi_dev_ctxt);
+	if (ret) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Failed to load sbl firmware\n");
+		return;
+	}
+	mhi_init_state_transition(mhi_dev_ctxt,
+				  STATE_TRANSITION_RESET);
+
+	timeout = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+			mhi_dev_ctxt->dev_exec_env == MHI_EXEC_ENV_BHIE,
+			msecs_to_jiffies(bhi_ctxt->poll_timeout));
+	if (!timeout) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Failed to enter EXEC_ENV_BHIE\n");
+		return;
+	}
+
+	ret = bhi_load_bhie_firmware(mhi_dev_ctxt);
+	if (ret) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Failed to load amss firmware\n");
+	}
+}
+
+int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+	struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+	struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
+	struct bhie_vec_table *fw_table = &bhi_ctxt->fw_table;
+	const struct firmware *firmware;
+	struct scatterlist *itr;
+	int ret, i;
+	size_t remainder;
+	const u8 *image;
+
+	/* expose dev node to userspace */
+	if (bhi_ctxt->manage_boot == false)
+		return bhi_expose_dev_bhi(mhi_dev_ctxt);
+
+	/* Make sure the minimum buffer we allocate for BHI/E is >= sbl image */
+	while (fw_info->segment_size < fw_info->max_sbl_len)
+		fw_info->segment_size <<= 1;
+
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO,
+		"max sbl image size:%lu segment size:%lu\n",
+		fw_info->max_sbl_len, fw_info->segment_size);
+
+	/* Read the fw image */
+	ret = request_firmware(&firmware, fw_info->fw_image,
+			       &mhi_dev_ctxt->plat_dev->dev);
+	if (ret) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Error request firmware for:%s ret:%d\n",
+			fw_info->fw_image, ret);
+		return ret;
+	}
+
+	ret = bhi_alloc_bhie_xfer(mhi_dev_ctxt,
+				  firmware->size,
+				  fw_table);
+	if (ret) {
+		mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+			"Error allocating memory for firmware image\n");
+		release_firmware(firmware);
+		return ret;
+	}
+
+	/* Copy the fw image to vector table */
+	remainder = firmware->size;
+	image = firmware->data;
+	for (i = 0, itr = &fw_table->sg_list[1];
+	     i < fw_table->segment_count - 1; i++, itr++) {
+		size_t to_copy = min(remainder, fw_info->segment_size);
+
+		memcpy(fw_table->bhie_mem_info[i].aligned, image, to_copy);
+		fw_table->bhi_vec_entry[i].phys_addr =
+			fw_table->bhie_mem_info[i].phys_addr;
+		fw_table->bhi_vec_entry[i].size = to_copy;
+		sg_set_buf(itr, fw_table->bhie_mem_info[i].aligned, to_copy);
+		sg_dma_address(itr) = fw_table->bhie_mem_info[i].phys_addr;
+		sg_dma_len(itr) = to_copy;
+
+		remainder -= to_copy;
+		image += to_copy;
+	}
+
+	/*
+	 * Re-use the BHI/E pointer for BHI since we guaranteed the BHI/E
+	 * segment is >= the SBL image.
+	 */
+	bhi_ctxt->phy_image_loc = sg_dma_address(&fw_table->sg_list[1]);
+	bhi_ctxt->image_size = fw_info->max_sbl_len;
+	fw_table->sequence++;
+
+	release_firmware(firmware);
+
+	/* Schedule a worker thread and wait for BHI Event */
+	schedule_work(&bhi_ctxt->fw_load_work);
+
+	return 0;
 }
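
Both allocation paths above use the same power-of-two align-up trick:
over-allocate by (alignment - 1) bytes, round the DMA handle up to the next
boundary, and advance the CPU pointer by the same delta. A standalone
illustration (addresses are examples, not from the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t align = 0x1000;	  /* BHI_DEFAULT_ALIGNMENT */
		uint64_t dma_handle = 0x80001234; /* example unaligned handle */

		/* round up to the next 4 KiB boundary; valid only for
		 * power-of-two alignments */
		uint64_t aligned = (dma_handle + (align - 1)) & ~(align - 1);

		/* the CPU virtual pointer is advanced by the same delta */
		uint64_t delta = aligned - dma_handle;

		printf("aligned=0x%llx delta=0x%llx\n",
		       (unsigned long long)aligned,
		       (unsigned long long)delta);
		return 0;
	}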

---- changed file 4 of 8 ----

@@ -42,6 +42,38 @@
 #define BHI_STATUS_SUCCESS	(2)
 #define BHI_STATUS_RESET	(0)
 
+/* BHIE Offsets */
+#define BHIE_OFFSET (0x0124) /* BHIE register space offset from BHI base */
+#define BHIE_MSMSOCID_OFFS (BHIE_OFFSET + 0x0000)
+#define BHIE_TXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x002C)
+#define BHIE_TXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0030)
+#define BHIE_TXVECSIZE_OFFS (BHIE_OFFSET + 0x0034)
+#define BHIE_TXVECDB_OFFS (BHIE_OFFSET + 0x003C)
+#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECDB_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_OFFS (BHIE_OFFSET + 0x0044)
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
+#define BHIE_RXVECADDR_LOW_OFFS (BHIE_OFFSET + 0x0060)
+#define BHIE_RXVECADDR_HIGH_OFFS (BHIE_OFFSET + 0x0064)
+#define BHIE_RXVECSIZE_OFFS (BHIE_OFFSET + 0x0068)
+#define BHIE_RXVECDB_OFFS (BHIE_OFFSET + 0x0070)
+#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECDB_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_OFFS (BHIE_OFFSET + 0x0078)
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
+#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
+#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
+#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
+#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
+
 #define BHI_MAJOR_VERSION 0x0
 #define BHI_MINOR_VERSION 0x1
@@ -51,10 +83,12 @@
 #define BHI_READBUF_SIZE sizeof(bhi_info_type)
 #define BHI_MAX_IMAGE_SIZE (256 * 1024)
+#define BHI_DEFAULT_ALIGNMENT (0x1000)
 
-#define BHI_POLL_SLEEP_TIME	1000
-#define BHI_POLL_NR_RETRIES	10
+#define BHI_POLL_SLEEP_TIME_MS	100
+#define BHI_POLL_TIMEOUT_MS	2000
 
 int bhi_probe(struct mhi_device_ctxt *mhi_dev_ctxt);
+void bhi_firmware_download(struct work_struct *work);
 
 #endif
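
The *_BMSK/*_SHFT pairs pack a 30-bit sequence number and a 2-bit status
into each vector doorbell/status register. A small decode sketch (helper
names are illustrative, not part of the patch):

	/* Illustrative helpers: extract the sequence and status fields from
	 * a TXVECSTATUS value read back from the device. */
	static inline u32 bhie_txvec_seqnum(u32 val)
	{
		return (val & BHIE_TXVECSTATUS_SEQNUM_BMSK) >>
			BHIE_TXVECSTATUS_SEQNUM_SHFT;
	}

	static inline u32 bhie_txvec_status(u32 val)
	{
		return (val & BHIE_TXVECSTATUS_STATUS_BMSK) >>
			BHIE_TXVECSTATUS_STATUS_SHFT;
	}

	/* e.g. for val = 0x80000005:
	 *   bhie_txvec_status(val) == BHIE_TXVECSTATUS_STATUS_XFER_COMPL
	 *   bhie_txvec_seqnum(val) == 5 */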

---- changed file 5 of 8 ----

@@ -415,6 +415,47 @@ static int mhi_plat_probe(struct platform_device *pdev)
 		mhi_dev_ctxt->dev_space.start_win_addr,
 		mhi_dev_ctxt->dev_space.end_win_addr);
 
+	r = of_property_read_u32(of_node, "qcom,bhi-alignment",
+				 &mhi_dev_ctxt->bhi_ctxt.alignment);
+	if (r)
+		mhi_dev_ctxt->bhi_ctxt.alignment = BHI_DEFAULT_ALIGNMENT;
+	r = of_property_read_u32(of_node, "qcom,bhi-poll-timeout",
+				 &mhi_dev_ctxt->bhi_ctxt.poll_timeout);
+	if (r)
+		mhi_dev_ctxt->bhi_ctxt.poll_timeout = BHI_POLL_TIMEOUT_MS;
+
+	mhi_dev_ctxt->bhi_ctxt.manage_boot =
+		of_property_read_bool(pdev->dev.of_node,
+				      "qcom,mhi-manage-boot");
+	if (mhi_dev_ctxt->bhi_ctxt.manage_boot) {
+		struct bhi_ctxt_t *bhi_ctxt = &mhi_dev_ctxt->bhi_ctxt;
+		struct firmware_info *fw_info = &bhi_ctxt->firmware_info;
+
+		r = of_property_read_string(of_node, "qcom,mhi-fw-image",
+					    &fw_info->fw_image);
+		if (r) {
+			mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+				"Error reading DT node 'qcom,mhi-fw-image'\n");
+			return r;
+		}
+		r = of_property_read_u32(of_node, "qcom,mhi-max-sbl",
+					 (u32 *)&fw_info->max_sbl_len);
+		if (r) {
+			mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+				"Error reading DT node 'qcom,mhi-max-sbl'\n");
+			return r;
+		}
+		r = of_property_read_u32(of_node, "qcom,mhi-sg-size",
+					 (u32 *)&fw_info->segment_size);
+		if (r) {
+			mhi_log(mhi_dev_ctxt, MHI_MSG_ERROR,
+				"Error reading DT node 'qcom,mhi-sg-size'\n");
+			return r;
+		}
+		INIT_WORK(&bhi_ctxt->fw_load_work, bhi_firmware_download);
+	}
+
 	mhi_dev_ctxt->plat_dev = pdev;
 	platform_set_drvdata(pdev, mhi_dev_ctxt);
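
Note the probe path also reads two optional tuning properties,
qcom,bhi-alignment and qcom,bhi-poll-timeout, which are not listed in the
bindings document above; defaults apply when they are absent. An
illustrative fragment (values are examples only):

	mhi: qcom,mhi {
		/* ... */
		/* optional tuning knobs; when absent the driver falls back
		   to BHI_DEFAULT_ALIGNMENT (0x1000) and BHI_POLL_TIMEOUT_MS
		   (2000) */
		qcom,bhi-alignment = <0x1000>;
		qcom,bhi-poll-timeout = <5000>;
	};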

---- changed file 6 of 8 ----

@@ -165,6 +165,10 @@ static int mhi_process_event_ring(
 			mhi_init_state_transition(mhi_dev_ctxt,
 						  new_state);
 			break;
+		case MHI_EXEC_ENV_BHIE:
+			new_state = STATE_TRANSITION_BHIE;
+			mhi_init_state_transition(mhi_dev_ctxt,
+						  new_state);
 		}
 		break;
 	}

---- changed file 7 of 8 ----

@@ -19,24 +19,24 @@
 const char *state_transition_str(enum STATE_TRANSITION state)
 {
-	static const char * const mhi_states_transition_str[] = {
-		"RESET",
-		"READY",
-		"M0",
-		"M1",
-		"M2",
-		"M3",
-		"BHI",
-		"SBL",
-		"AMSS",
-		"LINK_DOWN",
-		"WAKE"
+	static const char * const
+		mhi_states_transition_str[STATE_TRANSITION_MAX] = {
+		[STATE_TRANSITION_RESET] = "RESET",
+		[STATE_TRANSITION_READY] = "READY",
+		[STATE_TRANSITION_M0] = "M0",
+		[STATE_TRANSITION_M1] = "M1",
+		[STATE_TRANSITION_M2] = "M2",
+		[STATE_TRANSITION_M3] = "M3",
+		[STATE_TRANSITION_BHI] = "BHI",
+		[STATE_TRANSITION_SBL] = "SBL",
+		[STATE_TRANSITION_AMSS] = "AMSS",
+		[STATE_TRANSITION_LINK_DOWN] = "LINK_DOWN",
+		[STATE_TRANSITION_WAKE] = "WAKE",
+		[STATE_TRANSITION_BHIE] = "BHIE",
+		[STATE_TRANSITION_SYS_ERR] = "SYS_ERR",
 	};
 
-	if (state == STATE_TRANSITION_SYS_ERR)
-		return "SYS_ERR";
-
-	return (state <= STATE_TRANSITION_WAKE) ?
+	return (state < STATE_TRANSITION_MAX) ?
 		mhi_states_transition_str[state] : "Invalid";
 }
@@ -158,6 +158,17 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
 	}
 }
 
+static int process_bhie_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
+				   enum STATE_TRANSITION cur_work_item)
+{
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Entered\n");
+	mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_BHIE;
+	wake_up(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
+	mhi_log(mhi_dev_ctxt, MHI_MSG_INFO, "Exited\n");
+	return 0;
+}
+
 static int process_m0_transition(
 		struct mhi_device_ctxt *mhi_dev_ctxt,
 		enum STATE_TRANSITION cur_work_item)
@@ -579,6 +590,9 @@ static int process_stt_work_item(
 	case STATE_TRANSITION_WAKE:
 		r = process_wake_transition(mhi_dev_ctxt, cur_work_item);
 		break;
+	case STATE_TRANSITION_BHIE:
+		r = process_bhie_transition(mhi_dev_ctxt, cur_work_item);
+		break;
 	default:
 		mhi_log(mhi_dev_ctxt, MHI_MSG_CRITICAL,
 			"Unrecongized state: %s\n",

---- changed file 8 of 8 ----

@@ -117,6 +117,11 @@ struct mhi_client_handle {
 	struct mhi_client_config *client_config;
 };
 
+struct __packed bhi_vec_entry {
+	u64 phys_addr;
+	u64 size;
+};
+
 /**
  * mhi_is_device_ready - Check if MHI is ready to register clients
  *