/*
* io_sm.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* IO dispatcher for a shared memory channel driver.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/*
* Channel Invariant:
* There is an important invariant condition which must be maintained per
* channel outside of bridge_chnl_get_ioc() and IO_Dispatch(): the channel's
* sync_event must be signalled exactly when its IO completion list is
* non-empty (see notify_chnl_complete()). Violating this may cause
* timeouts and/or failure of the sync_wait_on_event function.
*/
#include <linux/types.h>
#include <linux/list.h>
/* Host OS */
#include <dspbridge/host_os.h>
#include <linux/workqueue.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* Services Layer */
#include <dspbridge/ntfy.h>
#include <dspbridge/sync.h>
/* Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
/* Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspioctl.h>
#include <dspbridge/wdt.h>
#include <_tiomap.h>
#include <tiomap_io.h>
#include <_tiomap_pwr.h>
/* Platform Manager */
#include <dspbridge/cod.h>
#include <dspbridge/node.h>
#include <dspbridge/dev.h>
/* Others */
#include <dspbridge/rms_sh.h>
#include <dspbridge/mgr.h>
#include <dspbridge/drv.h>
#include "_cmm.h"
#include "module_list.h"
/* This */
#include <dspbridge/io_sm.h>
#include "_msg_sm.h"
/* Defines, Data Structures, Typedefs */
#define OUTPUTNOTREADY 0xffff
#define NOTENABLED 0xffff /* Channel(s) not enabled */
#define EXTEND "_EXT_END"
#define SWAP_WORD(x) (x)
#define UL_PAGE_ALIGN_SIZE 0x10000 /* Page Align Size */
#define MAX_PM_REQS 32
#define MMU_FAULT_HEAD1 0xa5a5a5a5
#define MMU_FAULT_HEAD2 0x96969696
#define POLL_MAX 1000
#define MAX_MMU_DBGBUFF 10240
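/*
* MMU_FAULT_HEAD1/2 are the magic words the DSP writes at the start of
* its crash dump; dump_dsp_stack() polls for them up to POLL_MAX times
* before giving up, and caps the dump at MAX_MMU_DBGBUFF bytes.
*/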
/* IO Manager: only one created per board */
struct io_mgr {
/* These four fields must be the first fields in an io_mgr struct */
/* Bridge device context */
struct bridge_dev_context *bridge_context;
/* Function interface to Bridge driver */
struct bridge_drv_interface *intf_fxns;
struct dev_object *dev_obj; /* Device this board represents */
/* These fields initialized in bridge_io_create() */
struct chnl_mgr *chnl_mgr;
struct shm *shared_mem; /* Shared Memory control */
u8 *input; /* Address of input channel */
u8 *output; /* Address of output channel */
struct msg_mgr *msg_mgr; /* Message manager */
/* Msg control for messages from the DSP */
struct msg_ctrl *msg_input_ctrl;
/* Msg control for messages to the DSP */
struct msg_ctrl *msg_output_ctrl;
u8 *msg_input; /* Address of input messages */
u8 *msg_output; /* Address of output messages */
u32 sm_buf_size; /* Size of a shared memory I/O channel */
bool shared_irq; /* Is this IRQ shared? */
u32 word_size; /* Size in bytes of DSP word */
u16 intr_val; /* Interrupt value */
/* Private extended proc info; MMU setup */
struct mgr_processorextinfo ext_proc_info;
struct cmm_object *cmm_mgr; /* Shared Mem Mngr */
struct work_struct io_workq; /* workqueue */
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
u32 trace_buffer_begin; /* Trace message start address */
u32 trace_buffer_end; /* Trace message end address */
u32 trace_buffer_current; /* Trace message current address */
u32 gpp_read_pointer; /* GPP Read pointer to Trace buffer */
u8 *msg;
u32 gpp_va;
u32 dsp_va;
#endif
/* IO Dpc */
u32 dpc_req; /* Number of requested DPCs. */
u32 dpc_sched; /* Number of executed DPCs. */
struct tasklet_struct dpc_tasklet;
spinlock_t dpc_lock;
};
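/*
* Shared memory layout symbols read from the loaded base image by
* _get_shm_symbol_values(); all values are in DSP address units.
*/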
struct shm_symbol_val {
u32 shm_base;
u32 shm_lim;
u32 msg_base;
u32 msg_lim;
u32 shm0_end;
u32 dyn_ext;
u32 ext_end;
};
/* Function Prototypes */
static void io_dispatch_pm(struct io_mgr *pio_mgr);
static void notify_chnl_complete(struct chnl_object *pchnl,
struct chnl_irp *chnl_packet_obj);
static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
u8 io_mode);
static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
u8 io_mode);
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
struct chnl_object *pchnl, u32 mask);
/* Bus Addr (cached kernel) */
static int register_shm_segs(struct io_mgr *hio_mgr,
struct cod_manager *cod_man,
u32 dw_gpp_base_pa);
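/*
* shm->host_free_mask has one bit per channel; a set bit tells the DSP
* the host has an empty buffer queued for input on that channel (see
* io_request_chnl() and io_cancel_chnl()).
*/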
static inline void set_chnl_free(struct shm *sm, u32 chnl)
{
sm->host_free_mask &= ~(1 << chnl);
}
static inline void set_chnl_busy(struct shm *sm, u32 chnl)
{
sm->host_free_mask |= 1 << chnl;
}
/*
* ======== bridge_io_create ========
* Create an IO manager object.
*/
int bridge_io_create(struct io_mgr **io_man,
struct dev_object *hdev_obj,
const struct io_attrs *mgr_attrts)
{
struct io_mgr *pio_mgr = NULL;
struct bridge_dev_context *hbridge_context = NULL;
struct cfg_devnode *dev_node_obj;
struct chnl_mgr *hchnl_mgr;
u8 dev_type;
/* Check requirements */
if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0)
return -EFAULT;
*io_man = NULL;
dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
if (!hchnl_mgr || hchnl_mgr->iomgr)
return -EFAULT;
/*
* Message manager will be created when a file is loaded, since
* size of message buffer in shared memory is configurable in
* the base image.
*/
dev_get_bridge_context(hdev_obj, &hbridge_context);
if (!hbridge_context)
return -EFAULT;
dev_get_dev_type(hdev_obj, &dev_type);
/* Allocate IO manager object */
pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
if (!pio_mgr)
return -ENOMEM;
/* Initialize chnl_mgr object */
pio_mgr->chnl_mgr = hchnl_mgr;
pio_mgr->word_size = mgr_attrts->word_size;
if (dev_type == DSP_UNIT) {
/* Create an IO DPC */
tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
/* Initialize DPC counters */
pio_mgr->dpc_req = 0;
pio_mgr->dpc_sched = 0;
spin_lock_init(&pio_mgr->dpc_lock);
if (dev_get_dev_node(hdev_obj, &dev_node_obj)) {
bridge_io_destroy(pio_mgr);
return -EIO;
}
}
pio_mgr->bridge_context = hbridge_context;
pio_mgr->shared_irq = mgr_attrts->irq_shared;
if (dsp_wdt_init()) {
bridge_io_destroy(pio_mgr);
return -EPERM;
}
/* Return IO manager object to caller... */
hchnl_mgr->iomgr = pio_mgr;
*io_man = pio_mgr;
return 0;
}
/*
* ======== bridge_io_destroy ========
* Purpose:
* Disable interrupts, destroy the IO manager.
*/
int bridge_io_destroy(struct io_mgr *hio_mgr)
{
int status = 0;
if (hio_mgr) {
/* Free IO DPC object */
tasklet_kill(&hio_mgr->dpc_tasklet);
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
kfree(hio_mgr->msg);
#endif
dsp_wdt_exit();
/* Free this IO manager object */
kfree(hio_mgr);
} else {
status = -EFAULT;
}
return status;
}
static struct shm_symbol_val *_get_shm_symbol_values(struct io_mgr *hio_mgr)
{
struct shm_symbol_val *s;
struct cod_manager *cod_man;
int status;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return ERR_PTR(-ENOMEM);
status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
if (status)
goto free_symbol;
/* Get start and length of channel part of shared memory */
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
&s->shm_base);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
&s->shm_lim);
if (status)
goto free_symbol;
if (s->shm_lim <= s->shm_base) {
status = -EINVAL;
goto free_symbol;
}
/* Get start and length of message part of shared memory */
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
&s->msg_base);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
&s->msg_lim);
if (status)
goto free_symbol;
if (s->msg_lim <= s->msg_base) {
status = -EINVAL;
goto free_symbol;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
status = cod_get_sym_value(cod_man, DSP_TRACESEC_END, &s->shm0_end);
#else
status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, &s->shm0_end);
#endif
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, DYNEXTBASE, &s->dyn_ext);
if (status)
goto free_symbol;
status = cod_get_sym_value(cod_man, EXTEND, &s->ext_end);
if (status)
goto free_symbol;
return s;
free_symbol:
kfree(s);
return ERR_PTR(status);
}
/*
* ======== bridge_io_on_loaded ========
* Purpose:
* Called when a new program is loaded to get shared memory buffer
* parameters from COFF file. ulSharedBufferBase and ulSharedBufferLimit
* are in DSP address units.
*/
int bridge_io_on_loaded(struct io_mgr *hio_mgr)
{
struct bridge_dev_context *dc = hio_mgr->bridge_context;
struct cfg_hostres *cfg_res = dc->resources;
struct bridge_ioctl_extproc *eproc;
struct cod_manager *cod_man;
struct chnl_mgr *hchnl_mgr;
struct msg_mgr *hmsg_mgr;
struct shm_symbol_val *s;
int status;
u8 num_procs;
s32 ndx;
u32 i;
u32 mem_sz, msg_sz, pad_sz, shm_sz, shm_base_offs;
u32 seg0_sz, seg1_sz;
u32 pa, va, da;
u32 pa_curr, va_curr, da_curr;
u32 bytes;
u32 all_bits = 0;
u32 page_size[] = {
HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
};
u32 map_attrs = DSP_MAPLITTLEENDIAN | DSP_MAPPHYSICALADDR |
DSP_MAPELEMSIZE32 | DSP_MAPDONOTLOCK;
status = dev_get_cod_mgr(hio_mgr->dev_obj, &cod_man);
if (status)
return status;
hchnl_mgr = hio_mgr->chnl_mgr;
/* The message manager is destroyed when the board is stopped */
dev_get_msg_mgr(hio_mgr->dev_obj, &hio_mgr->msg_mgr);
hmsg_mgr = hio_mgr->msg_mgr;
if (!hchnl_mgr || !hmsg_mgr)
return -EFAULT;
hio_mgr->shared_mem = NULL;
s = _get_shm_symbol_values(hio_mgr);
if (IS_ERR(s))
return PTR_ERR(s);
/* Get total length in bytes */
shm_sz = (s->shm_lim - s->shm_base + 1) * hio_mgr->word_size;
/* Calculate size of a PROCCOPY shared memory region */
dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
__func__, shm_sz - sizeof(struct shm));
/* Length (bytes) of messaging part of shared memory */
msg_sz = (s->msg_lim - s->msg_base + 1) * hio_mgr->word_size;
/* Total length (bytes) of shared memory: chnl + msg */
mem_sz = shm_sz + msg_sz;
/* Get memory reserved in host resources */
(void)mgr_enum_processor_info(0,
(struct dsp_processorinfo *)
&hio_mgr->ext_proc_info,
sizeof(struct mgr_processorextinfo),
&num_procs);
/* IO supports only one DSP for now */
if (num_procs != 1) {
status = -EINVAL;
goto free_symbol;
}
/* The first MMU TLB entry (TLB_0) in DCD is ShmBase */
pa = cfg_res->mem_phys[1];
va = cfg_res->mem_base[1];
/* This is the virtual uncached ioremapped address!!! */
/* Why can't we directly take the DSPVA from the symbols? */
da = hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt;
seg0_sz = (s->shm0_end - da) * hio_mgr->word_size;
seg1_sz = (s->ext_end - s->dyn_ext) * hio_mgr->word_size;
/* 4K align */
seg1_sz = (seg1_sz + 0xFFF) & (~0xFFFUL);
/* 64K align */
seg0_sz = (seg0_sz + 0xFFFF) & (~0xFFFFUL);
pad_sz = UL_PAGE_ALIGN_SIZE - ((pa + seg1_sz) % UL_PAGE_ALIGN_SIZE);
if (pad_sz == UL_PAGE_ALIGN_SIZE)
pad_sz = 0x0;
dev_dbg(bridge, "%s: pa %x, va %x, da %x\n", __func__, pa, va, da);
dev_dbg(bridge,
"shm0_end %x, dyn_ext %x, ext_end %x, seg0_sz %x seg1_sz %x\n",
s->shm0_end, s->dyn_ext, s->ext_end, seg0_sz, seg1_sz);
if ((seg0_sz + seg1_sz + pad_sz) > cfg_res->mem_length[1]) {
pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
__func__, cfg_res->mem_length[1],
seg0_sz + seg1_sz + pad_sz);
status = -ENOMEM;
goto free_symbol;
}
pa_curr = pa;
va_curr = s->dyn_ext * hio_mgr->word_size;
da_curr = va;
bytes = seg1_sz;
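/*
* Naming note for the two mapping loops below: va_curr tracks the DSP
* virtual address passed to brd_mem_map(), while da_curr tracks the
* MPU virtual (ioremapped) address; in the TLB loop they end up in
* eproc[].dsp_va and eproc[].gpp_va respectively.
*/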
/*
* Try to fit into TLB entries. If not possible, push them to page
* tables. It is quite possible that if sections are not on
* bigger page boundary, we may end up making several small pages.
* So, push them onto page tables, if that is the case.
*/
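/*
* Termination note: seg1_sz was rounded up to 4 KiB above and every
* page size is a 4 KiB multiple, so as long as pa and va_curr are at
* least 4 KiB aligned the smallest page always matches and 'bytes'
* reaches zero.
*/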
while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge,
"seg all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
all_bits, pa_curr, va_curr, bytes);
for (i = 0; i < 4; i++) {
if ((bytes >= page_size[i]) &&
((all_bits & (page_size[i] - 1)) == 0)) {
status = hio_mgr->intf_fxns->brd_mem_map(dc,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (status)
goto free_symbol;
pa_curr += page_size[i];
va_curr += page_size[i];
da_curr += page_size[i];
bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have
* reached an address aligned to a bigger page
* size.
*/
break;
}
}
}
pa_curr += pad_sz;
va_curr += pad_sz;
da_curr += pad_sz;
bytes = seg0_sz;
va_curr = da * hio_mgr->word_size;
eproc = kcalloc(BRDIOCTL_NUMOFMMUTLB, sizeof(*eproc), GFP_KERNEL);
if (!eproc) {
status = -ENOMEM;
goto free_symbol;
}
ndx = 0;
/* Configure the TLB entries for the next cacheable segment */
while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned.
*/
all_bits = pa_curr | va_curr;
dev_dbg(bridge,
"seg1 all_bits %x, pa_curr %x, va_curr %x, bytes %x\n",
all_bits, pa_curr, va_curr, bytes);
for (i = 0; i < 4; i++) {
if (bytes < page_size[i] ||
(all_bits & (page_size[i] - 1)) != 0)
continue;
if (ndx < MAX_LOCK_TLB_ENTRIES) {
/* This is the physical address written to DSP MMU */
eproc[ndx].gpp_pa = pa_curr;
/*
* This is the virtual uncached ioremapped
* address!!!
*/
eproc[ndx].gpp_va = da_curr;
eproc[ndx].dsp_va = va_curr / hio_mgr->word_size;
eproc[ndx].size = page_size[i];
eproc[ndx].endianism = HW_LITTLE_ENDIAN;
eproc[ndx].elem_size = HW_ELEM_SIZE16BIT;
eproc[ndx].mixed_mode = HW_MMU_CPUES;
dev_dbg(bridge, "%s: tlb pa %x va %x dsp_va %x sz %x\n",
__func__, eproc[ndx].gpp_pa,
eproc[ndx].gpp_va,
eproc[ndx].dsp_va * hio_mgr->word_size,
page_size[i]);
ndx++;
} else {
/* Out of locked TLB entries; use the page tables */
status = hio_mgr->intf_fxns->brd_mem_map(dc,
pa_curr, va_curr,
page_size[i], map_attrs,
NULL);
if (status)
goto free_eproc;
dev_dbg(bridge,
"PTE pa %x va %x dsp_va %x sz %x\n",
pa_curr, da_curr, va_curr,
page_size[i]);
}
pa_curr += page_size[i];
va_curr += page_size[i];
da_curr += page_size[i];
bytes -= page_size[i];
/*
* Don't try smaller sizes. Hopefully we have reached
* an address aligned to a bigger page size.
*/
break;
}
}
/*
* Copy remaining entries from CDB. All entries are 1 MB and
* should not conflict with shm entries on MPU or DSP side.
*/
for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
struct mgr_processorextinfo *ep = &hio_mgr->ext_proc_info;
u32 word_sz = hio_mgr->word_size;
if (ep->ty_tlb[i].gpp_phys == 0)
continue;
if ((ep->ty_tlb[i].gpp_phys > pa - 0x100000 &&
ep->ty_tlb[i].gpp_phys <= pa + seg0_sz) ||
(ep->ty_tlb[i].dsp_virt > da - 0x100000 / word_sz &&
ep->ty_tlb[i].dsp_virt <= da + seg0_sz / word_sz)) {
dev_dbg(bridge,
"err cdb%d pa %x da %x shm pa %x da %x sz %x\n",
i, ep->ty_tlb[i].gpp_phys,
ep->ty_tlb[i].dsp_virt, pa, da, seg0_sz);
status = -EPERM;
goto free_eproc;
}
if (ndx < MAX_LOCK_TLB_ENTRIES) {
eproc[ndx].dsp_va = ep->ty_tlb[i].dsp_virt;
eproc[ndx].gpp_pa = ep->ty_tlb[i].gpp_phys;
eproc[ndx].gpp_va = 0;
/* 1 MB */
eproc[ndx].size = 0x100000;
dev_dbg(bridge, "shm MMU entry pa %x da 0x%x\n",
eproc[ndx].gpp_pa, eproc[ndx].dsp_va);
ndx++;
} else {
/* No locked TLB entry left; map through the page tables */
status = hio_mgr->intf_fxns->brd_mem_map(dc,
ep->ty_tlb[i].gpp_phys,
ep->ty_tlb[i].dsp_virt,
0x100000, map_attrs, NULL);
if (status)
goto free_eproc;
}
}
/* Map the L4 peripherals */
i = 0;
while (l4_peripheral_table[i].phys_addr) {
status = hio_mgr->intf_fxns->brd_mem_map(dc,
l4_peripheral_table[i].phys_addr,
l4_peripheral_table[i].dsp_virt_addr,
HW_PAGE_SIZE4KB, map_attrs, NULL);
if (status)
goto free_eproc;
i++;
}
for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
eproc[i].dsp_va = 0;
eproc[i].gpp_pa = 0;
eproc[i].gpp_va = 0;
eproc[i].size = 0;
}
/*
* Set the shm physical address entry (grayed out in CDB file)
* to the virtual uncached ioremapped address of shm reserved
* on MPU.
*/
hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys =
(va + seg1_sz + pad_sz);
/*
* Need shm Phys addr. IO supports only one DSP for now:
* num_procs = 1.
*/
if (!hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys) {
status = -EFAULT;
goto free_eproc;
}
if (eproc[0].dsp_va > s->shm_base) {
status = -EPERM;
goto free_eproc;
}
/* shm_base may not be at ul_dsp_va address */
shm_base_offs = (s->shm_base - eproc[0].dsp_va) *
hio_mgr->word_size;
/*
* bridge_dev_ctrl() will set dev context dsp-mmu info. In
* bridge_brd_start() the MMU will be re-programed with MMU
* DSPVa-GPPPa pair info while DSP is in a known
* (reset) state.
*/
status = hio_mgr->intf_fxns->dev_cntrl(hio_mgr->bridge_context,
BRDIOCTL_SETMMUCONFIG, eproc);
if (status)
goto free_eproc;
s->shm_base = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
s->shm_base += shm_base_offs;
s->shm_base = (u32) MEM_LINEAR_ADDRESS((void *)s->shm_base,
mem_sz);
if (!s->shm_base) {
status = -EFAULT;
goto free_eproc;
}
/* Register SM */
status = register_shm_segs(hio_mgr, cod_man, eproc[0].gpp_pa);
if (status)
goto free_eproc;
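/*
* Carve up the ioremapped shared memory block. In increasing address
* order it holds the struct shm control header, the input and output
* channel buffers (each half of the remaining channel area), then the
* DSP-to-MPU msg_ctrl and its messages followed by the MPU-to-DSP
* msg_ctrl and its messages.
*/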
hio_mgr->shared_mem = (struct shm *)s->shm_base;
hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
hio_mgr->output = hio_mgr->input + (shm_sz -
sizeof(struct shm)) / 2;
hio_mgr->sm_buf_size = hio_mgr->output - hio_mgr->input;
/* Set up Shared memory addresses for messaging */
hio_mgr->msg_input_ctrl =
(struct msg_ctrl *)((u8 *) hio_mgr->shared_mem + shm_sz);
hio_mgr->msg_input =
(u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
hio_mgr->msg_output_ctrl =
(struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
msg_sz / 2);
hio_mgr->msg_output =
(u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
hmsg_mgr->max_msgs =
((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input) /
sizeof(struct msg_dspmsg);
dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
"output %p, msg_input_ctrl %p, msg_input %p, "
"msg_output_ctrl %p, msg_output %p\n",
(u8 *) hio_mgr->shared_mem, hio_mgr->input,
hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
hio_mgr->msg_output);
dev_dbg(bridge, "(proc) Mas msgs in shared memory: 0x%x\n",
hmsg_mgr->max_msgs);
memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
/* Get the start address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
&hio_mgr->trace_buffer_begin);
if (status)
goto free_eproc;
hio_mgr->gpp_read_pointer =
hio_mgr->trace_buffer_begin =
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_begin - da);
/* Get the end address of trace buffer */
status = cod_get_sym_value(cod_man, SYS_PUTCEND,
&hio_mgr->trace_buffer_end);
if (status)
goto free_eproc;
hio_mgr->trace_buffer_end =
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_end - da);
/* Get the current address of DSP write pointer */
status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
&hio_mgr->trace_buffer_current);
if (status)
goto free_eproc;
hio_mgr->trace_buffer_current =
(va + seg1_sz + pad_sz) +
(hio_mgr->trace_buffer_current - da);
/* Calculate the size of trace buffer */
kfree(hio_mgr->msg);
hio_mgr->msg = kmalloc(((hio_mgr->trace_buffer_end -
hio_mgr->trace_buffer_begin) *
hio_mgr->word_size) + 2, GFP_KERNEL);
if (!hio_mgr->msg) {
status = -ENOMEM;
goto free_eproc;
}
hio_mgr->dsp_va = da;
hio_mgr->gpp_va = (va + seg1_sz + pad_sz);
#endif
free_eproc:
kfree(eproc);
free_symbol:
kfree(s);
return status;
}
/*
* ======== io_buf_size ========
* Size of shared memory I/O channel.
*/
u32 io_buf_size(struct io_mgr *hio_mgr)
{
if (hio_mgr)
return hio_mgr->sm_buf_size;
else
return 0;
}
/*
* ======== io_cancel_chnl ========
* Cancel IO on a given PCPY channel.
*/
void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
{
struct shm *sm;
if (!hio_mgr)
return;
sm = hio_mgr->shared_mem;
/* Inform DSP that we have no more buffers on this channel */
set_chnl_free(sm, chnl);
sm_interrupt_dsp(hio_mgr->bridge_context, MBX_PCPY_CLASS);
}
/*
* ======== io_dispatch_pm ========
* Performs I/O dispatch on PM related messages from DSP
*/
static void io_dispatch_pm(struct io_mgr *pio_mgr)
{
int status;
u32 parg[2];
/* Perform Power message processing here */
parg[0] = pio_mgr->intr_val;
/* Send the command to the Bridge clk/pwr manager to handle */
if (parg[0] == MBX_PM_HIBERNATE_EN) {
dev_dbg(bridge, "PM: Hibernate command\n");
status = pio_mgr->intf_fxns->
dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_PWR_HIBERNATE, parg);
if (status)
pr_err("%s: hibernate cmd failed 0x%x\n",
__func__, status);
} else if (parg[0] == MBX_PM_OPP_REQ) {
parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
status = pio_mgr->intf_fxns->
dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_CONSTRAINT_REQUEST, parg);
if (status)
dev_dbg(bridge, "PM: Failed to set constraint "
"= 0x%x\n", parg[1]);
} else {
dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
parg[0]);
status = pio_mgr->intf_fxns->
dev_cntrl(pio_mgr->bridge_context,
BRDIOCTL_CLK_CTRL, parg);
if (status)
dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
"= 0x%x\n", *parg);
}
}
/*
* ======== io_dpc ========
* Deferred procedure call for shared memory channel driver ISR. Carries
* out the dispatch of I/O as a non-preemptible event. It can only be
* pre-empted by an ISR.
*/
void io_dpc(unsigned long ref_data)
{
struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
struct chnl_mgr *chnl_mgr_obj;
struct msg_mgr *msg_mgr_obj;
struct deh_mgr *hdeh_mgr;
u32 requested;
u32 serviced;
if (!pio_mgr)
goto func_end;
chnl_mgr_obj = pio_mgr->chnl_mgr;
dev_get_msg_mgr(pio_mgr->dev_obj, &msg_mgr_obj);
dev_get_deh_mgr(pio_mgr->dev_obj, &hdeh_mgr);
if (!chnl_mgr_obj)
goto func_end;
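/*
* dpc_req is advanced under dpc_lock by io_mbox_msg() and
* iosm_schedule(), while dpc_sched is written only here; requests
* that arrive while we drain the difference below re-schedule the
* tasklet, so none are lost.
*/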
requested = pio_mgr->dpc_req;
serviced = pio_mgr->dpc_sched;
if (serviced == requested)
goto func_end;
/* Process pending DPC's */
do {
/* Check value of interrupt reg to ensure it's a valid error */
if ((pio_mgr->intr_val > DEH_BASE) &&
(pio_mgr->intr_val < DEH_LIMIT)) {
/* Notify DSP/BIOS exception */
if (hdeh_mgr) {
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
print_dsp_debug_trace(pio_mgr);
#endif
bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
pio_mgr->intr_val);
}
}
/* Proc-copy channel dispatch */
input_chnl(pio_mgr, NULL, IO_SERVICE);
output_chnl(pio_mgr, NULL, IO_SERVICE);
#ifdef CHNL_MESSAGES
if (msg_mgr_obj) {
/* Perform I/O dispatch on message queues */
input_msg(pio_mgr, msg_mgr_obj);
output_msg(pio_mgr, msg_mgr_obj);
}
#endif
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
/* Notify DSP Trace message */
print_dsp_debug_trace(pio_mgr);
}
#endif
serviced++;
} while (serviced != requested);
pio_mgr->dpc_sched = requested;
func_end:
return;
}
/*
* ======== io_mbox_msg ========
* Main interrupt handler for the shared memory IO manager.
* Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
* schedules a DPC to dispatch I/O.
*/
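/*
* The 16-bit mailbox payload selects the action: MBX_PM_CLASS bits
* trigger PM processing, MBX_DEH_RESET is consumed here, and anything
* else increments dpc_req and schedules the DPC tasklet.
*/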
int io_mbox_msg(struct notifier_block *self, unsigned long len, void *msg)
{
struct io_mgr *pio_mgr;
struct dev_object *dev_obj;
unsigned long flags;
dev_obj = dev_get_first();
dev_get_io_mgr(dev_obj, &pio_mgr);
if (!pio_mgr)
return NOTIFY_BAD;
pio_mgr->intr_val = (u16)((u32)msg);
if (pio_mgr->intr_val & MBX_PM_CLASS)
io_dispatch_pm(pio_mgr);
if (pio_mgr->intr_val == MBX_DEH_RESET) {
pio_mgr->intr_val = 0;
} else {
spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
pio_mgr->dpc_req++;
spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
tasklet_schedule(&pio_mgr->dpc_tasklet);
}
return NOTIFY_OK;
}
/*
* ======== io_request_chnl ========
* Purpose:
* Request channel I/O from the DSP. Sets flags in shared memory, then
* interrupts the DSP.
*/
void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
u8 io_mode, u16 *mbx_val)
{
struct chnl_mgr *chnl_mgr_obj;
struct shm *sm;
if (!pchnl || !mbx_val)
goto func_end;
chnl_mgr_obj = io_manager->chnl_mgr;
sm = io_manager->shared_mem;
if (io_mode == IO_INPUT) {
/* Indicate to the DSP we have a buffer available for input */
set_chnl_busy(sm, pchnl->chnl_id);
*mbx_val = MBX_PCPY_CLASS;
} else if (io_mode == IO_OUTPUT) {
/*
* Record the fact that we have a buffer available for
* output.
*/
chnl_mgr_obj->output_mask |= (1 << pchnl->chnl_id);
}
func_end:
return;
}
/*
* ======== iosm_schedule ========
* Schedule DPC for IO.
*/
void iosm_schedule(struct io_mgr *io_manager)
{
unsigned long flags;
if (!io_manager)
return;
/* Increment count of DPC's pending. */
spin_lock_irqsave(&io_manager->dpc_lock, flags);
io_manager->dpc_req++;
spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
/* Schedule DPC */
tasklet_schedule(&io_manager->dpc_tasklet);
}
/*
* ======== find_ready_output ========
* Search for a host output channel which is ready to send. If this is
* called as a result of servicing the DPC, then implement a round
* robin search; otherwise, this was called by a client thread (via
* IO_Dispatch()), so just start searching from the current channel id.
*/
static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
struct chnl_object *pchnl, u32 mask)
{
u32 ret = OUTPUTNOTREADY;
u32 id, start_id;
u32 shift;
id = (pchnl != NULL) ? pchnl->chnl_id :
chnl_mgr_obj->last_output + 1;
id = (id == CHNL_MAXCHANNELS) ? 0 : id;
if (id >= CHNL_MAXCHANNELS)
goto func_end;
if (mask) {
shift = (1 << id);
start_id = id;
do {
if (mask & shift) {
ret = id;
if (pchnl == NULL)
chnl_mgr_obj->last_output = id;
break;
}
id = id + 1;
id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
shift = (1 << id);
} while (id != start_id);
}
func_end:
return ret;
}
/*
* ======== input_chnl ========
* Dispatch a buffer on an input channel.
*/
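/*
* Proc-copy input: the DSP fills the shared input buffer, sets
* input_full/input_id/input_size and raises a mailbox interrupt; after
* copying the data into the client's chirp we clear input_full and
* interrupt the DSP so it can reuse the buffer.
*/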
static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
u8 io_mode)
{
struct chnl_mgr *chnl_mgr_obj;
struct shm *sm;
u32 chnl_id;
u32 bytes;
struct chnl_irp *chnl_packet_obj = NULL;
u32 dw_arg;
bool clear_chnl = false;
bool notify_client = false;
sm = pio_mgr->shared_mem;
chnl_mgr_obj = pio_mgr->chnl_mgr;
/* Attempt to perform input */
if (!sm->input_full)
goto func_end;
bytes = sm->input_size * chnl_mgr_obj->word_size;
chnl_id = sm->input_id;
dw_arg = sm->arg;
if (chnl_id >= CHNL_MAXCHANNELS) {
/* Shouldn't be here: would indicate corrupted shm. */
goto func_end;
}
pchnl = chnl_mgr_obj->channels[chnl_id];
if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
if ((pchnl->state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
/* Get the I/O request, and attempt a transfer */
if (!list_empty(&pchnl->io_requests)) {
if (!pchnl->cio_reqs)
goto func_end;
chnl_packet_obj = list_first_entry(
&pchnl->io_requests,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
pchnl->cio_reqs--;
/*
* Ensure we don't overflow the client's
* buffer.
*/
bytes = min(bytes, chnl_packet_obj->byte_size);
memcpy(chnl_packet_obj->host_sys_buf,
pio_mgr->input, bytes);
pchnl->bytes_moved += bytes;
chnl_packet_obj->byte_size = bytes;
chnl_packet_obj->arg = dw_arg;
chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
if (bytes == 0) {
/*
* The DSP must not send EOS more than
* once on this channel; bail out if
* it does.
*/
if (pchnl->state & CHNL_STATEEOS)
goto func_end;
/*
* Zero bytes indicates EOS. Update
* IOC status for this chirp, and also
* the channel state.
*/
chnl_packet_obj->status |=
CHNL_IOCSTATEOS;
pchnl->state |= CHNL_STATEEOS;
/*
* Notify that end of stream has
* occurred.
*/
ntfy_notify(pchnl->ntfy_obj,
DSP_STREAMDONE);
}
/* Tell DSP if no more I/O buffers available */
if (list_empty(&pchnl->io_requests))
set_chnl_free(sm, pchnl->chnl_id);
clear_chnl = true;
notify_client = true;
} else {
/*
* Input full for this channel, but we have no
* buffers available. The channel must be
* "idling". Clear out the physical input
* channel.
*/
clear_chnl = true;
}
} else {
/* Input channel cancelled: clear input channel */
clear_chnl = true;
}
} else {
/* DPC fired after host closed channel: clear input channel */
clear_chnl = true;
}
if (clear_chnl) {
/* Indicate to the DSP we have read the input */
sm->input_full = 0;
sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
if (notify_client) {
/* Notify client with IO completion record */
notify_chnl_complete(pchnl, chnl_packet_obj);
}
func_end:
return;
}
/*
* ======== input_msg ========
* Copies messages from shared memory to the message queues.
*/
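/*
* The DSP clears msg_input_ctrl->buf_empty and sets 'size' to the
* number of messages it wrote; once all of them are copied out we set
* buf_empty and post_swi back to true and interrupt the DSP.
*/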
static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
u32 num_msgs;
u32 i;
u8 *msg_input;
struct msg_queue *msg_queue_obj;
struct msg_frame *pmsg;
struct msg_dspmsg msg;
struct msg_ctrl *msg_ctr_obj;
u32 input_empty;
u32 addr;
msg_ctr_obj = pio_mgr->msg_input_ctrl;
/* Get the number of input messages to be read */
input_empty = msg_ctr_obj->buf_empty;
num_msgs = msg_ctr_obj->size;
if (input_empty)
return;
msg_input = pio_mgr->msg_input;
for (i = 0; i < num_msgs; i++) {
/* Read the next message */
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.cmd);
msg.msg.cmd =
read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg1);
msg.msg.arg1 =
read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.arg2);
msg.msg.arg2 =
read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
msg.msgq_id =
read_ext32_bit_dsp_data(pio_mgr->bridge_context, addr);
msg_input += sizeof(struct msg_dspmsg);
/* Determine which queue to put the message in */
dev_dbg(bridge, "input msg: cmd=0x%x arg1=0x%x "
"arg2=0x%x msgq_id=0x%x\n", msg.msg.cmd,
msg.msg.arg1, msg.msg.arg2, msg.msgq_id);
/*
* Interrupt may occur before shared memory and message
* input locations have been set up. If all nodes were
* cleaned up, hmsg_mgr->max_msgs should be 0.
*/
list_for_each_entry(msg_queue_obj, &hmsg_mgr->queue_list,
list_elem) {
if (msg.msgq_id != msg_queue_obj->msgq_id)
continue;
/* Found it */
if (msg.msg.cmd == RMS_EXITACK) {
/*
* Call the node exit notification.
* The exit message does not get
* queued.
*/
(*hmsg_mgr->on_exit)(msg_queue_obj->arg,
msg.msg.arg1);
break;
}
/*
* Not an exit acknowledgement, queue
* the message.
*/
if (list_empty(&msg_queue_obj->msg_free_list)) {
/*
* No free frame to copy the
* message into.
*/
pr_err("%s: no free msg frames,"
" discarding msg\n",
__func__);
break;
}
pmsg = list_first_entry(&msg_queue_obj->msg_free_list,
struct msg_frame, list_elem);
list_del(&pmsg->list_elem);
pmsg->msg_data = msg;
list_add_tail(&pmsg->list_elem,
&msg_queue_obj->msg_used_list);
ntfy_notify(msg_queue_obj->ntfy_obj,
DSP_NODEMESSAGEREADY);
sync_set_event(msg_queue_obj->sync_event);
}
}
/* Set the post SWI flag */
if (num_msgs > 0) {
/* Tell the DSP we've read the messages */
msg_ctr_obj->buf_empty = true;
msg_ctr_obj->post_swi = true;
sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
}
/*
* ======== notify_chnl_complete ========
* Purpose:
* Signal the channel event, notifying the client that I/O has completed.
*/
static void notify_chnl_complete(struct chnl_object *pchnl,
struct chnl_irp *chnl_packet_obj)
{
bool signal_event;
if (!pchnl || !pchnl->sync_event || !chnl_packet_obj)
goto func_end;
/*
* Note: we signal the channel event only if the queue of IO
* completions is empty. If it is not empty, the event is sure to be
* signalled by the only IO completion list consumer:
* bridge_chnl_get_ioc().
*/
signal_event = list_empty(&pchnl->io_completions);
/* Enqueue the IO completion info for the client */
list_add_tail(&chnl_packet_obj->link, &pchnl->io_completions);
pchnl->cio_cs++;
if (pchnl->cio_cs > pchnl->chnl_packets)
goto func_end;
/* Signal the channel event (if not already set) that IO is complete */
if (signal_event)
sync_set_event(pchnl->sync_event);
/* Notify that IO is complete */
ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
func_end:
return;
}
/*
* ======== output_chnl ========
* Purpose:
* Dispatch a buffer on an output channel.
*/
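/*
* A channel can transfer only when the host has queued a buffer on it
* (chnl_mgr output_mask) and the DSP has advertised a free buffer
* (sm->dsp_free_mask); find_ready_output() intersects the two masks.
*/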
static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
u8 io_mode)
{
struct chnl_mgr *chnl_mgr_obj;
struct shm *sm;
u32 chnl_id;
struct chnl_irp *chnl_packet_obj;
u32 dw_dsp_f_mask;
chnl_mgr_obj = pio_mgr->chnl_mgr;
sm = pio_mgr->shared_mem;
/* Attempt to perform output */
if (sm->output_full)
goto func_end;
if (pchnl && (pchnl->state & ~CHNL_STATEEOS) != CHNL_STATEREADY)
goto func_end;
/* Look to see if both a PC and DSP output channel are ready */
dw_dsp_f_mask = sm->dsp_free_mask;
chnl_id =
find_ready_output(chnl_mgr_obj, pchnl,
(chnl_mgr_obj->output_mask & dw_dsp_f_mask));
if (chnl_id == OUTPUTNOTREADY)
goto func_end;
pchnl = chnl_mgr_obj->channels[chnl_id];
if (!pchnl || list_empty(&pchnl->io_requests)) {
/* Shouldn't get here */
goto func_end;
}
if (!pchnl->cio_reqs)
goto func_end;
/* Get the I/O request, and attempt a transfer */
chnl_packet_obj = list_first_entry(&pchnl->io_requests,
struct chnl_irp, link);
list_del(&chnl_packet_obj->link);
pchnl->cio_reqs--;
/* Record fact that no more I/O buffers available */
if (list_empty(&pchnl->io_requests))
chnl_mgr_obj->output_mask &= ~(1 << chnl_id);
/* Transfer buffer to DSP side */
chnl_packet_obj->byte_size = min(pio_mgr->sm_buf_size,
chnl_packet_obj->byte_size);
memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
chnl_packet_obj->byte_size);
pchnl->bytes_moved += chnl_packet_obj->byte_size;
/* Write all 32 bits of arg */
sm->arg = chnl_packet_obj->arg;
#if _CHNL_WORDSIZE == 2
/* Access can be different SM access word size (e.g. 16/32 bit words) */
sm->output_id = (u16) chnl_id;
sm->output_size = (u16) (chnl_packet_obj->byte_size +
chnl_mgr_obj->word_size - 1) /
(u16) chnl_mgr_obj->word_size;
#else
sm->output_id = chnl_id;
sm->output_size = (chnl_packet_obj->byte_size +
chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
#endif
sm->output_full = 1;
/* Indicate to the DSP we have written the output */
sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
/* Notify client with IO completion record (keep EOS) */
chnl_packet_obj->status &= CHNL_IOCSTATEOS;
notify_chnl_complete(pchnl, chnl_packet_obj);
/* Notify if stream is done. */
if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
func_end:
return;
}
/*
* ======== output_msg ========
* Copies messages from the message queues to the shared memory.
*/
static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
{
u32 num_msgs = 0;
u32 i;
struct msg_dspmsg *msg_output;
struct msg_frame *pmsg;
struct msg_ctrl *msg_ctr_obj;
u32 val;
u32 addr;
msg_ctr_obj = pio_mgr->msg_output_ctrl;
/* Check if output has been cleared */
if (!msg_ctr_obj->buf_empty)
return;
num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
msg_output = (struct msg_dspmsg *) pio_mgr->msg_output;
/* Copy num_msgs messages into shared memory */
for (i = 0; i < num_msgs; i++) {
if (list_empty(&hmsg_mgr->msg_used_list))
continue;
pmsg = list_first_entry(&hmsg_mgr->msg_used_list,
struct msg_frame, list_elem);
list_del(&pmsg->list_elem);
val = (pmsg->msg_data).msgq_id;
addr = (u32) &msg_output->msgq_id;
write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.cmd;
addr = (u32) &msg_output->msg.cmd;
write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.arg1;
addr = (u32) &msg_output->msg.arg1;
write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
val = (pmsg->msg_data).msg.arg2;
addr = (u32) &msg_output->msg.arg2;
write_ext32_bit_dsp_data(pio_mgr->bridge_context, addr, val);
msg_output++;
list_add_tail(&pmsg->list_elem, &hmsg_mgr->msg_free_list);
sync_set_event(hmsg_mgr->sync_event);
}
if (num_msgs > 0) {
hmsg_mgr->msgs_pending -= num_msgs;
#if _CHNL_WORDSIZE == 2
/*
* Access can be different SM access word size
* (e.g. 16/32 bit words)
*/
msg_ctr_obj->size = (u16) num_msgs;
#else
msg_ctr_obj->size = num_msgs;
#endif
msg_ctr_obj->buf_empty = false;
/* Set the post SWI flag */
msg_ctr_obj->post_swi = true;
/* Tell the DSP we have written the output. */
sm_interrupt_dsp(pio_mgr->bridge_context, MBX_PCPY_CLASS);
}
}
/*
* ======== register_shm_segs ========
* Purpose:
* Registers GPP SM segment with CMM.
*/
static int register_shm_segs(struct io_mgr *hio_mgr,
struct cod_manager *cod_man,
u32 dw_gpp_base_pa)
{
int status = 0;
u32 ul_shm0_base = 0;
u32 shm0_end = 0;
u32 ul_shm0_rsrvd_start = 0;
u32 ul_rsrvd_size = 0;
u32 ul_gpp_phys;
u32 ul_dsp_virt;
u32 ul_shm_seg_id0 = 0;
u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
/*
* Read address and size info for first SM region.
* Get start of 1st SM Heap region.
*/
status =
cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
if (ul_shm0_base == 0) {
status = -EPERM;
goto func_end;
}
/* Get end of 1st SM Heap region */
if (!status) {
/* Get start and length of message part of shared memory */
status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
&shm0_end);
if (shm0_end == 0) {
status = -EPERM;
goto func_end;
}
}
/* Start of Gpp reserved region */
if (!status) {
/* Get start and length of message part of shared memory */
status =
cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
&ul_shm0_rsrvd_start);
if (ul_shm0_rsrvd_start == 0) {
status = -EPERM;
goto func_end;
}
}
/* Register with CMM */
if (!status) {
status = dev_get_cmm_mgr(hio_mgr->dev_obj, &hio_mgr->cmm_mgr);
if (!status) {
status = cmm_un_register_gppsm_seg(hio_mgr->cmm_mgr,
CMM_ALLSEGMENTS);
}
}
/* Register new SM region(s) */
if (!status && (shm0_end - ul_shm0_base) > 0) {
/* Calc size (bytes) of SM the GPP can alloc from */
ul_rsrvd_size =
(shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
if (ul_rsrvd_size <= 0) {
status = -EPERM;
goto func_end;
}
/* Calc size of SM DSP can alloc from */
ul_dsp_size =
(ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
if (ul_dsp_size <= 0) {
status = -EPERM;
goto func_end;
}
/* First TLB entry reserved for Bridge SM use. */
ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].gpp_phys;
/* Get size in bytes */
ul_dsp_virt =
hio_mgr->ext_proc_info.ty_tlb[0].dsp_virt *
hio_mgr->word_size;
/*
* Calc byte offset used to convert GPP phys <-> DSP byte
* address.
*/
if (dw_gpp_base_pa > ul_dsp_virt)
dw_offset = dw_gpp_base_pa - ul_dsp_virt;
else
dw_offset = ul_dsp_virt - dw_gpp_base_pa;
if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
status = -EPERM;
goto func_end;
}
/*
* Calc Gpp phys base of SM region.
* This is actually uncached kernel virtual address.
*/
dw_gpp_base_va =
ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
ul_dsp_virt;
/*
* Calc Gpp phys base of SM region.
* This is the physical address.
*/
dw_gpp_base_pa =
dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
ul_dsp_virt;
/* Register SM Segment 0. */
status =
cmm_register_gppsm_seg(hio_mgr->cmm_mgr, dw_gpp_base_pa,
ul_rsrvd_size, dw_offset,
(dw_gpp_base_pa >
ul_dsp_virt) ? CMM_ADDTODSPPA :
CMM_SUBFROMDSPPA,
(u32) (ul_shm0_base *
hio_mgr->word_size),
ul_dsp_size, &ul_shm_seg_id0,
dw_gpp_base_va);
/* First SM region is seg_id = 1 */
if (ul_shm_seg_id0 != 1)
status = -EPERM;
}
func_end:
return status;
}
/* ZCPY IO routines. */
/*
* ======== io_sh_msetting ========
* Sets the requested shm setting.
*/
int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
u32 i;
struct dspbridge_platform_data *pdata =
omap_dspbridge_dev->dev.platform_data;
switch (desc) {
case SHM_CURROPP:
/* Update the shared memory with requested OPP information */
if (pargs != NULL)
hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
*(u32 *) pargs;
else
return -EPERM;
break;
case SHM_OPPINFO:
/*
* Update the shared memory with the voltage, frequency,
* min and max frequency values for an OPP.
*/
for (i = 0; i <= dsp_max_opps; i++) {
hio_mgr->shared_mem->opp_table_struct.opp_point[i].
voltage = vdd1_dsp_freq[i][0];
dev_dbg(bridge, "OPP-shm: voltage: %d\n",
vdd1_dsp_freq[i][0]);
hio_mgr->shared_mem->opp_table_struct.
opp_point[i].frequency = vdd1_dsp_freq[i][1];
dev_dbg(bridge, "OPP-shm: frequency: %d\n",
vdd1_dsp_freq[i][1]);
hio_mgr->shared_mem->opp_table_struct.opp_point[i].
min_freq = vdd1_dsp_freq[i][2];
dev_dbg(bridge, "OPP-shm: min freq: %d\n",
vdd1_dsp_freq[i][2]);
hio_mgr->shared_mem->opp_table_struct.opp_point[i].
max_freq = vdd1_dsp_freq[i][3];
dev_dbg(bridge, "OPP-shm: max freq: %d\n",
vdd1_dsp_freq[i][3]);
}
hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
dsp_max_opps;
dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
/* Update the current OPP number */
if (pdata->dsp_get_opp)
i = (*pdata->dsp_get_opp) ();
hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
break;
case SHM_GETOPP:
/* Get the OPP that DSP has requested */
*(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
break;
default:
break;
}
#endif
return 0;
}
/*
* ======== bridge_io_get_proc_load ========
* Gets the Processor's Load information
*/
int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
struct dsp_procloadstat *proc_lstat)
{
if (!hio_mgr->shared_mem)
return -EFAULT;
proc_lstat->curr_load =
hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
proc_lstat->predicted_load =
hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
proc_lstat->curr_dsp_freq =
hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
proc_lstat->predicted_freq =
hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
"Pred Freq = %d\n", proc_lstat->curr_load,
proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
proc_lstat->predicted_freq);
return 0;
}
#if defined(CONFIG_TIDSPBRIDGE_BACKTRACE)
void print_dsp_debug_trace(struct io_mgr *hio_mgr)
{
u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
while (true) {
/* Get the DSP current pointer */
ul_gpp_cur_pointer =
*(u32 *) (hio_mgr->trace_buffer_current);
ul_gpp_cur_pointer =
hio_mgr->gpp_va + (ul_gpp_cur_pointer -
hio_mgr->dsp_va);
/* No new debug messages available yet */
if (ul_gpp_cur_pointer == hio_mgr->gpp_read_pointer) {
break;
} else if (ul_gpp_cur_pointer > hio_mgr->gpp_read_pointer) {
/* Continuous data */
ul_new_message_length =
ul_gpp_cur_pointer - hio_mgr->gpp_read_pointer;
memcpy(hio_mgr->msg,
(char *)hio_mgr->gpp_read_pointer,
ul_new_message_length);
hio_mgr->msg[ul_new_message_length] = '\0';
/*
* Advance the GPP trace pointer to DSP current
* pointer.
*/
hio_mgr->gpp_read_pointer += ul_new_message_length;
/* Print the trace messages */
pr_info("DSPTrace: %s\n", hio_mgr->msg);
} else if (ul_gpp_cur_pointer < hio_mgr->gpp_read_pointer) {
/* Handle trace buffer wraparound */
memcpy(hio_mgr->msg,
(char *)hio_mgr->gpp_read_pointer,
hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer);
ul_new_message_length =
ul_gpp_cur_pointer - hio_mgr->trace_buffer_begin;
memcpy(&hio_mgr->msg[hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer],
(char *)hio_mgr->trace_buffer_begin,
ul_new_message_length);
hio_mgr->msg[hio_mgr->trace_buffer_end -
hio_mgr->gpp_read_pointer +
ul_new_message_length] = '\0';
/*
* Advance the GPP trace pointer to DSP current
* pointer.
*/
hio_mgr->gpp_read_pointer =
hio_mgr->trace_buffer_begin +
ul_new_message_length;
/* Print the trace messages */
pr_info("DSPTrace: %s\n", hio_mgr->msg);
}
}
}
#endif
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
* ======== print_dsp_trace_buffer ========
* Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
* Parameters:
* hbridge_context: Handle to the bridge device context.
* Returns:
* 0: Success.
* -ENOMEM: Unable to allocate memory.
* Requires:
* hbridge_context must be valid. Checked in bridge_deh_notify.
*/
int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
{
int status = 0;
struct cod_manager *cod_mgr;
u32 ul_trace_end;
u32 ul_trace_begin;
u32 trace_cur_pos;
u32 ul_num_bytes = 0;
char *psz_buf;
char *str_beg;
char *trace_end;
char *buf_end;
char *new_line;
struct bridge_dev_context *pbridge_context = hbridge_context;
struct bridge_drv_interface *intf_fxns;
struct dev_object *dev_obj = (struct dev_object *)
pbridge_context->dev_obj;
status = dev_get_cod_mgr(dev_obj, &cod_mgr);
if (cod_mgr) {
/* Look for SYS_PUTCBEG/SYS_PUTCEND */
status =
cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
} else {
status = -EFAULT;
}
if (!status)
status =
cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
if (!status)
/* trace_cur_pos will hold the address of a DSP pointer */
status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
&trace_cur_pos);
if (status)
goto func_end;
ul_num_bytes = (ul_trace_end - ul_trace_begin);
status = dev_get_intf_fxns(dev_obj, &intf_fxns);
if (status)
goto func_end;
psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
if (psz_buf != NULL) {
/* Read trace buffer data */
status = (*intf_fxns->brd_read)(pbridge_context,
(u8 *)psz_buf, (u32)ul_trace_begin,
ul_num_bytes, 0);
if (status)
goto func_end;
/* Pack and do newline conversion */
pr_debug("PrintDspTraceBuffer: "
"before pack and unpack.\n");
pr_debug("%s: DSP Trace Buffer Begin:\n"
"=======================\n%s\n",
__func__, psz_buf);
/* Read the value at the DSP address in trace_cur_pos. */
status = (*intf_fxns->brd_read)(pbridge_context,
(u8 *)&trace_cur_pos, (u32)trace_cur_pos,
4, 0);
if (status)
goto func_end;
/* Pack and do newline conversion */
pr_info("DSP Trace Buffer Begin:\n"
"=======================\n%s\n",
psz_buf);
/* convert to offset */
trace_cur_pos = trace_cur_pos - ul_trace_begin;
if (ul_num_bytes) {
/*
* The buffer is not full, find the end of the
* data -- buf_end will be >= pszBuf after
* while.
*/
buf_end = &psz_buf[ul_num_bytes + 1];
/* DSP print position */
trace_end = &psz_buf[trace_cur_pos];
/*
* Search buffer for a new_line and replace it
* with '\0', then print as string.
* Continue until end of buffer is reached.
*/
str_beg = trace_end;
ul_num_bytes = buf_end - str_beg;
while (str_beg < buf_end) {
new_line = strnchr(str_beg, ul_num_bytes,
'\n');
if (new_line && new_line < buf_end) {
*new_line = 0;
pr_debug("%s\n", str_beg);
str_beg = ++new_line;
ul_num_bytes = buf_end - str_beg;
} else {
/*
* Assume buffer empty if it contains
* a zero
*/
if (*str_beg != '\0') {
str_beg[ul_num_bytes] = 0;
pr_debug("%s\n", str_beg);
}
str_beg = buf_end;
ul_num_bytes = 0;
}
}
/*
* Search buffer for a new_line and replace it
* with '\0', then print as string.
* Continue until buffer is exhausted.
*/
str_beg = psz_buf;
ul_num_bytes = trace_end - str_beg;
while (str_beg < trace_end) {
new_line = strnchr(str_beg, ul_num_bytes, '\n');
if (new_line != NULL && new_line < trace_end) {
*new_line = 0;
pr_debug("%s\n", str_beg);
str_beg = ++new_line;
ul_num_bytes = trace_end - str_beg;
} else {
/*
* Assume buffer empty if it contains
* a zero
*/
if (*str_beg != '\0') {
str_beg[ul_num_bytes] = 0;
pr_debug("%s\n", str_beg);
}
str_beg = trace_end;
ul_num_bytes = 0;
}
}
}
pr_info("\n=======================\n"
"DSP Trace Buffer End:\n");
kfree(psz_buf);
} else {
status = -ENOMEM;
}
func_end:
if (status)
dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
return status;
}
/**
* dump_dsp_stack() - This function dumps the data on the DSP stack.
* @bridge_context: Bridge driver's device context pointer.
*
*/
int dump_dsp_stack(struct bridge_dev_context *bridge_context)
{
int status = 0;
struct cod_manager *code_mgr;
struct node_mgr *node_mgr;
u32 trace_begin;
char name[256];
struct {
u32 head[2];
u32 size;
} mmu_fault_dbg_info;
u32 *buffer;
u32 *buffer_beg;
u32 *buffer_end;
u32 exc_type;
u32 dyn_ext_base;
u32 i;
u32 offset_output;
u32 total_size;
u32 poll_cnt;
const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
"IRP", "NRP", "AMR", "SSR",
"ILC", "RILC", "IER", "CSR"};
const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
struct bridge_drv_interface *intf_fxns;
struct dev_object *dev_object = bridge_context->dev_obj;
status = dev_get_cod_mgr(dev_object, &code_mgr);
if (!code_mgr) {
pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
status = -EFAULT;
}
if (!status) {
status = dev_get_node_manager(dev_object, &node_mgr);
if (!node_mgr) {
pr_debug("%s: Failed on dev_get_node_manager.\n",
__func__);
status = -EFAULT;
}
}
if (!status) {
/* Look for SYS_PUTCBEG/SYS_PUTCEND: */
status =
cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
pr_debug("%s: trace_begin Value 0x%x\n",
__func__, trace_begin);
if (status)
pr_debug("%s: Failed on cod_get_sym_value.\n",
__func__);
}
if (!status)
status = dev_get_intf_fxns(dev_object, &intf_fxns);
/*
* Check for the "magic number" in the trace buffer. If it has
* yet to appear then poll the trace buffer to wait for it. Its
* appearance signals that the DSP has finished dumping its state.
*/
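/*
* Dump layout (32-bit words), as parsed below: the two magic words
* and a size word, exception/execution info (context type, task
* handle, stack pointers and sizes), the A and B register files, the
* control registers named in dsp_regs[], then the raw stack words.
*/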
mmu_fault_dbg_info.head[0] = 0;
mmu_fault_dbg_info.head[1] = 0;
if (!status) {
poll_cnt = 0;
while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
poll_cnt < POLL_MAX) {
/* Read DSP dump size from the DSP trace buffer... */
status = (*intf_fxns->brd_read)(bridge_context,
(u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
sizeof(mmu_fault_dbg_info), 0);
if (status)
break;
poll_cnt++;
}
if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
status = -ETIME;
pr_err("%s:No DSP MMU-Fault information available.\n",
__func__);
}
}
if (!status) {
total_size = mmu_fault_dbg_info.size;
/* Limit the size in case DSP went crazy */
if (total_size > MAX_MMU_DBGBUFF)
total_size = MAX_MMU_DBGBUFF;
buffer = kzalloc(total_size, GFP_ATOMIC);
if (!buffer) {
status = -ENOMEM;
pr_debug("%s: Failed to "
"allocate stack dump buffer.\n", __func__);
goto func_end;
}
buffer_beg = buffer;
buffer_end = buffer + total_size / 4;
/* Read bytes from the DSP trace buffer... */
status = (*intf_fxns->brd_read)(bridge_context,
(u8 *)buffer, (u32)trace_begin,
total_size, 0);
if (status) {
pr_debug("%s: Failed to Read Trace Buffer.\n",
__func__);
goto func_end;
}
pr_err("\nAproximate Crash Position:\n"
"--------------------------\n");
exc_type = buffer[3];
if (!exc_type)
i = buffer[79]; /* IRP */
else
i = buffer[80]; /* NRP */
status =
cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
if (status) {
status = -EFAULT;
goto func_end;
}
if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
0x1000, &offset_output, name) == 0))
pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
i - offset_output);
else
pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
buffer += 4;
pr_err("\nExecution Info:\n"
"---------------\n");
if (*buffer < ARRAY_SIZE(exec_ctxt)) {
pr_err("Execution context \t%s\n",
exec_ctxt[*buffer++]);
} else {
pr_err("Execution context corrupt\n");
kfree(buffer_beg);
return -EFAULT;
}
pr_err("Task Handle\t\t0x%x\n", *buffer++);
pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
pr_err("Stack Top\t\t0x%x\n", *buffer++);
pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
pr_err("Stack Size\t\t0x%x\n", *buffer++);
pr_err("Stack Size In Use\t0x%x\n", *buffer++);
pr_err("\nCPU Registers\n"
"---------------\n");
for (i = 0; i < 32; i++) {
if (i == 4 || i == 6 || i == 8)
pr_err("A%d 0x%-8x [Function Argument %d]\n",
i, *buffer++, i-3);
else if (i == 15)
pr_err("A15 0x%-8x [Frame Pointer]\n",
*buffer++);
else
pr_err("A%d 0x%x\n", i, *buffer++);
}
pr_err("\nB0 0x%x\n", *buffer++);
pr_err("B1 0x%x\n", *buffer++);
pr_err("B2 0x%x\n", *buffer++);
if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
*buffer, 0x1000, &offset_output, name) == 0))
pr_err("B3 0x%-8x [Function Return Pointer:"
" \"%s\" + 0x%x]\n", *buffer, name,
*buffer - offset_output);
else
pr_err("B3 0x%-8x [Function Return Pointer:"
"Unable to match to a symbol.]\n", *buffer);
buffer++;
for (i = 4; i < 32; i++) {
if (i == 4 || i == 6 || i == 8)
pr_err("B%d 0x%-8x [Function Argument %d]\n",
i, *buffer++, i-2);
else if (i == 14)
pr_err("B14 0x%-8x [Data Page Pointer]\n",
*buffer++);
else
pr_err("B%d 0x%x\n", i, *buffer++);
}
pr_err("\n");
for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
pr_err("\nStack:\n"
"------\n");
for (i = 0; buffer < buffer_end; i++, buffer++) {
if ((*buffer > dyn_ext_base) && (
node_find_addr(node_mgr, *buffer, 0x600,
&offset_output, name) == 0))
pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
i, *buffer, name,
*buffer - offset_output);
else
pr_err("[%d] 0x%x\n", i, *buffer);
}
kfree(buffer_beg);
}
func_end:
return status;
}
/**
* dump_dl_modules() - This function dumps the _DLModules list loaded on the DSP side
* @bridge_context: Bridge driver's device context pointer.
*
*/
void dump_dl_modules(struct bridge_dev_context *bridge_context)
{
struct cod_manager *code_mgr;
struct bridge_drv_interface *intf_fxns;
struct bridge_dev_context *bridge_ctxt = bridge_context;
struct dev_object *dev_object = bridge_ctxt->dev_obj;
struct modules_header modules_hdr;
struct dll_module *module_struct = NULL;
u32 module_dsp_addr;
u32 module_size;
u32 module_struct_size = 0;
u32 sect_ndx;
char *sect_str;
int status = 0;
status = dev_get_intf_fxns(dev_object, &intf_fxns);
if (status) {
pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
goto func_end;
}
status = dev_get_cod_mgr(dev_object, &code_mgr);
if (!code_mgr) {
pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
status = -EFAULT;
goto func_end;
}
/* Lookup the address of the modules_header structure */
status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
if (status) {
pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
__func__);
goto func_end;
}
pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
/* Copy the modules_header structure from DSP memory. */
status = (*intf_fxns->brd_read)(bridge_context, (u8 *) &modules_hdr,
(u32) module_dsp_addr, sizeof(modules_hdr), 0);
if (status) {
pr_debug("%s: Failed failed to read modules header.\n",
__func__);
goto func_end;
}
module_dsp_addr = modules_hdr.first_module;
module_size = modules_hdr.first_module_size;
pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
module_size);
pr_err("\nDynamically Loaded Modules:\n"
"---------------------------\n");
/* For each dll_module structure in the list... */
while (module_size) {
/*
* Allocate/re-allocate memory to hold the dll_module
* structure. The memory is re-allocated only if the existing
* allocation is too small.
*/
if (module_size > module_struct_size) {
kfree(module_struct);
module_struct = kzalloc(module_size + 128, GFP_ATOMIC);
if (!module_struct)
goto func_end;
module_struct_size = module_size + 128;
pr_debug("%s: allocated module struct %p %d\n",
__func__, module_struct, module_struct_size);
}
/* Copy the dll_module structure from DSP memory */
status = (*intf_fxns->brd_read)(bridge_context,
(u8 *)module_struct, module_dsp_addr, module_size, 0);
if (status) {
pr_debug(
"%s: Failed to read dll_module struct for 0x%x.\n",
__func__, module_dsp_addr);
break;
}
/* Update info regarding the _next_ module in the list. */
module_dsp_addr = module_struct->next_module;
module_size = module_struct->next_module_size;
pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
__func__, module_dsp_addr, module_size,
module_struct->num_sects);
/*
* The section name strings start immediately following
* the array of dll_sect structures.
*/
sect_str = (char *) &module_struct->
sects[module_struct->num_sects];
pr_err("%s\n", sect_str);
/*
* Advance to the first section name string.
* Each string follows the one before.
*/
sect_str += strlen(sect_str) + 1;
/* Access each dll_sect structure and its name string. */
for (sect_ndx = 0;
sect_ndx < module_struct->num_sects; sect_ndx++) {
pr_err(" Section: 0x%x ",
module_struct->sects[sect_ndx].sect_load_adr);
if (((u32) sect_str - (u32) module_struct) <
module_struct_size) {
pr_err("%s\n", sect_str);
/* Each string follows the one before. */
sect_str += strlen(sect_str)+1;
} else {
pr_err("<string error>\n");
pr_debug("%s: section name sting address "
"is invalid %p\n", __func__, sect_str);
}
}
}
func_end:
kfree(module_struct);
}
#endif