/* ==========================================================================
* $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $
* $Revision: #189 $
* $Date: 2011/10/24 $
* $Change: 1871160 $
*
* Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
* "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
* otherwise expressly agreed to in writing between Synopsys and you.
*
* The Software IS NOT an item of Licensed Software or Licensed Product under
* any End User Software License Agreement or Agreement for Licensed Product
* with Synopsys or any supplement thereto. You are permitted to use and
* redistribute this Software in source and binary forms, with or without
* modification, provided that redistributions of source code must retain this
* notice. You may not view, use, disclose, copy or distribute this file or
* any information contained herein except pursuant to this license grant from
* Synopsys. If you do not agree with this notice, including the disclaimer
* below, then you are not authorized to use the Software.
*
* THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
* ========================================================================== */
/** @file
*
* The Core Interface Layer provides basic services for accessing and
* managing the DWC_otg hardware. These services are used by both the
* Host Controller Driver and the Peripheral Controller Driver.
*
* The CIL manages the memory map for the core so that the HCD and PCD
* don't have to do this separately. It also handles basic tasks like
* reading/writing the registers and data FIFOs in the controller.
* Some of the data access functions provide encapsulation of several
* operations required to perform a task, such as writing multiple
* registers to start a transfer. Finally, the CIL performs basic
* services that are not specific to either the host or device modes
* of operation. These services include management of the OTG Host
* Negotiation Protocol (HNP) and Session Request Protocol (SRP). A
* Diagnostic API is also provided to allow testing of the controller
* hardware.
*
* The Core Interface Layer has the following requirements:
* - Provides basic controller operations.
* - Minimal use of OS services.
* - The OS services used will be abstracted by using inline functions
* or macros.
*
*/
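/*
* Illustrative only: a minimal sketch (not part of this driver) of the
* order in which the HCD/PCD glue is expected to use the CIL entry points
* defined in this file. The register base address would come from the
* platform/bus code; "example_bring_up" is a hypothetical name.
*
*	static dwc_otg_core_if_t *example_bring_up(const uint32_t *regs)
*	{
*		dwc_otg_core_if_t *core_if = dwc_otg_cil_init(regs);
*		if (!core_if)
*			return NULL;
*		dwc_otg_core_init(core_if);	/* common HW initialization */
*		dwc_otg_enable_global_interrupts(core_if); /* unmask GAHBCFG global interrupt */
*		return core_if;	/* released later with dwc_otg_cil_remove() */
*	}
*/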
#include "dwc_os.h"
#include "dwc_otg_regs.h"
#include "dwc_otg_cil.h"
static int dwc_otg_setup_params(dwc_otg_core_if_t * core_if);
/**
* This function is called to initialize the DWC_otg CSR data
* structures. The register addresses in the device and host
* structures are initialized from the base address supplied by the
* caller. The calling function must make the OS calls to get the
* base address of the DWC_otg controller registers. The core_params
* argument holds the parameters that specify how the core should be
* configured.
*
* @param reg_base_addr Base address of DWC_otg core registers
*
*/
dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t * reg_base_addr)
{
dwc_otg_core_if_t *core_if = 0;
dwc_otg_dev_if_t *dev_if = 0;
dwc_otg_host_if_t *host_if = 0;
uint8_t *reg_base = (uint8_t *) reg_base_addr;
int i = 0;
DWC_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, reg_base_addr);
core_if = DWC_ALLOC(sizeof(dwc_otg_core_if_t));
if (core_if == NULL) {
DWC_DEBUGPL(DBG_CIL,
"Allocation of dwc_otg_core_if_t failed\n");
return 0;
}
core_if->core_global_regs = (dwc_otg_core_global_regs_t *) reg_base;
/*
* Allocate the Device Mode structures.
*/
dev_if = DWC_ALLOC(sizeof(dwc_otg_dev_if_t));
if (dev_if == NULL) {
DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n");
DWC_FREE(core_if);
return 0;
}
dev_if->dev_global_regs =
(dwc_otg_device_global_regs_t *) (reg_base +
DWC_DEV_GLOBAL_REG_OFFSET);
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
dev_if->in_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *)
(reg_base + DWC_DEV_IN_EP_REG_OFFSET +
(i * DWC_EP_REG_OFFSET));
dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *)
(reg_base + DWC_DEV_OUT_EP_REG_OFFSET +
(i * DWC_EP_REG_OFFSET));
DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n",
i, &dev_if->in_ep_regs[i]->diepctl);
DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n",
i, &dev_if->out_ep_regs[i]->doepctl);
}
dev_if->speed = 0; // unknown
core_if->dev_if = dev_if;
/*
* Allocate the Host Mode structures.
*/
host_if = DWC_ALLOC(sizeof(dwc_otg_host_if_t));
if (host_if == NULL) {
DWC_DEBUGPL(DBG_CIL,
"Allocation of dwc_otg_host_if_t failed\n");
DWC_FREE(dev_if);
DWC_FREE(core_if);
return 0;
}
host_if->host_global_regs = (dwc_otg_host_global_regs_t *)
(reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET);
host_if->hprt0 =
(uint32_t *) (reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET);
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
host_if->hc_regs[i] = (dwc_otg_hc_regs_t *)
(reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET +
(i * DWC_OTG_CHAN_REGS_OFFSET));
DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n",
i, &host_if->hc_regs[i]->hcchar);
}
host_if->num_host_channels = MAX_EPS_CHANNELS;
core_if->host_if = host_if;
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
core_if->data_fifo[i] =
(uint32_t *) (reg_base + DWC_OTG_DATA_FIFO_OFFSET +
(i * DWC_OTG_DATA_FIFO_SIZE));
DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08lx\n",
i, (unsigned long)core_if->data_fifo[i]);
}
core_if->pcgcctl = (uint32_t *) (reg_base + DWC_OTG_PCGCCTL_OFFSET);
/* Initialize lx_state to the L3 (disconnected) state */
core_if->lx_state = DWC_OTG_L3;
/*
* Store the contents of the hardware configuration registers here for
* easy access later.
*/
core_if->hwcfg1.d32 =
DWC_READ_REG32(&core_if->core_global_regs->ghwcfg1);
core_if->hwcfg2.d32 =
DWC_READ_REG32(&core_if->core_global_regs->ghwcfg2);
core_if->hwcfg3.d32 =
DWC_READ_REG32(&core_if->core_global_regs->ghwcfg3);
core_if->hwcfg4.d32 =
DWC_READ_REG32(&core_if->core_global_regs->ghwcfg4);
/* Force host mode to read the exact power-on value of HPTXFSIZ */
{
gusbcfg_data_t gusbcfg = {.d32 = 0 };
gusbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
gusbcfg.b.force_host_mode = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
dwc_mdelay(100);
core_if->hptxfsiz.d32 =
DWC_READ_REG32(&core_if->core_global_regs->hptxfsiz);
gusbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
gusbcfg.b.force_host_mode = 0;
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
dwc_mdelay(100);
}
DWC_DEBUGPL(DBG_CILV, "hwcfg1=%08x\n", core_if->hwcfg1.d32);
DWC_DEBUGPL(DBG_CILV, "hwcfg2=%08x\n", core_if->hwcfg2.d32);
DWC_DEBUGPL(DBG_CILV, "hwcfg3=%08x\n", core_if->hwcfg3.d32);
DWC_DEBUGPL(DBG_CILV, "hwcfg4=%08x\n", core_if->hwcfg4.d32);
core_if->hcfg.d32 =
DWC_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
core_if->dcfg.d32 =
DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
DWC_DEBUGPL(DBG_CILV, "hcfg=%08x\n", core_if->hcfg.d32);
DWC_DEBUGPL(DBG_CILV, "dcfg=%08x\n", core_if->dcfg.d32);
DWC_DEBUGPL(DBG_CILV, "op_mode=%0x\n", core_if->hwcfg2.b.op_mode);
DWC_DEBUGPL(DBG_CILV, "arch=%0x\n", core_if->hwcfg2.b.architecture);
DWC_DEBUGPL(DBG_CILV, "num_dev_ep=%d\n", core_if->hwcfg2.b.num_dev_ep);
DWC_DEBUGPL(DBG_CILV, "num_host_chan=%d\n",
core_if->hwcfg2.b.num_host_chan);
DWC_DEBUGPL(DBG_CILV, "nonperio_tx_q_depth=0x%0x\n",
core_if->hwcfg2.b.nonperio_tx_q_depth);
DWC_DEBUGPL(DBG_CILV, "host_perio_tx_q_depth=0x%0x\n",
core_if->hwcfg2.b.host_perio_tx_q_depth);
DWC_DEBUGPL(DBG_CILV, "dev_token_q_depth=0x%0x\n",
core_if->hwcfg2.b.dev_token_q_depth);
DWC_DEBUGPL(DBG_CILV, "Total FIFO SZ=%d\n",
core_if->hwcfg3.b.dfifo_depth);
DWC_DEBUGPL(DBG_CILV, "xfer_size_cntr_width=%0x\n",
core_if->hwcfg3.b.xfer_size_cntr_width);
/*
* Set the SRP success bit for FS-I2C
*/
core_if->srp_success = 0;
core_if->srp_timer_started = 0;
/*
* Create a new workqueue and initialize the work items
*/
core_if->wq_otg = DWC_WORKQ_ALLOC("dwc_otg");
if (core_if->wq_otg == 0) {
DWC_WARN("DWC_WORKQ_ALLOC failed\n");
DWC_FREE(host_if);
DWC_FREE(dev_if);
DWC_FREE(core_if);
return 0;
}
core_if->snpsid = DWC_READ_REG32(&core_if->core_global_regs->gsnpsid);
DWC_PRINTF("Core Release: %x.%x%x%x\n",
(core_if->snpsid >> 12 & 0xF),
(core_if->snpsid >> 8 & 0xF),
(core_if->snpsid >> 4 & 0xF), (core_if->snpsid & 0xF));
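/*
* Example (assumed value, for illustration only): a GSNPSID of 0x4F54294A
* decodes with the nibble extraction above to "Core Release: 2.94a".
*/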
core_if->wkp_timer = DWC_TIMER_ALLOC("Wake Up Timer",
w_wakeup_detected, core_if);
if (core_if->wkp_timer == 0) {
DWC_WARN("DWC_TIMER_ALLOC failed\n");
DWC_FREE(host_if);
DWC_FREE(dev_if);
DWC_WORKQ_FREE(core_if->wq_otg);
DWC_FREE(core_if);
return 0;
}
if (dwc_otg_setup_params(core_if)) {
DWC_WARN("Error while setting core params\n");
}
core_if->hibernation_suspend = 0;
/** ADP initialization */
dwc_otg_adp_init(core_if);
return core_if;
}
/**
* This function frees the structures allocated by dwc_otg_cil_init().
*
* @param core_if The core interface pointer returned from
* dwc_otg_cil_init().
*
*/
void dwc_otg_cil_remove(dwc_otg_core_if_t * core_if)
{
/* Disable all interrupts */
DWC_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 1, 0);
DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk, 0);
if (core_if->wq_otg) {
DWC_WORKQ_WAIT_WORK_DONE(core_if->wq_otg, 500);
DWC_WORKQ_FREE(core_if->wq_otg);
}
if (core_if->dev_if) {
DWC_FREE(core_if->dev_if);
}
if (core_if->host_if) {
DWC_FREE(core_if->host_if);
}
/** Remove ADP support */
dwc_otg_adp_remove(core_if);
if (core_if->core_params) {
DWC_FREE(core_if->core_params);
}
if (core_if->wkp_timer) {
DWC_TIMER_FREE(core_if->wkp_timer);
}
if (core_if->srp_timer) {
DWC_TIMER_FREE(core_if->srp_timer);
}
DWC_FREE(core_if);
}
/**
* This function enables the controller's Global Interrupt in the AHB Config
* register.
*
* @param core_if Programming view of DWC_otg controller.
*/
void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t * core_if)
{
gahbcfg_data_t ahbcfg = {.d32 = 0 };
ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */
DWC_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32);
}
/**
* This function disables the controller's Global Interrupt in the AHB Config
* register.
*
* @param core_if Programming view of DWC_otg controller.
*/
void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t * core_if)
{
gahbcfg_data_t ahbcfg = {.d32 = 0 };
ahbcfg.b.glblintrmsk = 1; /* Disable interrupts */
DWC_MODIFY_REG32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0);
}
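/*
* Note on the two functions above: DWC_MODIFY_REG32(reg, clear_mask, set_mask)
* is used throughout this file as a read-modify-write helper. A minimal
* sketch of the assumed semantics (the real helper lives in the dwc_os
* portability layer):
*
*	uint32_t val = DWC_READ_REG32(reg);
*	val = (val & ~clear_mask) | set_mask;
*	DWC_WRITE_REG32(reg, val);
*
* so enabling passes the bit in set_mask and disabling passes the same bit
* in clear_mask.
*/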
/**
* This function initializes the common interrupts, used in both
* device and host modes.
*
* @param core_if Programming view of the DWC_otg controller
*
*/
static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t * core_if)
{
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
gintmsk_data_t intr_mask = {.d32 = 0 };
/* Clear any pending OTG Interrupts */
DWC_WRITE_REG32(&global_regs->gotgint, 0xFFFFFFFF);
/* Clear any pending interrupts */
DWC_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
/*
* Enable the interrupts in the GINTMSK.
*/
intr_mask.b.modemismatch = 1;
intr_mask.b.otgintr = 1;
if (!core_if->dma_enable) {
intr_mask.b.rxstsqlvl = 1;
}
intr_mask.b.conidstschng = 1;
intr_mask.b.wkupintr = 1;
intr_mask.b.disconnect = 0;
intr_mask.b.usbsuspend = 1;
intr_mask.b.sessreqintr = 1;
#ifdef CONFIG_USB_DWC_OTG_LPM
if (core_if->core_params->lpm_enable) {
intr_mask.b.lpmtranrcvd = 1;
}
#endif
DWC_WRITE_REG32(&global_regs->gintmsk, intr_mask.d32);
}
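/*
* The gintmsk_data_t value built above is one of the register unions from
* dwc_otg_regs.h: the .b bit-fields overlay the raw 32-bit .d32 word, so
* setting e.g. intr_mask.b.otgintr = 1 simply sets the corresponding bit in
* intr_mask.d32 before the single GINTMSK write. A sketch of the assumed
* layout (not a redefinition of the union):
*
*	typedef union gintmsk_data {
*		uint32_t d32;
*		struct { ... one bit-field per GINTMSK bit ... } b;
*	} gintmsk_data_t;
*/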
/*
* The restore operation is modified to support Synopsys Emulated Powerdown and
* Hibernation. This function is for exiting from Device mode hibernation by
* Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
* @param core_if Programming view of DWC_otg controller.
* @param rem_wakeup - indicates whether resume is initiated by Device or Host.
* @param reset - indicates whether resume is initiated by Reset.
*/
int dwc_otg_device_hibernation_restore(dwc_otg_core_if_t * core_if,
int rem_wakeup, int reset)
{
gpwrdn_data_t gpwrdn = {.d32 = 0 };
pcgcctl_data_t pcgcctl = {.d32 = 0 };
dctl_data_t dctl = {.d32 = 0 };
int timeout = 2000;
if (!core_if->hibernation_suspend) {
DWC_PRINTF("Already exited from Hibernation\n");
return 1;
}
DWC_DEBUGPL(DBG_PCD, "%s called\n", __FUNCTION__);
/* Switch-on voltage to the core */
gpwrdn.b.pwrdnswtch = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Reset core */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnrstn = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Assert Restore signal */
gpwrdn.d32 = 0;
gpwrdn.b.restore = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
dwc_udelay(10);
/* Disable power clamps */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnclmp = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
if (rem_wakeup) {
dwc_udelay(70);
}
/* Deassert Reset core */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnrstn = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
dwc_udelay(10);
/* Disable PMU interrupt */
gpwrdn.d32 = 0;
gpwrdn.b.pmuintsel = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
/* Mask interrupts from gpwrdn */
gpwrdn.d32 = 0;
gpwrdn.b.connect_det_msk = 1;
gpwrdn.b.srp_det_msk = 1;
gpwrdn.b.disconn_det_msk = 1;
gpwrdn.b.rst_det_msk = 1;
gpwrdn.b.lnstchng_msk = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
/* Indicate that we are exiting hibernation */
core_if->hibernation_suspend = 0;
/*
* Set Restore Essential Regs bit in PCGCCTL register, restore_mode = 1
* indicates restore from remote_wakeup
*/
restore_essential_regs(core_if, rem_wakeup, 0);
/*
* Wait briefly so the new value of hibernation_suspend is seen in case
* the Restore Done interrupt was received before polling starts
*/
dwc_udelay(10);
if (core_if->hibernation_suspend == 0) {
/*
* Wait for the Restore Done interrupt. The interrupt is polled here
* to avoid any possible race conditions
*/
do {
gintsts_data_t gintsts;
gintsts.d32 =
DWC_READ_REG32(&core_if->core_global_regs->gintsts);
if (gintsts.b.restoredone) {
gintsts.d32 = 0;
gintsts.b.restoredone = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->
gintsts, gintsts.d32);
DWC_PRINTF("Restore Done Interrupt seen\n");
break;
}
dwc_udelay(10);
} while (--timeout);
if (!timeout) {
DWC_PRINTF("Restore Done interrupt wasn't generated here\n");
}
}
/* Clear all pending interrupts */
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
/* De-assert Restore */
gpwrdn.d32 = 0;
gpwrdn.b.restore = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
if (!rem_wakeup) {
pcgcctl.d32 = 0;
pcgcctl.b.rstpdwnmodule = 1;
DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
}
/* Restore GUSBCFG and DCFG */
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
core_if->gr_backup->gusbcfg_local);
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg,
core_if->dr_backup->dcfg);
/* De-assert Wakeup Logic */
gpwrdn.d32 = 0;
gpwrdn.b.pmuactv = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
if (!rem_wakeup) {
/* Set Device programming done bit */
dctl.b.pwronprgdone = 1;
DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
} else {
/* Start Remote Wakeup Signaling */
dctl.d32 = core_if->dr_backup->dctl;
dctl.b.rmtwkupsig = 1;
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32);
}
dwc_mdelay(2);
/* Clear all pending interrupts */
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
/* Restore global registers */
dwc_otg_restore_global_regs(core_if);
/* Restore device global registers */
dwc_otg_restore_dev_regs(core_if, rem_wakeup);
if (rem_wakeup) {
dwc_mdelay(7);
dctl.d32 = 0;
dctl.b.rmtwkupsig = 1;
DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32, 0);
}
core_if->hibernation_suspend = 0;
/* The core will be in ON STATE */
core_if->lx_state = DWC_OTG_L0;
DWC_PRINTF("Hibernation recovery completes here\n");
return 1;
}
/*
* The restore operation is modified to support Synopsys Emulated Powerdown and
* Hibernation. This function is for exiting from Host mode hibernation by
* Host Initiated Resume/Reset and Device Initiated Remote-Wakeup.
* @param core_if Programming view of DWC_otg controller.
* @param rem_wakeup - indicates whether resume is initiated by Device or Host.
* @param reset - indicates whether resume is initiated by Reset.
*/
int dwc_otg_host_hibernation_restore(dwc_otg_core_if_t * core_if,
int rem_wakeup, int reset)
{
gpwrdn_data_t gpwrdn = {.d32 = 0 };
hprt0_data_t hprt0 = {.d32 = 0 };
int timeout = 2000;
DWC_DEBUGPL(DBG_HCD, "%s called\n", __FUNCTION__);
/* Switch-on voltage to the core */
gpwrdn.b.pwrdnswtch = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Reset core */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnrstn = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Assert Restore signal */
gpwrdn.d32 = 0;
gpwrdn.b.restore = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
dwc_udelay(10);
/* Disable power clamps */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnclmp = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
if (!rem_wakeup) {
dwc_udelay(50);
}
/* Deassert Reset core */
gpwrdn.d32 = 0;
gpwrdn.b.pwrdnrstn = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0, gpwrdn.d32);
dwc_udelay(10);
/* Disable PMU interrupt */
gpwrdn.d32 = 0;
gpwrdn.b.pmuintsel = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
gpwrdn.d32 = 0;
gpwrdn.b.connect_det_msk = 1;
gpwrdn.b.srp_det_msk = 1;
gpwrdn.b.disconn_det_msk = 1;
gpwrdn.b.rst_det_msk = 1;
gpwrdn.b.lnstchng_msk = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
/* Indicate that we are exiting hibernation */
core_if->hibernation_suspend = 0;
/* Set Restore Essential Regs bit in PCGCCTL register */
restore_essential_regs(core_if, rem_wakeup, 1);
/* Wait briefly so the new value of hibernation_suspend is seen in case
* the Restore Done interrupt was received before polling starts */
dwc_udelay(10);
if (core_if->hibernation_suspend == 0) {
/* Wait for the Restore Done interrupt. The interrupt is polled here
* to avoid any possible race conditions
*/
do {
gintsts_data_t gintsts;
gintsts.d32 = DWC_READ_REG32(&core_if->core_global_regs->gintsts);
if (gintsts.b.restoredone) {
gintsts.d32 = 0;
gintsts.b.restoredone = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, gintsts.d32);
DWC_DEBUGPL(DBG_HCD,"Restore Done Interrupt seen\n");
break;
}
dwc_udelay(10);
} while (--timeout);
if (!timeout) {
DWC_WARN("Restore Done interrupt wasn't generated\n");
}
}
/* Set the flag's value to 0 again after receiving restore done interrupt */
core_if->hibernation_suspend = 0;
/* This step is not described in the functional spec, but without this
* delay mode mismatch interrupts occur, because just after restore the
* core is still in Device mode (gintsts.curmode == 0) */
dwc_mdelay(100);
/* Clear all pending interrupts */
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
/* De-assert Restore */
gpwrdn.d32 = 0;
gpwrdn.b.restore = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Restore GUSBCFG and HCFG */
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg,
core_if->gr_backup->gusbcfg_local);
DWC_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
core_if->hr_backup->hcfg_local);
/* De-assert Wakeup Logic */
gpwrdn.d32 = 0;
gpwrdn.b.pmuactv = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, gpwrdn.d32, 0);
dwc_udelay(10);
/* Start the Resume operation by programming HPRT0 */
hprt0.d32 = core_if->hr_backup->hprt0_local;
hprt0.b.prtpwr = 1;
hprt0.b.prtena = 0;
hprt0.b.prtsusp = 0;
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
DWC_PRINTF("Resume Starts Now\n");
if (!reset) { // Indicates it is Resume Operation
hprt0.d32 = core_if->hr_backup->hprt0_local;
hprt0.b.prtres = 1;
hprt0.b.prtpwr = 1;
hprt0.b.prtena = 0;
hprt0.b.prtsusp = 0;
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
if (!rem_wakeup)
hprt0.b.prtres = 0;
/* Wait for Resume time and then program HPRT again */
dwc_mdelay(100);
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
} else { // Indicates it is Reset Operation
hprt0.d32 = core_if->hr_backup->hprt0_local;
hprt0.b.prtrst = 1;
hprt0.b.prtpwr = 1;
hprt0.b.prtena = 0;
hprt0.b.prtsusp = 0;
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
/* Wait for Reset time and then program HPRT again */
dwc_mdelay(60);
hprt0.b.prtrst = 0;
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
}
/* Clear all interrupt status */
hprt0.d32 = dwc_otg_read_hprt0(core_if);
hprt0.b.prtconndet = 1;
hprt0.b.prtenchng = 1;
DWC_WRITE_REG32(core_if->host_if->hprt0, hprt0.d32);
/* Clear all pending interrupts */
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
/* Restore global registers */
dwc_otg_restore_global_regs(core_if);
/* Restore host global registers */
dwc_otg_restore_host_regs(core_if, reset);
/* The core will be in ON STATE */
core_if->lx_state = DWC_OTG_L0;
DWC_PRINTF("Hibernation recovery is complete here\n");
return 0;
}
/** Saves some register values into system memory. */
int dwc_otg_save_global_regs(dwc_otg_core_if_t * core_if)
{
struct dwc_otg_global_regs_backup *gr;
int i;
gr = core_if->gr_backup;
if (!gr) {
gr = DWC_ALLOC(sizeof(*gr));
if (!gr) {
return -DWC_E_NO_MEMORY;
}
core_if->gr_backup = gr;
}
gr->gotgctl_local = DWC_READ_REG32(&core_if->core_global_regs->gotgctl);
gr->gintmsk_local = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
gr->gahbcfg_local = DWC_READ_REG32(&core_if->core_global_regs->gahbcfg);
gr->gusbcfg_local = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
gr->grxfsiz_local = DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
gr->gnptxfsiz_local = DWC_READ_REG32(&core_if->core_global_regs->gnptxfsiz);
gr->hptxfsiz_local = DWC_READ_REG32(&core_if->core_global_regs->hptxfsiz);
#ifdef CONFIG_USB_DWC_OTG_LPM
gr->glpmcfg_local = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
#endif
gr->gi2cctl_local = DWC_READ_REG32(&core_if->core_global_regs->gi2cctl);
gr->pcgcctl_local = DWC_READ_REG32(core_if->pcgcctl);
gr->gdfifocfg_local =
DWC_READ_REG32(&core_if->core_global_regs->gdfifocfg);
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
gr->dtxfsiz_local[i] =
DWC_READ_REG32(&(core_if->core_global_regs->dtxfsiz[i]));
}
DWC_DEBUGPL(DBG_ANY, "===========Backing Global registers==========\n");
DWC_DEBUGPL(DBG_ANY, "Backed up gotgctl = %08x\n", gr->gotgctl_local);
DWC_DEBUGPL(DBG_ANY, "Backed up gintmsk = %08x\n", gr->gintmsk_local);
DWC_DEBUGPL(DBG_ANY, "Backed up gahbcfg = %08x\n", gr->gahbcfg_local);
DWC_DEBUGPL(DBG_ANY, "Backed up gusbcfg = %08x\n", gr->gusbcfg_local);
DWC_DEBUGPL(DBG_ANY, "Backed up grxfsiz = %08x\n", gr->grxfsiz_local);
DWC_DEBUGPL(DBG_ANY, "Backed up gnptxfsiz = %08x\n",
gr->gnptxfsiz_local);
DWC_DEBUGPL(DBG_ANY, "Backed up hptxfsiz = %08x\n",
gr->hptxfsiz_local);
#ifdef CONFIG_USB_DWC_OTG_LPM
DWC_DEBUGPL(DBG_ANY, "Backed up glpmcfg = %08x\n", gr->glpmcfg_local);
#endif
DWC_DEBUGPL(DBG_ANY, "Backed up gi2cctl = %08x\n", gr->gi2cctl_local);
DWC_DEBUGPL(DBG_ANY, "Backed up pcgcctl = %08x\n", gr->pcgcctl_local);
DWC_DEBUGPL(DBG_ANY,"Backed up gdfifocfg = %08x\n",gr->gdfifocfg_local);
return 0;
}
/** Saves GINTMSK register before setting the msk bits. */
int dwc_otg_save_gintmsk_reg(dwc_otg_core_if_t * core_if)
{
struct dwc_otg_global_regs_backup *gr;
gr = core_if->gr_backup;
if (!gr) {
gr = DWC_ALLOC(sizeof(*gr));
if (!gr) {
return -DWC_E_NO_MEMORY;
}
core_if->gr_backup = gr;
}
gr->gintmsk_local = DWC_READ_REG32(&core_if->core_global_regs->gintmsk);
DWC_DEBUGPL(DBG_ANY,"=============Backing GINTMSK registers============\n");
DWC_DEBUGPL(DBG_ANY, "Backed up gintmsk = %08x\n", gr->gintmsk_local);
return 0;
}
int dwc_otg_save_dev_regs(dwc_otg_core_if_t * core_if)
{
struct dwc_otg_dev_regs_backup *dr;
int i;
dr = core_if->dr_backup;
if (!dr) {
dr = DWC_ALLOC(sizeof(*dr));
if (!dr) {
return -DWC_E_NO_MEMORY;
}
core_if->dr_backup = dr;
}
dr->dcfg = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
dr->dctl = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dctl);
dr->daintmsk =
DWC_READ_REG32(&core_if->dev_if->dev_global_regs->daintmsk);
dr->diepmsk =
DWC_READ_REG32(&core_if->dev_if->dev_global_regs->diepmsk);
dr->doepmsk =
DWC_READ_REG32(&core_if->dev_if->dev_global_regs->doepmsk);
for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
dr->diepctl[i] =
DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
dr->dieptsiz[i] =
DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz);
dr->diepdma[i] =
DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma);
}
DWC_DEBUGPL(DBG_ANY,
"=============Backing Device registers==============\n");
DWC_DEBUGPL(DBG_ANY, "Backed up dcfg = %08x\n", dr->dcfg);
DWC_DEBUGPL(DBG_ANY, "Backed up dctl = %08x\n", dr->dctl);
DWC_DEBUGPL(DBG_ANY, "Backed up daintmsk = %08x\n",
dr->daintmsk);
DWC_DEBUGPL(DBG_ANY, "Backed up diepmsk = %08x\n", dr->diepmsk);
DWC_DEBUGPL(DBG_ANY, "Backed up doepmsk = %08x\n", dr->doepmsk);
for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
DWC_DEBUGPL(DBG_ANY, "Backed up diepctl[%d] = %08x\n", i,
dr->diepctl[i]);
DWC_DEBUGPL(DBG_ANY, "Backed up dieptsiz[%d] = %08x\n",
i, dr->dieptsiz[i]);
DWC_DEBUGPL(DBG_ANY, "Backed up diepdma[%d] = %08x\n", i,
dr->diepdma[i]);
}
return 0;
}
int dwc_otg_save_host_regs(dwc_otg_core_if_t * core_if)
{
struct dwc_otg_host_regs_backup *hr;
int i;
hr = core_if->hr_backup;
if (!hr) {
hr = DWC_ALLOC(sizeof(*hr));
if (!hr) {
return -DWC_E_NO_MEMORY;
}
core_if->hr_backup = hr;
}
hr->hcfg_local =
DWC_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
hr->haintmsk_local =
DWC_READ_REG32(&core_if->host_if->host_global_regs->haintmsk);
for (i = 0; i < dwc_otg_get_param_host_channels(core_if); ++i) {
hr->hcintmsk_local[i] =
DWC_READ_REG32(&core_if->host_if->hc_regs[i]->hcintmsk);
}
hr->hprt0_local = DWC_READ_REG32(core_if->host_if->hprt0);
hr->hfir_local =
DWC_READ_REG32(&core_if->host_if->host_global_regs->hfir);
DWC_DEBUGPL(DBG_ANY,
"=============Backing Host registers===============\n");
DWC_DEBUGPL(DBG_ANY, "Backed up hcfg = %08x\n",
hr->hcfg_local);
DWC_DEBUGPL(DBG_ANY, "Backed up haintmsk = %08x\n", hr->haintmsk_local);
for (i = 0; i < dwc_otg_get_param_host_channels(core_if); ++i) {
DWC_DEBUGPL(DBG_ANY, "Backed up hcintmsk[%02d]=%08x\n", i,
hr->hcintmsk_local[i]);
}
DWC_DEBUGPL(DBG_ANY, "Backed up hprt0 = %08x\n",
hr->hprt0_local);
DWC_DEBUGPL(DBG_ANY, "Backed up hfir = %08x\n",
hr->hfir_local);
return 0;
}
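/*
* Illustrative only (the real callers are in the PCD/HCD suspend paths):
* before entering hibernation the driver is expected to populate the backup
* structures that the *_hibernation_restore() functions above rely on, e.g.:
*
*	dwc_otg_save_global_regs(core_if);
*	if (dwc_otg_is_host_mode(core_if))
*		dwc_otg_save_host_regs(core_if);
*	else
*		dwc_otg_save_dev_regs(core_if);
*/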
int dwc_otg_restore_global_regs(dwc_otg_core_if_t *core_if)
{
struct dwc_otg_global_regs_backup *gr;
int i;
gr = core_if->gr_backup;
if (!gr) {
return -DWC_E_INVALID;
}
DWC_WRITE_REG32(&core_if->core_global_regs->gotgctl, gr->gotgctl_local);
DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk, gr->gintmsk_local);
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gr->gusbcfg_local);
DWC_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gr->gahbcfg_local);
DWC_WRITE_REG32(&core_if->core_global_regs->grxfsiz, gr->grxfsiz_local);
DWC_WRITE_REG32(&core_if->core_global_regs->gnptxfsiz,
gr->gnptxfsiz_local);
DWC_WRITE_REG32(&core_if->core_global_regs->hptxfsiz,
gr->hptxfsiz_local);
DWC_WRITE_REG32(&core_if->core_global_regs->gdfifocfg,
gr->gdfifocfg_local);
for (i = 0; i < MAX_EPS_CHANNELS; i++) {
DWC_WRITE_REG32(&core_if->core_global_regs->dtxfsiz[i],
gr->dtxfsiz_local[i]);
}
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
DWC_WRITE_REG32(core_if->host_if->hprt0, 0x0000100A);
DWC_WRITE_REG32(&core_if->core_global_regs->gahbcfg,
(gr->gahbcfg_local));
return 0;
}
int dwc_otg_restore_dev_regs(dwc_otg_core_if_t * core_if, int rem_wakeup)
{
struct dwc_otg_dev_regs_backup *dr;
int i;
dr = core_if->dr_backup;
if (!dr) {
return -DWC_E_INVALID;
}
if (!rem_wakeup)
{
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dctl, dr->dctl);
}
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->daintmsk, dr->daintmsk);
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->diepmsk, dr->diepmsk);
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->doepmsk, dr->doepmsk);
for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl, dr->diepctl[i]);
DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->dieptsiz, dr->dieptsiz[i]);
DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[i]->diepdma, dr->diepdma[i]);
}
return 0;
}
int dwc_otg_restore_host_regs(dwc_otg_core_if_t * core_if, int reset)
{
struct dwc_otg_host_regs_backup *hr;
int i;
hr = core_if->hr_backup;
if (!hr) {
return -DWC_E_INVALID;
}
DWC_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hr->hcfg_local);
//if (!reset)
//{
// DWC_WRITE_REG32(&core_if->host_if->host_global_regs->hfir, hr->hfir_local);
//}
DWC_WRITE_REG32(&core_if->host_if->host_global_regs->haintmsk,
hr->haintmsk_local);
for (i = 0; i < dwc_otg_get_param_host_channels(core_if); ++i) {
DWC_WRITE_REG32(&core_if->host_if->hc_regs[i]->hcintmsk,
hr->hcintmsk_local[i]);
}
return 0;
}
int restore_lpm_i2c_regs(dwc_otg_core_if_t * core_if)
{
struct dwc_otg_global_regs_backup *gr;
gr = core_if->gr_backup;
/* Restore values for LPM and I2C */
#ifdef CONFIG_USB_DWC_OTG_LPM
DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, gr->glpmcfg_local);
#endif
DWC_WRITE_REG32(&core_if->core_global_regs->gi2cctl, gr->gi2cctl_local);
return 0;
}
int restore_essential_regs(dwc_otg_core_if_t * core_if, int rmode, int is_host)
{
struct dwc_otg_global_regs_backup *gr;
pcgcctl_data_t pcgcctl = {.d32 = 0 };
gahbcfg_data_t gahbcfg = {.d32 = 0 };
gusbcfg_data_t gusbcfg = {.d32 = 0 };
gintmsk_data_t gintmsk = {.d32 = 0 };
/* Restore LPM and I2C registers */
restore_lpm_i2c_regs(core_if);
/* Set PCGCCTL to 0 */
DWC_WRITE_REG32(core_if->pcgcctl, 0x00000000);
gr = core_if->gr_backup;
/* Load restore values for [31:14] bits */
DWC_WRITE_REG32(core_if->pcgcctl,
((gr->pcgcctl_local & 0xffffc000) | 0x00020000));
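/* 0xffffc000 keeps bits [31:14] of the backed-up PCGCCTL value;
* 0x00020000 additionally sets bit 17 (assumed to be required by the
* restore sequence). */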
/* Unmask the global interrupt in GAHBCFG and restore it */
gahbcfg.d32 = gr->gahbcfg_local;
gahbcfg.b.glblintrmsk = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->gahbcfg, gahbcfg.d32);
/* Clear all pending interrupts */
DWC_WRITE_REG32(&core_if->core_global_regs->gintsts, 0xFFFFFFFF);
/* Unmask restore done interrupt */
gintmsk.b.restoredone = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->gintmsk, gintmsk.d32);
/* Restore GUSBCFG and HCFG/DCFG */
gusbcfg.d32 = core_if->gr_backup->gusbcfg_local;
DWC_WRITE_REG32(&core_if->core_global_regs->gusbcfg, gusbcfg.d32);
if (is_host) {
hcfg_data_t hcfg = {.d32 = 0 };
hcfg.d32 = core_if->hr_backup->hcfg_local;
DWC_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg,
hcfg.d32);
/* Load restore values for [31:14] bits */
pcgcctl.d32 = (gr->pcgcctl_local & 0xffffc000) | 0x00020000;
if (rmode)
pcgcctl.b.restoremode = 1;
DWC_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
dwc_udelay(10);
/* Load restore values for [31:14] bits and set EssRegRestored bit */
pcgcctl.d32 = gr->pcgcctl_local & 0xffffc000;
pcgcctl.b.ess_reg_restored = 1;
if (rmode)
pcgcctl.b.restoremode = 1;
DWC_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
} else {
dcfg_data_t dcfg = {.d32 = 0 };
dcfg.d32 = core_if->dr_backup->dcfg;
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
/* Load restore values for [31:14] bits */
pcgcctl.d32 = (gr->pcgcctl_local & 0xffffc000) | 0x00020000;
if (!rmode) {
pcgcctl.d32 |= 0x208;
}
DWC_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
dwc_udelay(10);
/* Load restore values for [31:14] bits */
pcgcctl.d32 = (gr->pcgcctl_local & 0xffffc000) | 0x00020000;
pcgcctl.b.ess_reg_restored = 1;
if (!rmode)
pcgcctl.d32 |= 0x208;
DWC_WRITE_REG32(core_if->pcgcctl, pcgcctl.d32);
}
return 0;
}
/**
* Initializes the FSLSPClkSel field of the HCFG register depending on the PHY
* type.
*/
static void init_fslspclksel(dwc_otg_core_if_t * core_if)
{
uint32_t val;
hcfg_data_t hcfg;
if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
(core_if->hwcfg2.b.fs_phy_type == 1) &&
(core_if->core_params->ulpi_fs_ls)) ||
(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
/* Full speed PHY */
val = DWC_HCFG_48_MHZ;
} else {
/* High speed PHY running at full speed or high speed */
val = DWC_HCFG_30_60_MHZ;
}
DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val);
hcfg.d32 = DWC_READ_REG32(&core_if->host_if->host_global_regs->hcfg);
hcfg.b.fslspclksel = val;
DWC_WRITE_REG32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32);
}
/**
* Initializes the DevSpd field of the DCFG register depending on the PHY type
* and the enumeration speed of the device.
*/
static void init_devspd(dwc_otg_core_if_t * core_if)
{
uint32_t val;
dcfg_data_t dcfg;
if (((core_if->hwcfg2.b.hs_phy_type == 2) &&
(core_if->hwcfg2.b.fs_phy_type == 1) &&
(core_if->core_params->ulpi_fs_ls)) ||
(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
/* Full speed PHY */
val = 0x3;
} else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
/* High speed PHY running at full speed */
val = 0x1;
} else {
/* High speed PHY running at high speed */
val = 0x0;
}
DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val);
dcfg.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dcfg);
dcfg.b.devspd = val;
DWC_WRITE_REG32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32);
}
/**
* This function calculates the number of IN EPs
* using the GHWCFG1 and GHWCFG2 register values
*
* @param core_if Programming view of the DWC_otg controller
*/
static uint32_t calc_num_in_eps(dwc_otg_core_if_t * core_if)
{
uint32_t num_in_eps = 0;
uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3;
uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps;
int i;
for (i = 0; i < num_eps; ++i) {
if (!(hwcfg1 & 0x1))
num_in_eps++;
hwcfg1 >>= 2;
}
if (core_if->hwcfg4.b.ded_fifo_en) {
num_in_eps =
(num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps;
}
return num_in_eps;
}
/**
* This function calculates the number of OUT EPs
* using the GHWCFG1 and GHWCFG2 register values
*
* @param core_if Programming view of the DWC_otg controller
*/
static uint32_t calc_num_out_eps(dwc_otg_core_if_t * core_if)
{
uint32_t num_out_eps = 0;
uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep;
uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2;
int i;
for (i = 0; i < num_eps; ++i) {
if (!(hwcfg1 & 0x1))
num_out_eps++;
hwcfg1 >>= 2;
}
return num_out_eps;
}
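/*
* Worked example for the two helpers above, assuming the databook encoding
* of GHWCFG1 (2-bit direction field per endpoint above EP0: 0 = BIDIR,
* 1 = IN only, 2 = OUT only). With num_dev_ep = 3 and fields 0, 1, 2 for
* EP1..EP3:
*
*	calc_num_in_eps()  counts EP1 (BIDIR) and EP2 (IN)  -> 2
*	calc_num_out_eps() counts EP1 (BIDIR) and EP3 (OUT) -> 2
*
* The IN count is additionally capped by GHWCFG4.num_in_eps when dedicated
* Tx FIFOs are enabled.
*/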
/**
* This function initializes the DWC_otg controller registers and
* prepares the core for device mode or host mode operation.
*
* @param core_if Programming view of the DWC_otg controller
*
*/
void dwc_otg_core_init(dwc_otg_core_if_t * core_if)
{
int i = 0;
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
dwc_otg_dev_if_t *dev_if = core_if->dev_if;
gahbcfg_data_t ahbcfg = {.d32 = 0 };
gusbcfg_data_t usbcfg = {.d32 = 0 };
gi2cctl_data_t i2cctl = {.d32 = 0 };
DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if);
/* Common Initialization */
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
/* Program the ULPI External VBUS bit if needed */
usbcfg.b.ulpi_ext_vbus_drv =
(core_if->core_params->phy_ulpi_ext_vbus ==
DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0;
/* Set external TS Dline pulsing */
usbcfg.b.term_sel_dl_pulse =
(core_if->core_params->ts_dline == 1) ? 1 : 0;
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
/* Reset the Controller */
dwc_otg_core_reset(core_if);
core_if->adp_enable = core_if->core_params->adp_supp_enable;
core_if->power_down = core_if->core_params->power_down;
core_if->otg_sts = 0;
/* Initialize parameters from Hardware configuration registers. */
dev_if->num_in_eps = calc_num_in_eps(core_if);
dev_if->num_out_eps = calc_num_out_eps(core_if);
DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n",
core_if->hwcfg4.b.num_dev_perio_in_ep);
for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
dev_if->perio_tx_fifo_size[i] =
DWC_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n",
i, dev_if->perio_tx_fifo_size[i]);
}
for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
dev_if->tx_fifo_size[i] =
DWC_READ_REG32(&global_regs->dtxfsiz[i]) >> 16;
DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n",
i, dev_if->tx_fifo_size[i]);
}
core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth;
core_if->rx_fifo_size = DWC_READ_REG32(&global_regs->grxfsiz);
core_if->nperio_tx_fifo_size =
DWC_READ_REG32(&global_regs->gnptxfsiz) >> 16;
DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size);
DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n",
core_if->nperio_tx_fifo_size);
/* This programming sequence needs to happen in FS mode before any other
* programming occurs */
if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) &&
(core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) {
/* If FS mode with FS PHY */
/* core_init() is now called on every switch so only call the
* following for the first time through. */
if (!core_if->phy_init_done) {
core_if->phy_init_done = 1;
DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n");
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
usbcfg.b.physel = 1;
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
/* Reset after a PHY select */
dwc_otg_core_reset(core_if);
}
/* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also
* do this on HNP Dev/Host mode switches (done in dev_init and
* host_init). */
if (dwc_otg_is_host_mode(core_if)) {
init_fslspclksel(core_if);
} else {
init_devspd(core_if);
}
if (core_if->core_params->i2c_enable) {
DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n");
/* Program GUSBCFG.OtgUtmifsSel to I2C */
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
usbcfg.b.otgutmifssel = 1;
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
/* Program GI2CCTL.I2CEn */
i2cctl.d32 = DWC_READ_REG32(&global_regs->gi2cctl);
i2cctl.b.i2cdevaddr = 1;
i2cctl.b.i2cen = 0;
DWC_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
i2cctl.b.i2cen = 1;
DWC_WRITE_REG32(&global_regs->gi2cctl, i2cctl.d32);
}
} /* endif speed == DWC_SPEED_PARAM_FULL */
else {
/* High speed PHY. */
if (!core_if->phy_init_done) {
core_if->phy_init_done = 1;
/* HS PHY parameters. These parameters are preserved
* during soft reset so only program the first time. Do
* a soft reset immediately after setting phyif. */
if (core_if->core_params->phy_type == 2) {
/* ULPI interface */
usbcfg.b.ulpi_utmi_sel = 1;
usbcfg.b.phyif = 0;
usbcfg.b.ddrsel =
core_if->core_params->phy_ulpi_ddr;
} else if (core_if->core_params->phy_type == 1) {
/* UTMI+ interface */
usbcfg.b.ulpi_utmi_sel = 0;
if (core_if->core_params->phy_utmi_width == 16) {
usbcfg.b.phyif = 1;
} else {
usbcfg.b.phyif = 0;
}
} else {
DWC_ERROR("FS PHY TYPE\n");
}
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
/* Reset after setting the PHY parameters */
dwc_otg_core_reset(core_if);
}
}
if ((core_if->hwcfg2.b.hs_phy_type == 2) &&
(core_if->hwcfg2.b.fs_phy_type == 1) &&
(core_if->core_params->ulpi_fs_ls)) {
DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n");
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
usbcfg.b.ulpi_fsls = 1;
usbcfg.b.ulpi_clk_sus_m = 1;
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
} else {
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
usbcfg.b.ulpi_fsls = 0;
usbcfg.b.ulpi_clk_sus_m = 0;
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
}
/* Program the GAHBCFG Register. */
switch (core_if->hwcfg2.b.architecture) {
case DWC_SLAVE_ONLY_ARCH:
DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n");
ahbcfg.b.nptxfemplvl_txfemplvl =
DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY;
core_if->dma_enable = 0;
core_if->dma_desc_enable = 0;
break;
case DWC_EXT_DMA_ARCH:
DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n");
{
uint8_t brst_sz = core_if->core_params->dma_burst_size;
ahbcfg.b.hburstlen = 0;
while (brst_sz > 1) {
ahbcfg.b.hburstlen++;
brst_sz >>= 1;
}
}
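/* The loop above computes log2 of the burst size, e.g. a
* dma_burst_size of 16 yields hburstlen = 4. */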
core_if->dma_enable = (core_if->core_params->dma_enable != 0);
core_if->dma_desc_enable =
(core_if->core_params->dma_desc_enable != 0);
break;
case DWC_INT_DMA_ARCH:
DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n");
/* Old value was DWC_GAHBCFG_INT_DMA_BURST_INCR - changed as part of a
Host mode ISOC issue fix - vahrama */
ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR16;
core_if->dma_enable = (core_if->core_params->dma_enable != 0);
core_if->dma_desc_enable =
(core_if->core_params->dma_desc_enable != 0);
break;
}
if (core_if->dma_enable) {
if (core_if->dma_desc_enable) {
DWC_PRINTF("Using Descriptor DMA mode\n");
} else {
DWC_PRINTF("Using Buffer DMA mode\n");
}
} else {
DWC_PRINTF("Using Slave mode\n");
core_if->dma_desc_enable = 0;
}
if (core_if->core_params->ahb_single) {
ahbcfg.b.ahbsingle = 1;
}
ahbcfg.b.dmaenable = core_if->dma_enable;
DWC_WRITE_REG32(&global_regs->gahbcfg, ahbcfg.d32);
core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en;
core_if->pti_enh_enable = core_if->core_params->pti_enable != 0;
core_if->multiproc_int_enable = core_if->core_params->mpi_enable;
DWC_PRINTF("Periodic Transfer Interrupt Enhancement - %s\n",
((core_if->pti_enh_enable) ? "enabled" : "disabled"));
DWC_PRINTF("Multiprocessor Interrupt Enhancement - %s\n",
((core_if->multiproc_int_enable) ? "enabled" : "disabled"));
/*
* Program the GUSBCFG register.
*/
usbcfg.d32 = DWC_READ_REG32(&global_regs->gusbcfg);
switch (core_if->hwcfg2.b.op_mode) {
case DWC_MODE_HNP_SRP_CAPABLE:
usbcfg.b.hnpcap = (core_if->core_params->otg_cap ==
DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE);
usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
break;
case DWC_MODE_SRP_ONLY_CAPABLE:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
break;
case DWC_MODE_NO_HNP_SRP_CAPABLE:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = 0;
break;
case DWC_MODE_SRP_CAPABLE_DEVICE:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
break;
case DWC_MODE_NO_SRP_CAPABLE_DEVICE:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = 0;
break;
case DWC_MODE_SRP_CAPABLE_HOST:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = (core_if->core_params->otg_cap !=
DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE);
break;
case DWC_MODE_NO_SRP_CAPABLE_HOST:
usbcfg.b.hnpcap = 0;
usbcfg.b.srpcap = 0;
break;
}
DWC_WRITE_REG32(&global_regs->gusbcfg, usbcfg.d32);
#ifdef CONFIG_USB_DWC_OTG_LPM
if (core_if->core_params->lpm_enable) {
glpmcfg_data_t lpmcfg = {.d32 = 0 };
/* To enable LPM support set lpm_cap_en bit */
lpmcfg.b.lpm_cap_en = 1;
/* Make AppL1Res ACK */
lpmcfg.b.appl_resp = 1;
/* Retry 3 times */
lpmcfg.b.retry_count = 3;
DWC_MODIFY_REG32(&core_if->core_global_regs->glpmcfg,
0, lpmcfg.d32);
}
#endif
if (core_if->core_params->ic_usb_cap) {
gusbcfg_data_t gusbcfg = {.d32 = 0 };
gusbcfg.b.ic_usb_cap = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gusbcfg,
0, gusbcfg.d32);
}
{
gotgctl_data_t gotgctl = {.d32 = 0 };
gotgctl.b.otgver = core_if->core_params->otg_ver;
DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl, 0,
gotgctl.d32);
/* Set OTG version supported */
core_if->otg_ver = core_if->core_params->otg_ver;
DWC_PRINTF("OTG VER PARAM: %d, OTG VER FLAG: %d\n",
core_if->core_params->otg_ver, core_if->otg_ver);
}
/* Enable common interrupts */
dwc_otg_enable_common_interrupts(core_if);
/* Do device or host initialization based on mode during PCD
* and HCD initialization */
if (dwc_otg_is_host_mode(core_if)) {
DWC_DEBUGPL(DBG_ANY, "Host Mode\n");
core_if->op_state = A_HOST;
} else {
DWC_DEBUGPL(DBG_ANY, "Device Mode\n");
core_if->op_state = B_PERIPHERAL;
#ifdef DWC_DEVICE_ONLY
dwc_otg_core_dev_init(core_if);
#endif
}
}
/**
* This function enables the Device mode interrupts.
*
* @param core_if Programming view of DWC_otg controller
*/
void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t * core_if)
{
gintmsk_data_t intr_mask = {.d32 = 0 };
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
/* Disable all interrupts. */
DWC_WRITE_REG32(&global_regs->gintmsk, 0);
/* Clear any pending interrupts */
DWC_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
/* Enable the common interrupts */
dwc_otg_enable_common_interrupts(core_if);
/* Enable interrupts */
intr_mask.b.usbreset = 1;
intr_mask.b.enumdone = 1;
/* Disable Disconnect interrupt in Device mode */
intr_mask.b.disconnect = 0;
if (!core_if->multiproc_int_enable) {
intr_mask.b.inepintr = 1;
intr_mask.b.outepintr = 1;
}
intr_mask.b.erlysuspend = 1;
if (core_if->en_multiple_tx_fifo == 0) {
intr_mask.b.epmismatch = 1;
}
//intr_mask.b.incomplisoout = 1;
intr_mask.b.incomplisoin = 1;
/* Enable the ignore frame number for ISOC xfers - MAS */
/* Disable to support high bandwidth ISOC transfers - manukz */
#if 0
#ifdef DWC_UTE_PER_IO
if (core_if->dma_enable) {
if (core_if->dma_desc_enable) {
dctl_data_t dctl1 = {.d32 = 0 };
dctl1.b.ifrmnum = 1;
DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
dctl, 0, dctl1.d32);
DWC_DEBUG("----Enabled Ignore frame number (0x%08x)",
DWC_READ_REG32(&core_if->dev_if->
dev_global_regs->dctl));
}
}
#endif
#endif
#ifdef DWC_EN_ISOC
if (core_if->dma_enable) {
if (core_if->dma_desc_enable == 0) {
if (core_if->pti_enh_enable) {
dctl_data_t dctl = {.d32 = 0 };
dctl.b.ifrmnum = 1;
DWC_MODIFY_REG32(&core_if->
dev_if->dev_global_regs->dctl,
0, dctl.d32);
} else {
intr_mask.b.incomplisoin = 1;
intr_mask.b.incomplisoout = 1;
}
}
} else {
intr_mask.b.incomplisoin = 1;
intr_mask.b.incomplisoout = 1;
}
#endif /* DWC_EN_ISOC */
/** @todo NGS: Should this be a module parameter? */
#ifdef USE_PERIODIC_EP
intr_mask.b.isooutdrop = 1;
intr_mask.b.eopframe = 1;
intr_mask.b.incomplisoin = 1;
intr_mask.b.incomplisoout = 1;
#endif
DWC_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__,
DWC_READ_REG32(&global_regs->gintmsk));
}
/**
* This function initializes the DWC_otg controller registers for
* device mode.
*
* @param core_if Programming view of DWC_otg controller
*
*/
void dwc_otg_core_dev_init(dwc_otg_core_if_t * core_if)
{
int i;
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
dwc_otg_dev_if_t *dev_if = core_if->dev_if;
dwc_otg_core_params_t *params = core_if->core_params;
dcfg_data_t dcfg = {.d32 = 0 };
depctl_data_t diepctl = {.d32 = 0 };
grstctl_t resetctl = {.d32 = 0 };
uint32_t rx_fifo_size;
fifosize_data_t nptxfifosize;
fifosize_data_t txfifosize;
dthrctl_data_t dthrctl;
fifosize_data_t ptxfifosize;
uint16_t rxfsiz, nptxfsiz;
gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
hwcfg3_data_t hwcfg3 = {.d32 = 0 };
/* Restart the Phy Clock */
DWC_WRITE_REG32(core_if->pcgcctl, 0);
/* Device configuration register */
init_devspd(core_if);
dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0;
dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80;
/* Enable Device OUT NAK in case of DDMA mode*/
if (core_if->core_params->dev_out_nak) {
dcfg.b.endevoutnak = 1;
}
if (core_if->core_params->cont_on_bna) {
dctl_data_t dctl = {.d32 = 0 };
dctl.b.encontonbna = 1;
DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
}
DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
/* Configure data FIFO sizes */
if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
core_if->total_fifo_size);
DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
params->dev_rx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
params->dev_nperio_tx_fifo_size);
/* Rx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->grxfsiz));
#ifdef DWC_UTE_CFI
core_if->pwron_rxfsiz = DWC_READ_REG32(&global_regs->grxfsiz);
core_if->init_rxfsiz = params->dev_rx_fifo_size;
#endif
rx_fifo_size = params->dev_rx_fifo_size;
DWC_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->grxfsiz));
/** Set Periodic Tx FIFO Mask all bits 0 */
core_if->p_tx_msk = 0;
/** Set Tx FIFO Mask all bits 0 */
core_if->tx_msk = 0;
if (core_if->en_multiple_tx_fifo == 0) {
/* Non-periodic Tx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
nptxfifosize.b.startaddr = 0x400;
DWC_WRITE_REG32(&global_regs->gnptxfsiz,
nptxfifosize.d32);
DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
/**@todo NGS: Fix Periodic FIFO Sizing! */
/*
* Periodic Tx FIFOs These FIFOs are numbered from 1 to 15.
* Indexes of the FIFO size module parameters in the
* dev_perio_tx_fifo_size array and the FIFO size registers in
* the dptxfsiz array run from 0 to 14.
*/
/** @todo Finish debug of this */
ptxfifosize.b.startaddr =
nptxfifosize.b.startaddr + nptxfifosize.b.depth;
for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) {
ptxfifosize.b.depth =
params->dev_perio_tx_fifo_size[i];
DWC_DEBUGPL(DBG_CIL,
"initial dtxfsiz[%d]=%08x\n", i,
DWC_READ_REG32(&global_regs->dtxfsiz
[i]));
DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
ptxfifosize.d32);
DWC_DEBUGPL(DBG_CIL, "new dtxfsiz[%d]=%08x\n",
i,
DWC_READ_REG32(&global_regs->dtxfsiz
[i]));
ptxfifosize.b.startaddr += ptxfifosize.b.depth;
}
} else {
/*
* Tx FIFOs These FIFOs are numbered from 1 to 15.
* Indexes of the FIFO size module parameters in the
* dev_tx_fifo_size array and the FIFO size registers in
* the dtxfsiz array run from 0 to 14.
*/
/* Non-periodic Tx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
#ifdef DWC_UTE_CFI
core_if->pwron_gnptxfsiz =
(DWC_READ_REG32(&global_regs->gnptxfsiz) >> 16);
core_if->init_gnptxfsiz =
params->dev_nperio_tx_fifo_size;
#endif
nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
nptxfifosize.b.startaddr = 0x400;
DWC_WRITE_REG32(&global_regs->gnptxfsiz,
nptxfifosize.d32);
DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
txfifosize.b.startaddr =
nptxfifosize.b.startaddr + nptxfifosize.b.depth;
for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
txfifosize.b.depth =
params->dev_tx_fifo_size[i];
DWC_DEBUGPL(DBG_CIL,
"initial dtxfsiz[%d]=%08x\n",
i,
DWC_READ_REG32(&global_regs->dtxfsiz
[i]));
#ifdef DWC_UTE_CFI
core_if->pwron_txfsiz[i] =
(DWC_READ_REG32
(&global_regs->dtxfsiz[i]) >> 16);
core_if->init_txfsiz[i] =
params->dev_tx_fifo_size[i];
#endif
DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
txfifosize.d32);
DWC_DEBUGPL(DBG_CIL,
"new dtxfsiz[%d]=%08x\n",
i,
DWC_READ_REG32(&global_regs->dtxfsiz
[i]));
txfifosize.b.startaddr += txfifosize.b.depth;
}
/* Calculating DFIFOCFG for Device mode to include RxFIFO and NPTXFIFO */
gdfifocfg.d32 = DWC_READ_REG32(&global_regs->gdfifocfg);
hwcfg3.d32 = DWC_READ_REG32(&global_regs->ghwcfg3);
gdfifocfg.b.gdfifocfg = (DWC_READ_REG32(&global_regs->ghwcfg3) >> 16);
DWC_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
rxfsiz = (DWC_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
nptxfsiz = (DWC_READ_REG32(&global_regs->gnptxfsiz) >> 16);
gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz;
DWC_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
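/* The EP info base is placed right after the Rx and non-periodic Tx
* FIFOs, e.g. (assumed example values) rxfsiz = 0x208 and
* nptxfsiz = 0x080 give epinfobase = 0x288. */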
}
}
/* Flush the FIFOs */
dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
dwc_otg_flush_rx_fifo(core_if);
/* Flush the Learning Queue. */
resetctl.b.intknqflsh = 1;
DWC_WRITE_REG32(&core_if->core_global_regs->grstctl, resetctl.d32);
if (!core_if->core_params->en_multiple_tx_fifo && core_if->dma_enable) {
core_if->start_predict = 0;
for (i = 0; i<= core_if->dev_if->num_in_eps; ++i) {
core_if->nextep_seq[i] = 0xff; // 0xff - EP not active
}
core_if->nextep_seq[0] = 0;
core_if->first_in_nextep_seq = 0;
diepctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[0]->diepctl);
diepctl.b.nextep = 0;
DWC_WRITE_REG32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
/* Update IN Endpoint Mismatch Count by active IN NP EP count + 1 */
dcfg.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dcfg);
dcfg.b.epmscnt = 2;
DWC_WRITE_REG32(&dev_if->dev_global_regs->dcfg, dcfg.d32);
DWC_DEBUGPL(DBG_CILV,"%s first_in_nextep_seq= %2d; nextep_seq[]:\n",
__func__, core_if->first_in_nextep_seq);
for (i=0; i <= core_if->dev_if->num_in_eps; i++) {
DWC_DEBUGPL(DBG_CILV, "%2d ", core_if->nextep_seq[i]);
}
DWC_DEBUGPL(DBG_CILV,"\n");
}
/* Clear all pending Device Interrupts */
/** @todo - does the condition need to be checked, or should all
* pending interrupts be cleared in any case?
*/
if (core_if->multiproc_int_enable) {
for (i = 0; i < core_if->dev_if->num_in_eps; ++i) {
DWC_WRITE_REG32(&dev_if->
dev_global_regs->diepeachintmsk[i], 0);
}
for (i = 0; i < core_if->dev_if->num_out_eps; ++i) {
DWC_WRITE_REG32(&dev_if->
dev_global_regs->doepeachintmsk[i], 0);
}
DWC_WRITE_REG32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF);
DWC_WRITE_REG32(&dev_if->dev_global_regs->deachintmsk, 0);
} else {
DWC_WRITE_REG32(&dev_if->dev_global_regs->diepmsk, 0);
DWC_WRITE_REG32(&dev_if->dev_global_regs->doepmsk, 0);
DWC_WRITE_REG32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF);
DWC_WRITE_REG32(&dev_if->dev_global_regs->daintmsk, 0);
}
for (i = 0; i <= dev_if->num_in_eps; i++) {
depctl_data_t depctl;
depctl.d32 = DWC_READ_REG32(&dev_if->in_ep_regs[i]->diepctl);
if (depctl.b.epena) {
depctl.d32 = 0;
depctl.b.epdis = 1;
depctl.b.snak = 1;
} else {
depctl.d32 = 0;
}
DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32);
DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->dieptsiz, 0);
DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepdma, 0);
DWC_WRITE_REG32(&dev_if->in_ep_regs[i]->diepint, 0xFF);
}
for (i = 0; i <= dev_if->num_out_eps; i++) {
depctl_data_t depctl;
depctl.d32 = DWC_READ_REG32(&dev_if->out_ep_regs[i]->doepctl);
if (depctl.b.epena) {
depctl.d32 = 0;
depctl.b.epdis = 1;
depctl.b.snak = 1;
} else {
depctl.d32 = 0;
}
DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32);
DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doeptsiz, 0);
DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepdma, 0);
DWC_WRITE_REG32(&dev_if->out_ep_regs[i]->doepint, 0xFF);
}
if (core_if->en_multiple_tx_fifo && core_if->dma_enable) {
dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1;
dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1;
dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1;
dev_if->rx_thr_length = params->rx_thr_length;
dev_if->tx_thr_length = params->tx_thr_length;
dev_if->setup_desc_index = 0;
dthrctl.d32 = 0;
dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en;
dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en;
dthrctl.b.tx_thr_len = dev_if->tx_thr_length;
dthrctl.b.rx_thr_en = dev_if->rx_thr_en;
dthrctl.b.rx_thr_len = dev_if->rx_thr_length;
dthrctl.b.ahb_thr_ratio = params->ahb_thr_ratio;
DWC_WRITE_REG32(&dev_if->dev_global_regs->dtknqr3_dthrctl,
dthrctl.d32);
DWC_DEBUGPL(DBG_CIL,
"Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n",
dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en,
dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len,
dthrctl.b.rx_thr_len);
}
dwc_otg_enable_device_interrupts(core_if);
{
diepmsk_data_t msk = {.d32 = 0 };
msk.b.txfifoundrn = 1;
if (core_if->multiproc_int_enable) {
DWC_MODIFY_REG32(&dev_if->
dev_global_regs->diepeachintmsk[0],
msk.d32, msk.d32);
} else {
DWC_MODIFY_REG32(&dev_if->dev_global_regs->diepmsk,
msk.d32, msk.d32);
}
}
if (core_if->multiproc_int_enable) {
/* Set NAK on Babble */
dctl_data_t dctl = {.d32 = 0 };
dctl.b.nakonbble = 1;
DWC_MODIFY_REG32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
}
if (core_if->snpsid >= OTG_CORE_REV_2_94a) {
dctl_data_t dctl = {.d32 = 0 };
dctl.d32 = DWC_READ_REG32(&dev_if->dev_global_regs->dctl);
dctl.b.sftdiscon = 0;
DWC_WRITE_REG32(&dev_if->dev_global_regs->dctl, dctl.d32);
}
}
/**
* This function enables the Host mode interrupts.
*
* @param core_if Programming view of DWC_otg controller
*/
void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t * core_if)
{
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
gintmsk_data_t intr_mask = {.d32 = 0 };
DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__);
/* Disable all interrupts. */
DWC_WRITE_REG32(&global_regs->gintmsk, 0);
/* Clear any pending interrupts. */
DWC_WRITE_REG32(&global_regs->gintsts, 0xFFFFFFFF);
/* Enable the common interrupts */
dwc_otg_enable_common_interrupts(core_if);
/*
* Enable host mode interrupts without disturbing common
* interrupts.
*/
intr_mask.b.disconnect = 1;
intr_mask.b.portintr = 1;
intr_mask.b.hcintr = 1;
DWC_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32);
}
/**
* This function disables the Host Mode interrupts.
*
* @param core_if Programming view of DWC_otg controller
*/
void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t * core_if)
{
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
gintmsk_data_t intr_mask = {.d32 = 0 };
DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__);
/*
* Disable host mode interrupts without disturbing common
* interrupts.
*/
intr_mask.b.sofintr = 1;
intr_mask.b.portintr = 1;
intr_mask.b.hcintr = 1;
intr_mask.b.ptxfempty = 1;
intr_mask.b.nptxfempty = 1;
DWC_MODIFY_REG32(&global_regs->gintmsk, intr_mask.d32, 0);
}
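/*
* Note on the DWC_MODIFY_REG32(reg, clear_mask, set_mask) idiom used in
* the two routines above: dwc_otg_enable_host_interrupts() passes the
* same mask as both arguments so the selected GINTMSK bits end up set,
* while dwc_otg_disable_host_interrupts() passes (mask, 0) so the same
* bits are cleared, in both cases without disturbing the other (common)
* interrupt enables.
*/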
/**
* This function initializes the DWC_otg controller registers for
* host mode.
*
* This function flushes the Tx and Rx FIFOs and it flushes any entries in the
* request queues. Host channels are reset to ensure that they are ready for
* performing transfers.
*
* @param core_if Programming view of DWC_otg controller
*
*/
void dwc_otg_core_host_init(dwc_otg_core_if_t * core_if)
{
dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
dwc_otg_host_if_t *host_if = core_if->host_if;
dwc_otg_core_params_t *params = core_if->core_params;
hprt0_data_t hprt0 = {.d32 = 0 };
fifosize_data_t nptxfifosize;
fifosize_data_t ptxfifosize;
uint16_t rxfsiz, nptxfsiz, hptxfsiz;
gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
int i;
hcchar_data_t hcchar;
hcfg_data_t hcfg;
hfir_data_t hfir;
dwc_otg_hc_regs_t *hc_regs;
int num_channels;
gotgctl_data_t gotgctl = {.d32 = 0 };
DWC_DEBUGPL(DBG_CILV, "%s(%p)\n", __func__, core_if);
/* Restart the Phy Clock */
DWC_WRITE_REG32(core_if->pcgcctl, 0);
/* Initialize Host Configuration Register */
init_fslspclksel(core_if);
if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) {
hcfg.d32 = DWC_READ_REG32(&host_if->host_global_regs->hcfg);
hcfg.b.fslssupp = 1;
DWC_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
}
/* The HFIR Reload Control bit allows the HFIR register to be
* reloaded dynamically at runtime. The bit itself must be
* programmed during initial configuration and its value must not
* be changed afterwards. */
if (core_if->core_params->reload_ctl == 1) {
hfir.d32 = DWC_READ_REG32(&host_if->host_global_regs->hfir);
hfir.b.hfirrldctrl = 1;
DWC_WRITE_REG32(&host_if->host_global_regs->hfir, hfir.d32);
}
if (core_if->core_params->dma_desc_enable) {
uint8_t op_mode = core_if->hwcfg2.b.op_mode;
if (!(core_if->hwcfg4.b.desc_dma
&& (core_if->snpsid >= OTG_CORE_REV_2_90a)
&& ((op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG)
|| (op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG)
|| (op_mode == DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG)
|| (op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)
|| (op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST)))) {
DWC_ERROR("Host can't operate in Descriptor DMA mode.\n"
"Either core version is below 2.90a or "
"GHWCFG2, GHWCFG4 registers' values do not allow Descriptor DMA in host mode.\n"
"To run the driver in Buffer DMA host mode set dma_desc_enable "
"module parameter to 0.\n");
return;
}
hcfg.d32 = DWC_READ_REG32(&host_if->host_global_regs->hcfg);
hcfg.b.descdma = 1;
DWC_WRITE_REG32(&host_if->host_global_regs->hcfg, hcfg.d32);
}
/* Configure data FIFO sizes */
if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n",
core_if->total_fifo_size);
DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n",
params->host_rx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n",
params->host_nperio_tx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "P Tx FIFO Size=%d\n",
params->host_perio_tx_fifo_size);
/* Rx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->grxfsiz));
DWC_WRITE_REG32(&global_regs->grxfsiz,
params->host_rx_fifo_size);
DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->grxfsiz));
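/*
* The Tx FIFO start addresses programmed below are hard-coded: the
* non-periodic Tx FIFO is placed at word offset 0x400 and the periodic
* Tx FIFO at 0xC00. This layout implicitly assumes that the configured
* Rx FIFO fits below 0x400 and the non-periodic Tx FIFO below 0xC00.
*/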
/* Non-periodic Tx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
nptxfifosize.b.depth = params->host_nperio_tx_fifo_size;
nptxfifosize.b.startaddr = 0x400;
DWC_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->gnptxfsiz));
/* Periodic Tx FIFO */
DWC_DEBUGPL(DBG_CIL, "initial hptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->hptxfsiz));
ptxfifosize.b.depth = params->host_perio_tx_fifo_size;
ptxfifosize.b.startaddr = 0xC00;
DWC_WRITE_REG32(&global_regs->hptxfsiz, ptxfifosize.d32);
DWC_DEBUGPL(DBG_CIL, "new hptxfsiz=%08x\n",
DWC_READ_REG32(&global_regs->hptxfsiz));
if (core_if->en_multiple_tx_fifo) {
/* Global DFIFOCFG calculation for Host mode - include RxFIFO, NPTXFIFO and HPTXFIFO */
gdfifocfg.d32 = DWC_READ_REG32(&global_regs->gdfifocfg);
rxfsiz = (DWC_READ_REG32(&global_regs->grxfsiz) & 0x0000ffff);
nptxfsiz = (DWC_READ_REG32(&global_regs->gnptxfsiz) >> 16);
hptxfsiz = (DWC_READ_REG32(&global_regs->hptxfsiz) >> 16);
gdfifocfg.b.epinfobase = rxfsiz + nptxfsiz + hptxfsiz;
DWC_WRITE_REG32(&global_regs->gdfifocfg, gdfifocfg.d32);
}
}
/* TODO - check this */
/* Clear Host Set HNP Enable in the OTG Control Register */
gotgctl.b.hstsethnpen = 1;
DWC_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
/* Make sure the FIFOs are flushed. */
dwc_otg_flush_tx_fifo(core_if, 0x10 /* all TX FIFOs */ );
dwc_otg_flush_rx_fifo(core_if);
/* Clear Host Set HNP Enable in the OTG Control Register */
gotgctl.b.hstsethnpen = 1;
DWC_MODIFY_REG32(&global_regs->gotgctl, gotgctl.d32, 0);
if (!core_if->core_params->dma_desc_enable) {
/* Flush out any leftover queued requests. */
num_channels = core_if->core_params->host_channels;
for (i = 0; i < num_channels; i++) {
hc_regs = core_if->host_if->hc_regs[i];
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
hcchar.b.chen = 0;
hcchar.b.chdis = 1;
hcchar.b.epdir = 0;
DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
}
/* Halt all channels to put them into a known state. */
for (i = 0; i < num_channels; i++) {
int count = 0;
hc_regs = core_if->host_if->hc_regs[i];
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
hcchar.b.chen = 1;
hcchar.b.chdis = 1;
hcchar.b.epdir = 0;
DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i);
do {
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
if (++count > 1000) {
DWC_ERROR
("%s: Unable to clear halt on channel %d\n",
__func__, i);
break;
}
dwc_udelay(1);
} while (hcchar.b.chen);
}
}
/* Turn on the vbus power. */
DWC_PRINTF("Init: Port Power? op_state=%d\n", core_if->op_state);
if (core_if->op_state == A_HOST) {
hprt0.d32 = dwc_otg_read_hprt0(core_if);
DWC_PRINTF("Init: Power Port (%d)\n", hprt0.b.prtpwr);
if (hprt0.b.prtpwr == 0) {
hprt0.b.prtpwr = 1;
DWC_WRITE_REG32(host_if->hprt0, hprt0.d32);
}
}
dwc_otg_enable_host_interrupts(core_if);
}
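/*
* Illustrative note (not part of the driver flow itself): the host
* controller driver is expected to call dwc_otg_core_host_init() once
* the core has switched to host mode, and only afterwards set up and
* start individual channels with dwc_otg_hc_init() and
* dwc_otg_hc_start_transfer() below.
*/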
/**
* Prepares a host channel for transferring packets to/from a specific
* endpoint. The HCCHARn register is set up with the characteristics specified
* in _hc. Host channel interrupts that may need to be serviced while this
* transfer is in progress are enabled.
*
* @param core_if Programming view of DWC_otg controller
* @param hc Information needed to initialize the host channel
*/
void dwc_otg_hc_init(dwc_otg_core_if_t * core_if, dwc_hc_t * hc)
{
uint32_t intr_enable;
hcintmsk_data_t hc_intr_mask;
gintmsk_data_t gintmsk = {.d32 = 0 };
hcchar_data_t hcchar;
hcsplt_data_t hcsplt;
uint8_t hc_num = hc->hc_num;
dwc_otg_host_if_t *host_if = core_if->host_if;
dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num];
/* Clear old interrupt conditions for this host channel. */
hc_intr_mask.d32 = 0xFFFFFFFF;
hc_intr_mask.b.reserved14_31 = 0;
DWC_WRITE_REG32(&hc_regs->hcint, hc_intr_mask.d32);
/* Enable channel interrupts required for this transfer. */
hc_intr_mask.d32 = 0;
hc_intr_mask.b.chhltd = 1;
if (core_if->dma_enable) {
/* In Descriptor DMA mode the core halts the channel on AHB error, so the AHB error interrupt is not required */
if (!core_if->dma_desc_enable)
hc_intr_mask.b.ahberr = 1;
else {
if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
hc_intr_mask.b.xfercompl = 1;
}
if (hc->error_state && !hc->do_split &&
hc->ep_type != DWC_OTG_EP_TYPE_ISOC) {
hc_intr_mask.b.ack = 1;
if (hc->ep_is_in) {
hc_intr_mask.b.datatglerr = 1;
if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
hc_intr_mask.b.nak = 1;
}
}
}
} else {
switch (hc->ep_type) {
case DWC_OTG_EP_TYPE_CONTROL:
case DWC_OTG_EP_TYPE_BULK:
hc_intr_mask.b.xfercompl = 1;
hc_intr_mask.b.stall = 1;
hc_intr_mask.b.xacterr = 1;
hc_intr_mask.b.datatglerr = 1;
if (hc->ep_is_in) {
hc_intr_mask.b.bblerr = 1;
} else {
hc_intr_mask.b.nak = 1;
hc_intr_mask.b.nyet = 1;
if (hc->do_ping) {
hc_intr_mask.b.ack = 1;
}
}
if (hc->do_split) {
hc_intr_mask.b.nak = 1;
if (hc->complete_split) {
hc_intr_mask.b.nyet = 1;
} else {
hc_intr_mask.b.ack = 1;
}
}
if (hc->error_state) {
hc_intr_mask.b.ack = 1;
}
break;
case DWC_OTG_EP_TYPE_INTR:
hc_intr_mask.b.xfercompl = 1;
hc_intr_mask.b.nak = 1;
hc_intr_mask.b.stall = 1;
hc_intr_mask.b.xacterr = 1;
hc_intr_mask.b.datatglerr = 1;
hc_intr_mask.b.frmovrun = 1;
if (hc->ep_is_in) {
hc_intr_mask.b.bblerr = 1;
}
if (hc->error_state) {
hc_intr_mask.b.ack = 1;
}
if (hc->do_split) {
if (hc->complete_split) {
hc_intr_mask.b.nyet = 1;
} else {
hc_intr_mask.b.ack = 1;
}
}
break;
case DWC_OTG_EP_TYPE_ISOC:
hc_intr_mask.b.xfercompl = 1;
hc_intr_mask.b.frmovrun = 1;
hc_intr_mask.b.ack = 1;
if (hc->ep_is_in) {
hc_intr_mask.b.xacterr = 1;
hc_intr_mask.b.bblerr = 1;
}
break;
}
}
DWC_WRITE_REG32(&hc_regs->hcintmsk, hc_intr_mask.d32);
/* Enable the top level host channel interrupt. */
intr_enable = (1 << hc_num);
DWC_MODIFY_REG32(&host_if->host_global_regs->haintmsk, 0, intr_enable);
/* Make sure host channel interrupts are enabled. */
gintmsk.b.hcintr = 1;
DWC_MODIFY_REG32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
/*
* Program the HCCHARn register with the endpoint characteristics for
* the current transfer.
*/
hcchar.d32 = 0;
hcchar.b.devaddr = hc->dev_addr;
hcchar.b.epnum = hc->ep_num;
hcchar.b.epdir = hc->ep_is_in;
hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW);
hcchar.b.eptype = hc->ep_type;
hcchar.b.mps = hc->max_packet;
DWC_WRITE_REG32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32);
DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr);
DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum);
DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir);
DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev);
DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype);
DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt);
/*
* Program the HCSPLIT register for SPLITs
*/
hcsplt.d32 = 0;
if (hc->do_split) {
DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n",
hc->hc_num,
hc->complete_split ? "CSPLIT" : "SSPLIT");
hcsplt.b.compsplt = hc->complete_split;
hcsplt.b.xactpos = hc->xact_pos;
hcsplt.b.hubaddr = hc->hub_addr;
hcsplt.b.prtaddr = hc->port_addr;
DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split);
DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos);
DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr);
DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr);
DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in);
DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps);
DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len);
}
DWC_WRITE_REG32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32);
}
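/*
* Illustrative sketch (not part of the driver): minimal dwc_hc_t setup
* the HCD layer might perform for a high-speed bulk OUT transfer before
* using the routines above and below. The buffer "buf" and length "len"
* are placeholders; the real HCD code also manages QH/QTD state,
* DMA/alignment buffers and locking, all omitted here.
*
*	dwc_hc_t hc = { 0 };
*	hc.hc_num = 0;
*	hc.dev_addr = 1;
*	hc.ep_num = 2;
*	hc.ep_is_in = 0;
*	hc.speed = DWC_OTG_EP_SPEED_HIGH;
*	hc.ep_type = DWC_OTG_EP_TYPE_BULK;
*	hc.max_packet = 512;
*	hc.multi_count = 1;
*	hc.xfer_buff = buf;
*	hc.xfer_len = len;
*	dwc_otg_hc_init(core_if, &hc);
*	dwc_otg_hc_start_transfer(core_if, &hc);
*/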
/**
* Attempts to halt a host channel. This function should only be called in
* Slave mode or to abort a transfer in either Slave mode or DMA mode. Under
* normal circumstances in DMA mode, the controller halts the channel when the
* transfer is complete or a condition occurs that requires application
* intervention.
*
* In Slave mode, checks for a free request queue entry, then sets the Channel
* Enable and Channel Disable bits of the Host Channel Characteristics
* register of the specified channel to initiate the halt. If there is no free
* request queue entry, sets only the Channel Disable bit of the HCCHARn
* register to flush requests for this channel. In the latter case, sets a
* flag to indicate that the host channel needs to be halted when a request
* queue slot is open.
*
* In DMA mode, always sets the Channel Enable and Channel Disable bits of the
* HCCHARn register. The controller ensures there is space in the request
* queue before submitting the halt request.
*
* Some time may elapse before the core flushes any posted requests for this
* host channel and halts. The Channel Halted interrupt handler completes the
* deactivation of the host channel.
*
* @param core_if Controller register interface.
* @param hc Host channel to halt.
* @param halt_status Reason for halting the channel.
*/
void dwc_otg_hc_halt(dwc_otg_core_if_t * core_if,
dwc_hc_t * hc, dwc_otg_halt_status_e halt_status)
{
gnptxsts_data_t nptxsts;
hptxsts_data_t hptxsts;
hcchar_data_t hcchar;
dwc_otg_hc_regs_t *hc_regs;
dwc_otg_core_global_regs_t *global_regs;
dwc_otg_host_global_regs_t *host_global_regs;
hc_regs = core_if->host_if->hc_regs[hc->hc_num];
global_regs = core_if->core_global_regs;
host_global_regs = core_if->host_if->host_global_regs;
DWC_ASSERT(!(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS),
"halt_status = %d\n", halt_status);
if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE ||
halt_status == DWC_OTG_HC_XFER_AHB_ERR) {
/*
* Disable all channel interrupts except Ch Halted. The QTD
* and QH state associated with this transfer has been cleared
* (in the case of URB_DEQUEUE), so the channel needs to be
* shut down carefully to prevent crashes.
*/
hcintmsk_data_t hcintmsk;
hcintmsk.d32 = 0;
hcintmsk.b.chhltd = 1;
DWC_WRITE_REG32(&hc_regs->hcintmsk, hcintmsk.d32);
/*
* Make sure no other interrupts besides halt are currently
* pending. Handling another interrupt could cause a crash due
* to the QTD and QH state.
*/
DWC_WRITE_REG32(&hc_regs->hcint, ~hcintmsk.d32);
/*
* Make sure the halt status is set to URB_DEQUEUE or AHB_ERR
* even if the channel was already halted for some other
* reason.
*/
hc->halt_status = halt_status;
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
if (hcchar.b.chen == 0) {
/*
* The channel is either already halted or it hasn't
* started yet. In DMA mode, the transfer may halt if
* it finishes normally or a condition occurs that
* requires driver intervention. Don't want to halt
* the channel again. In either Slave or DMA mode,
* it's possible that the transfer has been assigned
* to a channel, but not started yet when a URB is
* dequeued. Don't want to halt a channel that hasn't
* started yet.
*/
return;
}
}
if (hc->halt_pending) {
/*
* A halt has already been issued for this channel. This might
* happen when a transfer is aborted by a higher level in
* the stack.
*/
#ifdef DEBUG
DWC_PRINTF
("*** %s: Channel %d, _hc->halt_pending already set ***\n",
__func__, hc->hc_num);
#endif
return;
}
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
/* In Descriptor DMA mode the Channel Enable bit does not need to be
* set when disabling the channel. */
//TODO check this everywhere the channel is disabled
if (!core_if->core_params->dma_desc_enable)
hcchar.b.chen = 1;
hcchar.b.chdis = 1;
if (!core_if->dma_enable) {
/* Check for space in the request queue to issue the halt. */
if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL ||
hc->ep_type == DWC_OTG_EP_TYPE_BULK) {
nptxsts.d32 = DWC_READ_REG32(&global_regs->gnptxsts);
if (nptxsts.b.nptxqspcavail == 0) {
hcchar.b.chen = 0;
}
} else {
hptxsts.d32 =
DWC_READ_REG32(&host_global_regs->hptxsts);
if ((hptxsts.b.ptxqspcavail == 0)
|| (core_if->queuing_high_bandwidth)) {
hcchar.b.chen = 0;
}
}
}
DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
hc->halt_status = halt_status;
if (hcchar.b.chen) {
hc->halt_pending = 1;
hc->halt_on_queue = 0;
} else {
hc->halt_on_queue = 1;
}
DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32);
DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending);
DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue);
DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status);
return;
}
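/*
* Illustrative sketch (not part of the driver): when a transfer is
* aborted from above, the channel is typically halted here and the
* final teardown is left to the Channel Halted interrupt handler,
* roughly:
*
*	dwc_otg_hc_halt(core_if, hc, DWC_OTG_HC_XFER_URB_DEQUEUE);
*	... Channel Halted interrupt fires ...
*	dwc_otg_hc_cleanup(core_if, hc);
*/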
/**
* Clears the transfer state for a host channel. This function is normally
* called after a transfer is done and the host channel is being released.
*
* @param core_if Programming view of DWC_otg controller.
* @param hc Identifies the host channel to clean up.
*/
void dwc_otg_hc_cleanup(dwc_otg_core_if_t * core_if, dwc_hc_t * hc)
{
dwc_otg_hc_regs_t *hc_regs;
hc->xfer_started = 0;
/*
* Clear channel interrupt enables and any unhandled channel interrupt
* conditions.
*/
hc_regs = core_if->host_if->hc_regs[hc->hc_num];
DWC_WRITE_REG32(&hc_regs->hcintmsk, 0);
DWC_WRITE_REG32(&hc_regs->hcint, 0xFFFFFFFF);
#ifdef DEBUG
DWC_TIMER_CANCEL(core_if->hc_xfer_timer[hc->hc_num]);
#endif
}
/**
* Sets the channel property that indicates in which frame a periodic transfer
* should occur. This is always set to the _next_ frame. This function has no
* effect on non-periodic transfers.
*
* @param core_if Programming view of DWC_otg controller.
* @param hc Identifies the host channel to set up and its properties.
* @param hcchar Current value of the HCCHAR register for the specified host
* channel.
*/
static inline void hc_set_even_odd_frame(dwc_otg_core_if_t * core_if,
dwc_hc_t * hc, hcchar_data_t * hcchar)
{
if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
hfnum_data_t hfnum;
hfnum.d32 =
DWC_READ_REG32(&core_if->host_if->host_global_regs->hfnum);
/* 1 if _next_ frame is odd, 0 if it's even */
hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1;
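/*
* For example, if HFNUM.FrNum currently reads 0x22 (even), the next
* (micro)frame 0x23 is odd and oddfrm is set to 1; if it reads 0x23,
* oddfrm is cleared.
*/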
#ifdef DEBUG
if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split
&& !hc->complete_split) {
switch (hfnum.b.frnum & 0x7) {
case 7:
core_if->hfnum_7_samples++;
core_if->hfnum_7_frrem_accum += hfnum.b.frrem;
break;
case 0:
core_if->hfnum_0_samples++;
core_if->hfnum_0_frrem_accum += hfnum.b.frrem;
break;
default:
core_if->hfnum_other_samples++;
core_if->hfnum_other_frrem_accum +=
hfnum.b.frrem;
break;
}
}
#endif
}
}
#ifdef DEBUG
void hc_xfer_timeout(void *ptr)
{
hc_xfer_info_t *xfer_info = NULL;
int hc_num = 0;
if (ptr)
xfer_info = (hc_xfer_info_t *) ptr;
if (!xfer_info || !xfer_info->hc) {
DWC_ERROR("%s: invalid xfer_info\n", __func__);
return;
}
hc_num = xfer_info->hc->hc_num;
DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num);
DWC_WARN(" start_hcchar_val 0x%08x\n",
xfer_info->core_if->start_hcchar_val[hc_num]);
}
#endif
void ep_xfer_timeout(void *ptr)
{
ep_xfer_info_t *xfer_info = NULL;
int ep_num = 0;
dctl_data_t dctl = {.d32 = 0 };
gintsts_data_t gintsts = {.d32 = 0 };
gintmsk_data_t gintmsk = {.d32 = 0 };
if (ptr)
xfer_info = (ep_xfer_info_t *) ptr;
if (!xfer_info || !xfer_info->ep) {
DWC_ERROR("%s: invalid xfer_info\n", __func__);
return;
}
ep_num = xfer_info->ep->num;
DWC_WARN("%s: timeout on endpoint %d\n", __func__, ep_num);
/* Set the state to 2 to indicate that the transfer timed out */
xfer_info->state = 2;
dctl.d32 = DWC_READ_REG32(&xfer_info->core_if->
dev_if->dev_global_regs->dctl);
gintsts.d32 = DWC_READ_REG32(&xfer_info->core_if->
core_global_regs->gintsts);
gintmsk.d32 = DWC_READ_REG32(&xfer_info->core_if->
core_global_regs->gintmsk);
if (!gintmsk.b.goutnakeff) {
/* Unmask it */
gintmsk.b.goutnakeff = 1;
DWC_WRITE_REG32(&xfer_info->core_if->
core_global_regs->gintmsk, gintmsk.d32);
}
if (!gintsts.b.goutnakeff) {
dctl.b.sgoutnak = 1;
}
DWC_WRITE_REG32(&xfer_info->core_if->dev_if->
dev_global_regs->dctl, dctl.d32);
}
void set_pid_isoc(dwc_hc_t * hc)
{
/* Set up the initial PID for the transfer. */
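/*
* For high-bandwidth high-speed isochronous endpoints the USB 2.0 data
* PID sequencing encodes the number of transactions per (micro)frame:
* an IN endpoint starts the sequence with DATA0, DATA1 or DATA2 for
* one, two or three expected transactions, while an OUT endpoint uses
* MDATA for every transaction except the last. The starting PID chosen
* below follows from hc->multi_count accordingly.
*/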
if (hc->speed == DWC_OTG_EP_SPEED_HIGH) {
if (hc->ep_is_in) {
if (hc->multi_count == 1) {
hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
} else if (hc->multi_count == 2) {
hc->data_pid_start = DWC_OTG_HC_PID_DATA1;
} else {
hc->data_pid_start = DWC_OTG_HC_PID_DATA2;
}
} else {
if (hc->multi_count == 1) {
hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
} else {
hc->data_pid_start = DWC_OTG_HC_PID_MDATA;
}
}
} else {
hc->data_pid_start = DWC_OTG_HC_PID_DATA0;
}
}
/**
* This function does the setup for a data transfer for a host channel and
* starts the transfer. May be called in either Slave mode or DMA mode. In
* Slave mode, the caller must ensure that there is sufficient space in the
* request queue and Tx Data FIFO.
*
* For an OUT transfer in Slave mode, it loads a data packet into the
* appropriate FIFO. If necessary, additional data packets will be loaded in
* the Host ISR.
*
* For an IN transfer in Slave mode, a data packet is requested. The data
* packets are unloaded from the Rx FIFO in the Host ISR. If necessary,
* additional data packets are requested in the Host ISR.
*
* For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ
* register along with a packet count of 1 and the channel is enabled. This
* causes a single PING transaction to occur. Other fields in HCTSIZ are
* simply set to 0 since no data transfer occurs in this case.
*
* For a PING transfer in DMA mode, the HCTSIZ register is initialized with
* all the information required to perform the subsequent data transfer. In
* addition, the Do Ping bit is set in the HCTSIZ register. In this case, the
* controller performs the entire PING protocol, then starts the data
* transfer.
*
* @param core_if Programming view of DWC_otg controller.
* @param hc Information needed to initialize the host channel. The xfer_len
* value may be reduced to accommodate the max widths of the XferSize and
* PktCnt fields in the HCTSIZn register. The multi_count value may be changed
* to reflect the final xfer_len value.
*/
void dwc_otg_hc_start_transfer(dwc_otg_core_if_t * core_if, dwc_hc_t * hc)
{
hcchar_data_t hcchar;
hctsiz_data_t hctsiz;
uint16_t num_packets;
uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size;
uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count;
dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
hctsiz.d32 = 0;
if (hc->do_ping) {
if (!core_if->dma_enable) {
dwc_otg_hc_do_ping(core_if, hc);
hc->xfer_started = 1;
return;
} else {
hctsiz.b.dopng = 1;
}
}
if (hc->do_split) {
num_packets = 1;
if (hc->complete_split && !hc->ep_is_in) {
/* For CSPLIT OUT Transfer, set the size to 0 so the
* core doesn't expect any data written to the FIFO */
hc->xfer_len = 0;
} else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) {
hc->xfer_len = hc->max_packet;
} else if (!hc->ep_is_in && (hc->xfer_len > 188)) {
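/* The 188-byte cap corresponds to the maximum amount of full-speed
* data that fits in one microframe on the downstream bus (USB 2.0
* split-transaction budgeting), so a start-split OUT never carries
* more than that. */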
hc->xfer_len = 188;
}
hctsiz.b.xfersize = hc->xfer_len;
} else {
/*
* Ensure that the transfer length and packet count will fit
* in the widths allocated for them in the HCTSIZn register.
*/
if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
/*
* Make sure the transfer size is no larger than one
* (micro)frame's worth of data. (A check was done
* when the periodic transfer was accepted to ensure
* that a (micro)frame's worth of data can be
* programmed into a channel.)
*/
uint32_t max_periodic_len =
hc->multi_count * hc->max_packet;
if (hc->xfer_len > max_periodic_len) {
hc->xfer_len = max_periodic_len;
}
} else if (hc->xfer_len > max_hc_xfer_size) {
/* Make sure that xfer_len is a multiple of max packet size. */
hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1;
}
if (hc->xfer_len > 0) {
num_packets =
(hc->xfer_len + hc->max_packet -
1) / hc->max_packet;
if (num_packets > max_hc_pkt_count) {
num_packets = max_hc_pkt_count;
hc->xfer_len = num_packets * hc->max_packet;
}
} else {
/* Need 1 packet for transfer length of 0. */
num_packets = 1;
}
if (hc->ep_is_in) {
/* Always program an integral # of max packets for IN transfers. */
hc->xfer_len = num_packets * hc->max_packet;
}
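/*
* Worked example: with max_packet = 512 and an initial xfer_len of
* 1500, the calculation above gives num_packets = (1500 + 511) / 512
* = 3; for an IN transfer xfer_len is then rounded up to 3 * 512 =
* 1536 bytes.
*/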
if (hc->ep_type == DWC_OTG_EP_TYPE_INTR ||
hc->ep_type == DWC_OTG_EP_TYPE_ISOC) {
/*
* Make sure that the multi_count field matches the
* actual transfer length.
*/
hc->multi_count = num_packets;
}
if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
set_pid_isoc(hc);
hctsiz.b.xfersize = hc->xfer_len;
}
hc->start_pkt_count = num_packets;
hctsiz.b.pktcnt = num_packets;
hctsiz.b.pid = hc->data_pid_start;
DWC_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize);
DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt);
DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid);
if (core_if->dma_enable) {
dwc_dma_t dma_addr;
if (hc->align_buff) {
dma_addr = hc->align_buff;
} else {
dma_addr = ((unsigned long)hc->xfer_buff & 0xffffffff);
}
DWC_WRITE_REG32(&hc_regs->hcdma, dma_addr);
}
/* Start the split */
if (hc->do_split) {
hcsplt_data_t hcsplt;
hcsplt.d32 = DWC_READ_REG32(&hc_regs->hcsplt);
hcsplt.b.spltena = 1;
DWC_WRITE_REG32(&hc_regs->hcsplt, hcsplt.d32);
}
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
hcchar.b.multicnt = hc->multi_count;
hc_set_even_odd_frame(core_if, hc, &hcchar);
#ifdef DEBUG
core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
if (hcchar.b.chdis) {
DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
__func__, hc->hc_num, hcchar.d32);
}
#endif
/* Set host channel enable after all other setup is complete. */
hcchar.b.chen = 1;
hcchar.b.chdis = 0;
DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
hc->xfer_started = 1;
hc->requests++;
if (!core_if->dma_enable && !hc->ep_is_in && hc->xfer_len > 0) {
/* Load OUT packet into the appropriate Tx FIFO. */
dwc_otg_hc_write_packet(core_if, hc);
}
#ifdef DEBUG
if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) {
core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
core_if->hc_xfer_info[hc->hc_num].hc = hc;
/* Start a timer for this transfer. */
DWC_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
}
#endif
}
/**
* This function does the setup for a data transfer for a host channel
* and starts the transfer in Descriptor DMA mode.
*
* Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set.
* Sets PID and NTD values. For periodic transfers
* initializes SCHED_INFO field with micro-frame bitmap.
*
* Initializes HCDMA register with descriptor list address and CTD value
* then starts the transfer via enabling the channel.
*
* @param core_if Programming view of DWC_otg controller.
* @param hc Information needed to initialize the host channel.
*/
void dwc_otg_hc_start_transfer_ddma(dwc_otg_core_if_t * core_if, dwc_hc_t * hc)
{
dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num];
hcchar_data_t hcchar;
hctsiz_data_t hctsiz;
hcdma_data_t hcdma;
hctsiz.d32 = 0;
if (hc->do_ping)
hctsiz.b_ddma.dopng = 1;
if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC)
set_pid_isoc(hc);
/* Packet Count and Xfer Size are not used in Descriptor DMA mode */
hctsiz.b_ddma.pid = hc->data_pid_start;
hctsiz.b_ddma.ntd = hc->ntd - 1; /* 0 - 1 descriptor, 1 - 2 descriptors, etc. */
hctsiz.b_ddma.schinfo = hc->schinfo; /* Non-zero only for high-speed interrupt endpoints */
DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num);
DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b_ddma.pid);
DWC_DEBUGPL(DBG_HCDV, " NTD: %d\n", hctsiz.b_ddma.ntd);
DWC_WRITE_REG32(&hc_regs->hctsiz, hctsiz.d32);
hcdma.d32 = 0;
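/* Only the upper bits of the descriptor list base are programmed into
* the dma_addr field (the address is shifted right by 11), so the list
* is assumed to be aligned accordingly; the CTD field below selects the
* descriptor to start from. */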
hcdma.b.dma_addr = ((uint32_t) hc->desc_list_addr) >> 11;
/* Always start from first descriptor. */
hcdma.b.ctd = 0;
DWC_WRITE_REG32(&hc_regs->hcdma, hcdma.d32);
hcchar.d32 = DWC_READ_REG32(&hc_regs->hcchar);
hcchar.b.multicnt = hc->multi_count;
#ifdef DEBUG
core_if->start_hcchar_val[hc->hc_num] = hcchar.d32;
if (hcchar.b.chdis) {
DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n",
__func__, hc->hc_num, hcchar.d32);
}
#endif
/* Set host channel enable after all other setup is complete. */
hcchar.b.chen = 1;
hcchar.b.chdis = 0;
DWC_WRITE_REG32(&hc_regs->hcchar, hcchar.d32);
hc->xfer_started = 1;
hc->requests++;
#ifdef DEBUG
if ((hc->ep_type != DWC_OTG_EP_TYPE_INTR)
&& (hc->ep_type != DWC_OTG_EP_TYPE_ISOC)) {
core_if->hc_xfer_info[hc->hc_num].core_if = core_if;
core_if->hc_xfer_info[hc->hc_num].hc = hc;
/* Start a timer for this transfer. */
DWC_TIMER_SCHEDULE(core_if->hc_xfer_timer[hc->hc_num], 10000);
}
#endif
}
/**
* This function continues a data transfer that was started by previous call
* to <code>dwc_otg_hc_start_transfer</code>. The caller must ensure there is
* sufficient space in the request queue and Tx Data FIFO. This function
* should only be called in Slave mode. In DMA mode, the controller acts
* autonomously to complete transfers programmed to a host channel.
*
* For an OUT transfer, a new data packet is loaded into the appropriate FIFO
* if there is any data remaining to be queued. For an IN transfer, another
* data packet is always requested. For the SETUP phase of a control transfer,
* this function does nothing.
*
* @return 1 if a new request is queued, 0 if no more requests are required
* for this transfer.