Broadcom Linux 4.1-1.2
Change-Id: I9d31338ba60e3d43744e03bd8f199b3dd96caa2f
diff --git a/Documentation/devicetree/bindings/cpufreq/brcm-avs-cpufreq.txt b/Documentation/devicetree/bindings/cpufreq/brcm-avs-cpufreq.txt
new file mode 100644
index 0000000..4cb2a4a
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/brcm-avs-cpufreq.txt
@@ -0,0 +1,73 @@
+Broadcom AVS CPUfreq driver
+===========================
+
+This driver provides voltage and frequency scaling on Broadcom SoCs using
+the AVS firmware with DVFS support. The AVS firmware is running on its own
+co-processor. The driver supports both uniprocessor (UP) and symmetric
+multiprocessor (SMP) systems which share clock and voltage across all CPUs.
+
+Actual voltage and frequency scaling is done solely by the AVS firmware.
+This driver does not change frequency or voltage itself. It provides a
+standard CPUfreq interface to the rest of the kernel and to userland. It
+interfaces with the AVS firmware to effect the requested changes and to
+report back the current system status in a way that is expected by existing
+tools.
+
+This driver requires two DT nodes. One node (brcm,avs-cpu-data-mem)
+references the mailbox register used to communicate with the AVS CPU. The
+second node (brcm,avs-cpu-l2-intr) is required to trigger an interrupt on
+the AVS CPU. The interrupt tells the AVS CPU that it needs to process a
+command sent to it by this driver. Interrupting the AVS CPU is mandatory for
+commands to be processed.
+
+
+
+Node brcm,avs-cpu-data-mem
+--------------------------
+
+Required properties:
+- compatible: Should be one of: brcm,avs-cpu-data-mem, brcm,bcm7271-avs-cpu-data-mem
+ or brcm,bcm7268-avs-cpu-data-mem
+- reg: Specifies base physical address and size of the registers.
+
+Optional properties:
+- interrupts: The interrupt that the AVS CPU will use to interrupt the host
+ when a command has completed.
+- interrupt-parent: The interrupt controller the above interrupt is routed
+ through.
+- interrupt-names: The name of the interrupt used to interrupt the host.
+
+Either none of the interrupt properties may be defined or all three must be
+present. Without the interrupt properties, the driver will operate in
+polling mode.
+
+
+Node brcm,avs-cpu-l2-intr
+-------------------------
+
+Required properties:
+- compatible: Should be one of: brcm,avs-cpu-l2-intr, brcm,bcm7271-avs-cpu-l2-intr
+ or brcm,bcm7268-avs-cpu-l2-intr
+- reg: Specifies base physical address and size of the registers.
+
+Optional properties:
+- None
+
+
+Example
+=======
+
+ avs-cpu-data-mem@f04c4000 {
+ compatible = "brcm,bcm7271-avs-cpu-data-mem",
+ "brcm,avs-cpu-data-mem";
+ reg = <0xf04c4000 0x60>;
+ interrupts = <0x1a>;
+ interrupt-parent = <&avs_host_l2_intc>;
+ interrupt-names = "sw_intr";
+ };
+
+ avs-cpu-l2-intr@f04d1100 {
+ compatible = "brcm,bcm7271-avs-cpu-l2-intr",
+ "brcm,avs-cpu-l2-intr";
+ reg = <0xf04d1100 0x10>;
+ };
diff --git a/Makefile b/Makefile
index ba52b02..5008c45 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 1
SUBLEVEL = 20
-EXTRAVERSION = -1.1
+EXTRAVERSION = -1.2
NAME = Series 4800
# *DOCUMENTATION*
diff --git a/arch/arm/configs/brcmstb_defconfig b/arch/arm/configs/brcmstb_defconfig
index 5d89c3b..1351786 100644
--- a/arch/arm/configs/brcmstb_defconfig
+++ b/arch/arm/configs/brcmstb_defconfig
@@ -44,6 +44,8 @@
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_BRCM_AVS_CPUFREQ=y
+CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG=y
CONFIG_CRAMFS=y
CONFIG_VFP=y
CONFIG_NEON=y
@@ -66,6 +68,10 @@
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=y
CONFIG_NET_DSA=y
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 85e374f..16c3534 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -59,7 +59,34 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> (MPIDR_LEVEL_BITS * level)) & MPIDR_LEVEL_MASK)
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT 4
+#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr) \
+ (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT 16
+#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr) \
+ (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr) \
+ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT 24
+#define MIDR_IMPLEMENTOR_MASK (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr) \
+ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
+#define MIDR_CPU_PART(imp, partnum) \
+ (((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ (0xf << MIDR_ARCHITECTURE_SHIFT) | \
+ ((partnum) << MIDR_PARTNUM_SHIFT))
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_BCM 0x42
#define ARM_CPU_IMP_INTEL 0x69
/* ARM implemented processors */
@@ -81,6 +108,10 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+#define BCM_CPU_PART_BRAHMA_B53 0x100
+
+#define MIDR_BRAHMA_B53 MIDR_CPU_PART(ARM_CPU_IMP_BCM, BCM_CPU_PART_BRAHMA_B53)
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm64/configs/brcmstb_defconfig b/arch/arm64/configs/brcmstb_defconfig
index bc830be..d3fa4b9 100644
--- a/arch/arm64/configs/brcmstb_defconfig
+++ b/arch/arm64/configs/brcmstb_defconfig
@@ -39,6 +39,8 @@
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPUFREQ_DT=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_BRCM_AVS_CPUFREQ=y
+CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG=y
# CONFIG_SUSPEND is not set
CONFIG_NET=y
CONFIG_PACKET=y
@@ -54,6 +56,10 @@
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
CONFIG_INET_UDP_DIAG=y
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
# CONFIG_IPV6 is not set
CONFIG_BRIDGE=y
CONFIG_NET_DSA=y
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a84ec60..6c917ad 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -61,8 +61,11 @@
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
((partnum) << MIDR_PARTNUM_SHIFT))
+#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_BCM 0x42
#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_PART_AEM_V8 0xD0F
@@ -70,8 +73,14 @@
#define ARM_CPU_PART_CORTEX_A57 0xD07
#define ARM_CPU_PART_CORTEX_A53 0xD03
+#define BCM_CPU_PART_BRAHMA_B53 0x100
+
#define APM_CPU_PART_POTENZA 0x000
+#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_BRAHMA_B53 MIDR_CPU_PART(ARM_CPU_IMP_BCM, BCM_CPU_PART_BRAHMA_B53)
+
#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
#define ID_AA64MMFR0_BIGENDEL0_MASK (0xf << ID_AA64MMFR0_BIGENDEL0_SHIFT)
#define ID_AA64MMFR0_BIGENDEL0(mmfr0) \
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 7a18fab..861afb7 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -33,6 +33,11 @@
void arm64_notify_die(const char *str, struct pt_regs *regs,
struct siginfo *info, int err);
+void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *),
+ int sig, int code, const char *name);
+void * hook_serror_handler(int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *));
void hook_debug_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 6ffd914..6933774 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -21,12 +21,6 @@
#include <asm/cputype.h>
#include <asm/cpufeature.h>
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
- MIDR_ARCHITECTURE_MASK)
-
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
@@ -81,6 +75,12 @@
.capability = ARM64_WORKAROUND_845719,
MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
},
+ {
+ /* Brahma-B53 r0p[0] */
+ .desc = "ARM erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ MIDR_RANGE(MIDR_BRAHMA_B53, 0x00, 0x00),
+ },
#endif
{
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index bddd04d..07a304f 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -209,18 +209,18 @@
ventry el1_sync // Synchronous EL1h
ventry el1_irq // IRQ EL1h
ventry el1_fiq_invalid // FIQ EL1h
- ventry el1_error_invalid // Error EL1h
+ ventry el1_error // Error EL1h
ventry el0_sync // Synchronous 64-bit EL0
ventry el0_irq // IRQ 64-bit EL0
ventry el0_fiq_invalid // FIQ 64-bit EL0
- ventry el0_error_invalid // Error 64-bit EL0
+ ventry el0_error // Error 64-bit EL0
#ifdef CONFIG_COMPAT
ventry el0_sync_compat // Synchronous 32-bit EL0
ventry el0_irq_compat // IRQ 32-bit EL0
ventry el0_fiq_invalid_compat // FIQ 32-bit EL0
- ventry el0_error_invalid_compat // Error 32-bit EL0
+ ventry el0_error_compat // Error 32-bit EL0
#else
ventry el0_sync_invalid // Synchronous 32-bit EL0
ventry el0_irq_invalid // IRQ 32-bit EL0
@@ -234,10 +234,19 @@
*/
.macro inv_entry, el, reason, regsize = 64
kernel_entry el, \regsize
+ .if \el == 0
+ enable_dbg
+ ct_user_exit
+ .endif
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
+ .if \el == 0
+ bl bad_mode
+ b ret_to_user
+ .else
b bad_mode
+ .endif
.endm
el0_sync_invalid:
@@ -260,10 +269,6 @@
el0_fiq_invalid_compat:
inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
-
-el0_error_invalid_compat:
- inv_entry 0, BAD_ERROR, 32
-ENDPROC(el0_error_invalid_compat)
#endif
el1_sync_invalid:
@@ -388,6 +393,35 @@
ret x24
#endif
+ .align 6
+el1_error:
+ kernel_entry 1
+ mrs x1, esr_el1 // read the syndrome register
+ lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SERROR // SError exception in EL1
+ b.eq el1_serr
+ b el1_error_inv
+el1_serr:
+ mrs x0, far_el1
+ enable_dbg
+ // re-enable interrupts if they were enabled in the aborted context
+ tbnz x23, #7, 1f // PSR_I_BIT
+ enable_irq
+1:
+ mov x2, sp // struct pt_regs
+ bl do_serr_abort
+
+ // disable interrupts before pulling preserved data off the stack
+ disable_irq
+ kernel_exit 1
+el1_error_inv:
+ enable_dbg
+ mov x0, sp
+ mov x1, #BAD_ERROR
+ mrs x2, esr_el1
+ b bad_mode
+ENDPROC(el1_error)
+
/*
* EL0 mode handlers.
*/
@@ -462,6 +496,11 @@
el0_irq_compat:
kernel_entry 0, 32
b el0_irq_naked
+
+ .align 6
+el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
#endif
el0_da:
@@ -573,6 +612,35 @@
b ret_to_user
ENDPROC(el0_irq)
+ .align 6
+el0_error:
+ kernel_entry 0
+el0_error_naked:
+ mrs x25, esr_el1 // read the syndrome register
+ lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
+ cmp x24, #ESR_ELx_EC_SERROR // SError exception in EL0
+ b.eq el0_serr
+ b el0_error_inv
+el0_serr:
+ mrs x26, far_el1
+ // enable interrupts before calling the main handler
+ enable_dbg_and_irq
+ ct_user_exit
+ bic x0, x26, #(0xff << 56)
+ mov x1, x25
+ mov x2, sp
+ bl do_serr_abort
+ b ret_to_user
+el0_error_inv:
+ enable_dbg
+ ct_user_exit
+ mov x0, sp
+ mov x1, #BAD_ERROR
+ mrs x2, esr_el1
+ bl bad_mode
+ b ret_to_user
+ENDPROC(el0_error)
+
/*
* Register switch for AArch64. The callee-saved registers need to be saved
* and restored. On entry:
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index fa5efaa..3e727db 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -443,6 +443,19 @@
{ do_bad, SIGBUS, 0, "unknown 63" },
};
+void __init hook_fault_code(int nr,
+ int (*fn)(unsigned long, unsigned int, struct pt_regs *),
+ int sig, int code, const char *name)
+{
+ BUG_ON(nr < 0 || nr >= ARRAY_SIZE(fault_info));
+
+ fault_info[nr].fn = fn;
+ fault_info[nr].sig = sig;
+ fault_info[nr].code = code;
+ fault_info[nr].name = name;
+}
+
+
static const char *fault_name(unsigned int esr)
{
const struct fault_info *inf = fault_info + (esr & 63);
@@ -531,3 +544,33 @@
return 0;
}
+
+static int (*serror_handler)(unsigned long, unsigned int, struct pt_regs *);
+
+void * __init hook_serror_handler(int (*fn)(unsigned long, unsigned int,
+ struct pt_regs *))
+{
+ void *ret = serror_handler;
+
+ serror_handler = fn;
+ return ret;
+}
+
+asmlinkage void __exception do_serr_abort(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ struct siginfo info;
+
+ if (serror_handler)
+ if (!serror_handler(addr, esr, regs))
+ return;
+
+ pr_alert("Unhandled SError: (0x%08x) at 0x%016lx\n", esr, addr);
+ __show_regs(regs);
+
+ info.si_signo = SIGILL;
+ info.si_errno = 0;
+ info.si_code = ILL_ILLOPC;
+ info.si_addr = (void __user *)addr;
+ arm64_notify_die("", regs, &info, esr);
+}
diff --git a/arch/mips/configs/bmips_stb_defconfig b/arch/mips/configs/bmips_stb_defconfig
index 2ac3c3d..53627e0 100644
--- a/arch/mips/configs/bmips_stb_defconfig
+++ b/arch/mips/configs/bmips_stb_defconfig
@@ -30,6 +30,10 @@
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
CONFIG_WIRELESS=y
CONFIG_CFG80211=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a1d4af6..9b98d56 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -41,7 +41,7 @@
config BRCMSTB_GISB_ARB
bool "Broadcom STB GISB bus arbiter"
- depends on ARM || MIPS
+ depends on ARM || ARM64 || MIPS
help
Driver for the Broadcom Set Top Box System-on-a-chip internal bus
arbiter. This driver provides timeout and target abort error handling
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 72fe0a5..75baff2 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2016 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -30,6 +30,11 @@
#include <asm/signal.h>
#endif
+#ifdef CONFIG_ARM64
+#include <asm/signal.h>
+#include <asm/system_misc.h>
+#endif
+
#ifdef CONFIG_MIPS
#include <asm/traps.h>
#endif
@@ -127,9 +132,9 @@
return;
if (gdev->big_endian)
- iowrite32be(val, gdev->base + reg);
+ iowrite32be(val, gdev->base + offset);
else
- iowrite32(val, gdev->base + reg);
+ iowrite32(val, gdev->base + offset);
}
static ssize_t gisb_arb_get_timeout(struct device *dev,
@@ -221,31 +226,54 @@
return 0;
}
-#ifdef CONFIG_ARM
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
static int brcmstb_bus_error_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
- int ret = 0;
struct brcmstb_gisb_arb_device *gdev;
/* iterate over each GISB arb registered handlers */
list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
- ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
/*
* If it was an imprecise abort, then we need to correct the
* return address to be _after_ the instruction.
*/
if (fsr & (1 << 10))
regs->ARM_pc += 4;
+#endif
- return ret;
+ /* Always report unhandled exception */
+ return 1;
+}
+
+#ifdef CONFIG_ARM64
+static int (*serror_chain)(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+static int do_brahma_b53_serror(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ struct brcmstb_gisb_arb_device *gdev;
+
+ if (((esr & (3 << 22)) == 0) && ((esr & 3) == 2)) {
+ /* iterate over each GISB arb registered handlers */
+ list_for_each_entry(gdev, &brcmstb_gisb_arb_device_list, next)
+ brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ }
+
+ if (serror_chain)
+ return serror_chain(addr, esr, regs);
+
+ /* Always report unhandled exception */
+ return 1;
}
#endif
+#endif /* CONFIG_ARM || CONFIG_ARM64 */
#ifdef CONFIG_MIPS
static int brcmstb_bus_error_handler(struct pt_regs *regs, int is_fixup)
{
- int ret = 0;
struct brcmstb_gisb_arb_device *gdev;
u32 cap_status;
@@ -258,7 +286,7 @@
goto out;
}
- ret |= brcmstb_gisb_arb_decode_addr(gdev, "bus error");
+ brcmstb_gisb_arb_decode_addr(gdev, "bus error");
}
out:
return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
@@ -379,9 +407,22 @@
list_add_tail(&gdev->next, &brcmstb_gisb_arb_device_list);
#ifdef CONFIG_ARM
+#ifdef CONFIG_ARM_LPAE
+ hook_fault_code(16, brcmstb_bus_error_handler, SIGBUS, 0,
+ "synchronous external abort");
+ hook_fault_code(17, brcmstb_bus_error_handler, SIGBUS, 0,
+ "asynchronous external abort");
+#else
hook_fault_code(22, brcmstb_bus_error_handler, SIGBUS, 0,
"imprecise external abort");
#endif
+#endif /* CONFIG_ARM */
+#ifdef CONFIG_ARM64
+ hook_fault_code(16, brcmstb_bus_error_handler, SIGBUS, 0,
+ "synchronous external abort");
+ if ((read_cpuid_id() & CPU_MODEL_MASK) == MIDR_BRAHMA_B53)
+ serror_chain = hook_serror_handler(do_brahma_b53_serror);
+#endif
#ifdef CONFIG_MIPS
board_be_handler = brcmstb_bus_error_handler;
#endif
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4f3dbc8..a31fa34 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -10,6 +10,26 @@
help
This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
+config ARM_BRCM_AVS_CPUFREQ
+ tristate "Broadcom AVS CPUfreq driver"
+ depends on ARCH_BRCMSTB || COMPILE_TEST
+ help
+ Some Broadcom SoCs use a co-processor running proprietary firmware
+ ("AVS") to handle voltage and frequency scaling. This driver
+ provides a standard CPUfreq interface to AVS.
+
+ Say Y, if you have a Broadcom SoC with AVS.
+
+config ARM_BRCM_AVS_CPUFREQ_DEBUG
+ bool "Broadcom AVS CPUfreq driver sysfs debug capability"
+ depends on ARM_BRCM_AVS_CPUFREQ
+ help
+ Enabling this option turns on debug support via sysfs under
+ /sys/kernel/debug/brcm-avs-cpufreq. It is possible to read all and
+ write some AVS mailbox registers through sysfs entries.
+
+ If in doubt, say Y.
+
config ARM_DT_BL_CPUFREQ
tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
depends on ARM_BIG_LITTLE_CPUFREQ && OF
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index cdce92a..3fd0434 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -50,6 +50,7 @@
# LITTLE drivers, so that it is probed last.
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
+obj-$(CONFIG_ARM_BRCM_AVS_CPUFREQ) += brcm-avs-cpufreq.o
obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o
diff --git a/drivers/cpufreq/brcm-avs-cpufreq.c b/drivers/cpufreq/brcm-avs-cpufreq.c
new file mode 100644
index 0000000..7605b67
--- /dev/null
+++ b/drivers/cpufreq/brcm-avs-cpufreq.c
@@ -0,0 +1,1002 @@
+/*
+ * CPU frequency scaling for Broadcom SoCs with AVS firmware
+ *
+ * Copyright (c) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+
+#ifdef CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#endif
+
+/* Max number of arguments AVS calls take */
+#define AVS_MAX_CMD_ARGS 4
+/*
+ * This macro is used to generate AVS parameter register offsets. For
+ * x >= AVS_MAX_CMD_ARGS, it returns 0 to protect against accidental memory
+ * access outside of the parameter range. (Offset 0 is the first parameter.)
+ */
+#define AVS_PARAM_MULT(x) ((x) < AVS_MAX_CMD_ARGS ? (x) : 0)
+
+/* AVS Mailbox Register offsets */
+#define AVS_MBOX_COMMAND 0x00
+#define AVS_MBOX_STATUS 0x04
+#define AVS_MBOX_VOLTAGE0 0x08
+#define AVS_MBOX_TEMP0 0x0c
+#define AVS_MBOX_PV0 0x10
+#define AVS_MBOX_MV0 0x14
+#define AVS_MBOX_PARAM(x) (0x18 + AVS_PARAM_MULT(x) * sizeof(u32))
+#define AVS_MBOX_REVISION 0x28
+#define AVS_MBOX_PSTATE 0x2c
+#define AVS_MBOX_HEARTBEAT 0x30
+#define AVS_MBOX_MAGIC 0x34
+#define AVS_MBOX_SIGMA_HVT 0x38
+#define AVS_MBOX_SIGMA_SVT 0x3c
+#define AVS_MBOX_VOLTAGE1 0x40
+#define AVS_MBOX_TEMP1 0x44
+#define AVS_MBOX_PV1 0x48
+#define AVS_MBOX_MV1 0x4c
+#define AVS_MBOX_FREQUENCY 0x50
+
+/* AVS Commands */
+#define AVS_CMD_AVAILABLE 0x00
+#define AVS_CMD_DISABLE 0x10
+#define AVS_CMD_ENABLE 0x11
+#define AVS_CMD_S2_ENTER 0x12
+#define AVS_CMD_S2_EXIT 0x13
+#define AVS_CMD_BBM_ENTER 0x14
+#define AVS_CMD_BBM_EXIT 0x15
+#define AVS_CMD_S3_ENTER 0x16
+#define AVS_CMD_S3_EXIT 0x17
+#define AVS_CMD_BALANCE 0x18
+/* PMAP and P-STATE commands */
+#define AVS_CMD_GET_PMAP 0x30
+#define AVS_CMD_SET_PMAP 0x31
+#define AVS_CMD_GET_PSTATE 0x40
+#define AVS_CMD_SET_PSTATE 0x41
+
+/* Different modes AVS supports (for GET_PMAP/SET_PMAP) */
+#define AVS_MODE_AVS 0x0
+#define AVS_MODE_DFS 0x1
+#define AVS_MODE_DVS 0x2
+#define AVS_MODE_DVFS 0x3
+
+/*
+ * PMAP parameter p1
+ * unused:31-24, mdiv_p0:23-16, unused:15-14, pdiv:13-10 , ndiv_int:9-0
+ */
+#define NDIV_INT_SHIFT 0
+#define NDIV_INT_MASK 0x3ff
+#define PDIV_SHIFT 10
+#define PDIV_MASK 0xf
+#define MDIV_P0_SHIFT 16
+#define MDIV_P0_MASK 0xff
+/*
+ * PMAP parameter p2
+ * mdiv_p4:31-24, mdiv_p3:23-16, mdiv_p2:15:8, mdiv_p1:7:0
+ */
+#define MDIV_P1_SHIFT 0
+#define MDIV_P1_MASK 0xff
+#define MDIV_P2_SHIFT 8
+#define MDIV_P2_MASK 0xff
+#define MDIV_P3_SHIFT 16
+#define MDIV_P3_MASK 0xff
+#define MDIV_P4_SHIFT 24
+#define MDIV_P4_MASK 0xff
+
+/* Different P-STATES AVS supports (for GET_PSTATE/SET_PSTATE) */
+#define AVS_PSTATE_P0 0x0
+#define AVS_PSTATE_P1 0x1
+#define AVS_PSTATE_P2 0x2
+#define AVS_PSTATE_P3 0x3
+#define AVS_PSTATE_P4 0x4
+#define AVS_PSTATE_MAX AVS_PSTATE_P4
+
+/* CPU L2 Interrupt Controller Registers */
+#define AVS_CPU_L2_SET0 0x04
+#define AVS_CPU_L2_INT_MASK BIT(31)
+
+/* AVS Command Status Values */
+#define AVS_STATUS_CLEAR 0x00
+/* Command/notification accepted */
+#define AVS_STATUS_SUCCESS 0xf0
+/* Command/notification rejected */
+#define AVS_STATUS_FAILURE 0xff
+/* Invalid command/notification (unknown) */
+#define AVS_STATUS_INVALID 0xf1
+/* Non-AVS modes are not supported */
+#define AVS_STATUS_NO_SUPP 0xf2
+/* Cannot set P-State until P-Map supplied */
+#define AVS_STATUS_NO_MAP 0xf3
+/* Cannot change P-Map after initial P-Map set */
+#define AVS_STATUS_MAP_SET 0xf4
+/* Max AVS status; higher numbers are used for debugging */
+#define AVS_STATUS_MAX 0xff
+
+/* Other AVS related constants */
+#define AVS_LOOP_LIMIT 50000
+#define AVS_FIRMWARE_MAGIC 0xa11600d1
+
+#define BRCM_AVS_CPUFREQ_NAME "brcm-avs-cpufreq"
+#define BRCM_AVS_CPU_DATA "brcm,avs-cpu-data-mem"
+#define BRCM_AVS_CPU_INTR "brcm,avs-cpu-l2-intr"
+#define BRCM_AVS_HOST_INTR "sw_intr"
+
+struct pmap {
+ unsigned mode;
+ unsigned p1;
+ unsigned p2;
+};
+
+struct private_data {
+ void __iomem *base;
+ void __iomem *avs_intr_base;
+ void __iomem *host_intr_base;
+#ifdef CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG
+ struct dentry *debugfs;
+#endif
+ spinlock_t lock;
+ struct pmap pmap;
+};
+
+#ifdef CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG
+
+enum debugfs_format {
+ DEBUGFS_NORMAL,
+ DEBUGFS_FLOAT,
+ DEBUGFS_REV,
+};
+
+struct debugfs_data {
+ struct debugfs_entry *entry;
+ struct private_data *priv;
+};
+
+struct debugfs_entry {
+ char *name;
+ u32 offset;
+ fmode_t mode;
+ enum debugfs_format format;
+};
+
+#define DEBUGFS_ENTRY(name, mode, format) { \
+ #name, AVS_MBOX_##name, mode, format \
+}
+
+/*
+ * These are used for debugfs only. Otherwise we use AVS_MBOX_PARAM() directly.
+ */
+#define AVS_MBOX_PARAM1 AVS_MBOX_PARAM(0)
+#define AVS_MBOX_PARAM2 AVS_MBOX_PARAM(1)
+#define AVS_MBOX_PARAM3 AVS_MBOX_PARAM(2)
+#define AVS_MBOX_PARAM4 AVS_MBOX_PARAM(3)
+
+/*
+ * This table stores the name, access permissions and offset for each hardware
+ * register and is used to generate debugfs entries.
+ */
+static struct debugfs_entry debugfs_entries[] = {
+ DEBUGFS_ENTRY(COMMAND, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(STATUS, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV0, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PARAM1, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM2, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM3, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(PARAM4, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(REVISION, 0, DEBUGFS_REV),
+ DEBUGFS_ENTRY(PSTATE, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(HEARTBEAT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(MAGIC, S_IWUSR, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_HVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(SIGMA_SVT, 0, DEBUGFS_NORMAL),
+ DEBUGFS_ENTRY(VOLTAGE1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(TEMP1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(PV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(MV1, 0, DEBUGFS_FLOAT),
+ DEBUGFS_ENTRY(FREQUENCY, 0, DEBUGFS_NORMAL),
+};
+
+static int brcm_avs_target_index(struct cpufreq_policy *policy, unsigned index);
+
+static char *__strtolower(char *s)
+{
+ char *p = s;
+
+ while (*p != '\0') {
+ *p = tolower(*p);
+ p++;
+ }
+
+ return s;
+}
+
+#endif /* CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG */
+
+static void __iomem *__map_region(const char *name)
+{
+ struct device_node *np;
+ void __iomem *ptr;
+
+ np = of_find_compatible_node(NULL, NULL, name);
+ if (!np)
+ return NULL;
+ ptr = of_iomap(np, 0);
+ if (!ptr)
+ return NULL;
+
+ return ptr;
+}
+
+static int __issue_avs_command(struct private_data *priv, int cmd, bool is_send,
+ u32 args[])
+{
+ void __iomem *base = priv->base;
+ unsigned long flags;
+ int ret = 0;
+ unsigned i;
+ u32 val;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ /*
+ * Make sure no other command is currently running: cmd is 0 if AVS
+ * co-processor is idle.
+ */
+ for (i = 0, val = 1; val != 0 && i < AVS_LOOP_LIMIT; i++)
+ val = readl(base + AVS_MBOX_COMMAND);
+ /* Give the caller a chance to retry if AVS is busy. */
+ if (i >= AVS_LOOP_LIMIT) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ /* Clear status before we begin. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* We need to send arguments for this command. */
+ if (args && is_send)
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ writel(args[i], base + AVS_MBOX_PARAM(i));
+
+ /* Now issue the command. */
+ writel(cmd, base + AVS_MBOX_COMMAND);
+ /* Tell firmware to wake-up. */
+ writel(AVS_CPU_L2_INT_MASK, priv->avs_intr_base + AVS_CPU_L2_SET0);
+
+ /*
+ * Wait for AVS co-processor to finish processing the command. Status
+ * will be non-0 (and not greater than AVS_STATUS_MAX) once it's ready.
+ */
+ for (i = val = 0; (val == 0 || val > AVS_STATUS_MAX) &&
+ i < AVS_LOOP_LIMIT; i++)
+ val = readl(base + AVS_MBOX_STATUS);
+ if (i >= AVS_LOOP_LIMIT) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ /* This command returned arguments, so we read them back. */
+ if (args && !is_send)
+ for (i = 0; i < AVS_MAX_CMD_ARGS; i++)
+ args[i] = readl(base + AVS_MBOX_PARAM(i));
+
+ /* Clear status to tell AVS co-processor we are done. */
+ writel(AVS_STATUS_CLEAR, base + AVS_MBOX_STATUS);
+
+ /* Convert firmware errors to errno's as much as possible. */
+ switch (val) {
+ case AVS_STATUS_INVALID:
+ ret = -EINVAL;
+ break;
+ case AVS_STATUS_NO_SUPP:
+ ret = -ENOTSUPP;
+ break;
+ case AVS_STATUS_NO_MAP:
+ ret = -ENOENT;
+ break;
+ case AVS_STATUS_MAP_SET:
+ ret = -EEXIST;
+ break;
+ case AVS_STATUS_FAILURE:
+ ret = -EIO;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+}
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+static char *brcm_avs_mode_to_string(unsigned mode)
+{
+ switch (mode) {
+ case AVS_MODE_AVS:
+ return "AVS";
+ case AVS_MODE_DFS:
+ return "DFS";
+ case AVS_MODE_DVS:
+ return "DVS";
+ case AVS_MODE_DVFS:
+ return "DVFS";
+ }
+ return NULL;
+}
+
+static void brcm_avs_parse_p1(u32 p1, unsigned *mdiv_p0, unsigned *pdiv,
+ unsigned *ndiv)
+{
+ *mdiv_p0 = (p1 >> MDIV_P0_SHIFT) & MDIV_P0_MASK;
+ *pdiv = (p1 >> PDIV_SHIFT) & PDIV_MASK;
+ *ndiv = (p1 >> NDIV_INT_SHIFT) & NDIV_INT_MASK;
+}
+
+static void brcm_avs_parse_p2(u32 p2, unsigned *mdiv_p1, unsigned *mdiv_p2,
+ unsigned *mdiv_p3, unsigned *mdiv_p4)
+{
+ *mdiv_p4 = (p2 >> MDIV_P4_SHIFT) & MDIV_P4_MASK;
+ *mdiv_p3 = (p2 >> MDIV_P3_SHIFT) & MDIV_P3_MASK;
+ *mdiv_p2 = (p2 >> MDIV_P2_SHIFT) & MDIV_P2_MASK;
+ *mdiv_p1 = (p2 >> MDIV_P1_SHIFT) & MDIV_P1_MASK;
+}
+
+static int brcm_avs_get_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PMAP, false, args);
+ if (ret || !pmap)
+ return ret;
+
+ pmap->mode = args[0];
+ pmap->p1 = args[1];
+ pmap->p2 = args[2];
+
+ return 0;
+}
+
+static int brcm_avs_set_pmap(struct private_data *priv, struct pmap *pmap)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pmap->mode;
+ args[1] = pmap->p1;
+ args[2] = pmap->p2;
+
+ return __issue_avs_command(priv, AVS_CMD_SET_PMAP, true, args);
+}
+
+static int brcm_avs_get_pstate(struct private_data *priv, unsigned *pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+ int ret;
+
+ ret = __issue_avs_command(priv, AVS_CMD_GET_PSTATE, false, args);
+ if (ret)
+ return ret;
+ *pstate = args[0];
+
+ return 0;
+}
+
+static int brcm_avs_set_pstate(struct private_data *priv, unsigned pstate)
+{
+ u32 args[AVS_MAX_CMD_ARGS];
+
+ args[0] = pstate;
+ return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
+}
+
+/*
+ * TODO: This function will become brcm_avs_get_frequency() once the newest
+ * AVS firmware is ready.
+ */
+static unsigned long brcm_avs_get_frequency1(void __iomem *base)
+{
+ return readl(base + AVS_MBOX_FREQUENCY);
+}
+
+static unsigned long brcm_avs_get_frequency(struct private_data *priv,
+ unsigned pstate)
+{
+#define REF_CLK_FREQ 54
+ /*
+ * TODO: replace this function with the simple one-liner above.
+ */
+ unsigned mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+ unsigned pdiv, mdiv, ndiv;
+ unsigned vco, freq;
+ struct pmap pmap;
+ int ret;
+
+ ret = brcm_avs_get_pmap(priv, &pmap);
+ if (ret)
+ return ret;
+
+ brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+ brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+ switch (pstate) {
+ case AVS_PSTATE_P0:
+ mdiv = mdiv_p0;
+ break;
+ case AVS_PSTATE_P1:
+ mdiv = mdiv_p1;
+ break;
+ case AVS_PSTATE_P2:
+ mdiv = mdiv_p2;
+ break;
+ case AVS_PSTATE_P3:
+ mdiv = mdiv_p3;
+ break;
+ case AVS_PSTATE_P4:
+ mdiv = mdiv_p4;
+ break;
+ }
+
+ vco = (REF_CLK_FREQ / pdiv) * ndiv;
+ freq = (vco / mdiv) * 1000; /* in kHz */
+
+ return freq;
+
+#undef REF_CLK_FREQ
+}
+
+
+/*
+ * We determine which frequencies are supported by cycling through all
+ * P-states and reading back what frequency we are running at for each
+ * P-state. The original P-state is restored when we are done.
+ */
+static struct cpufreq_frequency_table *
+brcm_avs_get_freq_table(struct device *dev, struct private_data *priv)
+{
+ struct cpufreq_frequency_table *table;
+ unsigned pstate;
+ int i, ret;
+
+ /* Remember P-state for later */
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * The loop below fills indices AVS_PSTATE_P0..AVS_PSTATE_MAX and the
+ * CPUFREQ_TABLE_END sentinel lands at index AVS_PSTATE_MAX + 1, so we
+ * need AVS_PSTATE_MAX + 2 entries (the old "+ 1" was one short).
+ * devm_kcalloc() also zero-fills and checks the multiplication.
+ */
+ table = devm_kcalloc(dev, AVS_PSTATE_MAX + 2, sizeof(*table),
+ GFP_KERNEL);
+ if (!table)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = AVS_PSTATE_P0; i <= AVS_PSTATE_MAX; i++) {
+ ret = brcm_avs_set_pstate(priv, i);
+ if (ret)
+ return ERR_PTR(ret);
+ table[i].frequency = brcm_avs_get_frequency(priv, i);
+ table[i].driver_data = i;
+ }
+ table[i].frequency = CPUFREQ_TABLE_END;
+ table[i].driver_data = i;
+
+ /* Restore P-state */
+ ret = brcm_avs_set_pstate(priv, pstate);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return table;
+}
+
+#ifdef CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG
+
+#define MANT(x) (unsigned)(abs((x)) / 1000)
+#define FRAC(x) (unsigned)(abs((x)) - abs((x)) / 1000 * 1000)
+
+/*
+ * seq_file "show" callback shared by all debugfs entries. Reads the
+ * register associated with the opened file and prints it; some formats
+ * print a decoded representation first, followed by the raw hex value.
+ */
+static int brcm_avs_debug_show(struct seq_file *s, void *data)
+{
+ struct debugfs_data *dbgfs = s->private;
+ void __iomem *base;
+ u32 val, offset;
+
+ if (!dbgfs) {
+ seq_puts(s, "No device pointer\n");
+ return 0;
+ }
+
+ base = dbgfs->priv->base;
+ offset = dbgfs->entry->offset;
+ val = readl(base + offset);
+ switch (dbgfs->entry->format) {
+ case DEBUGFS_NORMAL:
+ /* Only the raw hex line below is printed. */
+ break;
+ case DEBUGFS_FLOAT:
+ /* Fixed-point value scaled by 1000, e.g. 1234 -> "1.234". */
+ seq_printf(s, "%d.%03d\n", MANT(val), FRAC(val));
+ break;
+ case DEBUGFS_REV:
+ /* Four revision characters packed into one 32-bit word. */
+ seq_printf(s, "%c.%c.%c.%c\n", (val >> 24 & 0xff),
+ (val >> 16 & 0xff), (val >> 8 & 0xff),
+ val & 0xff);
+ break;
+ }
+ /* Always print the raw register value as well. */
+ seq_printf(s, "0x%08x\n", val);
+
+ return 0;
+}
+
+#undef MANT
+#undef FRAC
+
+/*
+ * Debugfs write handler. Parses the user-supplied string as a number
+ * and writes it to the register backing the opened file. Writing
+ * "c<number>" to the "command" entry routes the command through
+ * __issue_avs_command() instead of performing a raw register write.
+ *
+ * Fixes over the previous version: copy_from_user() residue is mapped
+ * to -EFAULT (its return value is a byte count, not an errno);
+ * cpufreq_cpu_get() is NULL-checked and its reference dropped; errors
+ * from the issued command are propagated instead of being swallowed.
+ */
+static ssize_t brcm_avs_seq_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct debugfs_data *dbgfs = s->private;
+ void __iomem *base, *avs_intr_base;
+ bool use_issue_command = false;
+ unsigned long val, offset;
+ char str[128];
+ int ret;
+ char *str_ptr = str;
+
+ if (size >= sizeof(str))
+ return -E2BIG;
+
+ memset(str, 0, sizeof(str));
+ /* copy_from_user() returns the number of bytes NOT copied. */
+ if (copy_from_user(str, buf, size))
+ return -EFAULT;
+
+ base = dbgfs->priv->base;
+ avs_intr_base = dbgfs->priv->avs_intr_base;
+ offset = dbgfs->entry->offset;
+ /*
+ * Special case writing to "command" entry only: if the string starts
+ * with a 'c', we use the driver's __issue_avs_command() function.
+ * Otherwise, we perform a raw write. This should allow testing of raw
+ * access as well as using the higher level function. (Raw access
+ * doesn't clear the firmware return status after issuing the command.)
+ */
+ if (str_ptr[0] == 'c' && offset == AVS_MBOX_COMMAND) {
+ use_issue_command = true;
+ str_ptr++;
+ }
+ if (kstrtoul(str_ptr, 0, &val) != 0)
+ return -EINVAL;
+
+ if (use_issue_command) {
+ /*
+ * Setting the P-state is a special case. We need to update the
+ * CPU frequency we report.
+ */
+ if (val == AVS_CMD_SET_PSTATE) {
+ struct cpufreq_policy *policy;
+ unsigned pstate;
+
+ /*
+ * cpufreq_cpu_get() may return NULL and takes a
+ * reference that must be dropped again.
+ */
+ policy = cpufreq_cpu_get(smp_processor_id());
+ if (!policy)
+ return -ENODEV;
+ /* Read back the P-state we are about to set */
+ pstate = readl(base + AVS_MBOX_PARAM(0));
+ ret = brcm_avs_target_index(policy, pstate);
+ cpufreq_cpu_put(policy);
+ } else {
+ ret = __issue_avs_command(dbgfs->priv, val, false,
+ NULL);
+ }
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * BEWARE: using this "raw access" code path to set the P-state
+ * will *NOT* update the frequency reported by the system. We
+ * don't perform any error checking regarding the AVS return
+ * code, nor do we interpret and "smartly" handle commands. We
+ * simply process each instruction individually as provided by
+ * userland and without context.
+ */
+ writel(val, base + offset);
+ /* We have to wake up the firmware to process a command. */
+ if (offset == AVS_MBOX_COMMAND)
+ writel(AVS_CPU_L2_INT_MASK,
+ avs_intr_base + AVS_CPU_L2_SET0);
+ }
+
+ return size;
+}
+
+/* Look up a debugfs register descriptor by (case-insensitive) name. */
+static struct debugfs_entry *__find_debugfs_entry(const char *name)
+{
+ struct debugfs_entry *entry = debugfs_entries;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++, entry++) {
+ if (!strcasecmp(entry->name, name))
+ return entry;
+ }
+
+ return NULL;
+}
+
+/*
+ * Open a debugfs entry: map the file name to its register descriptor
+ * and hand descriptor plus driver data to the seq_file machinery. The
+ * debugfs_data allocated here is freed in brcm_avs_debug_release(), or
+ * right here if single_open() fails.
+ */
+static int brcm_avs_debug_open(struct inode *inode, struct file *file)
+{
+ struct debugfs_data *data;
+ fmode_t fmode;
+ int ret;
+
+ /*
+ * seq_open(), which is called by single_open(), clears "write" access.
+ * We need write access to some files, so we preserve our access mode
+ * and restore it.
+ */
+ fmode = file->f_mode;
+ /*
+ * Check access permissions even for root. We don't want to be writing
+ * to read-only registers. Access for regular users has already been
+ * checked by the VFS layer.
+ */
+ if ((fmode & FMODE_WRITER) && !(inode->i_mode & S_IWUSR))
+ return -EACCES;
+
+ data = kmalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ /*
+ * We use the same file system operations for all our debug files. To
+ * produce specific output, we look up the file name upon opening a
+ * debugfs entry and map it to a memory offset. This offset is then used
+ * in the generic "show" function to read a specific register.
+ */
+ data->entry = __find_debugfs_entry(file->f_path.dentry->d_iname);
+ data->priv = inode->i_private;
+
+ ret = single_open(file, brcm_avs_debug_show, data);
+ if (ret)
+ kfree(data);
+ /* Restore the access mode that seq_open() may have cleared. */
+ file->f_mode = fmode;
+
+ return ret;
+}
+
+/* Release a debugfs entry: free the per-open data from open(). */
+static int brcm_avs_debug_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ kfree(seq->private);
+
+ return single_release(inode, file);
+}
+
+/* One set of file operations shared by every debugfs entry. */
+static const struct file_operations brcm_avs_debug_ops = {
+ .open = brcm_avs_debug_open,
+ .read = seq_read,
+ .write = brcm_avs_seq_write,
+ .llseek = seq_lseek,
+ .release = brcm_avs_debug_release,
+};
+
+/*
+ * Create one debugfs file per entry in debugfs_entries[] under a
+ * driver-named directory. On any file-creation failure the whole
+ * directory is torn down again; debugfs is best-effort, so no error is
+ * returned.
+ */
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+ struct dentry *dir;
+ int i;
+
+ if (!priv)
+ return;
+
+ dir = debugfs_create_dir(BRCM_AVS_CPUFREQ_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return;
+ priv->debugfs = dir;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_entries); i++) {
+ /*
+ * The DEBUGFS_ENTRY macro generates uppercase strings. We
+ * convert them to lowercase before creating the debugfs
+ * entries.
+ */
+ char *entry = __strtolower(debugfs_entries[i].name);
+ fmode_t mode = debugfs_entries[i].mode;
+
+ if (!debugfs_create_file(entry, S_IFREG | S_IRUGO | mode,
+ dir, priv, &brcm_avs_debug_ops)) {
+ priv->debugfs = NULL;
+ debugfs_remove_recursive(dir);
+ break;
+ }
+ }
+}
+
+/* Tear down the debugfs directory created by the _debug_init() call. */
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev)
+{
+ struct private_data *priv = platform_get_drvdata(pdev);
+
+ if (!priv || !priv->debugfs)
+ return;
+
+ debugfs_remove_recursive(priv->debugfs);
+ priv->debugfs = NULL;
+}
+
+#else
+
+/* Debugfs support is compiled out: provide no-op stubs. */
+static void brcm_avs_cpufreq_debug_init(struct platform_device *pdev) {}
+static void brcm_avs_cpufreq_debug_exit(struct platform_device *pdev) {}
+
+#endif /* CONFIG_ARM_BRCM_AVS_CPUFREQ_DEBUG */
+
+/*
+ * To ensure the right firmware is running we need to
+ * - check the MAGIC matches what we expect
+ * - brcm_avs_get_pmap() doesn't return -ENOTSUPP
+ */
+static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+{
+ u32 magic;
+ int rc;
+
+ /* A NULL pmap argument only probes whether the command is supported. */
+ rc = brcm_avs_get_pmap(priv, NULL);
+ magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+ /* Firmware is usable iff the magic matches and GET_PMAP is supported. */
+ return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP);
+}
+
+/*
+ * cpufreq "get" callback: report the cached current frequency for the
+ * given CPU. Returns 0 if no policy exists for the CPU.
+ *
+ * Fixes over the previous version: cpufreq_cpu_get() may return NULL
+ * (which was dereferenced unconditionally), and it takes a reference
+ * that must be released with cpufreq_cpu_put() (which was leaked).
+ */
+static unsigned brcm_avs_cpufreq_get(unsigned cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned freq;
+
+ if (!policy)
+ return 0;
+
+ freq = policy->cur;
+ cpufreq_cpu_put(policy);
+
+ return freq;
+}
+
+/*
+ * cpufreq "target_index" callback: switch to the P-state stored in the
+ * selected frequency-table entry and, on success, cache the new
+ * frequency in policy->cur.
+ */
+static int brcm_avs_target_index(struct cpufreq_policy *policy, unsigned index)
+{
+ int ret;
+
+ ret = brcm_avs_set_pstate(policy->driver_data,
+ policy->freq_table[index].driver_data);
+ if (ret)
+ return ret;
+
+ policy->cur = policy->freq_table[index].frequency;
+ return 0;
+}
+
+/* Save the current P-map so brcm_avs_resume() can restore it. */
+static int brcm_avs_suspend(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+
+ return brcm_avs_get_pmap(priv, &priv->pmap);
+}
+
+/*
+ * Restore the P-map saved in brcm_avs_suspend(). -EEXIST from the
+ * firmware means the P-map was already set and is treated as success.
+ */
+static int brcm_avs_resume(struct cpufreq_policy *policy)
+{
+ struct private_data *priv = policy->driver_data;
+ int ret;
+
+ ret = brcm_avs_set_pmap(priv, &priv->pmap);
+ if (ret == -EEXIST) {
+ struct platform_device *pdev = cpufreq_get_driver_data();
+ struct device *dev = &pdev->dev;
+
+ dev_warn(dev, "PMAP was already set\n");
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * cpufreq "init" callback. Maps the AVS mailbox and interrupt regions,
+ * optionally wires up the host interrupt, verifies that DVFS-capable
+ * firmware is running, builds the frequency table, and enables the
+ * firmware's DVFS engine.
+ *
+ * NOTE(review): the regions returned by __map_region() do not appear to
+ * be unmapped on the error paths below — confirm whether __map_region()
+ * is devm-managed; if not, this leaks the mappings on failure.
+ */
+static int brcm_avs_cpu_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *freq_table;
+ struct platform_device *pdev;
+ struct private_data *priv;
+ struct device *dev;
+ int host_irq;
+ int ret;
+
+ pdev = cpufreq_get_driver_data();
+ dev = &pdev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Mailbox data memory shared with the AVS co-processor. */
+ priv->base = __map_region(BRCM_AVS_CPU_DATA);
+ if (!priv->base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_DATA);
+ return -ENOENT;
+ }
+
+ /* Register used to interrupt (wake) the AVS co-processor. */
+ priv->avs_intr_base = __map_region(BRCM_AVS_CPU_INTR);
+ if (!priv->avs_intr_base) {
+ dev_err(dev, "Couldn't find property %s in device tree.\n",
+ BRCM_AVS_CPU_INTR);
+ return -ENOENT;
+ }
+
+ /* It is not an error if this property isn't present. */
+ host_irq = platform_get_irq_byname(pdev, BRCM_AVS_HOST_INTR);
+ if (host_irq >= 0) {
+ ret = devm_request_irq(dev, host_irq, irq_handler,
+ IRQF_TRIGGER_RISING, BRCM_AVS_HOST_INTR, NULL);
+ if (ret) {
+ dev_err(dev, "IRQ request failed: %s (%d) -- %d\n",
+ BRCM_AVS_HOST_INTR, host_irq, ret);
+ /* Continue without the completion interrupt. */
+ host_irq = -1;
+ }
+ }
+
+ if (!brcm_avs_is_firmware_loaded(priv)) {
+ dev_err(dev,
+ "AVS firmware is not loaded or doesn't support DVFS\n");
+ return -ENODEV;
+ }
+
+ freq_table = brcm_avs_get_freq_table(dev, priv);
+ if (IS_ERR(freq_table)) {
+ dev_err(dev, "Couldn't determine frequency table (%ld).\n",
+ PTR_ERR(freq_table));
+ return PTR_ERR(freq_table);
+ }
+
+ ret = cpufreq_table_validate_and_show(policy, freq_table);
+ if (ret) {
+ dev_err(dev, "invalid frequency table: %d\n", ret);
+ return ret;
+ }
+
+ policy->driver_data = priv;
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ /* All cores share the same clock and thus the same policy. */
+ cpumask_setall(policy->cpus);
+
+ /* Turn on the firmware's DVFS engine and seed policy->cur. */
+ ret = __issue_avs_command(priv, AVS_CMD_ENABLE, false, NULL);
+ if (!ret) {
+ unsigned pstate;
+
+ ret = brcm_avs_get_pstate(priv, &pstate);
+ if (!ret) {
+ policy->cur = freq_table[pstate].frequency;
+ dev_info(dev, "registered\n");
+ }
+ }
+ if (ret)
+ dev_err(dev, "couldn't initialize driver (%d)\n", ret);
+
+ return ret;
+}
+
+/* cpufreq "exit" callback: nothing to undo explicitly. */
+static int brcm_avs_cpu_exit(struct cpufreq_policy *policy)
+{
+ /*
+ * All our allocations are "managed", so we don't need to do
+ * anything.
+ */
+ return 0;
+}
+
+/* sysfs: show the current P-state, or "<unknown>" if it can't be read. */
+static ssize_t show_brcm_avs_pstate(struct cpufreq_policy *policy,
+ char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned pstate;
+ int ret;
+
+ ret = brcm_avs_get_pstate(priv, &pstate);
+
+ return ret ? sprintf(buf, "<unknown>\n") : sprintf(buf, "%u\n", pstate);
+}
+
+/* sysfs: show the current AVS mode as both a name and a number. */
+static ssize_t show_brcm_avs_mode(struct cpufreq_policy *policy,
+ char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ struct pmap pmap;
+ int ret;
+
+ ret = brcm_avs_get_pmap(priv, &pmap);
+ if (ret)
+ return sprintf(buf, "<unknown>\n");
+
+ return sprintf(buf, "%s %u\n", brcm_avs_mode_to_string(pmap.mode),
+ pmap.mode);
+}
+
+/* sysfs: dump the raw P-map words followed by the decoded dividers. */
+static ssize_t show_brcm_avs_pmap(struct cpufreq_policy *policy,
+ char *buf)
+{
+ struct private_data *priv = policy->driver_data;
+ unsigned mdiv_p0, mdiv_p1, mdiv_p2, mdiv_p3, mdiv_p4;
+ unsigned ndiv, pdiv;
+ struct pmap pmap;
+
+ if (brcm_avs_get_pmap(priv, &pmap))
+ return sprintf(buf, "<unknown>\n");
+
+ brcm_avs_parse_p1(pmap.p1, &mdiv_p0, &pdiv, &ndiv);
+ brcm_avs_parse_p2(pmap.p2, &mdiv_p1, &mdiv_p2, &mdiv_p3, &mdiv_p4);
+
+ return sprintf(buf, "0x%08x 0x%08x %u %u %u %u %u %u %u\n",
+ pmap.p1, pmap.p2, ndiv, pdiv, mdiv_p0, mdiv_p1, mdiv_p2,
+ mdiv_p3, mdiv_p4);
+}
+
+cpufreq_freq_attr_ro(brcm_avs_pstate);
+cpufreq_freq_attr_ro(brcm_avs_mode);
+cpufreq_freq_attr_ro(brcm_avs_pmap);
+
+/* static: only referenced via brcm_avs_driver.attr in this file. */
+static struct freq_attr *brcm_avs_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ &brcm_avs_pstate,
+ &brcm_avs_mode,
+ &brcm_avs_pmap,
+ NULL
+};
+
+/* cpufreq driver description; all scaling is done by the AVS firmware. */
+static struct cpufreq_driver brcm_avs_driver = {
+ .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .verify = cpufreq_generic_frequency_table_verify,
+ .target_index = brcm_avs_target_index,
+ .get = brcm_avs_cpufreq_get,
+ .suspend = brcm_avs_suspend,
+ .resume = brcm_avs_resume,
+ .init = brcm_avs_cpu_init,
+ .exit = brcm_avs_cpu_exit,
+ .name = BRCM_AVS_CPUFREQ_NAME,
+ .attr = brcm_avs_cpufreq_attr,
+};
+
+/* Register the cpufreq driver; add debugfs entries on success. */
+static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ brcm_avs_driver.driver_data = pdev;
+
+ ret = cpufreq_register_driver(&brcm_avs_driver);
+ if (ret)
+ return ret;
+
+ brcm_avs_cpufreq_debug_init(pdev);
+
+ return 0;
+}
+
+/* Remove debugfs entries and unregister the cpufreq driver. */
+static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+{
+ brcm_avs_cpufreq_debug_exit(pdev);
+ return cpufreq_unregister_driver(&brcm_avs_driver);
+}
+
+/* Bind on the AVS CPU data-memory node described in the DT binding. */
+static const struct of_device_id brcm_avs_cpufreq_match[] = {
+ { .compatible = BRCM_AVS_CPU_DATA },
+ { }
+};
+MODULE_DEVICE_TABLE(of, brcm_avs_cpufreq_match);
+
+static struct platform_driver brcm_avs_cpufreq_platdrv = {
+ .driver = {
+ .name = BRCM_AVS_CPUFREQ_NAME,
+ .of_match_table = brcm_avs_cpufreq_match,
+ },
+ .probe = brcm_avs_cpufreq_probe,
+ .remove = brcm_avs_cpufreq_remove,
+};
+module_platform_driver(brcm_avs_cpufreq_platdrv);
+
+MODULE_AUTHOR("Markus Mayer <mmayer@broadcom.com>");
+MODULE_DESCRIPTION("CPUfreq driver for Broadcom AVS");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f30e39c..12705ba 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -131,7 +131,7 @@
default y if (ARCH_BRCMSTB || BMIPS_GENERIC)
depends on OF_GPIO && (ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST)
select GPIO_GENERIC
- select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN
help
Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs.
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index dd99cf8..b8490e6 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -40,14 +40,16 @@
struct brcmstb_gpio_priv *parent_priv;
u32 width;
u32 wake_active;
- struct irq_chip irq_chip;
u32 regs[GIO_BANK_SIZE / sizeof(u32)];
};
struct brcmstb_gpio_priv {
struct list_head bank_list;
void __iomem *reg_base;
+ int num_gpios;
struct platform_device *pdev;
+ struct irq_chip irq_chip;
+ struct irq_domain *irq_domain;
int parent_irq;
int gpio_base;
bool can_wake;
@@ -94,22 +96,41 @@
spin_unlock_irqrestore(&bgc->lock, flags);
}
+static int brcmstb_gpio_to_irq(struct gpio_chip *gc, unsigned gc_offset)
+{
+ struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ /* gc_offset is relative to this gpio_chip; want real offset */
+ int offset = gc_offset + (gc->base - priv->gpio_base);
+
+ if (offset >= priv->num_gpios)
+ return -ENXIO;
+ return irq_create_mapping(priv->irq_domain, offset);
+}
+
/* -------------------- IRQ chip functions -------------------- */
+static int brcmstb_gpio_hwirq_to_offset(irq_hw_number_t hwirq,
+ struct brcmstb_gpio_bank *bank)
+{
+ return hwirq - (bank->bgc.gc.base - bank->parent_priv->gpio_base);
+}
+
static void brcmstb_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+ int offset = brcmstb_gpio_hwirq_to_offset(d->hwirq, bank);
- brcmstb_gpio_set_imask(bank, d->hwirq, false);
+ brcmstb_gpio_set_imask(bank, offset, false);
}
static void brcmstb_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
+ int offset = brcmstb_gpio_hwirq_to_offset(d->hwirq, bank);
- brcmstb_gpio_set_imask(bank, d->hwirq, true);
+ brcmstb_gpio_set_imask(bank, offset, true);
}
static int brcmstb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
@@ -117,7 +138,7 @@
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- u32 mask = BIT(d->hwirq);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
u32 edge_insensitive, iedge_insensitive;
u32 edge_config, iedge_config;
u32 level, ilevel;
@@ -198,7 +219,7 @@
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
struct brcmstb_gpio_bank *bank = brcmstb_gpio_gc_to_bank(gc);
- u32 mask = BIT(d->hwirq);
+ u32 mask = BIT(brcmstb_gpio_hwirq_to_offset(d->hwirq, bank));
/* Do not do anything specific for now, suspend/resume callbacks will
* configure the interrupt mask appropriately
@@ -224,7 +245,8 @@
static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
{
struct brcmstb_gpio_priv *priv = bank->parent_priv;
- struct irq_domain *irq_domain = bank->bgc.gc.irqdomain;
+ struct irq_domain *domain = priv->irq_domain;
+ int hwbase = bank->bgc.gc.base - priv->gpio_base;
void __iomem *reg_base = priv->reg_base;
unsigned long status;
unsigned long flags;
@@ -240,11 +262,12 @@
spin_unlock_irqrestore(&bank->bgc.lock, flags);
for_each_set_bit(bit, &status, 32) {
+ int hwirq = hwbase + bit;
if (bit >= bank->width)
dev_warn(&priv->pdev->dev,
"IRQ for invalid GPIO (bank=%d, offset=%d)\n",
bank->id, bit);
- generic_handle_irq(irq_find_mapping(irq_domain, bit));
+ generic_handle_irq(irq_find_mapping(domain, hwirq));
}
spin_lock_irqsave(&bank->bgc.lock, flags);
@@ -255,8 +278,7 @@
/* Each UPG GIO block has one IRQ for all banks */
static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
- struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
+ struct brcmstb_gpio_priv *priv = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
struct list_head *pos;
@@ -285,6 +307,69 @@
return NOTIFY_DONE;
}
+static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank(
+ struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq)
+{
+ struct list_head *pos;
+ int i = 0;
+
+ /* banks are in descending order */
+ list_for_each_prev(pos, &priv->bank_list) {
+ struct brcmstb_gpio_bank *bank =
+ list_entry(pos, struct brcmstb_gpio_bank, node);
+ i += bank->bgc.gc.ngpio;
+ if (hwirq < i)
+ return bank;
+ }
+ return NULL;
+}
+
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key brcmstb_gpio_irq_lock_class;
+
+
+static int brcmstb_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct brcmstb_gpio_priv *priv = d->host_data;
+ struct brcmstb_gpio_bank *bank =
+ brcmstb_gpio_hwirq_to_bank(priv, hwirq);
+ struct platform_device *pdev = priv->pdev;
+ int ret;
+
+ if (!bank)
+ return -EINVAL;
+
+ dev_dbg(&pdev->dev, "Mapping irq %d for gpio line %d (bank %d)\n",
+ irq, (int)hwirq, bank->id);
+ ret = irq_set_chip_data(irq, &bank->bgc.gc);
+ if (ret < 0)
+ return ret;
+ irq_set_lockdep_class(irq, &brcmstb_gpio_irq_lock_class);
+ irq_set_chip_and_handler(irq, &priv->irq_chip, handle_simple_irq);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ irq_set_noprobe(irq);
+#endif
+ return 0;
+}
+
+static void brcmstb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static struct irq_domain_ops brcmstb_gpio_irq_domain_ops = {
+ .map = brcmstb_gpio_irq_map,
+ .unmap = brcmstb_gpio_irq_unmap,
+ .xlate = irq_domain_xlate_twocell,
+};
+
/* Make sure that the number of banks matches up between properties */
static int brcmstb_gpio_sanity_check_banks(struct device *dev,
struct device_node *np, struct resource *res)
@@ -364,22 +449,21 @@
return offset;
}
-/* Before calling, must have bank->parent_irq set and gpiochip registered */
+/* priv->parent_irq and priv->num_gpios must be set before calling */
static int brcmstb_gpio_irq_setup(struct platform_device *pdev,
- struct brcmstb_gpio_bank *bank)
+ struct brcmstb_gpio_priv *priv)
{
- struct brcmstb_gpio_priv *priv = bank->parent_priv;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- bank->irq_chip.name = dev_name(dev);
- bank->irq_chip.irq_disable = brcmstb_gpio_irq_mask;
- bank->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
- bank->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
- bank->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
+ priv->irq_chip.name = dev_name(dev);
+ priv->irq_chip.irq_disable = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_mask = brcmstb_gpio_irq_mask;
+ priv->irq_chip.irq_unmask = brcmstb_gpio_irq_unmask;
+ priv->irq_chip.irq_set_type = brcmstb_gpio_irq_set_type;
/* Ensures that interrupts are masked when changing their type */
- bank->irq_chip.flags = IRQCHIP_SET_TYPE_MASKED;
+ priv->irq_chip.flags = IRQCHIP_SET_TYPE_MASKED;
if (IS_ENABLED(CONFIG_PM_SLEEP) && !priv->can_wake &&
of_property_read_bool(np, "wakeup-source")) {
@@ -414,15 +498,22 @@
}
if (priv->can_wake)
- bank->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
+ priv->irq_chip.irq_set_wake = brcmstb_gpio_irq_set_wake;
else
/* Ensures that all non-wakeup IRQs are disabled at suspend */
- bank->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND;
+ priv->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND;
- gpiochip_irqchip_add(&bank->bgc.gc, &bank->irq_chip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
- gpiochip_set_chained_irqchip(&bank->bgc.gc, &bank->irq_chip,
- priv->parent_irq, brcmstb_gpio_irq_handler);
+ priv->irq_domain =
+ irq_domain_add_linear(np, priv->num_gpios,
+ &brcmstb_gpio_irq_domain_ops,
+ priv);
+ if (!priv->irq_domain) {
+ dev_err(dev, "Couldn't allocate IRQ domain\n");
+ return -ENXIO;
+ }
+ irq_set_chained_handler(priv->parent_irq,
+ brcmstb_gpio_irq_handler);
+ irq_set_handler_data(priv->parent_irq, priv);
return 0;
}
@@ -616,6 +707,8 @@
gc->of_xlate = brcmstb_gpio_of_xlate;
/* not all ngpio lines are valid, will use bank width later */
gc->ngpio = MAX_GPIO_PER_BANK;
+ if (priv->parent_irq >= 0)
+ gc->to_irq = brcmstb_gpio_to_irq;
/*
* Mask all interrupts by default, since wakeup interrupts may
@@ -636,12 +729,6 @@
}
gpio_base += gc->ngpio;
- if (priv->parent_irq > 0) {
- err = brcmstb_gpio_irq_setup(pdev, bank);
- if (err)
- goto post_bgpio_init_fail;
- }
-
dev_dbg(dev, "bank=%d, base=%d, ngpio=%d, width=%d\n", bank->id,
gc->base, gc->ngpio, bank->width);
@@ -654,14 +741,18 @@
if (of_property_read_bool(np, "always-on"))
priv->always_on = true;
+ priv->num_gpios = gpio_base - priv->gpio_base;
+ if (priv->parent_irq >= 0) {
+ err = brcmstb_gpio_irq_setup(pdev, priv);
+ if (err)
+ goto fail;
+ }
+
dev_info(dev, "Registered %d banks (GPIO(s): %d-%d)\n",
num_banks, priv->gpio_base, gpio_base - 1);
return 0;
-post_bgpio_init_fail:
- if (bgpio_remove(bgc))
- dev_err(dev, "bgpio_remove fail in cleanup\n");
fail:
(void) brcmstb_gpio_remove(pdev);
return err;
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index b92a690..fb9c572 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -291,6 +291,14 @@
return 0;
}
+static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+ GPIOF_DIR_OUT : GPIOF_DIR_IN;
+}
+
static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct bgpio_chip *bgc = to_bgpio_chip(gc);
@@ -340,6 +348,14 @@
return 0;
}
+static int bgpio_get_dir_inv(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+
+ return (bgc->read_reg(bgc->reg_dir) & bgc->pin2mask(bgc, gpio)) ?
+ GPIOF_DIR_IN : GPIOF_DIR_OUT;
+}
+
static int bgpio_setup_accessors(struct device *dev,
struct bgpio_chip *bgc,
bool bit_be,
@@ -452,10 +468,12 @@
bgc->reg_dir = dirout;
bgc->gc.direction_output = bgpio_dir_out;
bgc->gc.direction_input = bgpio_dir_in;
+ bgc->gc.get_direction = bgpio_get_dir;
} else if (dirin) {
bgc->reg_dir = dirin;
bgc->gc.direction_output = bgpio_dir_out_inv;
bgc->gc.direction_input = bgpio_dir_in_inv;
+ bgc->gc.get_direction = bgpio_get_dir_inv;
} else {
bgc->gc.direction_output = bgpio_simple_dir_out;
bgc->gc.direction_input = bgpio_simple_dir_in;
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
index 84deb84..0156f4f 100644
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/brcmnand/brcmnand.c
@@ -92,6 +92,12 @@
#define FLASH_DMA_ECC_ERROR (1 << 8)
#define FLASH_DMA_CORR_ERROR (1 << 9)
+/* Bitfields for DMA_MODE */
+#define FLASH_DMA_MODE_STOP_ON_ERROR BIT(1) /* stop on uncorrectable ECC error */
+#define FLASH_DMA_MODE_MODE BIT(0) /* link list */
+#define FLASH_DMA_MODE_MASK (FLASH_DMA_MODE_STOP_ON_ERROR | \
+ FLASH_DMA_MODE_MODE)
+
/* 512B flash cache in the NAND controller HW */
#define FC_SHIFT 9U
#define FC_BYTES 512U
@@ -1077,18 +1083,31 @@
return IRQ_HANDLED;
}
+static int bcmnand_ctrl_busy_poll(struct brcmnand_controller *ctrl)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+
+ while (!(brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+ INTFC_CTLR_READY)) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(ctrl->dev, "timeout on ctrl_ready\n");
+ return -ETIMEDOUT;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
{
struct brcmnand_controller *ctrl = host->ctrl;
- u32 intfc;
dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
BUG_ON(ctrl->cmd_pending != 0);
ctrl->cmd_pending = cmd;
- intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
- BUG_ON(!(intfc & INTFC_CTLR_READY));
+ BUG_ON(bcmnand_ctrl_busy_poll(ctrl) == -ETIMEDOUT);
mb(); /* flush previous writes */
brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
@@ -1467,6 +1486,8 @@
/* Clear error addresses */
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
(host->cs << 16) | ((addr >> 32) & 0xffff));
@@ -1583,12 +1604,16 @@
struct brcmnand_controller *ctrl = host->ctrl;
u64 err_addr = 0;
int err;
- bool retry = true;
+ static bool retry = true;
dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
try_dmaread:
brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+ brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
@@ -1608,37 +1633,47 @@
}
if (mtd_is_eccerr(err)) {
- int ret;
/*
- * On oontroller version >=7.0 if we are doing a DMA read
- * after a prior PIO read that reported uncorrectable error,
+ * On controller version and 7.0, 7.1 , DMA read after a
+ * prior PIO read that reported uncorrectable error,
* the DMA engine captures this error following DMA read
* cleared only on subsequent DMA read, so just retry once
* to clear a possible false error reported for current DMA
* read
*/
- if ((ctrl->nand_version >= 0x0700) && retry) {
- retry = false;
- goto try_dmaread;
+ if ((ctrl->nand_version == 0x0700) ||
+ (ctrl->nand_version == 0x0701)) {
+ if (retry) {
+ retry = false;
+ goto try_dmaread;
+ }
}
- ret = brcmstb_nand_verify_erased_page(mtd, chip, buf, addr);
- if (ret < 0) {
- dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
- (unsigned long long)err_addr);
- mtd->ecc_stats.failed++;
- /* NAND layer expects zero on ECC errors */
- return 0;
- } else {
- if (buf)
- memset(buf, 0xff, FC_BYTES * trans);
- if (oob)
- memset(oob, 0xff, mtd->oobsize);
- dev_info(&host->pdev->dev,
- "corrected %d bitflips in blank page at 0x%llx\n",
- ret, (unsigned long long)addr);
- return ret;
+ /*
+ * Controller version 7.2 has hardware support for detecting
+ * erased-page bitflips; apply software verification for older
+ * controllers.
+ */
+ if (ctrl->nand_version < 0x0702) {
+ err = brcmstb_nand_verify_erased_page(mtd, chip, buf, addr);
+
+ if (err > 0) {
+ if (buf)
+ memset(buf, 0xff, FC_BYTES * trans);
+ if (oob)
+ memset(oob, 0xff, mtd->oobsize);
+
+ dev_info(&host->pdev->dev,
+ "%d erased page bitflips @ 0x%llx\n",
+ err, (unsigned long long)addr);
+ return err;
+ }
}
+
+ dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+ (unsigned long long)err_addr);
+ mtd->ecc_stats.failed++;
+ /* NAND layer expects zero on ECC errors */
+ return 0;
}
if (mtd_is_bitflip(err)) {
@@ -2307,7 +2342,8 @@
if (IS_ERR(ctrl->flash_dma_base))
return PTR_ERR(ctrl->flash_dma_base);
- flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
+ /* linked-list and stop on error */
+ flash_dma_writel(ctrl, FLASH_DMA_MODE, FLASH_DMA_MODE_MASK);
flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
/* Allocate descriptor(s) */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ca9eceb..bef4654 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@
static inline void dmadesc_set(struct bcmgenet_priv *priv,
void __iomem *d, dma_addr_t addr, u32 val)
{
- dmadesc_set_length_status(priv, d, val);
dmadesc_set_addr(priv, d, addr);
+ dmadesc_set_length_status(priv, d, val);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -661,6 +661,60 @@
return 0;
}
+static void bcmgenet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ u32 umac_cmd;
+
+ priv = netdev_priv(dev);
+
+ epause->autoneg = !!(priv->pause_flags & BCM_PAUSE_FLAG_AUTO);
+
+ if (priv->old_link > 0) {
+ /* report active state when link is up */
+ umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
+ epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
+ epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
+ } else {
+ /* otherwise report stored settings */
+ epause->rx_pause = !!(priv->pause_flags & BCM_PAUSE_FLAG_RX);
+ epause->tx_pause = !!(priv->pause_flags & BCM_PAUSE_FLAG_TX);
+ }
+}
+
+static int bcmgenet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ unsigned int pause_flags = 0;
+ int ret;
+
+ priv = netdev_priv(dev);
+
+ /* Pass parameters to the PHY level to set proper advertisement */
+ ret = bcmgenet_phy_ethtool_set_pauseparam(priv->phydev, epause);
+ if (ret)
+ return ret;
+
+ if (epause->rx_pause)
+ pause_flags |= BCM_PAUSE_FLAG_RX;
+ if (epause->tx_pause)
+ pause_flags |= BCM_PAUSE_FLAG_TX;
+ if (epause->autoneg)
+ pause_flags |= BCM_PAUSE_FLAG_AUTO;
+
+ priv->pause_flags = pause_flags;
+
+ /* Restart the PHY */
+ if (netif_running(dev)) {
+ priv->old_link = -1;
+ phy_start_aneg(priv->phydev);
+ }
+
+ return 0;
+}
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -1059,6 +1113,8 @@
.nway_reset = bcmgenet_nway_reset,
.get_coalesce = bcmgenet_get_coalesce,
.set_coalesce = bcmgenet_set_coalesce,
+ .get_pauseparam = bcmgenet_get_pauseparam,
+ .set_pauseparam = bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
@@ -1264,6 +1320,7 @@
struct enet_cb *tx_cb_ptr;
struct netdev_queue *txq;
unsigned int pkts_compl = 0;
+ unsigned int bytes_compl = 0;
unsigned int c_index;
unsigned int txbds_ready;
unsigned int txbds_processed = 0;
@@ -1282,16 +1339,13 @@
tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr];
if (tx_cb_ptr->skb) {
pkts_compl++;
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent;
dma_unmap_single(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
- tx_cb_ptr->skb->len,
+ dma_unmap_len(tx_cb_ptr, dma_len),
DMA_TO_DEVICE);
bcmgenet_free_cb(tx_cb_ptr);
} else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
- dev->stats.tx_bytes +=
- dma_unmap_len(tx_cb_ptr, dma_len);
dma_unmap_page(&dev->dev,
dma_unmap_addr(tx_cb_ptr, dma_addr),
dma_unmap_len(tx_cb_ptr, dma_len),
@@ -1309,6 +1363,9 @@
ring->free_bds += txbds_processed;
ring->c_index = (ring->c_index + txbds_processed) & DMA_C_INDEX_MASK;
+ dev->stats.tx_packets += pkts_compl;
+ dev->stats.tx_bytes += bytes_compl;
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
@@ -1385,7 +1442,7 @@
tx_cb_ptr->skb = skb;
- skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+ skb_len = skb_headlen(skb);
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
@@ -1397,7 +1454,7 @@
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len);
length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
DMA_TX_APPEND_CRC;
@@ -1419,6 +1476,7 @@
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
+ unsigned int frag_size;
dma_addr_t mapping;
int ret;
@@ -1426,10 +1484,12 @@
if (unlikely(!tx_cb_ptr))
BUG();
+
tx_cb_ptr->skb = NULL;
- mapping = skb_frag_dma_map(kdev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ frag_size = skb_frag_size(frag);
+
+ mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
priv->mib.tx_dma_failed++;
@@ -1439,10 +1499,10 @@
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
return 0;
@@ -1536,15 +1596,19 @@
else
index -= 1;
- nr_frags = skb_shinfo(skb)->nr_frags;
ring = &priv->tx_rings[index];
txq = netdev_get_tx_queue(dev, ring->queue);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
spin_lock_irqsave(&ring->lock, flags);
- if (ring->free_bds <= nr_frags + 1) {
- netif_tx_stop_queue(txq);
- netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
+ if (ring->free_bds <= (nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev,
+ "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ }
ret = NETDEV_TX_BUSY;
goto out;
}
@@ -1554,6 +1618,11 @@
goto out;
}
+ /* Record the number of bytes that will be sent on the wire, excluding
+ * the TSB inserted by transmit checksum offload
+ */
+ GENET_CB(skb)->bytes_sent = skb->len;
+
/* set the SKB transmit checksum */
if (priv->desc_64b_en) {
skb = bcmgenet_put_tx_csum(dev, skb);
@@ -2520,11 +2589,9 @@
}
/* Link UP/DOWN event */
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- (status & UMAC_IRQ_LINK_EVENT)) {
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
!!(status & UMAC_IRQ_LINK_UP));
- }
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index d3f1fdb..6d77224 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -543,6 +543,12 @@
u32 flags;
};
+struct bcmgenet_skb_cb {
+ unsigned int bytes_sent; /* bytes on the wire (no TSB) */
+};
+
+#define GENET_CB(skb) ((struct bcmgenet_skb_cb *)((skb)->cb))
+
struct bcmgenet_tx_ring {
spinlock_t lock; /* ring lock */
struct napi_struct napi; /* NAPI per tx queue */
@@ -577,6 +583,11 @@
struct bcmgenet_priv *priv;
};
+/* context pause flags */
+#define BCM_PAUSE_FLAG_AUTO (1 << 2)
+#define BCM_PAUSE_FLAG_RX (1 << 1)
+#define BCM_PAUSE_FLAG_TX (1 << 0)
+
/* device context */
struct bcmgenet_priv {
void __iomem *base;
@@ -638,6 +649,7 @@
bool crc_fwd_en;
unsigned int dma_rx_chk_bit;
+ unsigned int pause_flags;
u32 msg_enable;
@@ -691,6 +703,9 @@
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
+int bcmgenet_phy_ethtool_set_pauseparam(struct phy_device *phydev,
+ struct ethtool_pauseparam *epause);
+
/* Wake-on-LAN routines */
void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e84b246..54bab70 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -80,6 +80,30 @@
return 0;
}
+static u32 _flow_control_autoneg(struct phy_device *phydev)
+{
+ u32 cmd_bits;
+
+ if (phydev->pause && (phydev->advertising & ADVERTISED_Pause)) {
+ /* Symmetric Flow Control */
+ cmd_bits = 0;
+ } else {
+ /* Not Symmetric so default to no flow control */
+ cmd_bits = CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ /* Check for Asymmetric */
+ if (phydev->asym_pause &&
+ phydev->advertising & ADVERTISED_Asym_Pause) {
+ if (phydev->advertising & ADVERTISED_Pause)
+ cmd_bits &= ~CMD_RX_PAUSE_IGNORE;
+ else if (phydev->pause)
+ cmd_bits &= ~CMD_TX_PAUSE_IGNORE;
+ }
+ }
+
+ return cmd_bits;
+}
+
/* setup netdev link state when PHY link status change and
* update UMAC and RGMII block when link up
*/
@@ -126,12 +150,20 @@
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
- if (phydev->duplex != DUPLEX_FULL)
- cmd_bits |= CMD_HD_EN;
+ if (phydev->duplex != DUPLEX_FULL) {
+ cmd_bits |= CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ } else {
+ /* pause capability defaults to Symmetric */
+ if (priv->pause_flags & BCM_PAUSE_FLAG_AUTO)
+ cmd_bits |= _flow_control_autoneg(phydev);
- /* pause capability */
- if (!phydev->pause)
- cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ /* Manual override */
+ if (!(priv->pause_flags & BCM_PAUSE_FLAG_RX))
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
+ if (!(priv->pause_flags & BCM_PAUSE_FLAG_TX))
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ }
/*
* Program UMAC and RGMII block based on established
@@ -189,6 +221,46 @@
}
}
+/* This functionality really belongs in genphy_config_advert(), but is here
+ * because there is no mechanism for saving the ethtool pause settings in a
+ * generic PHY device yet
+ */
+static void _phy_pause_set(struct phy_device *phydev, unsigned int flags)
+{
+ unsigned int advertise;
+
+ advertise = phydev->advertising &
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (flags & BCM_PAUSE_FLAG_AUTO) {
+ if (flags & BCM_PAUSE_FLAG_RX)
+ advertise |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ else if (flags & BCM_PAUSE_FLAG_TX)
+ advertise |= ADVERTISED_Asym_Pause;
+ }
+
+ advertise &= phydev->supported;
+ phydev->advertising = advertise;
+}
+
+/* This helper lives here because no generic PHY implementation exists yet */
+int bcmgenet_phy_ethtool_set_pauseparam(struct phy_device *phydev,
+ struct ethtool_pauseparam *epause)
+{
+ unsigned int pause_flags = 0;
+
+ if (epause->rx_pause)
+ pause_flags |= BCM_PAUSE_FLAG_RX;
+ if (epause->tx_pause)
+ pause_flags |= BCM_PAUSE_FLAG_TX;
+ if (epause->autoneg)
+ pause_flags |= BCM_PAUSE_FLAG_AUTO;
+
+ _phy_pause_set(phydev, pause_flags);
+
+ return 0;
+}
+
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -288,6 +360,8 @@
if (priv->internal_phy) {
phy_name = "internal PHY";
+ phydev->supported |=
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phy_name = "MoCA";
@@ -403,6 +477,7 @@
}
phydev->advertising = phydev->supported;
+ _phy_pause_set(phydev, priv->pause_flags);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 300042b..6c8fb09 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -31,6 +31,7 @@
#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
+#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
@@ -39,6 +40,8 @@
#define MII_BCM7XXX_AN_EEE_EN BIT(1)
#define MII_BCM7XXX_SHD_3_EEE_THRESH 0xe
#define MII_BCM7XXX_EEE_THRESH_DEF 0x50
+#define MII_BCM7XXX_SHD_3_TL4 0x23
+#define MII_BCM7XXX_TL4_RST_MSK (BIT(2)|BIT(1))
/* 28nm only register definitions */
#define MISC_ADDR(base, channel) base, channel
@@ -480,6 +483,51 @@
return 0;
}
+static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Set current trim values: INT_trim = -1, Ext_trim = 0 */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_BIAS_TRIM, 0x3BE0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ MII_BCM7XXX_TL4_RST_MSK, 0);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Cal reset disable */
+ ret = phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL,
+ MII_BCM7XXX_SHD_3_TL4);
+ if (ret < 0)
+ goto reset_shadow_mode;
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT,
+ 0, MII_BCM7XXX_TL4_RST_MSK);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
{
u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
@@ -495,6 +543,13 @@
*/
phy_read(phydev, MII_BMSR);
+ /* Apply AFE software work-around if necessary */
+ if (rev == 0x01) {
+ ret = bcm7xxx_28nm_ephy_01_afe_config_init(phydev);
+ if (ret)
+ return ret;
+ }
+
ret = bcm7xxx_28nm_ephy_eee_enable(phydev);
if (ret)
return ret;
diff --git a/drivers/pci/host/pci-brcmstb.c b/drivers/pci/host/pci-brcmstb.c
index f5f399f..480b736 100644
--- a/drivers/pci/host/pci-brcmstb.c
+++ b/drivers/pci/host/pci-brcmstb.c
@@ -87,9 +87,12 @@
#define PCI_SLOT_SHIFT 15
#define PCI_FUNC_SHIFT 12
-#define IDX_ADDR(pcie) ((pcie->base) + pcie->reg_offsets[EXT_CFG_INDEX])
-#define DATA_ADDR(pcie) ((pcie->base) + pcie->reg_offsets[EXT_CFG_DATA])
-#define PCIE_RGR1_SW_INIT_1(pcie) ((pcie->base) + pcie->reg_offsets[RGR1_SW_INIT_1])
+#define IDX_ADDR(pcie) \
+ ((pcie->base) + pcie->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) \
+ ((pcie->base) + pcie->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) \
+ ((pcie->base) + pcie->reg_offsets[RGR1_SW_INIT_1])
enum {
RGR1_SW_INIT_1,
@@ -135,14 +138,13 @@
.type = GENERIC,
};
-static int brcm_pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *data);
-static int brcm_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 data);
+static void __iomem *brcm_pci_map_cfg(struct pci_bus *bus, unsigned int devfn,
+ int where);
static struct pci_ops brcm_pci_ops = {
- .read = brcm_pci_read_config,
- .write = brcm_pci_write_config,
+ .map_bus = brcm_pci_map_cfg,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
static int brcm_setup_pcie_bridge(int nr, struct pci_sys_data *sys);
@@ -228,18 +230,21 @@
/* negative return value indicates error */
static int mdio_read(void __iomem *base, u8 phyad, u8 regad)
{
+ const int TRYS = 10;
u32 data = ((phyad & 0xf) << 16)
| (regad & 0x1f)
| 0x100000;
+ int i = 0;
__raw_writel(data, base + PCIE_RC_DL_MDIO_ADDR);
__raw_readl(base + PCIE_RC_DL_MDIO_ADDR);
data = __raw_readl(base + PCIE_RC_DL_MDIO_RD_DATA);
- if (!(data & 0x80000000)) {
- mdelay(1);
+ while (!(data & 0x80000000) && ++i < TRYS) {
+ udelay(10);
data = __raw_readl(base + PCIE_RC_DL_MDIO_RD_DATA);
}
+
return (data & 0x80000000) ? (data & 0xffff) : -EIO;
}
@@ -248,23 +253,27 @@
static int mdio_write(void __iomem *base, u8 phyad, u8 regad, u16 wrdata)
{
u32 data = ((phyad & 0xf) << 16) | (regad & 0x1f);
+ const int TRYS = 10;
+ int i = 0;
__raw_writel(data, base + PCIE_RC_DL_MDIO_ADDR);
__raw_readl(base + PCIE_RC_DL_MDIO_ADDR);
-
__raw_writel(0x80000000 | wrdata, base + PCIE_RC_DL_MDIO_WR_DATA);
+
data = __raw_readl(base + PCIE_RC_DL_MDIO_WR_DATA);
- if (!(data & 0x80000000)) {
- mdelay(1);
+ while ((data & 0x80000000) && ++i < TRYS) {
+ udelay(10);
data = __raw_readl(base + PCIE_RC_DL_MDIO_WR_DATA);
}
- return (data & 0x80000000) ? 0 : -EIO;
+
+ return (data & 0x80000000) ? -EIO : 0;
}
static void wr_fld(void __iomem *p, u32 mask, int shift, u32 val)
{
u32 reg = __raw_readl(p);
+
reg = (reg & ~mask) | (val << shift);
__raw_writel(reg, p);
}
@@ -309,6 +318,7 @@
static int is_ssc(void __iomem *base)
{
int tmp = mdio_write(base, 0, 0x1f, 0x1100);
+
if (tmp < 0)
return tmp;
tmp = mdio_read(base, 0, 1);
@@ -346,6 +356,7 @@
{
void __iomem *base = pcie->base;
u32 val = __raw_readl(base + PCIE_MISC_PCIE_STATUS);
+
return ((val & 0x30) == 0x30) ? 1 : 0;
}
@@ -479,7 +490,8 @@
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie, int nr)
{
- static const char brcm_msi_name[] = "brcmstb_pcieX_msi";
+ static const char brcm_msi_name[] = "PCIeX_msi";
+ const char trailer[] = "_msi";
struct brcm_msi *msi = &pcie->msi;
u32 data_val;
char *name;
@@ -497,14 +509,11 @@
* MSI controllers for them. We want each to have a
* unique name, so we go to the trouble of having an
* irq_chip per RC (instead of one for all of them). */
- name = devm_kzalloc(pcie->dev, sizeof(brcm_msi_name),
- GFP_KERNEL);
+ name = devm_kzalloc(pcie->dev, sizeof(pcie->name)
+ + sizeof(trailer), GFP_KERNEL);
if (name) {
- char *p;
- strcpy(name, brcm_msi_name);
- p = strchr(name, 'X');
- if (p)
- *p = '0' + nr;
+ strcpy(name, pcie->name);
+ strcat(name, trailer);
msi->irq_chip.name = name;
} else {
msi->irq_chip.name = brcm_msi_name;
@@ -620,6 +629,7 @@
for (i = 0; i < pcie->num_out_wins; i++) {
struct brcm_window *w = &pcie->out_wins[i];
+
set_pcie_outbound_win(base, i, w->cpu_addr, w->size);
}
@@ -663,9 +673,6 @@
__raw_writel(0xffffffff, base + PCIE_INTR2_CPU_BASE + MASK_SET);
(void) __raw_readl(base + PCIE_INTR2_CPU_BASE + MASK_SET);
- if (pcie->ssc)
- if (set_ssc(base))
- dev_err(pcie->dev, "error while configuring ssc mode\n");
if (pcie->gen)
set_gen(base, pcie->gen);
@@ -722,10 +729,19 @@
status = __raw_readl(base + PCIE_RC_CFG_PCIE_LINK_STATUS_CONTROL);
if (pcie->ssc) {
- if (is_ssc(base) == 0)
+ if (set_ssc(base))
+ dev_err(pcie->dev,
+ "mdio rd/wt fail during ssc config\n");
+
+ ret = is_ssc(base);
+ if (ret == 0) {
ssc_good = true;
- else
+ } else {
+ if (ret < 0)
+ dev_err(pcie->dev,
+ "mdio rd/wt fail during ssc query\n");
dev_err(pcie->dev, "failed to enter SSC mode\n");
+ }
}
dev_info(pcie->dev, "link up, %s Gbps x%u %s\n",
@@ -885,103 +901,34 @@
| (reg & ~3);
}
-static u32 read_config(struct brcm_pcie *pcie, int cfg_idx)
-{
- __raw_writel(cfg_idx, IDX_ADDR(pcie));
- __raw_readl(IDX_ADDR(pcie));
- return __raw_readl(DATA_ADDR(pcie));
-}
-static void write_config(struct brcm_pcie *pcie, int cfg_idx, u32 val)
+static void __iomem *brcm_pci_map_cfg(struct pci_bus *bus, unsigned int devfn,
+ int where)
{
- __raw_writel(cfg_idx, IDX_ADDR(pcie));
- __raw_readl(IDX_ADDR(pcie));
- __raw_writel(val, DATA_ADDR(pcie));
- __raw_readl(DATA_ADDR(pcie));
-}
-
-
-static int brcm_pci_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 data)
-{
- u32 val = 0, mask, shift;
struct pci_sys_data *sys = bus->sysdata;
struct brcm_pcie *pcie = sys->private_data;
- void __iomem *base;
- bool rc_access;
+ void __iomem *base = pcie->base;
+ bool rc_access = pci_is_root_bus(bus);
int idx;
if (!is_pcie_link_up(pcie))
- return PCIBIOS_DEVICE_NOT_FOUND;
+ return NULL;
base = pcie->base;
- rc_access = sys->busnr == bus->number;
idx = cfg_index(bus->number, devfn, where);
- BUG_ON(((where & 3) + size) > 4);
- if (rc_access && PCI_SLOT(devfn))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (size < 4) {
- /* partial word - read, modify, write */
- if (rc_access)
- val = __raw_readl(base + (where & ~3));
- else
- val = read_config(pcie, idx);
- }
-
- shift = (where & 3) << 3;
- mask = (0xffffffff >> ((4 - size) << 3)) << shift;
- val = (val & ~mask) | ((data << shift) & mask);
+ __raw_writel(idx, IDX_ADDR(pcie));
if (rc_access) {
- __raw_writel(val, base + (where & ~3));
- __raw_readl(base + (where & ~3));
- } else {
- write_config(pcie, idx, val);
- }
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int brcm_pci_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *data)
-{
- struct pci_sys_data *sys = bus->sysdata;
- struct brcm_pcie *pcie = sys->private_data;
- u32 val, mask, shift;
- void __iomem *base;
- bool rc_access;
- int idx;
-
- if (!is_pcie_link_up(pcie))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- base = pcie->base;
- rc_access = sys->busnr == bus->number;
- idx = cfg_index(bus->number, devfn, where);
- BUG_ON(((where & 3) + size) > 4);
-
- if (rc_access && PCI_SLOT(devfn)) {
- *data = 0xffffffff;
- return PCIBIOS_FUNC_NOT_SUPPORTED;
+ if (PCI_SLOT(devfn))
+ return NULL;
+ return base + (where & ~3);
}
- if (rc_access)
- val = __raw_readl(base + (where & ~3));
- else
- val = read_config(pcie, idx);
-
- shift = (where & 3) << 3;
- mask = (0xffffffff >> ((4 - size) << 3)) << shift;
- *data = (val & mask) >> shift;
-
- return PCIBIOS_SUCCESSFUL;
+ return DATA_ADDR(pcie);
}
-
-
/***********************************************************************
* PCI slot to IRQ mappings (aka "fixup")
***********************************************************************/
@@ -1212,6 +1159,7 @@
for (i = 0; i < pcie->num_out_wins; i++) {
struct brcm_window *w = &pcie->out_wins[i];
+
w->info = (u32) of_read_ulong(ranges + 0, 1);
w->pci_addr = of_read_number(ranges + 1, 2);
w->cpu_addr = of_translate_address(dn, ranges + 3);
diff --git a/drivers/soc/brcmstb/Kconfig b/drivers/soc/brcmstb/Kconfig
index 8d14aa4..37ffc8c 100644
--- a/drivers/soc/brcmstb/Kconfig
+++ b/drivers/soc/brcmstb/Kconfig
@@ -52,6 +52,17 @@
depends on PM
depends on ARM && ARCH_BRCMSTB
+config BRCMSTB_PM_DEBUG
+ bool "Add debugfs interface for STB suspend/resume driver"
+ default y
+ depends on BRCMSTB_PM && PM_DEBUG && DEBUG_FS
+ help
+ This option enables debugfs support for the STB suspend/resume
+ driver. Specifically, it allows memory regions to be included in,
+ or excluded from, hashing.
+
+ If you are unsure, choose Y here.
+
config BRCMSTB_SRPD
tristate "Support enabling DDR self-refresh modes"
help
diff --git a/drivers/soc/brcmstb/pm/pm.c b/drivers/soc/brcmstb/pm/pm.c
index a5ca57c..03ea505 100644
--- a/drivers/soc/brcmstb/pm/pm.c
+++ b/drivers/soc/brcmstb/pm/pm.c
@@ -21,6 +21,11 @@
#define pr_fmt(fmt) "brcmstb-pm: " fmt
+#ifdef CONFIG_BRCMSTB_PM_DEBUG
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#endif
+
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/io.h>
@@ -103,8 +108,12 @@
static struct brcmstb_pm_control ctrl;
#define MAX_EXCLUDE 16
+#define MAX_REGION 16
+#define MAX_EXTRA 8
static int num_exclusions;
+static int num_regions;
static struct dma_region exclusions[MAX_EXCLUDE];
+static struct dma_region regions[MAX_REGION];
static struct brcmstb_memory bm;
extern const unsigned long brcmstb_pm_do_s2_sz;
@@ -114,6 +123,166 @@
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
void __iomem *ddr_phy_pll_status);
+#ifdef CONFIG_BRCMSTB_PM_DEBUG
+
+#define BRCMSTB_PM_DEBUG_NAME "brcmstb-pm"
+
+struct sysfs_data {
+ struct dma_region *region;
+ unsigned len;
+};
+
+static int brcm_pm_debug_show(struct seq_file *s, void *data)
+{
+ int i;
+ struct sysfs_data *sysfs_data = s->private;
+
+ if (!sysfs_data) {
+ seq_puts(s, "--- No region pointer ---\n");
+ return 0;
+ }
+ if (sysfs_data->len == 0) {
+ seq_puts(s, "--- Nothing to display ---\n");
+ return 0;
+ }
+ if (!sysfs_data->region) {
+ seq_printf(s, "--- Pointer is NULL, but length is %u ---\n",
+ sysfs_data->len);
+ return 0;
+ }
+
+ for (i = 0; i < sysfs_data->len; i++) {
+ unsigned long addr = sysfs_data->region[i].addr;
+ unsigned long len = sysfs_data->region[i].len;
+ unsigned long end = (addr > 0 || len > 0) ? addr + len - 1 : 0;
+
+ seq_printf(s, "%3d\t0x%08lx\t%12lu\t0x%08lx\n", i, addr, len,
+ end);
+ }
+ return 0;
+}
+
+static ssize_t brcm_pm_seq_write(struct file *file, const char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ unsigned long start_addr, len;
+ int ret;
+ char str[128];
+ char *len_ptr;
+ struct seq_file *s = file->private_data;
+ struct sysfs_data *sysfs_data = s->private;
+ bool is_exclusion;
+
+ if (!sysfs_data)
+ return -ENOMEM;
+
+ if (size >= sizeof(str))
+ return -E2BIG;
+
+ is_exclusion = (sysfs_data->region == exclusions);
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return ret;
+
+ /* Strip trailing newline */
+ len_ptr = str + strlen(str) - 1;
+ while (*len_ptr == '\r' || *len_ptr == '\n')
+ *len_ptr-- = '\0';
+
+ /* Special command "clear" empties the exclusions or regions list. */
+ if (strcmp(str, "clear") == 0) {
+ int region_len = sysfs_data->len * sizeof(*sysfs_data->region);
+
+ if (is_exclusion)
+ num_exclusions = 0;
+ else
+ num_regions = 0;
+ memset(sysfs_data->region, 0, region_len);
+ return size;
+ }
+
+ /*
+ * We expect userland input to be in the format
+ * <start-address> <length>
+ * where start-address and length are separated by one or more spaces.
+ * Both must be valid numbers. We do accept decimal, hexadecimal and
+ * octal numbers.
+ */
+ len_ptr = strchr(str, ' ');
+ if (!len_ptr)
+ return -EINVAL;
+ *len_ptr = '\0';
+ do {
+ len_ptr++;
+ } while (*len_ptr == ' ');
+
+ if (kstrtoul(str, 0, &start_addr) != 0)
+ return -EINVAL;
+ if (kstrtoul(len_ptr, 0, &len) != 0)
+ return -EINVAL;
+
+ if (is_exclusion)
+ ret = brcmstb_pm_mem_exclude(start_addr, len);
+ else
+ ret = brcmstb_pm_mem_region(start_addr, len);
+
+ return ret < 0 ? ret : size;
+}
+
+static int brcm_pm_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, brcm_pm_debug_show, inode->i_private);
+}
+
+static const struct file_operations brcm_pm_debug_ops = {
+ .open = brcm_pm_debug_open,
+ .read = seq_read,
+ .write = brcm_pm_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int brcm_pm_debug_init(void)
+{
+ struct dentry *dir;
+ struct sysfs_data *exclusion_data, *region_data;
+
+ dir = debugfs_create_dir(BRCMSTB_PM_DEBUG_NAME, NULL);
+ if (IS_ERR_OR_NULL(dir))
+ return IS_ERR(dir) ? PTR_ERR(dir) : -ENOENT;
+
+ /*
+ * This driver has no "exit" function, so we don't worry about freeing
+ * these memory areas if setup succeeds.
+ */
+ exclusion_data = kmalloc(sizeof(*exclusion_data), GFP_KERNEL);
+ if (!exclusion_data)
+ return -ENOMEM;
+ region_data = kmalloc(sizeof(*region_data), GFP_KERNEL);
+ if (!region_data) {
+ kfree(exclusion_data);
+ return -ENOMEM;
+ }
+
+ exclusion_data->region = exclusions;
+ exclusion_data->len = ARRAY_SIZE(exclusions);
+ region_data->region = regions;
+ region_data->len = ARRAY_SIZE(regions);
+
+ debugfs_create_file("exclusions", S_IFREG | S_IRUGO | S_IWUSR, dir,
+ exclusion_data, &brcm_pm_debug_ops);
+ debugfs_create_file("regions", S_IFREG | S_IRUGO | S_IWUSR, dir,
+ region_data, &brcm_pm_debug_ops);
+
+ return 0;
+}
+
+fs_initcall(brcm_pm_debug_init);
+
+#endif /* CONFIG_BRCMSTB_PM_DEBUG */
+
static int brcmstb_init_sram(struct device_node *dn)
{
void __iomem *sram;
@@ -316,8 +485,8 @@
* Check if @regions[0] collides with regions in @exceptions, and modify
* regions[0..(max-1)] to ensure that they exclude any area in @exceptions
*
- * Note that the regions in @exceptions must be sorted into ascending order prior
- * to calling this function
+ * Note that the regions in @exceptions must be sorted into ascending order
+ * prior to calling this function
*
* Returns the number of @regions used
*
@@ -497,7 +666,8 @@
ret = memdma_prepare_descs(desc1, pa1, ®ions[0], regions1, true);
if (ret)
return ret;
- ret = memdma_prepare_descs(desc2, pa2, ®ions[regions1], regions2, false);
+ ret = memdma_prepare_descs(desc2, pa2, ®ions[regions1], regions2,
+ false);
if (ret)
return ret;
@@ -521,16 +691,21 @@
phys_addr_t params_pa, struct dma_region *except,
int num_except)
{
- struct dma_region regions[40];
+ struct dma_region combined_regions[MAX_EXCLUDE + MAX_REGION + MAX_EXTRA];
phys_addr_t descs_pa;
struct mcpb_dma_desc *descs;
- int nregs, ret;
+ int nregs, ret, i;
+ const int max = ARRAY_SIZE(combined_regions);
- nregs = configure_main_hash(regions, ARRAY_SIZE(regions), except,
- num_except);
+ memset(&combined_regions, 0, sizeof(combined_regions));
+ nregs = configure_main_hash(combined_regions, max, except, num_except);
if (nregs < 0)
return nregs;
+ for (i = 0; i < num_regions && nregs + i < max; i++)
+ combined_regions[nregs + i] = regions[i];
+ nregs += i;
+
/* Flush out before hashing main memory */
flush_cache_all();
@@ -539,7 +714,8 @@
descs = (struct mcpb_dma_desc *)params->descriptors;
/* Split, run hash */
- ret = run_dual_hash(regions, nregs, descs, descs_pa, params->hash);
+ ret = run_dual_hash(combined_regions, nregs, descs, descs_pa,
+ params->hash);
if (ret < 0)
return ret;
params->desc_offset_2 = ret;
@@ -562,6 +738,21 @@
}
EXPORT_SYMBOL(brcmstb_pm_mem_exclude);
+int brcmstb_pm_mem_region(phys_addr_t addr, size_t len)
+{
+ if (num_regions >= MAX_REGION) {
+ pr_err("regions list is full\n");
+ return -ENOSPC;
+ }
+
+ regions[num_regions].addr = addr;
+ regions[num_regions].len = len;
+ num_regions++;
+
+ return 0;
+}
+EXPORT_SYMBOL(brcmstb_pm_mem_region);
+
/*
* This function is called on a new stack, so don't allow inlining (which will
* generate stack references on the old stack)
diff --git a/drivers/usb/host/usb-brcm-common-init.c b/drivers/usb/host/usb-brcm-common-init.c
index aeb552b..ab2c4a1 100644
--- a/drivers/usb/host/usb-brcm-common-init.c
+++ b/drivers/usb/host/usb-brcm-common-init.c
@@ -170,7 +170,7 @@
{ 0x73640000, BRCM_FAMILY_7364A0 },
{ 0x73640010, BRCM_FAMILY_7364A0 },
{ 0x73660020, BRCM_FAMILY_7366C0 },
- { 0x74371000, BRCM_FAMILY_74371A0 },
+ { 0x07437100, BRCM_FAMILY_74371A0 },
{ 0x74390010, BRCM_FAMILY_7439B0 },
{ 0x74450030, BRCM_FAMILY_7445D0 },
{ 0x74450040, BRCM_FAMILY_7445D0 },
diff --git a/include/linux/brcmstb/brcmstb.h b/include/linux/brcmstb/brcmstb.h
index c385f77..c43f731 100644
--- a/include/linux/brcmstb/brcmstb.h
+++ b/include/linux/brcmstb/brcmstb.h
@@ -369,6 +369,10 @@
int brcmstb_pm_mem_exclude(phys_addr_t addr, size_t len);
/* So users can determine whether the kernel provides this API */
#define BRCMSTB_HAS_PM_MEM_EXCLUDE
+
+/* Add region to be hashed during S3 suspend/resume. */
+int brcmstb_pm_mem_region(phys_addr_t addr, size_t len);
+#define BRCMSTB_HAS_PM_MEM_REGION
#endif
#endif /* !defined(__ASSEMBLY__) */