| // SPDX-License-Identifier: GPL-2.0 |
| // |
| // Copyright (c) 2011-2014 Samsung Electronics Co., Ltd. |
| // http://www.samsung.com/ |
| // |
| // Exynos - CPU PMU(Power Management Unit) support |
| |
#include <linux/array_size.h>
#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
| |
| #include <linux/soc/samsung/exynos-regs-pmu.h> |
| #include <linux/soc/samsung/exynos-pmu.h> |
| |
| #include "exynos-pmu.h" |
| |
| #define PMUALIVE_MASK GENMASK(13, 0) |
| #define TENSOR_SET_BITS (BIT(15) | BIT(14)) |
| #define TENSOR_CLR_BITS BIT(15) |
| #define TENSOR_SMC_PMU_SEC_REG 0x82000504 |
| #define TENSOR_PMUREG_READ 0 |
| #define TENSOR_PMUREG_WRITE 1 |
| #define TENSOR_PMUREG_RMW 2 |
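
/*
 * The SMC function ID (TENSOR_SMC_PMU_SEC_REG) lies in the SiP service range
 * of the Arm SMC Calling Convention and is presumably dispatched by the
 * el3mon firmware, which performs the requested read/write/read-modify-write
 * on the physical PMU register address passed in the first argument.
 */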
| |
| struct exynos_pmu_context { |
| struct device *dev; |
| const struct exynos_pmu_data *pmu_data; |
| struct regmap *pmureg; |
| struct regmap *pmuintrgen; |
| /* |
| * Serialization lock for CPU hot plug and cpuidle ACPM hint |
| * programming. Also protects in_cpuhp, sys_insuspend & sys_inreboot |
| * flags. |
| */ |
| raw_spinlock_t cpupm_lock; |
| unsigned long *in_cpuhp; |
| bool sys_insuspend; |
| bool sys_inreboot; |
| }; |
| |
| void __iomem *pmu_base_addr; |
| static struct exynos_pmu_context *pmu_context; |
| /* forward declaration */ |
| static struct platform_driver exynos_pmu_driver; |
| |
| /* |
| * Tensor SoCs are configured so that PMU_ALIVE registers can only be written |
| * from EL3, but are still read accessible. As Linux needs to write some of |
| * these registers, the following functions are provided and exposed via |
| * regmap. |
| * |
| * Note: This SMC interface is known to be implemented on gs101 and derivative |
| * SoCs. |
| */ |
| |
| /* Write to a protected PMU register. */ |
| static int tensor_sec_reg_write(void *context, unsigned int reg, |
| unsigned int val) |
| { |
| struct arm_smccc_res res; |
| unsigned long pmu_base = (unsigned long)context; |
| |
| arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, |
| TENSOR_PMUREG_WRITE, val, 0, 0, 0, 0, &res); |
| |
	/* the SMC returns 0 on success, or -EINVAL if access isn't allowed */
| if (res.a0) |
| pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); |
| |
| return (int)res.a0; |
| } |
| |
| /* Read/Modify/Write a protected PMU register. */ |
| static int tensor_sec_reg_rmw(void *context, unsigned int reg, |
| unsigned int mask, unsigned int val) |
| { |
| struct arm_smccc_res res; |
| unsigned long pmu_base = (unsigned long)context; |
| |
| arm_smccc_smc(TENSOR_SMC_PMU_SEC_REG, pmu_base + reg, |
| TENSOR_PMUREG_RMW, mask, val, 0, 0, 0, &res); |
| |
	/* the SMC returns 0 on success, or -EINVAL if access isn't allowed */
| if (res.a0) |
| pr_warn("%s(): SMC failed: %d\n", __func__, (int)res.a0); |
| |
| return (int)res.a0; |
| } |
| |
| /* |
| * Read a protected PMU register. All PMU registers can be read by Linux. |
| * Note: The SMC read register is not used, as only registers that can be |
| * written are readable via SMC. |
| */ |
| static int tensor_sec_reg_read(void *context, unsigned int reg, |
| unsigned int *val) |
| { |
| *val = pmu_raw_readl(reg); |
| return 0; |
| } |
| |
| /* |
| * For SoCs that have set/clear bit hardware this function can be used when |
| * the PMU register will be accessed by multiple masters. |
| * |
| * For example, to set bits 13:8 in PMU reg offset 0x3e80 |
| * tensor_set_bits_atomic(ctx, 0x3e80, 0x3f00, 0x3f00); |
| * |
| * Set bit 8, and clear bits 13:9 PMU reg offset 0x3e80 |
| * tensor_set_bits_atomic(0x3e80, 0x100, 0x3f00); |
| */ |
| static int tensor_set_bits_atomic(void *ctx, unsigned int offset, u32 val, |
| u32 mask) |
| { |
| int ret; |
| unsigned int i; |
| |
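	/*
	 * The set/clear hardware encodes the operation in the top bits of the
	 * register offset: bits 15:14 = 0b11 sets a bit, 0b10 clears it, and
	 * the value written is the index of the bit to modify. Each bit in
	 * the mask therefore needs its own SMC write.
	 */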
| for (i = 0; i < 32; i++) { |
| if (!(mask & BIT(i))) |
| continue; |
| |
| offset &= ~TENSOR_SET_BITS; |
| |
| if (val & BIT(i)) |
| offset |= TENSOR_SET_BITS; |
| else |
| offset |= TENSOR_CLR_BITS; |
| |
| ret = tensor_sec_reg_write(ctx, offset, i); |
| if (ret) |
| return ret; |
| } |
| return 0; |
| } |
| |
| static bool tensor_is_atomic(unsigned int reg) |
| { |
| /* |
| * Use atomic operations for PMU_ALIVE registers (offset 0~0x3FFF) |
| * as the target registers can be accessed by multiple masters. SFRs |
| * that don't support atomic are added to the switch statement below. |
| */ |
| if (reg > PMUALIVE_MASK) |
| return false; |
| |
| switch (reg) { |
| case GS101_SYSIP_DAT0: |
| case GS101_SYSTEM_CONFIGURATION: |
| return false; |
| default: |
| return true; |
| } |
| } |
| |
| static int tensor_sec_update_bits(void *ctx, unsigned int reg, |
| unsigned int mask, unsigned int val) |
| { |
| |
| if (!tensor_is_atomic(reg)) |
| return tensor_sec_reg_rmw(ctx, reg, mask, val); |
| |
| return tensor_set_bits_atomic(ctx, reg, val, mask); |
| } |
| |
| void pmu_raw_writel(u32 val, u32 offset) |
| { |
| writel_relaxed(val, pmu_base_addr + offset); |
| } |
| |
| u32 pmu_raw_readl(u32 offset) |
| { |
| return readl_relaxed(pmu_base_addr + offset); |
| } |
| |
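/*
 * Program the PMU registers for entry to the given system power-down mode,
 * running any SoC-specific callbacks before and after the common
 * configuration tables.
 */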
| void exynos_sys_powerdown_conf(enum sys_powerdown mode) |
| { |
| unsigned int i; |
| const struct exynos_pmu_data *pmu_data; |
| |
| if (!pmu_context || !pmu_context->pmu_data) |
| return; |
| |
| pmu_data = pmu_context->pmu_data; |
| |
| if (pmu_data->powerdown_conf) |
| pmu_data->powerdown_conf(mode); |
| |
| if (pmu_data->pmu_config) { |
		for (i = 0; pmu_data->pmu_config[i].offset != PMU_TABLE_END; i++)
| pmu_raw_writel(pmu_data->pmu_config[i].val[mode], |
| pmu_data->pmu_config[i].offset); |
| } |
| |
| if (pmu_data->powerdown_conf_extra) |
| pmu_data->powerdown_conf_extra(mode); |
| |
| if (pmu_data->pmu_config_extra) { |
| for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++) |
| pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode], |
| pmu_data->pmu_config_extra[i].offset); |
| } |
| } |
| |
| /* |
| * Split the data between ARM architectures because it is relatively big |
| * and useless on other arch. |
| */ |
| #ifdef CONFIG_EXYNOS_PMU_ARM_DRIVERS |
| #define exynos_pmu_data_arm_ptr(data) (&data) |
| #else |
| #define exynos_pmu_data_arm_ptr(data) NULL |
| #endif |
| |
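/*
 * Custom regmap for SoCs with EL3-protected PMU registers: all accesses go
 * through the SMC helpers above, and the regmap context is the physical PMU
 * base address (see exynos_pmu_probe()).
 */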
| static const struct regmap_config regmap_smccfg = { |
| .name = "pmu_regs", |
| .reg_bits = 32, |
| .reg_stride = 4, |
| .val_bits = 32, |
| .fast_io = true, |
| .use_single_read = true, |
| .use_single_write = true, |
| .reg_read = tensor_sec_reg_read, |
| .reg_write = tensor_sec_reg_write, |
| .reg_update_bits = tensor_sec_update_bits, |
| .use_raw_spinlock = true, |
| }; |
| |
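/*
 * Plain MMIO regmap for the pmu-intr-gen block; raw spinlocks are used so it
 * is safe to call from the CPU PM notifiers (see setup_cpuhp_and_cpuidle()).
 */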
| static const struct regmap_config regmap_pmu_intr = { |
| .name = "pmu_intr_gen", |
| .reg_bits = 32, |
| .reg_stride = 4, |
| .val_bits = 32, |
| .use_raw_spinlock = true, |
| }; |
| |
| static const struct exynos_pmu_data gs101_pmu_data = { |
| .pmu_secure = true, |
| .pmu_cpuhp = true, |
| }; |
| |
| /* |
| * PMU platform driver and devicetree bindings. |
| */ |
| static const struct of_device_id exynos_pmu_of_device_ids[] = { |
| { |
| .compatible = "google,gs101-pmu", |
| .data = &gs101_pmu_data, |
| }, { |
| .compatible = "samsung,exynos3250-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos3250_pmu_data), |
| }, { |
| .compatible = "samsung,exynos4210-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos4210_pmu_data), |
| }, { |
| .compatible = "samsung,exynos4212-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos4212_pmu_data), |
| }, { |
| .compatible = "samsung,exynos4412-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos4412_pmu_data), |
| }, { |
| .compatible = "samsung,exynos5250-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos5250_pmu_data), |
| }, { |
| .compatible = "samsung,exynos5410-pmu", |
| }, { |
| .compatible = "samsung,exynos5420-pmu", |
| .data = exynos_pmu_data_arm_ptr(exynos5420_pmu_data), |
| }, { |
| .compatible = "samsung,exynos5433-pmu", |
| }, { |
| .compatible = "samsung,exynos7-pmu", |
| }, { |
| .compatible = "samsung,exynos850-pmu", |
| }, |
	{ /* sentinel */ },
| }; |
| |
| static const struct mfd_cell exynos_pmu_devs[] = { |
| { .name = "exynos-clkout", }, |
| }; |
| |
| /** |
| * exynos_get_pmu_regmap() - Obtain pmureg regmap |
| * |
 * Find the pmureg regmap previously configured in probe() and return a
 * pointer to it.
| * |
| * Return: A pointer to regmap if found or ERR_PTR error value. |
| */ |
| struct regmap *exynos_get_pmu_regmap(void) |
| { |
| struct device_node *np = of_find_matching_node(NULL, |
| exynos_pmu_of_device_ids); |
| if (np) |
| return exynos_get_pmu_regmap_by_phandle(np, NULL); |
| return ERR_PTR(-ENODEV); |
| } |
| EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap); |
| |
| /** |
| * exynos_get_pmu_regmap_by_phandle() - Obtain pmureg regmap via phandle |
| * @np: Device node holding PMU phandle property |
| * @propname: Name of property holding phandle value |
| * |
 * Find the pmureg regmap previously configured in probe() and return a
 * pointer to it.
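 *
 * Illustrative usage from a client driver (the property name here is only an
 * example; the actual name depends on the client's binding):
 *
 *	regmap = exynos_get_pmu_regmap_by_phandle(dev->of_node,
 *						  "samsung,pmu-syscon");
 *	if (IS_ERR(regmap))
 *		return PTR_ERR(regmap);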
| * |
| * Return: A pointer to regmap if found or ERR_PTR error value. |
| */ |
| struct regmap *exynos_get_pmu_regmap_by_phandle(struct device_node *np, |
| const char *propname) |
| { |
| struct device_node *pmu_np; |
| struct device *dev; |
| |
| if (propname) |
| pmu_np = of_parse_phandle(np, propname, 0); |
| else |
| pmu_np = np; |
| |
| if (!pmu_np) |
| return ERR_PTR(-ENODEV); |
| |
| /* |
| * Determine if exynos-pmu device has probed and therefore regmap |
| * has been created and can be returned to the caller. Otherwise we |
| * return -EPROBE_DEFER. |
| */ |
| dev = driver_find_device_by_of_node(&exynos_pmu_driver.driver, |
| (void *)pmu_np); |
| |
| if (propname) |
| of_node_put(pmu_np); |
| |
| if (!dev) |
| return ERR_PTR(-EPROBE_DEFER); |
| |
| return syscon_node_to_regmap(pmu_np); |
| } |
| EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap_by_phandle); |
| |
| /* |
| * CPU_INFORM register "hint" values are required to be programmed in addition to |
| * the standard PSCI calls to have functional CPU hotplug and CPU idle states. |
| * This is required to workaround limitations in the el3mon/ACPM firmware. |
| */ |
| #define CPU_INFORM_CLEAR 0 |
| #define CPU_INFORM_C2 1 |
| |
| /* |
| * __gs101_cpu_pmu_ prefix functions are common code shared by CPU PM notifiers |
| * (CPUIdle) and CPU hotplug callbacks. Functions should be called with IRQs |
| * disabled and cpupm_lock held. |
| */ |
| static int __gs101_cpu_pmu_online(unsigned int cpu) |
| { |
| unsigned int cpuhint = smp_processor_id(); |
| u32 reg, mask; |
| |
| /* clear cpu inform hint */ |
| regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint), |
| CPU_INFORM_CLEAR); |
| |
| mask = BIT(cpu); |
| |
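	/*
	 * Clear this CPU's bit in the GRP2 interrupt-generation enable
	 * register, then discard any status left pending for it.
	 */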
| regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE, |
| mask, (0 << cpu)); |
| |
| regmap_read(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_UPEND, ®); |
| |
| regmap_write(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_CLEAR, |
| reg & mask); |
| |
| return 0; |
| } |
| |
| /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */ |
| static int gs101_cpu_pmu_online(void) |
| { |
| int cpu; |
| |
| raw_spin_lock(&pmu_context->cpupm_lock); |
| |
| if (pmu_context->sys_inreboot) { |
| raw_spin_unlock(&pmu_context->cpupm_lock); |
| return NOTIFY_OK; |
| } |
| |
| cpu = smp_processor_id(); |
| __gs101_cpu_pmu_online(cpu); |
| raw_spin_unlock(&pmu_context->cpupm_lock); |
| |
| return NOTIFY_OK; |
| } |
| |
| /* Called from CPU hot plug callback with IRQs enabled */ |
| static int gs101_cpuhp_pmu_online(unsigned int cpu) |
| { |
| unsigned long flags; |
| |
| raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); |
| |
| __gs101_cpu_pmu_online(cpu); |
| /* |
| * Mark this CPU as having finished the hotplug. |
| * This means this CPU can now enter C2 idle state. |
| */ |
| clear_bit(cpu, pmu_context->in_cpuhp); |
| raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); |
| |
| return 0; |
| } |
| |
| /* Common function shared by both CPU hot plug and CPUIdle */ |
| static int __gs101_cpu_pmu_offline(unsigned int cpu) |
| { |
| unsigned int cpuhint = smp_processor_id(); |
| u32 reg, mask; |
| |
| /* set cpu inform hint */ |
| regmap_write(pmu_context->pmureg, GS101_CPU_INFORM(cpuhint), |
| CPU_INFORM_C2); |
| |
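	/*
	 * Set this CPU's bit in the GRP2 interrupt-generation enable
	 * register, then clear the pending status for both of this CPU's bit
	 * positions (cpu and cpu + 8) in GRP1.
	 */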
| mask = BIT(cpu); |
| regmap_update_bits(pmu_context->pmuintrgen, GS101_GRP2_INTR_BID_ENABLE, |
| mask, BIT(cpu)); |
| |
| regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, ®); |
| regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR, |
| reg & mask); |
| |
	mask = BIT(cpu + 8);
| regmap_read(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_UPEND, ®); |
| regmap_write(pmu_context->pmuintrgen, GS101_GRP1_INTR_BID_CLEAR, |
| reg & mask); |
| |
| return 0; |
| } |
| |
| /* Called from CPU PM notifier (CPUIdle code path) with IRQs disabled */ |
| static int gs101_cpu_pmu_offline(void) |
| { |
| int cpu; |
| |
| raw_spin_lock(&pmu_context->cpupm_lock); |
| cpu = smp_processor_id(); |
| |
| if (test_bit(cpu, pmu_context->in_cpuhp)) { |
| raw_spin_unlock(&pmu_context->cpupm_lock); |
| return NOTIFY_BAD; |
| } |
| |
| /* Ignore CPU_PM_ENTER event in reboot or suspend sequence. */ |
| if (pmu_context->sys_insuspend || pmu_context->sys_inreboot) { |
| raw_spin_unlock(&pmu_context->cpupm_lock); |
| return NOTIFY_OK; |
| } |
| |
| __gs101_cpu_pmu_offline(cpu); |
| raw_spin_unlock(&pmu_context->cpupm_lock); |
| |
| return NOTIFY_OK; |
| } |
| |
| /* Called from CPU hot plug callback with IRQs enabled */ |
| static int gs101_cpuhp_pmu_offline(unsigned int cpu) |
| { |
| unsigned long flags; |
| |
| raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); |
| /* |
| * Mark this CPU as entering hotplug. So as not to confuse |
| * ACPM the CPU entering hotplug should not enter C2 idle state. |
| */ |
| set_bit(cpu, pmu_context->in_cpuhp); |
| __gs101_cpu_pmu_offline(cpu); |
| |
| raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); |
| |
| return 0; |
| } |
| |
| static int gs101_cpu_pm_notify_callback(struct notifier_block *self, |
| unsigned long action, void *v) |
| { |
| switch (action) { |
| case CPU_PM_ENTER: |
| return gs101_cpu_pmu_offline(); |
| |
| case CPU_PM_EXIT: |
| return gs101_cpu_pmu_online(); |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block gs101_cpu_pm_notifier = { |
| .notifier_call = gs101_cpu_pm_notify_callback, |
| /* |
| * We want to be called first, as the ACPM hint and handshake is what |
| * puts the CPU into C2. |
| */ |
| .priority = INT_MAX |
| }; |
| |
| static int exynos_cpupm_reboot_notifier(struct notifier_block *nb, |
| unsigned long event, void *v) |
| { |
| unsigned long flags; |
| |
| switch (event) { |
| case SYS_POWER_OFF: |
| case SYS_RESTART: |
| raw_spin_lock_irqsave(&pmu_context->cpupm_lock, flags); |
| pmu_context->sys_inreboot = true; |
| raw_spin_unlock_irqrestore(&pmu_context->cpupm_lock, flags); |
| break; |
| } |
| |
| return NOTIFY_OK; |
| } |
| |
| static struct notifier_block exynos_cpupm_reboot_nb = { |
| .priority = INT_MAX, |
| .notifier_call = exynos_cpupm_reboot_notifier, |
| }; |
| |
| static int setup_cpuhp_and_cpuidle(struct device *dev) |
| { |
| struct device_node *intr_gen_node; |
| struct resource intrgen_res; |
| void __iomem *virt_addr; |
	int ret, cpu;

	/*
	 * Initialize the lock before any early return; the noirq PM callbacks
	 * take it whenever the pmu_cpuhp feature is enabled.
	 */
	raw_spin_lock_init(&pmu_context->cpupm_lock);

| intr_gen_node = of_parse_phandle(dev->of_node, |
| "google,pmu-intr-gen-syscon", 0); |
| if (!intr_gen_node) { |
| /* |
| * To maintain support for older DTs that didn't specify syscon |
| * phandle just issue a warning rather than fail to probe. |
| */ |
| dev_warn(dev, "pmu-intr-gen syscon unavailable\n"); |
| return 0; |
| } |
| |
| /* |
| * To avoid lockdep issues (CPU PM notifiers use raw spinlocks) create |
| * a mmio regmap for pmu-intr-gen that uses raw spinlocks instead of |
| * syscon provided regmap. |
| */ |
	ret = of_address_to_resource(intr_gen_node, 0, &intrgen_res);
	if (ret) {
		of_node_put(intr_gen_node);
		return ret;
	}

	virt_addr = devm_ioremap(dev, intrgen_res.start,
				 resource_size(&intrgen_res));
	if (!virt_addr) {
		of_node_put(intr_gen_node);
		return -ENOMEM;
	}

	pmu_context->pmuintrgen = devm_regmap_init_mmio(dev, virt_addr,
							&regmap_pmu_intr);
	if (IS_ERR(pmu_context->pmuintrgen)) {
		dev_err(dev, "failed to initialize pmu-intr-gen regmap\n");
		of_node_put(intr_gen_node);
		return PTR_ERR(pmu_context->pmuintrgen);
	}

	/*
	 * Register the custom MMIO regmap with syscon; hold the node
	 * reference until after registration.
	 */
	ret = of_syscon_register_regmap(intr_gen_node,
					pmu_context->pmuintrgen);
	of_node_put(intr_gen_node);
	if (ret)
		return ret;
| |
| pmu_context->in_cpuhp = devm_bitmap_zalloc(dev, num_possible_cpus(), |
| GFP_KERNEL); |
| if (!pmu_context->in_cpuhp) |
| return -ENOMEM; |
| |
	pmu_context->sys_inreboot = false;
	pmu_context->sys_insuspend = false;
| |
	/* initialize PMU state for CPUs that are already online */
| for_each_online_cpu(cpu) |
| gs101_cpuhp_pmu_online(cpu); |
| |
| /* register CPU hotplug callbacks */ |
| cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "soc/exynos-pmu:prepare", |
| gs101_cpuhp_pmu_online, NULL); |
| |
| cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/exynos-pmu:online", |
| NULL, gs101_cpuhp_pmu_offline); |
| |
| /* register CPU PM notifiers for cpuidle */ |
| cpu_pm_register_notifier(&gs101_cpu_pm_notifier); |
| register_reboot_notifier(&exynos_cpupm_reboot_nb); |
| return 0; |
| } |
| |
| static int exynos_pmu_probe(struct platform_device *pdev) |
| { |
| struct device *dev = &pdev->dev; |
| struct regmap_config pmu_regmcfg; |
| struct regmap *regmap; |
| struct resource *res; |
| int ret; |
| |
| pmu_base_addr = devm_platform_ioremap_resource(pdev, 0); |
| if (IS_ERR(pmu_base_addr)) |
| return PTR_ERR(pmu_base_addr); |
| |
| pmu_context = devm_kzalloc(&pdev->dev, |
| sizeof(struct exynos_pmu_context), |
| GFP_KERNEL); |
| if (!pmu_context) |
| return -ENOMEM; |
| |
| res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| if (!res) |
| return -ENODEV; |
| |
| pmu_context->pmu_data = of_device_get_match_data(dev); |
| |
	/* For SoCs with secured PMU register writes, use the custom SMC regmap */
| if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_secure) { |
| pmu_regmcfg = regmap_smccfg; |
| pmu_regmcfg.max_register = resource_size(res) - |
| pmu_regmcfg.reg_stride; |
| /* Need physical address for SMC call */ |
| regmap = devm_regmap_init(dev, NULL, |
| (void *)(uintptr_t)res->start, |
| &pmu_regmcfg); |
| |
| if (IS_ERR(regmap)) |
| return dev_err_probe(&pdev->dev, PTR_ERR(regmap), |
| "regmap init failed\n"); |
| |
| ret = of_syscon_register_regmap(dev->of_node, regmap); |
| if (ret) |
| return ret; |
| } else { |
| /* let syscon create mmio regmap */ |
| regmap = syscon_node_to_regmap(dev->of_node); |
| if (IS_ERR(regmap)) |
| return dev_err_probe(&pdev->dev, PTR_ERR(regmap), |
| "syscon_node_to_regmap failed\n"); |
| } |
| |
| pmu_context->pmureg = regmap; |
| pmu_context->dev = dev; |
| |
| if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_cpuhp) { |
| ret = setup_cpuhp_and_cpuidle(dev); |
| if (ret) |
| return ret; |
| } |
| |
| if (pmu_context->pmu_data && pmu_context->pmu_data->pmu_init) |
| pmu_context->pmu_data->pmu_init(); |
| |
| platform_set_drvdata(pdev, pmu_context); |
| |
| ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs, |
| ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL); |
| if (ret) |
| return ret; |
| |
| if (devm_of_platform_populate(dev)) |
| dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n"); |
| |
| dev_dbg(dev, "Exynos PMU Driver probe done\n"); |
| return 0; |
| } |
| |
static int exynos_cpupm_suspend_noirq(struct device *dev)
{
	/* cpupm state is only tracked on SoCs with the pmu_cpuhp feature */
	if (!pmu_context->pmu_data || !pmu_context->pmu_data->pmu_cpuhp)
		return 0;

	raw_spin_lock(&pmu_context->cpupm_lock);
	pmu_context->sys_insuspend = true;
	raw_spin_unlock(&pmu_context->cpupm_lock);
	return 0;
}

static int exynos_cpupm_resume_noirq(struct device *dev)
{
	if (!pmu_context->pmu_data || !pmu_context->pmu_data->pmu_cpuhp)
		return 0;

	raw_spin_lock(&pmu_context->cpupm_lock);
	pmu_context->sys_insuspend = false;
	raw_spin_unlock(&pmu_context->cpupm_lock);
	return 0;
}
| |
| static const struct dev_pm_ops cpupm_pm_ops = { |
| NOIRQ_SYSTEM_SLEEP_PM_OPS(exynos_cpupm_suspend_noirq, |
| exynos_cpupm_resume_noirq) |
| }; |
| |
| static struct platform_driver exynos_pmu_driver = { |
| .driver = { |
| .name = "exynos-pmu", |
| .of_match_table = exynos_pmu_of_device_ids, |
| .pm = pm_sleep_ptr(&cpupm_pm_ops), |
| }, |
| .probe = exynos_pmu_probe, |
| }; |
| |
static int __init exynos_pmu_init(void)
{
	return platform_driver_register(&exynos_pmu_driver);
}
| postcore_initcall(exynos_pmu_init); |