Jetpack/kernel/nvidia/drivers/trusty/trusty.c

/*
 * Copyright (C) 2013 Google, Inc.
 * Copyright (c) 2016-2019, NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <asm/compiler.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/trusty/smcall.h>
#include <linux/trusty/sm_err.h>
#include <linux/trusty/trusty.h>
#include <soc/tegra/virt/syscalls.h>
#include <linux/tegra-firmwares.h>
#include "trusty-workitem.h"

struct trusty_state;

struct trusty_work {
	struct trusty_state *ts;
	workitem_t work;
};
struct trusty_state {
	struct mutex smc_lock;
	struct mutex panic_lock;
	struct atomic_notifier_head notifier;
	struct raw_notifier_head panic_notifier;
	struct completion cpu_idle_completion;
	char *version_str;
	u32 api_version;
	struct device *dev;
	struct workqueue_struct *nop_wq;
	struct trusty_work __percpu *nop_works;
	struct list_head nop_queue;
	spinlock_t nop_lock; /* protects nop_queue */
};

#define TRUSTY_DEV_COMP "android,trusty-smc-v1"
#ifdef CONFIG_ARM64
#define SMC_ARG0 "x0"
#define SMC_ARG1 "x1"
#define SMC_ARG2 "x2"
#define SMC_ARG3 "x3"
#define SMC_ARCH_EXTENSION ""
#define SMC_REGISTERS_TRASHED	"x4", "x5", "x6", "x7", "x8", "x9", "x10", \
				"x11", "x12", "x13", "x14", "x15", "x16", "x17"
#else
#define SMC_ARG0 "r0"
#define SMC_ARG1 "r1"
#define SMC_ARG2 "r2"
#define SMC_ARG3 "r3"
#define SMC_ARCH_EXTENSION ".arch_extension sec\n"
#define SMC_REGISTERS_TRASHED "ip"
#endif
#ifdef CONFIG_PREEMPT_RT_FULL
void migrate_enable(void);
void migrate_disable(void);
#endif
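
/*
 * hyp_ipa_translate() - when running as a guest of the Tegra hypervisor,
 * translate an intermediate physical address (IPA) in place into the
 * backing physical address; on bare metal the address is left untouched.
 */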
int hyp_ipa_translate(uint64_t *ipa)
{
	int gid, ret = 0;
	struct hyp_ipa_pa_info info;

	if (is_tegra_hypervisor_mode()) {
		ret = hyp_read_gid(&gid);
		if (ret)
			return ret;

		memset(&info, 0, sizeof(info));
		ret = hyp_read_ipa_pa_info(&info, gid, *ipa);
		*ipa = info.base + info.offset;
	}

	return ret;
}
EXPORT_SYMBOL(hyp_ipa_translate);
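
/*
 * Issue an SMC (Secure Monitor Call) with up to four arguments and return
 * the value the secure world leaves in the first argument register. The
 * __asmeq() checks verify at build time that each variable really landed
 * in the register the SMC calling convention requires.
 */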
static inline ulong smc(ulong r0, ulong r1, ulong r2, ulong r3)
{
	register ulong _r0 asm(SMC_ARG0) = r0;
	register ulong _r1 asm(SMC_ARG1) = r1;
	register ulong _r2 asm(SMC_ARG2) = r2;
	register ulong _r3 asm(SMC_ARG3) = r3;

	asm volatile(
		__asmeq("%0", SMC_ARG0)
		__asmeq("%1", SMC_ARG1)
		__asmeq("%2", SMC_ARG2)
		__asmeq("%3", SMC_ARG3)
		__asmeq("%4", SMC_ARG0)
		__asmeq("%5", SMC_ARG1)
		__asmeq("%6", SMC_ARG2)
		__asmeq("%7", SMC_ARG3)
		SMC_ARCH_EXTENSION
		"smc	#0"	/* switch to secure world */
		: "=r" (_r0), "=r" (_r1), "=r" (_r2), "=r" (_r3)
		: "r" (_r0), "r" (_r1), "r" (_r2), "r" (_r3)
		: SMC_REGISTERS_TRASHED);
	return _r0;
}
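
/*
 * Fast calls are expected to be handled to completion by the secure
 * monitor without rescheduling, so no serialization against other
 * callers is needed here; the BUG_ON()s catch callers that pass a
 * standard-call number or the wrong register width.
 */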
s32 trusty_fast_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return SM_ERR_UNDEFINED_SMC;

	BUG_ON(!s);
	BUG_ON(!SMC_IS_FASTCALL(smcnr));
	BUG_ON(SMC_IS_SMC64(smcnr));

	return smc(smcnr, a0, a1, a2);
}
EXPORT_SYMBOL(trusty_fast_call32);

#ifdef CONFIG_64BIT
s64 trusty_fast_call64(struct device *dev, u64 smcnr, u64 a0, u64 a1, u64 a2)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return SM_ERR_UNDEFINED_SMC;

	BUG_ON(!s);
	BUG_ON(!SMC_IS_FASTCALL(smcnr));
	BUG_ON(!SMC_IS_SMC64(smcnr));

	return smc(smcnr, a0, a1, a2);
}
#endif
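
/*
 * One attempt at a standard call: issue the SMC, transparently restart
 * it whenever a FIQ interrupted the secure side, and retry up to five
 * times while the secure side reports SM_ERR_BUSY.
 */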
static ulong trusty_std_call_inner(struct device *dev, ulong smcnr,
				   ulong a0, ulong a1, ulong a2)
{
	ulong ret;
	int retry = 5;

	dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx)\n",
		__func__, smcnr, a0, a1, a2);
	while (true) {
		ret = smc(smcnr, a0, a1, a2);
		while ((s32)ret == SM_ERR_FIQ_INTERRUPTED)
			ret = smc(SMC_SC_RESTART_FIQ, 0, 0, 0);
		if ((int)ret != SM_ERR_BUSY || !retry)
			break;

		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, retry\n",
			__func__, smcnr, a0, a1, a2);
		retry--;
	}

	return ret;
}
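
/*
 * Run trusty_std_call_inner() between the TRUSTY_CALL_PREPARE and
 * TRUSTY_CALL_RETURNED notifiers with local interrupts off, backing off
 * exponentially (1 ms doubling up to 1 s) while the secure side stays
 * busy. A panic return is fanned out to the panic notifier chain.
 */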
static ulong trusty_std_call_helper(struct device *dev, ulong smcnr,
				    ulong a0, ulong a1, ulong a2)
{
	ulong ret;
	int sleep_time = 1;
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	while (true) {
		local_irq_disable();
		atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_PREPARE,
					   NULL);
		ret = trusty_std_call_inner(dev, smcnr, a0, a1, a2);
		atomic_notifier_call_chain(&s->notifier, TRUSTY_CALL_RETURNED,
					   NULL);
		local_irq_enable();

		if (ret == SM_ERR_PANIC) {
			mutex_lock(&s->panic_lock);
			raw_notifier_call_chain(&s->panic_notifier, 0, NULL);
			mutex_unlock(&s->panic_lock);
			break;
		}

		if ((int)ret != SM_ERR_BUSY)
			break;

		if (sleep_time == 256)
			dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy\n",
				 __func__, smcnr, a0, a1, a2);
		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) returned busy, wait %d ms\n",
			__func__, smcnr, a0, a1, a2, sleep_time);

		msleep(sleep_time);
		if (sleep_time < 1000)
			sleep_time <<= 1;

		dev_dbg(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) retry\n",
			__func__, smcnr, a0, a1, a2);
	}

	if (sleep_time > 256)
		dev_warn(dev, "%s(0x%lx 0x%lx 0x%lx 0x%lx) busy cleared\n",
			 __func__, smcnr, a0, a1, a2);

	return ret;
}

static void trusty_std_call_cpu_idle(struct trusty_state *s)
{
	int ret;

	ret = wait_for_completion_timeout(&s->cpu_idle_completion, HZ * 10);
	if (!ret) {
		pr_warn("%s: timed out waiting for cpu idle to clear, retry anyway\n",
			__func__);
	}
}
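
/*
 * Standard (long-running, interruptible) call into Trusty. All standard
 * calls except SMC_SC_NOP are serialized by smc_lock; NOP calls bypass
 * the lock and signal cpu_idle_completion, so a standard call waiting
 * on SM_ERR_CPU_IDLE can resume once a NOP has run.
 */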
s32 trusty_std_call32(struct device *dev, u32 smcnr, u32 a0, u32 a1, u32 a2)
{
	int ret;
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return SM_ERR_UNDEFINED_SMC;

	BUG_ON(SMC_IS_FASTCALL(smcnr));
	BUG_ON(SMC_IS_SMC64(smcnr));

	if (smcnr != SMC_SC_NOP) {
		mutex_lock(&s->smc_lock);
		reinit_completion(&s->cpu_idle_completion);
	}

	dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) started\n",
		__func__, smcnr, a0, a1, a2);

	ret = trusty_std_call_helper(dev, smcnr, a0, a1, a2);
	while (ret == SM_ERR_INTERRUPTED || ret == SM_ERR_CPU_IDLE) {
		dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) interrupted\n",
			__func__, smcnr, a0, a1, a2);
		if (ret == SM_ERR_CPU_IDLE)
			trusty_std_call_cpu_idle(s);
		ret = trusty_std_call_helper(dev, SMC_SC_RESTART_LAST, 0, 0, 0);
	}
	dev_dbg(dev, "%s(0x%x 0x%x 0x%x 0x%x) returned 0x%x\n",
		__func__, smcnr, a0, a1, a2, ret);

	WARN_ONCE(ret == SM_ERR_PANIC, "trusty crashed");

	if (smcnr == SMC_SC_NOP)
		complete(&s->cpu_idle_completion);
	else
		mutex_unlock(&s->smc_lock);

	return ret;
}
EXPORT_SYMBOL(trusty_std_call32);

int trusty_call_notifier_register(struct device *dev, struct notifier_block *n)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return -ENODEV;

	return atomic_notifier_chain_register(&s->notifier, n);
}
EXPORT_SYMBOL(trusty_call_notifier_register);

int trusty_call_notifier_unregister(struct device *dev,
				    struct notifier_block *n)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return -ENODEV;

	return atomic_notifier_chain_unregister(&s->notifier, n);
}
EXPORT_SYMBOL(trusty_call_notifier_unregister);

int trusty_panic_notifier_register(struct device *dev, struct notifier_block *n)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return -ENODEV;

	return raw_notifier_chain_register(&s->panic_notifier, n);
}
EXPORT_SYMBOL(trusty_panic_notifier_register);

int trusty_panic_notifier_unregister(struct device *dev,
				     struct notifier_block *n)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return -ENODEV;

	return raw_notifier_chain_unregister(&s->panic_notifier, n);
}
EXPORT_SYMBOL(trusty_panic_notifier_unregister);

static int trusty_remove_child(struct device *dev, void *data)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

ssize_t trusty_version_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%s\n", s->version_str);
}
DEVICE_ATTR(trusty_version, S_IRUSR, trusty_version_show, NULL);

const char *trusty_version_str_get(struct device *dev)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return NULL;

	return s->version_str;
}
EXPORT_SYMBOL(trusty_version_str_get);
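
/*
 * Fetch the Trusty version string one character at a time via
 * SMC_FC_GET_VERSION_STR: index -1 returns the string length, index i
 * returns the i-th character. The result is published through the
 * trusty_version sysfs attribute.
 */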
static void trusty_init_version(struct trusty_state *s, struct device *dev)
{
	int ret;
	int i;
	int version_str_len;

	ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, -1, 0, 0);
	if (ret <= 0)
		goto err_get_size;

	version_str_len = ret;

	s->version_str = kmalloc(version_str_len + 1, GFP_KERNEL);
	if (!s->version_str) {
		ret = -ENOMEM;
		goto err_get_size;
	}

	for (i = 0; i < version_str_len; i++) {
		ret = trusty_fast_call32(dev, SMC_FC_GET_VERSION_STR, i, 0, 0);
		if (ret < 0)
			goto err_get_char;
		s->version_str[i] = ret;
	}
	s->version_str[i] = '\0';

	dev_info(dev, "trusty version: %s\n", s->version_str);

	ret = device_create_file(dev, &dev_attr_trusty_version);
	if (ret)
		goto err_create_file;
	return;

err_create_file:
err_get_char:
	kfree(s->version_str);
	s->version_str = NULL;
err_get_size:
	dev_err(dev, "failed to get version: %d\n", ret);
}

u32 trusty_get_api_version(struct device *dev)
{
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (!is_trusty_dev_enabled())
		return 0;

	return s->api_version;
}
EXPORT_SYMBOL(trusty_get_api_version);
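
/*
 * Negotiate the API version: we offer TRUSTY_API_VERSION_CURRENT and
 * accept whatever version Trusty answers with, as long as it is not
 * newer than ours. Trusty builds that predate SMC_FC_API_VERSION are
 * treated as version 0.
 */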
static int trusty_init_api_version(struct trusty_state *s, struct device *dev)
{
	u32 api_version;

	api_version = trusty_fast_call32(dev, SMC_FC_API_VERSION,
					 TRUSTY_API_VERSION_CURRENT, 0, 0);
	if (api_version == SM_ERR_UNDEFINED_SMC)
		api_version = 0;

	if (api_version > TRUSTY_API_VERSION_CURRENT) {
		dev_err(dev, "unsupported api version %u > %u\n",
			api_version, TRUSTY_API_VERSION_CURRENT);
		return -EINVAL;
	}

	dev_info(dev, "selected api version: %u (requested %u)\n",
		 api_version, TRUSTY_API_VERSION_CURRENT);
	s->api_version = api_version;

	return 0;
}
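
/*
 * Pop the arguments of the next queued NOP, or zeros if the queue is
 * empty. Returns true when a queued NOP was actually consumed.
 */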
static bool dequeue_nop(struct trusty_state *s, u32 *args)
{
	unsigned long flags;
	struct trusty_nop *nop = NULL;

	spin_lock_irqsave(&s->nop_lock, flags);
	if (!list_empty(&s->nop_queue)) {
		nop = list_first_entry(&s->nop_queue,
				       struct trusty_nop, node);
		list_del_init(&nop->node);
		args[0] = nop->args[0];
		args[1] = nop->args[1];
		args[2] = nop->args[2];
	} else {
		args[0] = 0;
		args[1] = 0;
		args[2] = 0;
	}
	spin_unlock_irqrestore(&s->nop_lock, flags);

	return nop;
}

static void locked_nop_work_func(workitem_t *work)
{
	int ret;
	struct trusty_work *tw = container_of(work, struct trusty_work, work);
	struct trusty_state *s = tw->ts;

	dev_dbg(s->dev, "%s\n", __func__);
	ret = trusty_std_call32(s->dev, SMC_SC_LOCKED_NOP, 0, 0, 0);
	if (ret != 0)
		dev_err(s->dev, "%s: SMC_SC_LOCKED_NOP failed %d\n",
			__func__, ret);
	dev_dbg(s->dev, "%s: done\n", __func__);
}
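
/*
 * Drain the per-CPU NOP queue: keep issuing SMC_SC_NOP until Trusty
 * reports SM_ERR_NOP_DONE and the queue is empty. See the comment in
 * the loop body for why SM_ERR_NOP_RETRY must re-issue the same
 * arguments.
 */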
static void nop_work_func(workitem_t *work)
{
	int ret;
	bool next;
	u32 args[3];
	struct trusty_work *tw = container_of(work, struct trusty_work, work);
	struct trusty_state *s = tw->ts;

	dev_dbg(s->dev, "%s:\n", __func__);
	dequeue_nop(s, args);
	do {
		dev_dbg(s->dev, "%s: %x %x %x\n",
			__func__, args[0], args[1], args[2]);

		ret = trusty_std_call32(s->dev, SMC_SC_NOP,
					args[0], args[1], args[2]);
		/*
		 * In some cases a NOP SMC must be retried with its original
		 * arguments because the first call may never have reached the
		 * Trusty kernel. For example, in the virtualization use case,
		 * if a guest's VIRQ is pending at the hypervisor, the HV
		 * returns control to the guest without transitioning to the
		 * TOS, so that the guest's IRQ is handled in the shortest
		 * time and guest interrupt latency is minimized. Since the
		 * request never reached the TOS, restarting the old SMC with
		 * zeroed or new arguments would drop the original request and
		 * cause unexpected results.
		 */
		if (ret == SM_ERR_NOP_RETRY)
			continue;

		next = dequeue_nop(s, args);

		if (ret == SM_ERR_NOP_INTERRUPTED)
			next = true;
		else if (ret != SM_ERR_NOP_DONE)
			dev_err(s->dev, "%s: SMC_SC_NOP failed %d\n",
				__func__, ret);
	} while (next);

	dev_dbg(s->dev, "%s: done\n", __func__);
}
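
/*
 * Queue a NOP for Trusty (or just kick the secure side when nop is
 * NULL) and schedule the current CPU's NOP worker. Preemption (or
 * migration under PREEMPT_RT) is disabled so the work item is scheduled
 * on the CPU whose per-CPU slot was picked.
 */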
void trusty_enqueue_nop(struct device *dev, struct trusty_nop *nop)
{
	unsigned long flags;
	struct trusty_work *tw;
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

#ifdef CONFIG_PREEMPT_RT_FULL
	migrate_disable();
#else
	preempt_disable();
#endif
	tw = this_cpu_ptr(s->nop_works);
	if (nop) {
		WARN_ON(s->api_version < TRUSTY_API_VERSION_SMP_NOP);

		spin_lock_irqsave(&s->nop_lock, flags);
		if (list_empty(&nop->node))
			list_add_tail(&nop->node, &s->nop_queue);
		spin_unlock_irqrestore(&s->nop_lock, flags);
	}
	schedule_workitem(s->nop_wq, &tw->work);
#ifdef CONFIG_PREEMPT_RT_FULL
	migrate_enable();
#else
	preempt_enable();
#endif
}
EXPORT_SYMBOL(trusty_enqueue_nop);

void trusty_dequeue_nop(struct device *dev, struct trusty_nop *nop)
{
	unsigned long flags;
	struct trusty_state *s = platform_get_drvdata(to_platform_device(dev));

	if (WARN_ON(!nop))
		return;

	spin_lock_irqsave(&s->nop_lock, flags);
	if (!list_empty(&nop->node))
		list_del_init(&nop->node);
	spin_unlock_irqrestore(&s->nop_lock, flags);
}
EXPORT_SYMBOL(trusty_dequeue_nop);

static inline struct device_node *get_trusty_device_node(void)
{
	struct device_node *node;

	node = of_find_compatible_node(NULL, NULL, TRUSTY_DEV_COMP);
	if (!node)
		pr_info("Trusty DT node not present in FDT.\n");

	return node;
}
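
/*
 * Lazily check the device tree for an enabled Trusty node; the answer
 * is cached in a static after the first call.
 */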
int is_trusty_dev_enabled(void)
{
	static int trusty_dev_status = TRUSTY_DEV_UNINIT;
	struct device_node *node;

	if (unlikely(trusty_dev_status == TRUSTY_DEV_UNINIT)) {
		trusty_dev_status = TRUSTY_DEV_ENABLED;
		node = get_trusty_device_node();
		if (!node || !of_device_is_available(node))
			trusty_dev_status = TRUSTY_DEV_DISABLED;
		of_node_put(node);
	}

	return trusty_dev_status;
}
EXPORT_SYMBOL(is_trusty_dev_enabled);

static int trusty_probe(struct platform_device *pdev)
{
	int ret;
	unsigned int cpu;
	trusty_work_func_t work_func;
	struct trusty_state *s;
	struct device_node *node = pdev->dev.of_node;

	if (!node) {
		dev_err(&pdev->dev, "of_node required\n");
		return -EINVAL;
	}

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ret = -ENOMEM;
		goto err_allocate_state;
	}

	s->dev = &pdev->dev;
	spin_lock_init(&s->nop_lock);
	INIT_LIST_HEAD(&s->nop_queue);
	mutex_init(&s->smc_lock);
	mutex_init(&s->panic_lock);
	ATOMIC_INIT_NOTIFIER_HEAD(&s->notifier);
	RAW_INIT_NOTIFIER_HEAD(&s->panic_notifier);
	init_completion(&s->cpu_idle_completion);
	platform_set_drvdata(pdev, s);

	trusty_init_version(s, &pdev->dev);

	ret = trusty_init_api_version(s, &pdev->dev);
	if (ret < 0)
		goto err_api_version;

	s->nop_wq = alloc_workqueue("trusty-nop-wq", WQ_CPU_INTENSIVE, 0);
	if (!s->nop_wq) {
		ret = -ENODEV;
		dev_err(&pdev->dev, "Failed to create trusty-nop-wq\n");
		goto err_create_nop_wq;
	}

	s->nop_works = alloc_percpu(struct trusty_work);
	if (!s->nop_works) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Failed to allocate works\n");
		goto err_alloc_works;
	}

	if (s->api_version < TRUSTY_API_VERSION_SMP)
		work_func = locked_nop_work_func;
	else
		work_func = nop_work_func;

	for_each_possible_cpu(cpu) {
		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);

		tw->ts = s;
		INIT_WORKITEM(&tw->work, work_func);
	}

	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add children: %d\n", ret);
		goto err_add_children;
	}

	devm_tegrafw_register_string(&pdev->dev, "trusty", s->version_str);

	return 0;

err_add_children:
	for_each_possible_cpu(cpu) {
		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);

		cancel_workitem(&tw->work);
	}
	free_percpu(s->nop_works);
err_alloc_works:
	destroy_workqueue(s->nop_wq);
err_create_nop_wq:
err_api_version:
	if (s->version_str) {
		device_remove_file(&pdev->dev, &dev_attr_trusty_version);
		kfree(s->version_str);
	}
	device_for_each_child(&pdev->dev, NULL, trusty_remove_child);
	mutex_destroy(&s->smc_lock);
	mutex_destroy(&s->panic_lock);
	kfree(s);
err_allocate_state:
	return ret;
}

static int trusty_remove(struct platform_device *pdev)
{
	unsigned int cpu;
	struct trusty_state *s = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);
	device_for_each_child(&pdev->dev, NULL, trusty_remove_child);

	for_each_possible_cpu(cpu) {
		struct trusty_work *tw = per_cpu_ptr(s->nop_works, cpu);

		cancel_workitem(&tw->work);
	}
	free_percpu(s->nop_works);
	destroy_workqueue(s->nop_wq);

	mutex_destroy(&s->smc_lock);
	mutex_destroy(&s->panic_lock);

	if (s->version_str) {
		device_remove_file(&pdev->dev, &dev_attr_trusty_version);
		kfree(s->version_str);
	}
	kfree(s);

	return 0;
}

static const struct of_device_id trusty_of_match[] = {
	{ .compatible = TRUSTY_DEV_COMP, },
	{},
};

static struct platform_driver trusty_driver = {
	.probe = trusty_probe,
	.remove = trusty_remove,
	.driver = {
		.name = "trusty",
		.owner = THIS_MODULE,
		.of_match_table = trusty_of_match,
	},
};

static int __init trusty_driver_init(void)
{
	return platform_driver_register(&trusty_driver);
}

static void __exit trusty_driver_exit(void)
{
	platform_driver_unregister(&trusty_driver);
}

subsys_initcall(trusty_driver_init);
module_exit(trusty_driver_exit);

MODULE_DESCRIPTION("Trusty core driver");
MODULE_AUTHOR("Google, Inc");
MODULE_LICENSE("GPL v2");