Add rtthread-related code

2025-06-13 14:38:32 +08:00
parent caf2d9f0c5
commit 160f9f8201
1809 changed files with 648100 additions and 10 deletions


@@ -0,0 +1,30 @@
menuconfig RT_USING_PIC
bool "Using Programmable Interrupt Controller (PIC)"
select RT_USING_BITMAP
depends on RT_USING_DM
default n
config MAX_HANDLERS
int "IRQ max handlers"
depends on RT_USING_PIC
range 1 4294967294
default 256
config RT_PIC_ARM_GIC
bool "ARM GICv2/v1"
depends on RT_USING_PIC
select RT_USING_OFW
default n
config RT_PIC_ARM_GIC_V3
bool "ARM GICv3"
depends on RT_USING_PIC
select RT_USING_OFW
default n
config RT_PIC_ARM_GIC_MAX_NR
int
depends on RT_USING_PIC
depends on RT_PIC_ARM_GIC
default 2 if SOC_REALVIEW
default 1
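For reference, these symbols surface in the generated rtconfig.h as plain macros that gate the sources below. A minimal sketch of what a board enabling GICv3 might end up with (the names mirror the Kconfig symbols above; the exact set and values depend on the BSP configuration):

/* illustrative rtconfig.h fragment, assuming RT_PIC_ARM_GIC_V3 is selected */
#define RT_USING_DM
#define RT_USING_PIC
#define RT_USING_BITMAP
#define MAX_HANDLERS 256
#define RT_PIC_ARM_GIC_V3
#define RT_PIC_ARM_GIC_MAX_NR 1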


@@ -0,0 +1,24 @@
from building import *
group = []
if not GetDepend(['RT_USING_PIC']):
Return('group')
cwd = GetCurrentDir()
CPPPATH = [cwd + '/../include']
src = ['pic.c', 'pic_rthw.c']
if GetDepend(['RT_PIC_ARM_GIC']) or GetDepend(['RT_PIC_ARM_GIC_V3']):
src += ['pic-gic-common.c']
if GetDepend(['RT_PIC_ARM_GIC']):
src += ['pic-gicv2.c']
if GetDepend(['RT_PIC_ARM_GIC_V3']):
src += ['pic-gicv3.c']
group = DefineGroup('DeviceDrivers', src, depend = [''], CPPPATH = CPPPATH)
Return('group')


@@ -0,0 +1,175 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-01-30 GuEe-GUI first version
*/
#include <rthw.h>
#include <rtthread.h>
#define DBG_TAG "pic.gic*"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
#include <drivers/pic.h>
#include "pic-gicv2.h"
#include "pic-gic-common.h"
void gic_common_init_quirk_ofw(const struct rt_ofw_node *ic_np, const struct gic_quirk *quirks, void *data)
{
for (; quirks->desc; ++quirks)
{
if (!quirks->compatible || !rt_ofw_node_is_compatible(ic_np, quirks->compatible))
{
continue;
}
RT_ASSERT(quirks->init != RT_NULL);
if (!quirks->init(data))
{
LOG_I("Enable workaround for %s", quirks->desc);
}
}
}
void gic_common_init_quirk_hw(rt_uint32_t iidr, const struct gic_quirk *quirks, void *data)
{
for (; quirks->desc; ++quirks)
{
if (quirks->compatible)
{
continue;
}
if (quirks->iidr == (iidr & quirks->iidr_mask))
{
RT_ASSERT(quirks->init != RT_NULL);
if (!quirks->init(data))
{
LOG_I("Enable workaround for %s", quirks->desc);
}
}
}
}
void gic_common_sgi_config(void *base, void *data, int irq_base)
{
#ifdef RT_USING_SMP
if (irq_base < 2)
{
struct rt_pic_irq *pirq;
#define DECLARE_GIC_IPI(ipi, hwirq) \
rt_pic_config_ipi(data, ipi, hwirq); \
pirq = rt_pic_find_ipi(data, ipi); \
pirq->mode = RT_IRQ_MODE_EDGE_RISING;
DECLARE_GIC_IPI(RT_SCHEDULE_IPI, 0);
DECLARE_GIC_IPI(RT_STOP_IPI, 1);
#undef DECLARE_GIC_IPI
}
#endif /* RT_USING_SMP */
}
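Each DECLARE_GIC_IPI invocation above expands to three statements; for example, DECLARE_GIC_IPI(RT_SCHEDULE_IPI, 0) expands to:

rt_pic_config_ipi(data, RT_SCHEDULE_IPI, 0);
pirq = rt_pic_find_ipi(data, RT_SCHEDULE_IPI);
pirq->mode = RT_IRQ_MODE_EDGE_RISING;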
rt_err_t gic_common_configure_irq(void *base, int irq, rt_uint32_t mode, void (*sync_access)(void *), void *data)
{
rt_err_t err = RT_EOK;
rt_ubase_t level;
rt_uint32_t val, oldval;
rt_uint32_t confoff = (irq / 16) * 4;
rt_uint32_t confmask = 0x2 << ((irq % 16) * 2);
static struct rt_spinlock ic_lock = { 0 };
level = rt_spin_lock_irqsave(&ic_lock);
val = oldval = HWREG32(base + confoff);
if (mode & RT_IRQ_MODE_LEVEL_MASK)
{
/* Level-sensitive */
val &= ~confmask;
}
else if (mode & RT_IRQ_MODE_EDGE_BOTH)
{
/* Edge-triggered */
val |= confmask;
}
if (val != oldval)
{
HWREG32(base + confoff) = val;
if (HWREG32(base + confoff) != val)
{
err = -RT_EINVAL;
}
if (sync_access)
{
sync_access(data);
}
}
rt_spin_unlock_irqrestore(&ic_lock, level);
return err;
}
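The confoff/confmask arithmetic above reflects the GICD_ICFGR layout: each interrupt owns a 2-bit configuration field, so 16 interrupts share one 32-bit register. A standalone sketch of the calculation (not driver code; the sample hwirq is arbitrary):

#include <stdio.h>

int main(void)
{
    int irq = 42;                                 /* sample SPI hwirq */
    unsigned confoff  = (irq / 16) * 4;           /* byte offset of its ICFGR word -> 0x8 */
    unsigned confmask = 0x2u << ((irq % 16) * 2); /* edge/level bit in that word -> 0x00200000 */

    printf("ICFGR offset 0x%x, mask 0x%08x\n", confoff, confmask);
    return 0;
}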
void gic_common_dist_config(void *base, int max_irqs, void (*sync_access)(void *), void *data)
{
rt_uint32_t i;
/* Set all global interrupts to be level triggered, active low. */
for (i = 32; i < max_irqs; i += 16)
{
HWREG32(base + GIC_DIST_CONFIG + i / 4) = GICD_INT_ACTLOW_LVLTRIG;
}
/* Set priority on all global interrupts. */
for (i = 32; i < max_irqs; i += 4)
{
HWREG32(base + GIC_DIST_PRI + i * 4 / 4) = GICD_INT_DEF_PRI_X4;
}
/* Disable all SPIs. */
for (i = 32; i < max_irqs; i += 32)
{
HWREG32(base + GIC_DIST_ACTIVE_CLEAR + i / 8) = GICD_INT_EN_CLR_X32;
HWREG32(base + GIC_DIST_ENABLE_CLEAR + i / 8) = GICD_INT_EN_CLR_X32;
}
if (sync_access)
{
sync_access(data);
}
}
void gic_common_cpu_config(void *base, int nr, void (*sync_access)(void *), void *data)
{
rt_uint32_t i;
/* Disable all SGIs, PPIs. */
for (i = 0; i < nr; i += 32)
{
HWREG32(base + GIC_DIST_ACTIVE_CLEAR + i / 8) = GICD_INT_EN_CLR_X32;
HWREG32(base + GIC_DIST_ENABLE_CLEAR + i / 8) = GICD_INT_EN_CLR_X32;
}
/* Set priority on all PPI and SGI. */
for (i = 0; i < nr; i += 4)
{
HWREG32(base + GIC_DIST_PRI + i * 4 / 4) = GICD_INT_DEF_PRI_X4;
}
if (sync_access)
{
sync_access(data);
}
}


@@ -0,0 +1,57 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2023-01-30 GuEe-GUI first version
*/
#ifndef __IRQ_GIC_COMMON_H__
#define __IRQ_GIC_COMMON_H__
#include <rtdef.h>
#ifdef RT_PCI_MSI
#include <drivers/pci_msi.h>
#endif
#include <drivers/ofw.h>
#define GIC_SGI_NR 16
#define GICD_INT_DEF_PRI 0xa0
#define GICD_INT_DEF_PRI_X4 \
( \
(GICD_INT_DEF_PRI << 24) | \
(GICD_INT_DEF_PRI << 16) | \
(GICD_INT_DEF_PRI << 8) | \
GICD_INT_DEF_PRI \
)
struct gic_quirk
{
const char *desc;
const char *compatible;
rt_err_t (*init)(void *data);
rt_uint32_t iidr;
rt_uint32_t iidr_mask;
};
void gic_common_init_quirk_ofw(const struct rt_ofw_node *ic_np, const struct gic_quirk *quirks, void *data);
void gic_common_init_quirk_hw(rt_uint32_t iidr, const struct gic_quirk *quirks, void *data);
void gic_common_sgi_config(void *base, void *data, int irq_base);
rt_err_t gic_common_configure_irq(void *base, int irq, rt_uint32_t mode, void (*sync_access)(void *), void *data);
void gic_common_dist_config(void *base, int max_irqs, void (*sync_access)(void *), void *data);
void gic_common_cpu_config(void *base, int nr, void (*sync_access)(void *), void *data);
#ifdef RT_PIC_ARM_GIC_V2M
rt_err_t gicv2m_ofw_probe(struct rt_ofw_node *ic_np, const struct rt_ofw_node_id *id);
#endif
#ifdef RT_PIC_ARM_GIC_V3_ITS
rt_err_t gicv3_its_ofw_probe(struct rt_ofw_node *ic_np, const struct rt_ofw_node_id *id);
#endif
#endif /* __IRQ_GIC_COMMON_H__ */


@@ -0,0 +1,576 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
* 2022-08-24 GuEe-GUI add pic support
* 2022-11-07 GuEe-GUI add v2m support
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "pic.gicv2"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <cpuport.h>
#include <ioremap.h>
#include "pic-gicv2.h"
#include "pic-gic-common.h"
#define GIC_CPU_IMAX 8
#define raw_to_gicv2(raw) rt_container_of(raw, struct gicv2, parent)
static rt_bool_t needs_rmw_access = RT_FALSE;
static int _gicv2_nr = 0, _init_cpu_id = 0;
static struct gicv2 _gicv2_list[RT_PIC_ARM_GIC_MAX_NR] = {};
static rt_bool_t _gicv2_eoi_mode_ns = RT_FALSE;
static rt_uint8_t _gicv2_cpumask_map[GIC_CPU_IMAX] =
{
[0 ... GIC_CPU_IMAX - 1] = 0xff,
};
static rt_uint8_t gicv2_cpumask_map(struct gicv2 *gic)
{
rt_uint32_t mask, i;
for (i = mask = 0; i < 32; i += 4)
{
mask = HWREG32(gic->dist_base + GIC_DIST_TARGET + i);
mask |= mask >> 16;
mask |= mask >> 8;
if (mask)
{
break;
}
}
return mask;
}
static void gicv2_dist_init(struct gicv2 *gic)
{
void *base = gic->dist_base;
rt_uint32_t i;
rt_uint32_t cpumask = gicv2_cpumask_map(gic);
gic->max_irq = HWREG32(base + GIC_DIST_TYPE) & 0x1f;
gic->max_irq = (gic->max_irq + 1) * 32;
/*
* The GIC only supports up to 1020 interrupt sources.
* Limit this to either the architected maximum, or the
* platform maximum.
*/
if (gic->max_irq > 1020)
{
gic->max_irq = 1020;
}
LOG_D("Max irq = %d", gic->max_irq);
HWREG32(base + GIC_DIST_CTRL) = GICD_DISABLE;
/* Set all global (unused) interrupts to this CPU only. */
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
for (i = 32; i < gic->max_irq; i += 4)
{
HWREG32(base + GIC_DIST_TARGET + i * 4 / 4) = cpumask;
}
gic_common_dist_config(base, gic->max_irq, RT_NULL, RT_NULL);
HWREG32(base + GIC_DIST_CTRL) = GICD_ENABLE;
}
static void gicv2_cpu_init(struct gicv2 *gic)
{
rt_uint32_t cpumask;
void *base = gic->cpu_base;
rt_uint32_t config = GICC_ENABLE;
int cpu_id = _init_cpu_id = rt_hw_cpu_id();
cpumask = gicv2_cpumask_map(gic);
_gicv2_cpumask_map[cpu_id] = cpumask;
/*
* Clear our mask from the other map entries in case they're
* still undefined.
*/
for (int i = 0; i < RT_ARRAY_SIZE(_gicv2_cpumask_map); ++i)
{
if (i != cpu_id)
{
_gicv2_cpumask_map[i] &= ~cpumask;
}
}
gic_common_cpu_config(gic->dist_base, 32, RT_NULL, RT_NULL);
HWREG32(base + GIC_CPU_PRIMASK) = GICC_INT_PRI_THRESHOLD;
HWREG32(base + GIC_CPU_BINPOINT) = 0x7;
#ifdef ARCH_SUPPORT_HYP
_gicv2_eoi_mode_ns = RT_TRUE;
#endif
if (_gicv2_eoi_mode_ns)
{
config |= GIC_CPU_CTRL_EOI_MODE_NS;
}
HWREG32(base + GIC_CPU_CTRL) = config;
}
static rt_err_t gicv2_irq_init(struct rt_pic *pic)
{
gicv2_cpu_init(rt_container_of(pic, struct gicv2, parent));
return RT_EOK;
}
static void gicv2_irq_ack(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
if (!_gicv2_eoi_mode_ns)
{
HWREG32(gic->dist_base + GIC_DIST_PENDING_CLEAR + hwirq / 32 * 4) = 1U << (hwirq % 32);
}
HWREG32(gic->cpu_base + GIC_CPU_EOI) = hwirq;
}
static void gicv2_irq_mask(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
HWREG32(gic->dist_base + GIC_DIST_ENABLE_CLEAR + hwirq / 32 * 4) = 1U << (hwirq % 32);
}
static void gicv2_irq_unmask(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
HWREG32(gic->dist_base + GIC_DIST_ENABLE_SET + hwirq / 32 * 4) = 1U << (hwirq % 32);
}
static void gicv2_irq_eoi(struct rt_pic_irq *pirq)
{
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
if (_gicv2_eoi_mode_ns)
{
HWREG32(gic->cpu_base + GIC_CPU_DIR) = pirq->hwirq;
}
}
static rt_err_t gicv2_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
rt_uint32_t mask;
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
mask = HWREG32(gic->dist_base + GIC_DIST_PRI + hwirq / 4 * 4);
mask &= ~(0xffU << ((hwirq % 4) * 8));
mask |= ((priority & 0xffU) << ((hwirq % 4) * 8));
HWREG32(gic->dist_base + GIC_DIST_PRI + hwirq / 4 * 4) = mask;
return RT_EOK;
}
static rt_err_t gicv2_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
rt_uint32_t target_list = ((rt_uint8_t *)affinity)[gic - &_gicv2_list[0]];
rt_uint8_t valb = _gicv2_cpumask_map[__rt_ffs(target_list) - 1];
void *io_addr = gic->dist_base + GIC_DIST_TARGET + hwirq;
if (needs_rmw_access)
{
/* RMW write byte */
rt_uint32_t val;
rt_ubase_t level;
rt_ubase_t offset = (rt_ubase_t)io_addr & 3UL, shift = offset * 8;
static struct rt_spinlock rmw_lock = {};
level = rt_spin_lock_irqsave(&rmw_lock);
io_addr -= offset;
val = HWREG32(io_addr);
val &= ~RT_GENMASK(shift + 7, shift);
val |= valb << shift;
HWREG32(io_addr) = val;
rt_spin_unlock_irqrestore(&rmw_lock, level);
}
else
{
HWREG8(io_addr) = valb;
}
return RT_EOK;
}
static rt_err_t gicv2_irq_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
rt_err_t err = RT_EOK;
int hwirq = pirq->hwirq;
struct gicv2 *gic = raw_to_gicv2(pirq->pic);
if (hwirq >= GIC_SGI_NR)
{
err = gic_common_configure_irq(gic->dist_base + GIC_DIST_CONFIG, pirq->hwirq, mode, RT_NULL, RT_NULL);
}
else
{
err = -RT_ENOSYS;
}
return err;
}
static void gicv2_irq_send_ipi(struct rt_pic_irq *pirq, rt_bitmap_t *cpumask)
{
struct gicv2 *gic;
int sgi = pirq->hwirq;
rt_uint8_t *target_list = (rt_uint8_t *)cpumask;
for (int i = 0; i < _gicv2_nr; ++i)
{
if (*target_list)
{
gic = &_gicv2_list[i];
HWREG32(gic->dist_base + GIC_DIST_SOFTINT) = ((*target_list & 0xffU) << 16) | (sgi & 0xf);
rt_hw_dsb();
}
++target_list;
}
}
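The GIC_DIST_SOFTINT (GICD_SGIR) write above packs the CPU target list into bits [23:16] and the SGI number into bits [3:0]. A worked value, assuming SGI 1 is sent to CPU0 and CPU1:

/* standalone illustration, not driver code */
unsigned target_list = 0x03;                                        /* CPU0 | CPU1 */
unsigned sgi         = 1;
unsigned softint     = ((target_list & 0xffU) << 16) | (sgi & 0xf); /* 0x00030001 */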
static int gicv2_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
{
int irq, irq_index = hwirq - GIC_SGI_NR;
struct rt_pic_irq *pirq = rt_pic_find_irq(pic, irq_index);
if (pirq && hwirq >= GIC_SGI_NR)
{
pirq->mode = mode;
pirq->priority = GICD_INT_DEF_PRI;
rt_bitmap_set_bit(pirq->affinity, _init_cpu_id);
irq = rt_pic_config_irq(pic, irq_index, hwirq);
if (irq >= 0 && mode != RT_IRQ_MODE_LEVEL_HIGH)
{
gicv2_irq_set_triger_mode(pirq, mode);
}
}
else
{
irq = -1;
}
return irq;
}
static rt_err_t gicv2_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
rt_err_t err = RT_EOK;
if (args->args_count == 3)
{
out_pirq->mode = args->args[2] & RT_IRQ_MODE_MASK;
switch (args->args[0])
{
case 0:
/* SPI */
out_pirq->hwirq = args->args[1] + 32;
break;
case 1:
/* PPI */
out_pirq->hwirq = args->args[1] + 16;
break;
default:
err = -RT_ENOSYS;
break;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
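gicv2_irq_parse implements the common 3-cell GIC devicetree binding: cell 0 selects SPI (0) or PPI (1), cell 1 is the controller-relative number, and cell 2 carries the trigger flags. A self-contained sketch of the same number mapping (hypothetical helper, for illustration only):

#include <assert.h>

static int gic_cells_to_hwirq(unsigned type, unsigned num)
{
    return type == 0 ? (int)num + 32  /* SPI: hwirq space starts at 32 */
         : type == 1 ? (int)num + 16  /* PPI: hwirq space starts at 16 */
         : -1;
}

int main(void)
{
    assert(gic_cells_to_hwirq(0, 35) == 67); /* e.g. interrupts = <0 35 4>; */
    assert(gic_cells_to_hwirq(1, 9) == 25);  /* e.g. interrupts = <1 9 8>;  */
    return 0;
}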
static struct rt_pic_ops gicv2_ops =
{
.name = "GICv2",
.irq_init = gicv2_irq_init,
.irq_ack = gicv2_irq_ack,
.irq_mask = gicv2_irq_mask,
.irq_unmask = gicv2_irq_unmask,
.irq_eoi = gicv2_irq_eoi,
.irq_set_priority = gicv2_irq_set_priority,
.irq_set_affinity = gicv2_irq_set_affinity,
.irq_set_triger_mode = gicv2_irq_set_triger_mode,
.irq_send_ipi = gicv2_irq_send_ipi,
.irq_map = gicv2_irq_map,
.irq_parse = gicv2_irq_parse,
};
static rt_bool_t gicv2_handler(void *data)
{
rt_bool_t res = RT_FALSE;
int hwirq;
struct gicv2 *gic = data;
hwirq = HWREG32(gic->cpu_base + GIC_CPU_INTACK) & 0x3ffUL;
if (!(hwirq >= 1020 && hwirq <= 1023))
{
struct rt_pic_irq *pirq;
if (hwirq < GIC_SGI_NR)
{
rt_hw_rmb();
pirq = rt_pic_find_ipi(&gic->parent, hwirq);
}
else
{
pirq = rt_pic_find_irq(&gic->parent, hwirq - GIC_SGI_NR);
}
gicv2_irq_ack(pirq);
rt_pic_handle_isr(pirq);
gicv2_irq_eoi(pirq);
res = RT_TRUE;
}
return res;
}
static rt_err_t gicv2_enable_rmw_access(void *data)
{
if (rt_ofw_machine_is_compatible("renesas,emev2"))
{
needs_rmw_access = RT_TRUE;
return RT_EOK;
}
return -RT_EINVAL;
}
static const struct gic_quirk _gicv2_quirks[] =
{
{
.desc = "GICv2: Broken byte access",
.compatible = "arm,pl390",
.init = gicv2_enable_rmw_access,
},
{ /* sentinel */ }
};
static rt_err_t gicv2_iomap_init(struct gicv2 *gic, rt_uint64_t *regs)
{
rt_err_t err = RT_EOK;
int idx;
const char *name[] =
{
"Distributor",
"CPU interfaces",
"Virtual interface control",
"Virtual CPU interface",
};
do {
/* GICD->GICC->GICH->GICV */
gic->dist_size = regs[1];
gic->dist_base = rt_ioremap((void *)regs[0], gic->dist_size);
if (!gic->dist_base)
{
idx = 0;
err = -RT_ERROR;
break;
}
gic->cpu_size = regs[3];
gic->cpu_base = rt_ioremap((void *)regs[2], gic->cpu_size);
if (!gic->cpu_base)
{
idx = 1;
err = -RT_ERROR;
break;
}
/* ArchRev[4:7] */
gic->version = HWREG32(gic->dist_base + GIC_DIST_ICPIDR2) >> 4;
#ifdef ARCH_SUPPORT_HYP
if (gic->version == 1)
{
break;
}
gic->hyp_size = regs[5];
gic->hyp_base = rt_ioremap((void *)regs[4], gic->hyp_size);
if (!gic->hyp_base)
{
idx = 2;
err = -RT_ERROR;
break;
}
gic->vcpu_size = regs[7];
gic->vcpu_base = rt_ioremap((void *)regs[6], gic->vcpu_size);
if (!gic->vcpu_base)
{
idx = 3;
err = -RT_ERROR;
break;
}
#endif /* ARCH_SUPPORT_HYP */
} while (0);
if (err)
{
RT_UNUSED(idx);
RT_UNUSED(name);
LOG_E("gic[%d] %s IO[%p, %p] map fail", _gicv2_nr, name[idx], regs[idx * 2], regs[idx * 2 + 1]);
}
return err;
}
static void gicv2_init(struct gicv2 *gic)
{
gicv2_dist_init(gic);
gic->parent.priv_data = gic;
gic->parent.ops = &gicv2_ops;
rt_pic_linear_irq(&gic->parent, gic->max_irq + 1 - GIC_SGI_NR);
gic_common_sgi_config(gic->dist_base, &gic->parent, _gicv2_nr * GIC_SGI_NR);
rt_pic_add_traps(gicv2_handler, gic);
rt_pic_user_extends(&gic->parent);
}
static void gicv2_init_fail(struct gicv2 *gic)
{
if (gic->dist_base)
{
rt_iounmap(gic->dist_base);
}
if (gic->cpu_base)
{
rt_iounmap(gic->cpu_base);
}
if (gic->hyp_base)
{
rt_iounmap(gic->hyp_base);
}
if (gic->vcpu_base)
{
rt_iounmap(gic->vcpu_base);
}
rt_memset(gic, 0, sizeof(*gic));
}
static rt_err_t gicv2_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
rt_err_t err = RT_EOK;
struct gicv2 *gic = RT_NULL;
do {
rt_uint64_t regs[8];
if (_gicv2_nr >= RT_PIC_ARM_GIC_MAX_NR)
{
LOG_W("GICv2/v1 table is full");
err = -RT_EFULL;
break;
}
gic = &_gicv2_list[_gicv2_nr];
rt_ofw_get_address_array(np, RT_ARRAY_SIZE(regs), regs);
if ((err = gicv2_iomap_init(gic, regs)))
{
break;
}
if (gic->version != 1 && gic->version != 2)
{
LOG_E("Version = %d is not support", gic->version);
err = -RT_EINVAL;
break;
}
gic_common_init_quirk_ofw(np, _gicv2_quirks, gic);
gicv2_init(gic);
rt_ofw_data(np) = &gic->parent;
if (gic->version == 2)
{
#ifdef RT_PIC_ARM_GIC_V2M
gicv2m_ofw_probe(np, id);
#endif
}
++_gicv2_nr;
} while (0);
if (err && gic)
{
gicv2_init_fail(gic);
}
return err;
}
static const struct rt_ofw_node_id gicv2_ofw_ids[] =
{
{ .compatible = "arm,gic-400" },
{ .compatible = "arm,arm11mp-gic" },
{ .compatible = "arm,arm1176jzf-devchip-gic" },
{ .compatible = "arm,cortex-a15-gic" },
{ .compatible = "arm,cortex-a9-gic" },
{ .compatible = "arm,cortex-a7-gic" },
{ .compatible = "qcom,msm-8660-qgic" },
{ .compatible = "qcom,msm-qgic2" },
{ .compatible = "arm,pl390" },
{ /* sentinel */ }
};
RT_PIC_OFW_DECLARE(gicv2, gicv2_ofw_ids, gicv2_ofw_init);


@@ -0,0 +1,83 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2023-02-01 GuEe-GUI move macros to header
*/
#ifndef __PIC_GICV2_H__
#define __PIC_GICV2_H__
#include <rtdef.h>
#include <drivers/pic.h>
#define GIC_DIST_CTRL 0x000
#define GIC_DIST_TYPE 0x004
#define GIC_DIST_IIDR 0x008
#define GIC_DIST_IGROUP 0x080
#define GIC_DIST_ENABLE_SET 0x100
#define GIC_DIST_ENABLE_CLEAR 0x180
#define GIC_DIST_PENDING_SET 0x200
#define GIC_DIST_PENDING_CLEAR 0x280
#define GIC_DIST_ACTIVE_SET 0x300
#define GIC_DIST_ACTIVE_CLEAR 0x380
#define GIC_DIST_PRI 0x400
#define GIC_DIST_TARGET 0x800
#define GIC_DIST_CONFIG 0xc00
#define GIC_DIST_SOFTINT 0xf00
#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
#define GIC_DIST_SGI_PENDING_SET 0xf20
#define GIC_DIST_ICPIDR2 0xfe8
#define GICD_ENABLE 0x1
#define GICD_DISABLE 0x0
#define GICD_INT_ACTLOW_LVLTRIG 0x0
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
#define GICD_GROUP0 0
#define GICD_GROUP1 (~GICD_GROUP0)
#define GIC_CPU_CTRL 0x00
#define GIC_CPU_PRIMASK 0x04
#define GIC_CPU_BINPOINT 0x08
#define GIC_CPU_INTACK 0x0c
#define GIC_CPU_EOI 0x10
#define GIC_CPU_RUNNINGPRI 0x14
#define GIC_CPU_HIGHPRI 0x18
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IIDR 0xfc
#define GIC_CPU_DIR 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0 /* priority levels 16 */
#define GIC_CPU_CTRL_ENABLE_GRP0 (1 << 0)
#define GIC_CPU_CTRL_ENABLE_GRP1 (1 << 1)
#define GIC_CPU_CTRL_EOI_MODE_NS (1 << 9)
struct gicv2
{
struct rt_pic parent;
int version;
int max_irq;
void *dist_base;
rt_size_t dist_size;
void *cpu_base;
rt_size_t cpu_size;
void *hyp_base;
rt_size_t hyp_size;
void *vcpu_base;
rt_size_t vcpu_size;
};
#endif /* __PIC_GICV2_H__ */


@@ -0,0 +1,994 @@
/*
* Copyright (c) 2006-2023, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
* 2022-08-24 GuEe-GUI add pic support
* 2022-11-07 GuEe-GUI add v2m support
* 2023-01-30 GuEe-GUI add its and espi, eppi, lpi support
*/
#include <rthw.h>
#include <rtthread.h>
#include <rtdevice.h>
#define DBG_TAG "pic.gicv3"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>
#include <cpu.h>
#include <ioremap.h>
#include <hashmap.h>
#include "pic-gicv3.h"
#include "pic-gic-common.h"
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
static int _init_cpu_id;
static struct gicv3 _gic;
static rt_bool_t _gicv3_eoi_mode_ns = RT_FALSE;
static rt_bool_t _gicv3_arm64_2941627_erratum = RT_FALSE;
enum
{
SGI_TYPE,
PPI_TYPE,
SPI_TYPE,
EPPI_TYPE,
ESPI_TYPE,
LPI_TYPE,
UNKNOW_TYPE,
};
rt_inline void *gicv3_percpu_redist_base(void)
{
return _gic.redist_percpu_base[rt_hw_cpu_id()];
}
rt_inline void *gicv3_percpu_redist_sgi_base(void)
{
return gicv3_percpu_redist_base() + GICR_SGI_OFFSET;
}
static rt_uint16_t *gicv3_dist_espi_reg(rt_uint32_t offset)
{
#define __reg_map_bits 5
#define __reg_map_size (1 << __reg_map_bits)
static rt_uint16_t reg_map[__reg_map_size] = {};
int idx = rt_hashmap_32(offset, __reg_map_bits);
LOG_D("%s ESPI Map<0x%04x> = %2d", "Distributor", offset, idx);
return &reg_map[idx];
#undef __reg_map_bits
#undef __reg_map_size
}
static void gicv3_wait_for_rwp(void *base, rt_uint32_t rwp_bit)
{
rt_uint32_t count = 1000000;
while ((HWREG32(base + GICD_CTLR) & rwp_bit))
{
count--;
if (!count)
{
LOG_W("RWP timeout");
break;
}
rt_hw_cpu_relax();
}
}
rt_inline void gicv3_dist_wait_for_rwp(void)
{
gicv3_wait_for_rwp(_gic.dist_base, GICD_CTLR_RWP);
}
rt_inline void gicv3_redist_wait_for_rwp(void)
{
gicv3_wait_for_rwp(_gic.redist_percpu_base[rt_hw_cpu_id()], GICR_CTLR_RWP);
}
static typeof(UNKNOW_TYPE) gicv3_hwirq_type(int hwirq)
{
typeof(UNKNOW_TYPE) ret;
switch (hwirq)
{
case 0 ... 15:
ret = SGI_TYPE;
break;
case 16 ... 31:
ret = PPI_TYPE;
break;
case 32 ... 1019:
ret = SPI_TYPE;
break;
case GIC_EPPI_BASE_INTID ... (GIC_EPPI_BASE_INTID + 63):
ret = EPPI_TYPE;
break;
case GIC_ESPI_BASE_INTID ... (GIC_ESPI_BASE_INTID + 1023):
ret = ESPI_TYPE;
break;
case 8192 ... RT_GENMASK(23, 0):
ret = LPI_TYPE;
break;
default:
ret = UNKNOW_TYPE;
break;
}
return ret;
}
static rt_uint32_t gicv3_hwirq_convert_offset_index(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
{
switch (gicv3_hwirq_type(hwirq))
{
case SGI_TYPE:
case PPI_TYPE:
case SPI_TYPE:
*index = hwirq;
break;
case EPPI_TYPE:
/* EPPI range (GICR_IPRIORITYR<n>E) is contiguous to the PPI (GICR_IPRIORITYR<n>) range in the registers */
*index = hwirq - GIC_EPPI_BASE_INTID + 32;
break;
case ESPI_TYPE:
*index = hwirq - GIC_ESPI_BASE_INTID;
offset = *gicv3_dist_espi_reg(offset);
break;
default:
*index = hwirq;
break;
}
return offset;
}
rt_inline rt_bool_t gicv3_hwirq_in_redist(int hwirq)
{
switch (gicv3_hwirq_type(hwirq))
{
case SGI_TYPE:
case PPI_TYPE:
case EPPI_TYPE:
return RT_TRUE;
default:
return RT_FALSE;
}
}
static void *gicv3_hwirq_reg_base(int hwirq, rt_uint32_t offset, rt_uint32_t *index)
{
void *base;
if (gicv3_hwirq_in_redist(hwirq))
{
base = gicv3_percpu_redist_sgi_base();
}
else
{
base = _gic.dist_base;
}
return base + gicv3_hwirq_convert_offset_index(hwirq, offset, index);
}
static void gicv3_hwirq_poke(int hwirq, rt_uint32_t offset)
{
rt_uint32_t index;
void *base = gicv3_hwirq_reg_base(hwirq, offset, &index);
HWREG32(base + (index / 32) * 4) = 1 << (index % 32);
}
static void gicv3_dist_init(void)
{
rt_uint32_t i;
rt_uint64_t affinity;
void *base = _gic.dist_base;
rt_ubase_t mpidr = rt_cpu_mpidr_table[_init_cpu_id = rt_hw_cpu_id()];
_gic.line_nr = rt_min(GICD_TYPER_SPIS(_gic.gicd_typer), 1020U);
_gic.espi_nr = GICD_TYPER_ESPIS(_gic.gicd_typer);
LOG_D("%d SPIs implemented", _gic.line_nr - 32);
LOG_D("%d Extended SPIs implemented", _gic.espi_nr);
/* Disable the distributor */
HWREG32(base + GICD_CTLR) = 0;
gicv3_dist_wait_for_rwp();
/* Non-secure Group-1 */
for (i = 32; i < _gic.line_nr; i += 32)
{
HWREG32(base + GICD_IGROUPR + i / 8) = RT_UINT32_MAX;
}
/* Disable, clear, group */
for (i = 0; i < _gic.espi_nr; i += 4)
{
HWREG32(base + GICD_IPRIORITYRnE + i) = GICD_INT_DEF_PRI_X4;
if (!(i % 16))
{
HWREG32(base + GICD_ICFGRnE + i / 4) = 0;
if (!(i % 32))
{
HWREG32(base + GICD_ICENABLERnE + i / 8) = RT_UINT32_MAX;
HWREG32(base + GICD_ICACTIVERnE + i / 8) = RT_UINT32_MAX;
HWREG32(base + GICD_IGROUPRnE + i / 8) = RT_UINT32_MAX;
}
}
}
gic_common_dist_config(base, _gic.line_nr, RT_NULL, RT_NULL);
/* Enable the distributor */
HWREG32(base + GICD_CTLR) = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
gicv3_dist_wait_for_rwp();
affinity = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
/* Set all global interrupts to this CPU only. */
for (i = 32; i < _gic.line_nr; ++i)
{
HWREG64(base + GICD_IROUTER + i * 8) = affinity;
}
for (i = 0; i < _gic.espi_nr; ++i)
{
HWREG64(base + GICD_IROUTERnE + i * 8) = affinity;
}
if (GICD_TYPER_NUM_LPIS(_gic.gicd_typer))
{
/* Max LPI = 8192 + Math.pow(2, num_LPIs + 1) - 1 */
rt_size_t num_lpis = (1 << (GICD_TYPER_NUM_LPIS(_gic.gicd_typer) + 1)) + 1;
_gic.lpi_nr = rt_min_t(int, num_lpis, 1 << GICD_TYPER_ID_BITS(_gic.gicd_typer));
}
else
{
_gic.lpi_nr = 1 << GICD_TYPER_ID_BITS(_gic.gicd_typer);
}
/* SPI + eSPI + LPIs */
_gic.irq_nr = _gic.line_nr - 32 + _gic.espi_nr + _gic.lpi_nr;
}
static void gicv3_redist_enable(rt_bool_t enable)
{
void *base;
rt_uint32_t count = 1000000, waker;
do {
if (_gic.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
{
break;
}
base = gicv3_percpu_redist_base();
waker = HWREG32(base + GICR_WAKER);
if (enable)
{
waker &= ~GICR_WAKER_ProcessorSleep;
}
else
{
waker |= GICR_WAKER_ProcessorSleep;
}
HWREG32(base + GICR_WAKER) = waker;
if (!enable && !(HWREG32(base + GICR_WAKER) & GICR_WAKER_ProcessorSleep))
{
break;
}
while ((HWREG32(base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) != 0)
{
if (count-- == 0)
{
LOG_E("%s failed to %s", "Redistributor", enable ? "wakeup" : "sleep");
break;
}
}
} while (0);
}
static void gicv3_redist_init(void)
{
void *base;
rt_uint32_t affinity;
int cpu_id = rt_hw_cpu_id();
rt_bool_t find_ok = RT_FALSE;
rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id], gicr_typer;
affinity = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
for (int i = 0; i < _gic.redist_regions_nr; ++i)
{
base = _gic.redist_regions[i].base;
do {
gicr_typer = HWREG64(base + GICR_TYPER);
if ((gicr_typer >> 32) == affinity)
{
rt_size_t ppi_nr = _gic.percpu_ppi_nr[cpu_id];
rt_size_t typer_nr_ppis = GICR_TYPER_NR_PPIS(gicr_typer);
_gic.percpu_ppi_nr[cpu_id] = rt_min(typer_nr_ppis, ppi_nr);
_gic.redist_percpu_base[cpu_id] = base;
find_ok = RT_TRUE;
break;
}
if (_gic.redist_stride)
{
base += _gic.redist_stride;
}
else
{
base += GICR_RD_BASE_SIZE + GICR_SGI_BASE_SIZE;
if (gicr_typer & GICR_TYPER_VLPIS)
{
base += GICR_VLPI_BASE_SIZE + GICR_RESERVED_SIZE;
}
}
} while (!(gicr_typer & GICR_TYPER_LAST));
if (find_ok)
{
break;
}
}
if (find_ok)
{
gicv3_redist_enable(RT_TRUE);
}
}
static void gicv3_cpu_init(void)
{
void *base;
rt_size_t ppi_nr;
rt_uint64_t value;
int cpu_id = rt_hw_cpu_id();
#ifdef ARCH_SUPPORT_HYP
_gicv3_eoi_mode_ns = RT_TRUE;
#endif
base = gicv3_percpu_redist_sgi_base();
ppi_nr = _gic.percpu_ppi_nr[cpu_id] + 16;
for (rt_uint32_t i = 0; i < ppi_nr; i += 32)
{
HWREG32(base + GICR_IGROUPR0 + i / 8) = RT_UINT32_MAX;
}
gic_common_cpu_config(base, ppi_nr, (void *)gicv3_redist_wait_for_rwp, &_gic.parent);
read_gicreg(ICC_SRE_SYS, value);
value |= (1 << 0);
write_gicreg(ICC_SRE_SYS, value);
rt_hw_isb();
write_gicreg(ICC_PMR_SYS, 0xff);
/* Enable group1 interrupt */
write_gicreg(ICC_IGRPEN1_SYS, 1);
write_gicreg(ICC_BPR1_SYS, 0);
/*
* ICC_BPR0_EL1 determines the preemption group for both Group 0 and Group 1
* interrupts.
* Targeted SGIs with affinity level 0 values of 0 - 255 are supported.
*/
value = ICC_CTLR_EL1_RSS | ICC_CTLR_EL1_CBPR_MASK;
if (_gicv3_eoi_mode_ns)
{
value |= ICC_CTLR_EL1_EOImode_drop;
}
write_gicreg(ICC_CTLR_SYS, value);
}
static rt_err_t gicv3_irq_init(struct rt_pic *pic)
{
gicv3_redist_init();
gicv3_cpu_init();
return RT_EOK;
}
static void gicv3_irq_ack(struct rt_pic_irq *pirq)
{
if (!_gicv3_eoi_mode_ns)
{
write_gicreg(ICC_EOIR1_SYS, pirq->hwirq);
rt_hw_isb();
}
}
static void gicv3_irq_mask(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
gicv3_hwirq_poke(hwirq, GICD_ICENABLER);
if (gicv3_hwirq_in_redist(hwirq))
{
gicv3_redist_wait_for_rwp();
}
else
{
gicv3_dist_wait_for_rwp();
}
}
static void gicv3_irq_unmask(struct rt_pic_irq *pirq)
{
int hwirq = pirq->hwirq;
gicv3_hwirq_poke(hwirq, GICD_ISENABLER);
}
static void gicv3_irq_eoi(struct rt_pic_irq *pirq)
{
if (_gicv3_eoi_mode_ns)
{
int hwirq = pirq->hwirq;
if (hwirq < 8192)
{
write_gicreg(ICC_EOIR1_SYS, hwirq);
rt_hw_isb();
if (!_gicv3_arm64_2941627_erratum)
{
write_gicreg(ICC_DIR_SYS, hwirq);
rt_hw_isb();
}
}
}
}
static rt_err_t gicv3_irq_set_priority(struct rt_pic_irq *pirq, rt_uint32_t priority)
{
void *base;
int hwirq = pirq->hwirq;
rt_uint32_t index, offset;
if (gicv3_hwirq_in_redist(hwirq))
{
base = gicv3_percpu_redist_sgi_base();
}
else
{
base = _gic.dist_base;
}
offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IPRIORITYR, &index);
HWREG8(base + offset + index) = priority;
return RT_EOK;
}
static rt_err_t gicv3_irq_set_affinity(struct rt_pic_irq *pirq, rt_bitmap_t *affinity)
{
rt_err_t ret = RT_EOK;
rt_uint64_t val;
rt_ubase_t mpidr;
rt_uint32_t offset, index;
int hwirq = pirq->hwirq, cpu_id = rt_bitmap_next_set_bit(affinity, 0, RT_CPUS_NR);
mpidr = rt_cpu_mpidr_table[cpu_id];
offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_IROUTER, &index);
val = ((rt_uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
MPIDR_AFFINITY_LEVEL(mpidr, 0));
HWREG64(_gic.dist_base + offset + (index * 8)) = val;
return ret;
}
static rt_err_t gicv3_irq_set_triger_mode(struct rt_pic_irq *pirq, rt_uint32_t mode)
{
void *base;
rt_err_t ret = RT_EOK;
int hwirq = pirq->hwirq;
rt_uint32_t index, offset;
if (hwirq > 15)
{
if (gicv3_hwirq_in_redist(hwirq))
{
base = gicv3_percpu_redist_sgi_base();
}
else
{
base = _gic.dist_base;
}
offset = gicv3_hwirq_convert_offset_index(hwirq, GICD_ICFGR, &index);
ret = gic_common_configure_irq(base + offset, hwirq, mode, RT_NULL, RT_NULL);
}
else
{
ret = -RT_ENOSYS;
}
return ret;
}
static void gicv3_irq_send_ipi(struct rt_pic_irq *pirq, rt_bitmap_t *cpumask)
{
#define __mpidr_to_sgi_affinity(cluster_id, level) \
(MPIDR_AFFINITY_LEVEL(cluster_id, level) << ICC_SGI1R_AFFINITY_##level##_SHIFT)
int cpu_id, last_cpu_id, limit;
rt_uint64_t initid, range_sel, target_list, cluster_id;
range_sel = 0;
initid = ((pirq->hwirq) << ICC_SGI1R_SGI_ID_SHIFT);
rt_bitmap_for_each_set_bit(cpumask, cpu_id, RT_CPUS_NR)
{
rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];
cluster_id = mpidr & (~MPIDR_LEVEL_MASK);
target_list = 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
limit = rt_min(cpu_id + ICC_SGI1R_TARGET_LIST_MAX, RT_CPUS_NR);
last_cpu_id = cpu_id;
rt_bitmap_for_each_set_bit_from(cpumask, cpu_id, cpu_id, limit)
{
rt_uint64_t mpidr = rt_cpu_mpidr_table[cpu_id];
if (cluster_id != (mpidr & (~MPIDR_LEVEL_MASK)))
{
range_sel = 0;
/* Don't break next cpuid */
cpu_id = last_cpu_id;
break;
}
last_cpu_id = cpu_id;
target_list |= 1 << ((mpidr & MPIDR_LEVEL_MASK) % ICC_SGI1R_TARGET_LIST_MAX);
}
rt_hw_dsb();
write_gicreg(ICC_SGI1R_SYS,
__mpidr_to_sgi_affinity(cluster_id, 3) |
(range_sel << ICC_SGI1R_RS_SHIFT) |
__mpidr_to_sgi_affinity(cluster_id, 2) |
initid |
__mpidr_to_sgi_affinity(cluster_id, 1) |
target_list);
rt_hw_isb();
++range_sel;
}
#undef __mpidr_to_sgi_affinity
}
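The ICC_SGI1R_EL1 value assembled above follows the shift macros in pic-gicv3.h: the target list sits in bits [15:0], Aff1 at bit 16, the SGI INTID at bit 24, Aff2 at bit 32, the range selector at bit 44 and Aff3 at bit 48. A worked value, assuming SGI 1 is sent to CPU0 and CPU1 of cluster 0:

/* standalone illustration, not driver code */
unsigned long long sgi1r = (1ULL << 24)    /* INTID = 1, ICC_SGI1R_SGI_ID_SHIFT        */
                         | (0x3ULL << 0);  /* CPU0 | CPU1, ICC_SGI1R_TARGET_LIST_SHIFT */
/* sgi1r == 0x0000000001000003; Aff1/Aff2/Aff3 and RS remain 0 for cluster 0 */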
static int gicv3_irq_map(struct rt_pic *pic, int hwirq, rt_uint32_t mode)
{
struct rt_pic_irq *pirq;
int irq, hwirq_type, irq_index;
hwirq_type = gicv3_hwirq_type(hwirq);
if (hwirq_type != LPI_TYPE)
{
irq_index = hwirq - GIC_SGI_NR;
}
else
{
irq_index = _gic.irq_nr - _gic.lpi_nr + hwirq - 8192;
}
pirq = rt_pic_find_irq(pic, irq_index);
if (pirq && hwirq >= GIC_SGI_NR)
{
pirq->mode = mode;
switch (gicv3_hwirq_type(hwirq))
{
case SPI_TYPE:
case ESPI_TYPE:
pirq->priority = GICD_INT_DEF_PRI;
rt_bitmap_set_bit(pirq->affinity, _init_cpu_id);
default:
break;
}
irq = rt_pic_config_irq(pic, irq_index, hwirq);
if (irq >= 0 && mode != RT_IRQ_MODE_LEVEL_HIGH)
{
gicv3_irq_set_triger_mode(pirq, mode);
}
}
else
{
irq = -1;
}
return irq;
}
static rt_err_t gicv3_irq_parse(struct rt_pic *pic, struct rt_ofw_cell_args *args, struct rt_pic_irq *out_pirq)
{
rt_err_t err = RT_EOK;
if (args->args_count == 3)
{
out_pirq->mode = args->args[2] & RT_IRQ_MODE_MASK;
switch (args->args[0])
{
case 0:
/* SPI */
out_pirq->hwirq = args->args[1] + 32;
break;
case 1:
/* PPI */
out_pirq->hwirq = args->args[1] + 16;
break;
case 2:
/* ESPI */
out_pirq->hwirq = args->args[1] + GIC_ESPI_BASE_INTID;
break;
case 3:
/* EPPI */
out_pirq->hwirq = args->args[1] + GIC_EPPI_BASE_INTID;
break;
case GIC_IRQ_TYPE_LPI:
/* LPI */
out_pirq->hwirq = args->args[1];
break;
case GIC_IRQ_TYPE_PARTITION:
out_pirq->hwirq = args->args[1];
if (args->args[1] >= 16)
{
out_pirq->hwirq += GIC_EPPI_BASE_INTID - 16;
}
else
{
out_pirq->hwirq += 16;
}
break;
default:
err = -RT_ENOSYS;
break;
}
}
else
{
err = -RT_EINVAL;
}
return err;
}
static struct rt_pic_ops gicv3_ops =
{
.name = "GICv3",
.irq_init = gicv3_irq_init,
.irq_ack = gicv3_irq_ack,
.irq_mask = gicv3_irq_mask,
.irq_unmask = gicv3_irq_unmask,
.irq_eoi = gicv3_irq_eoi,
.irq_set_priority = gicv3_irq_set_priority,
.irq_set_affinity = gicv3_irq_set_affinity,
.irq_set_triger_mode = gicv3_irq_set_triger_mode,
.irq_send_ipi = gicv3_irq_send_ipi,
.irq_map = gicv3_irq_map,
.irq_parse = gicv3_irq_parse,
};
static rt_bool_t gicv3_handler(void *data)
{
rt_bool_t res = RT_FALSE;
int hwirq;
struct gicv3 *gic = data;
read_gicreg(ICC_IAR1_SYS, hwirq);
if (!(hwirq >= 1020 && hwirq <= 1023))
{
struct rt_pic_irq *pirq;
if (hwirq < GIC_SGI_NR)
{
rt_hw_rmb();
pirq = rt_pic_find_ipi(&gic->parent, hwirq);
}
else
{
pirq = rt_pic_find_irq(&gic->parent, hwirq - GIC_SGI_NR);
}
gicv3_irq_ack(pirq);
rt_pic_handle_isr(pirq);
gicv3_irq_eoi(pirq);
res = RT_TRUE;
}
return res;
}
static rt_err_t gicv3_enable_quirk_msm8996(void *data)
{
struct gicv3 *gic = data;
gic->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
return RT_EOK;
}
static rt_err_t gicv3_enable_quirk_arm64_2941627(void *data)
{
_gicv3_arm64_2941627_erratum = RT_TRUE;
return RT_EOK;
}
static const struct gic_quirk _gicv3_quirks[] =
{
{
.desc = "GICv3: Qualcomm MSM8996 broken firmware",
.compatible = "qcom,msm8996-gic-v3",
.init = gicv3_enable_quirk_msm8996,
},
{
/* GIC-700: 2941627 workaround - IP variant [0,1] */
.desc = "GICv3: ARM64 erratum 2941627",
.iidr = 0x0400043b,
.iidr_mask = 0xff0e0fff,
.init = gicv3_enable_quirk_arm64_2941627,
},
{
/* GIC-700: 2941627 workaround - IP variant [2] */
.desc = "GICv3: ARM64 erratum 2941627",
.iidr = 0x0402043b,
.iidr_mask = 0xff0f0fff,
.init = gicv3_enable_quirk_arm64_2941627,
},
{ /* sentinel */ }
};
static rt_err_t gicv3_iomap_init(rt_uint64_t *regs)
{
rt_err_t ret = RT_EOK;
int idx;
char *name;
do {
/* GICD->GICR */
_gic.dist_size = regs[1];
_gic.dist_base = rt_ioremap((void *)regs[0], _gic.dist_size);
if (!_gic.dist_base)
{
name = "Distributor";
idx = 0;
ret = -RT_ERROR;
break;
}
name = "Redistributor";
_gic.redist_regions = rt_malloc(sizeof(_gic.redist_regions[0]) * _gic.redist_regions_nr);
if (!_gic.redist_regions)
{
idx = -1;
ret = -RT_ENOMEM;
LOG_E("No memory to save %s", name);
break;
}
for (int i = 0, off = 2; i < _gic.redist_regions_nr; ++i)
{
void *base = (void *)regs[off++];
rt_size_t size = regs[off++];
_gic.redist_regions[i].size = size;
_gic.redist_regions[i].base = rt_ioremap(base, size);
_gic.redist_regions[i].base_phy = base;
if (!base)
{
idx = 1;
ret = -RT_ERROR;
break;
}
}
if (ret)
{
break;
}
/* ArchRev[4:7] */
_gic.version = HWREG32(_gic.dist_base + GICD_PIDR2) >> 4;
} while (0);
if (ret && idx >= 0)
{
RT_UNUSED(name);
LOG_E("%s IO[%p, %p] map fail", name[idx], regs[idx * 2], regs[idx * 2 + 1]);
}
return ret;
}
static void gicv3_init(void)
{
#define __dist_espi_regs_do(func, expr, ...) \
__VA_ARGS__(*func(GICD_IGROUPR) expr GICD_IGROUPRnE); \
__VA_ARGS__(*func(GICD_ISENABLER) expr GICD_ISENABLERnE); \
__VA_ARGS__(*func(GICD_ICENABLER) expr GICD_ICENABLERnE); \
__VA_ARGS__(*func(GICD_ISPENDR) expr GICD_ISPENDRnE); \
__VA_ARGS__(*func(GICD_ICPENDR) expr GICD_ICPENDRnE); \
__VA_ARGS__(*func(GICD_ISACTIVER) expr GICD_ISACTIVERnE); \
__VA_ARGS__(*func(GICD_ICACTIVER) expr GICD_ICACTIVERnE); \
__VA_ARGS__(*func(GICD_IPRIORITYR) expr GICD_IPRIORITYRnE); \
__VA_ARGS__(*func(GICD_ICFGR) expr GICD_ICFGRnE); \
__VA_ARGS__(*func(GICD_IROUTER) expr GICD_IROUTERnE);
/* Map registers for ESPI */
__dist_espi_regs_do(gicv3_dist_espi_reg, =);
__dist_espi_regs_do(gicv3_dist_espi_reg, ==, RT_ASSERT);
#undef __dist_espi_regs_do
_gic.gicd_typer = HWREG32(_gic.dist_base + GICD_TYPER);
gic_common_init_quirk_hw(HWREG32(_gic.dist_base + GICD_IIDR), _gicv3_quirks, &_gic.parent);
gicv3_dist_init();
_gic.parent.priv_data = &_gic;
_gic.parent.ops = &gicv3_ops;
rt_pic_linear_irq(&_gic.parent, _gic.irq_nr - GIC_SGI_NR);
gic_common_sgi_config(_gic.dist_base, &_gic.parent, 0);
rt_pic_add_traps(gicv3_handler, &_gic);
rt_pic_user_extends(&_gic.parent);
}
static void gicv3_init_fail(void)
{
if (_gic.dist_base)
{
rt_iounmap(_gic.dist_base);
}
if (_gic.redist_regions)
{
for (int i = 0; i < _gic.redist_regions_nr; ++i)
{
if (_gic.redist_regions[i].base)
{
rt_iounmap(_gic.redist_regions[i].base);
}
}
rt_free(_gic.redist_regions);
}
rt_memset(&_gic, 0, sizeof(_gic));
}
static rt_err_t gicv3_ofw_init(struct rt_ofw_node *np, const struct rt_ofw_node_id *id)
{
rt_err_t err = RT_EOK;
do {
rt_size_t reg_nr_max;
rt_err_t msi_init = -RT_ENOSYS;
rt_uint32_t redist_regions_nr;
rt_uint64_t *regs, redist_stride;
if (rt_ofw_prop_read_u32(np, "#redistributor-regions", &redist_regions_nr))
{
redist_regions_nr = 1;
}
/* GICD + n * GICR */
reg_nr_max = 2 + (2 * redist_regions_nr);
regs = rt_calloc(1, sizeof(rt_uint64_t) * reg_nr_max);
if (!regs)
{
err = -RT_ENOMEM;
break;
}
rt_ofw_get_address_array(np, reg_nr_max, regs);
_gic.redist_regions_nr = redist_regions_nr;
err = gicv3_iomap_init(regs);
rt_free(regs);
if (err)
{
break;
}
if (_gic.version != 3 && _gic.version != 4)
{
LOG_E("Version = %d is not support", _gic.version);
err = -RT_EINVAL;
break;
}
if (rt_ofw_prop_read_u64(np, "redistributor-stride", &redist_stride))
{
redist_stride = 0;
}
_gic.redist_stride = redist_stride;
gic_common_init_quirk_ofw(np, _gicv3_quirks, &_gic.parent);
gicv3_init();
rt_ofw_data(np) = &_gic.parent;
#ifdef RT_PIC_ARM_GIC_V3_ITS
msi_init = gicv3_its_ofw_probe(np, id);
#endif
/* V2M or ITS only */
if (msi_init)
{
#ifdef RT_PIC_ARM_GIC_V2M
gicv2m_ofw_probe(np, id);
#endif
}
} while (0);
if (err)
{
gicv3_init_fail();
}
return err;
}
static const struct rt_ofw_node_id gicv3_ofw_ids[] =
{
{ .compatible = "arm,gic-v3" },
{ /* sentinel */ }
};
RT_PIC_OFW_DECLARE(gicv3, gicv3_ofw_ids, gicv3_ofw_init);


@@ -0,0 +1,299 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2013-07-20 Bernard first version
* 2014-04-03 Grissiom many enhancements
* 2018-11-22 Jesven add rt_hw_ipi_send()
* add rt_hw_ipi_handler_install()
* 2023-02-01 GuEe-GUI move macros to header
*/
#ifndef __IRQ_GICV3_H__
#define __IRQ_GICV3_H__
#include <rtdef.h>
#include <cpuport.h>
#include <drivers/pic.h>
#include <drivers/core/dm.h>
#include <dt-bindings/size.h>
/* Distributor registers */
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
#define GICD_TYPER2 0x000C
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
#define GICD_SETSPI_SR 0x0050
#define GICD_CLRSPI_SR 0x0058
#define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ISACTIVER 0x0300
#define GICD_ICACTIVER 0x0380
#define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
#define GICD_IGRPMODR 0x0D00
#define GICD_NSACR 0x0E00
#define GICD_IGROUPRnE 0x1000
#define GICD_ISENABLERnE 0x1200
#define GICD_ICENABLERnE 0x1400
#define GICD_ISPENDRnE 0x1600
#define GICD_ICPENDRnE 0x1800
#define GICD_ISACTIVERnE 0x1A00
#define GICD_ICACTIVERnE 0x1C00
#define GICD_IPRIORITYRnE 0x2000
#define GICD_ICFGRnE 0x3000
#define GICD_IROUTER 0x6000
#define GICD_IROUTERnE 0x8000
#define GICD_IDREGS 0xFFD0
#define GICD_PIDR2 0xFFE8
#define GICD_ITARGETSR 0x0800
#define GICD_SGIR 0x0F00
#define GICD_CPENDSGIR 0x0F10
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
#define GICD_TYPER_RSS (1U << 26)
#define GICD_TYPER_LPIS (1U << 17)
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ESPI (1U << 8)
#define GICD_TYPER_ID_BITS(t) ((((t) >> 19) & 0x1f) + 1)
#define GICD_TYPER_NUM_LPIS(t) ((((t) >> 11) & 0x1f) + 1)
#define GICD_TYPER_SPIS(t) ((((t) & 0x1f) + 1) * 32)
#define GICD_TYPER_ESPIS(t) (((t) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((t) >> 27) : 0)
/* Redistributor registers */
#define GICR_CTLR 0x0000
#define GICR_IIDR 0x0004
#define GICR_TYPER 0x0008
#define GICR_STATUSR 0x0010
#define GICR_WAKER 0x0014
#define GICR_MPAMIDR 0x0018
#define GICR_PARTIDR 0x001C
#define GICR_SETLPIR 0x0040
#define GICR_CLRLPIR 0x0048
#define GICR_PROPBASER 0x0070
#define GICR_PENDBASER 0x0078
#define GICR_INVLPIR 0x00A0
#define GICR_INVALLR 0x00B0
#define GICR_SYNCR 0x00C0
#define GICR_PIDR2 GICD_PIDR2
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
#define GICR_CTLR_CES (1UL << 1)
#define GICR_CTLR_IR (1UL << 2)
#define GICR_CTLR_RWP (1UL << 3)
#define GICR_RD_BASE_SIZE (64 * SIZE_KB)
#define GICR_SGI_OFFSET (64 * SIZE_KB)
#define GICR_SGI_BASE_SIZE GICR_SGI_OFFSET
/* Re-Distributor registers, offsets from SGI_base */
#define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
#define GICR_ICPENDR0 GICD_ICPENDR
#define GICR_ISACTIVER0 GICD_ISACTIVER
#define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
#define GICR_ICFGR0 GICD_ICFGR
#define GICR_IGRPMODR0 GICD_IGRPMODR
#define GICR_NSACR GICD_NSACR
#define GICR_TYPER_PLPIS (1U << 0)
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DIRTY (1U << 2)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
#define GICR_TYPER_RVPEID (1U << 7)
#define GICR_TYPER_COM_LPI_AFF RT_GENMASK_ULL(25, 24)
#define GICR_TYPER_AFFINITY RT_GENMASK_ULL(63, 32)
#define GICR_INVLPIR_INTID RT_GENMASK_ULL(31, 0)
#define GICR_INVLPIR_VPEID RT_GENMASK_ULL(47, 32)
#define GICR_INVLPIR_V RT_GENMASK_ULL(63, 63)
#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID
#define GICR_INVALLR_V GICR_INVLPIR_V
#define GICR_VLPI_BASE_SIZE (64 * SIZE_KB)
#define GICR_RESERVED_SIZE (64 * SIZE_KB)
#define GIC_V3_REDIST_SIZE 0x20000
#define GICR_TYPER_NR_PPIS(t) (16 + ({ int __ppinum = (((t) >> 27) & 0x1f); __ppinum <= 2 ? __ppinum : 0; }) * 32)
#define GICR_WAKER_ProcessorSleep (1U << 1)
#define GICR_WAKER_ChildrenAsleep (1U << 2)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
#define GICR_PROPBASER_ADDRESS(x) ((x) & RT_GENMASK_ULL(51, 12))
#define GICR_PENDBASER_ADDRESS(x) ((x) & RT_GENMASK_ULL(51, 16))
/* ITS registers */
#define GITS_CTLR 0x0000
#define GITS_IIDR 0x0004
#define GITS_TYPER 0x0008
#define GITS_MPAMIDR 0x0010
#define GITS_PARTIDR 0x0014
#define GITS_MPIDR 0x0018
#define GITS_STATUSR 0x0040
#define GITS_UMSIR 0x0048
#define GITS_CBASER 0x0080
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
#define GITS_BASER 0x0100 /* 0x0100~0x0138 */
/*
* ITS commands
*/
#define GITS_CMD_MAPD 0x08
#define GITS_CMD_MAPC 0x09
#define GITS_CMD_MAPTI 0x0a
#define GITS_CMD_MAPI 0x0b
#define GITS_CMD_MOVI 0x01
#define GITS_CMD_DISCARD 0x0f
#define GITS_CMD_INV 0x0c
#define GITS_CMD_MOVALL 0x0e
#define GITS_CMD_INVALL 0x0d
#define GITS_CMD_INT 0x03
#define GITS_CMD_CLEAR 0x04
#define GITS_CMD_SYNC 0x05
/* ITS Config Area */
#define GITS_LPI_CFG_GROUP1 (1 << 1)
#define GITS_LPI_CFG_ENABLED (1 << 0)
/* ITS Command Queue Descriptor */
#define GITS_CBASER_VALID (1UL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53)
#define GITS_TRANSLATION_TABLE_DESCRIPTORS_NR 8
#define GITS_BASER_CACHEABILITY(reg, inner_outer, type) \
(GITS_CBASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT)
#define GITS_BASER_SHAREABILITY(reg, type) \
(GITS_CBASER_##type << reg##_SHAREABILITY_SHIFT)
#define GITS_CBASER_CACHE_DnGnRnE 0x0UL /* Device-nGnRnE. */
#define GITS_CBASER_CACHE_NIN 0x1UL /* Normal Inner Non-cacheable. */
#define GITS_CBASER_CACHE_NIRAWT 0x2UL /* Normal Inner Cacheable Read-allocate, Write-through. */
#define GITS_CBASER_CACHE_NIRAWB 0x3UL /* Normal Inner Cacheable Read-allocate, Write-back. */
#define GITS_CBASER_CACHE_NIWAWT 0x4UL /* Normal Inner Cacheable Write-allocate, Write-through. */
#define GITS_CBASER_CACHE_NIWAWB 0x5UL /* Normal Inner Cacheable Write-allocate, Write-back. */
#define GITS_CBASER_CACHE_NIRAWAWT 0x6UL /* Normal Inner Cacheable Read-allocate, Write-allocate, Write-through. */
#define GITS_CBASER_CACHE_NIRAWAWB 0x7UL /* Normal Inner Cacheable Read-allocate, Write-allocate, Write-back. */
#define GITS_CBASER_CACHE_MASK 0x7UL
#define GITS_CBASER_SHARE_NS 0x0UL /* Non-shareable. */
#define GITS_CBASER_SHARE_IS 0x1UL /* Inner Shareable. */
#define GITS_CBASER_SHARE_OS 0x2UL /* Outer Shareable. */
#define GITS_CBASER_SHARE_RES 0x3UL /* Reserved. Treated as 0b00 */
#define GITS_CBASER_SHARE_MASK 0x3UL
#define GITS_CBASER_InnerShareable GITS_BASER_SHAREABILITY(GITS_CBASER, SHARE_IS)
#define GITS_CBASER_SHARE_MASK_ALL GITS_BASER_SHAREABILITY(GITS_CBASER, SHARE_MASK)
#define GITS_CBASER_nCnB GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, DnGnRnE)
#define GITS_CBASER_nC GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIN)
#define GITS_CBASER_RaWt GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIRAWT)
#define GITS_CBASER_RaWb GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIRAWB)
#define GITS_CBASER_WaWt GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIWAWT)
#define GITS_CBASER_WaWb GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIWAWB)
#define GITS_CBASER_RaWaWt GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIRAWAWT)
#define GITS_CBASER_RaWaWb GITS_BASER_CACHEABILITY(GITS_CBASER, INNER, NIRAWAWB)
#define GIC_EPPI_BASE_INTID 1056
#define GIC_ESPI_BASE_INTID 4096
#define GIC_IRQ_TYPE_LPI 0xa110c8ed
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
#define read_gicreg(reg, out) rt_hw_sysreg_read(reg, out)
#define write_gicreg(reg, in) rt_hw_sysreg_write(reg, in)
#define ICC_CTLR_EOImode 0x2
#define ICC_PMR_MASK 0xff
#define ICC_PMR_DEFAULT 0xf0
#define ICC_IGRPEN1_EN 0x1
#define ICC_SGIR_AFF3_SHIFT 48
#define ICC_SGIR_AFF2_SHIFT 32
#define ICC_SGIR_AFF1_SHIFT 16
#define ICC_SGIR_TARGET_MASK 0xffff
#define ICC_SGIR_IRQN_SHIFT 24
#define ICC_SGIR_ROUTING_BIT (1ULL << 40)
#define ICC_SGI1R_TARGET_LIST_SHIFT 0
#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
#define ICC_SGI1R_TARGET_LIST_MAX 16
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
#define ICC_SGI1R_SGI_ID_SHIFT 24
#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT)
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT)
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
#define ICC_SGI1R_RS_SHIFT 44
#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT)
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT 0
#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
#define ICC_CTLR_EL1_EOImode_SHIFT (1)
#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT (8)
#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_RSS (0x1 << 18)
#define ICC_CTLR_EL1_ExtRange (0x1 << 19)
struct gicv3
{
struct rt_pic parent;
int version;
int irq_nr;
rt_uint32_t gicd_typer;
rt_size_t line_nr;
rt_size_t espi_nr;
rt_size_t lpi_nr;
rt_ubase_t flags;
void *dist_base;
rt_size_t dist_size;
void *redist_percpu_base[RT_CPUS_NR];
rt_size_t percpu_ppi_nr[RT_CPUS_NR];
struct
{
void *base;
void *base_phy;
rt_size_t size;
} *redist_regions;
rt_uint64_t redist_flags;
rt_size_t redist_stride;
rt_size_t redist_regions_nr;
};
#endif /* __IRQ_GICV3_H__ */

File diff suppressed because it is too large


@@ -0,0 +1,79 @@
/*
* Copyright (c) 2006-2022, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2022-08-24 GuEe-GUI first version
*/
#include <rtthread.h>
#include <rtdevice.h>
/**
* This function will initialize hardware interrupts
*/
void rt_hw_interrupt_init(void)
{
/* initialize pic */
rt_pic_irq_init();
}
/**
* This function will mask an interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_mask(int vector)
{
rt_pic_irq_mask(vector);
}
/**
* This function will un-mask an interrupt.
* @param vector the interrupt number
*/
void rt_hw_interrupt_umask(int vector)
{
rt_pic_irq_unmask(vector);
}
/**
* This function will install an interrupt service routine for an interrupt.
* @param vector the interrupt number
* @param handler the interrupt service routine to be installed
* @param param the parameter passed to the handler
* @param name the name of the interrupt
*/
rt_isr_handler_t rt_hw_interrupt_install(int vector, rt_isr_handler_t handler,
void *param, const char *name)
{
rt_pic_attach_irq(vector, handler, param, name, RT_IRQ_F_NONE);
return RT_NULL;
}
/**
* This function will uninstall an interrupt service routine from an interrupt.
* @param vector the interrupt number
* @param handler the interrupt service routine to be removed
* @param param the parameter that was passed to the handler
*/
void rt_hw_interrupt_uninstall(int vector, rt_isr_handler_t handler, void *param)
{
rt_pic_detach_irq(vector, param);
}
#if defined(RT_USING_SMP) || defined(RT_USING_AMP)
void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask)
{
RT_BITMAP_DECLARE(cpu_masks, RT_CPUS_NR) = { cpu_mask };
rt_pic_irq_send_ipi(ipi_vector, cpu_masks);
}
void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler)
{
/* note: ipi_vector may be different from irq_vector */
rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER");
}
#endif
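A minimal usage sketch of this wrapper layer, assuming a driver services hardware interrupt 42 (the vector number and handler are illustrative, not part of this commit):

static void sample_isr(int vector, void *param)
{
    /* acknowledge and handle the device here */
}

void sample_driver_setup(void)
{
    rt_hw_interrupt_install(42, sample_isr, RT_NULL, "sample");
    rt_hw_interrupt_umask(42);
}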