3
0
mirror of https://github.com/Qortal/Brooklyn.git synced 2025-01-28 22:02:18 +00:00

Cleanup old crap, add new

This commit is contained in:
Raziel K. Crowe 2022-09-09 14:08:13 +05:00
parent 09d25b6de9
commit 5b9ff8c28c
12988 changed files with 217730 additions and 4409437 deletions

View File

@ -1,6 +1,7 @@
Qortal Team
===========
Scare Crowe, moosey Sean, gay jason aka Crowetic
Scare Crowe
List of maintainers and how to submit kernel changes
====================================================

View File

@ -1,11 +0,0 @@
hf
OUTPUT=~/out
bin/jlink --module-path jmods --compress=2 --add-modules java.base --output $OUTPUT
rm -r $OUTPUT/lib/client $OUTPUT/lib/server
echo "-minimal KNOWN" > $OUTPUT/lib/jvm.cfg
v8
-Xmx24G -Xms24G -Xmn16G -XX:+AlwaysPreTouch -XX:+UseParallelGC
-XX:+UseTransparentHugePages -XX:+UseBiasedLocking

View File

@ -0,0 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_ERDMA
tristate "Alibaba Elastic RDMA Adapter (ERDMA) support"
depends on PCI_MSI && 64BIT
depends on INFINIBAND_ADDR_TRANS
depends on INFINIBAND_USER_ACCESS
help
This is an RDMA/iWarp driver for the Alibaba Elastic RDMA Adapter (ERDMA),
which supports RDMA features in the Alibaba cloud environment.
To compile this driver as module, choose M here. The module will be
called erdma.

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_INFINIBAND_ERDMA) := erdma.o
erdma-y := erdma_cm.o erdma_main.o erdma_cmdq.o erdma_cq.o erdma_verbs.o erdma_qp.o erdma_eq.o

View File

@ -0,0 +1,287 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#ifndef __ERDMA_H__
#define __ERDMA_H__
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/xarray.h>
#include <rdma/ib_verbs.h>
#include "erdma_hw.h"
#define DRV_MODULE_NAME "erdma"
#define ERDMA_NODE_DESC "Elastic RDMA(iWARP) stack"
/* Generic event queue: a DMA ring written by HW, consumed by the driver. */
struct erdma_eq {
void *qbuf; /* ring buffer, CPU virtual address */
dma_addr_t qbuf_dma_addr; /* ring buffer, device DMA address */
spinlock_t lock;
u32 depth; /* entry count; used as power of two by get_queue_entry() */
u16 ci; /* consumer index, advanced as EQEs are handled */
u16 rsvd;
atomic64_t event_num; /* events processed (statistics) */
atomic64_t notify_num; /* doorbell notifications issued (statistics) */
u64 __iomem *db_addr; /* doorbell register inside the function BAR */
u64 *db_record; /* doorbell shadow in host memory, read by HW */
};
/* Command queue submission ring (driver -> device). */
struct erdma_cmdq_sq {
void *qbuf; /* SQE ring, CPU virtual address */
dma_addr_t qbuf_dma_addr; /* SQE ring, device DMA address */
spinlock_t lock;
u32 depth; /* ring depth in WQE building blocks */
u16 ci; /* consumer index (completed WQEBBs) */
u16 pi; /* producer index (posted WQEBBs) */
u16 wqebb_cnt; /* WQE building blocks consumed per command */
u64 *db_record; /* doorbell shadow in host memory */
};
/* Command queue completion ring (device -> driver). */
struct erdma_cmdq_cq {
void *qbuf; /* CQE ring, CPU virtual address */
dma_addr_t qbuf_dma_addr; /* CQE ring, device DMA address */
spinlock_t lock;
u32 depth; /* ring depth; mirrors the SQ depth */
u32 ci; /* consumer index */
u32 cmdsn; /* command sequence number carried in the CQ doorbell */
u64 *db_record; /* doorbell shadow in host memory */
atomic64_t armed_num; /* times the CQ was re-armed (statistics) */
};
/* Lifecycle of one command tracked through erdma_comp_wait::cmd_status. */
enum {
ERDMA_CMD_STATUS_INIT,
ERDMA_CMD_STATUS_ISSUED,
ERDMA_CMD_STATUS_FINISHED,
ERDMA_CMD_STATUS_TIMEOUT
};
/* Per-command wait context; one per outstanding command. */
struct erdma_comp_wait {
struct completion wait_event; /* completed from the CQ handler in event mode */
u32 cmd_status;
u32 ctx_id; /* index into erdma_cmdq::wait_pool, echoed back in the CQE */
u16 sq_pi; /* SQ producer index at submission time */
u8 comp_status; /* HW syndrome from the CQE header; 0 on success */
u8 rsvd;
u32 comp_data[4]; /* response payload copied out of the CQE */
};
/* Bit numbers for erdma_cmdq::state. */
enum {
ERDMA_CMDQ_STATE_OK_BIT = 0,
ERDMA_CMDQ_STATE_TIMEOUT_BIT = 1,
ERDMA_CMDQ_STATE_CTX_ERR_BIT = 2,
};
#define ERDMA_CMDQ_TIMEOUT_MS 15000 /* per-command completion timeout */
#define ERDMA_REG_ACCESS_WAIT_MS 20 /* delay between device-ready register polls */
#define ERDMA_WAIT_DEV_DONE_CNT 500 /* max polls while waiting for device init */
/* Command queue: SQ + CQ + EQ triple used for driver/firmware commands. */
struct erdma_cmdq {
unsigned long *comp_wait_bitmap; /* allocation bitmap over wait_pool */
struct erdma_comp_wait *wait_pool;
spinlock_t lock; /* protects comp_wait_bitmap */
bool use_event; /* false: poll for completions; true: wait on EQ events */
struct erdma_cmdq_sq sq;
struct erdma_cmdq_cq cq;
struct erdma_eq eq;
unsigned long state; /* ERDMA_CMDQ_STATE_* bits */
struct semaphore credits; /* limits commands in flight to max_outstandings */
u16 max_outstandings;
};
#define COMPROMISE_CC ERDMA_CC_CUBIC /* fallback congestion-control algorithm */
/* Congestion-control algorithms selectable per device. */
enum erdma_cc_alg {
ERDMA_CC_NEWRENO = 0,
ERDMA_CC_CUBIC,
ERDMA_CC_HPCC_RTT,
ERDMA_CC_HPCC_ECN,
ERDMA_CC_HPCC_INT,
ERDMA_CC_METHODS_NUM
};
/* Device capabilities and limits, mostly reported by firmware. */
struct erdma_devattr {
u32 fw_version;
unsigned char peer_addr[ETH_ALEN];
int numa_node;
enum erdma_cc_alg cc; /* active congestion-control algorithm */
u32 grp_num;
u32 irq_num;
bool disable_dwqe; /* direct WQE (doorbell write of the WQE) disabled */
u16 dwqe_pages;
u16 dwqe_entries;
u32 max_qp;
u32 max_send_wr;
u32 max_recv_wr;
u32 max_ord; /* max outbound RDMA-read depth */
u32 max_ird; /* max inbound RDMA-read depth */
u32 max_send_sge;
u32 max_recv_sge;
u32 max_sge_rd;
u32 max_cq;
u32 max_cqe;
u64 max_mr_size;
u32 max_mr;
u32 max_pd;
u32 max_mw;
u32 local_dma_key;
};
#define ERDMA_IRQNAME_SIZE 50
/* One MSI-X vector plus its affinity bookkeeping. */
struct erdma_irq {
char name[ERDMA_IRQNAME_SIZE];
u32 msix_vector;
cpumask_t affinity_hint_mask;
};
/* Completion EQ with its interrupt and bottom-half context. */
struct erdma_eq_cb {
bool ready; /* set once the EQ is fully initialized */
void *dev; /* All EQs use this fields to get erdma_dev struct */
struct erdma_irq irq;
struct erdma_eq eq;
struct tasklet_struct tasklet; /* bottom half for CEQ processing */
};
/* Simple bitmap-based ID allocator (PDs, STags, ...). */
struct erdma_resource_cb {
unsigned long *bitmap;
spinlock_t lock;
u32 next_alloc_idx; /* rotating search start for next allocation */
u32 max_cap;
};
/* Indices into erdma_dev::res_cb. */
enum {
ERDMA_RES_TYPE_PD = 0,
ERDMA_RES_TYPE_STAG_IDX = 1,
ERDMA_RES_CNT = 2,
};
#define ERDMA_EXTRA_BUFFER_SIZE ERDMA_DB_SIZE
/* Queue buffers carry the doorbell record right after the ring itself. */
#define WARPPED_BUFSIZE(size) ((size) + ERDMA_EXTRA_BUFFER_SIZE)
/* Per-adapter device state; embeds the ib_device for the RDMA core. */
struct erdma_dev {
struct ib_device ibdev;
struct net_device *netdev; /* paired Ethernet device */
struct pci_dev *pdev;
struct notifier_block netdev_nb; /* tracks netdev state changes */
resource_size_t func_bar_addr;
resource_size_t func_bar_len;
u8 __iomem *func_bar; /* mapped function BAR; all MMIO goes through it */
struct erdma_devattr attrs;
/* physical port state (only one port per device) */
enum ib_port_state state;
/* cmdq and aeq use the same msix vector */
struct erdma_irq comm_irq;
struct erdma_cmdq cmdq;
struct erdma_eq aeq; /* async event queue */
struct erdma_eq_cb ceqs[ERDMA_NUM_MSIX_VEC - 1]; /* completion EQs */
spinlock_t lock;
struct erdma_resource_cb res_cb[ERDMA_RES_CNT];
struct xarray qp_xa; /* QPN -> erdma_qp lookup */
struct xarray cq_xa; /* CQN -> erdma_cq lookup */
u32 next_alloc_qpn;
u32 next_alloc_cqn;
spinlock_t db_bitmap_lock;
/* We provide max 64 uContexts that each has one SQ doorbell Page. */
DECLARE_BITMAP(sdb_page, ERDMA_DWQE_TYPE0_CNT);
/*
 * We provide max 496 uContexts that each has one SQ normal Db,
 * and one directWQE db
 */
DECLARE_BITMAP(sdb_entry, ERDMA_DWQE_TYPE1_CNT);
atomic_t num_ctx; /* live user contexts */
struct list_head cep_list; /* connection endpoints owned by this device */
};
/*
 * Return the entry at @idx in a power-of-two ring whose slots are
 * 2^@shift bytes wide.  @idx may run past @depth; it wraps via masking.
 */
static inline void *get_queue_entry(void *qbuf, u32 idx, u32 depth, u32 shift)
{
	u32 pos = idx & (depth - 1);

	return qbuf + (pos << shift);
}
/* Map a generic ib_device back to its containing erdma_dev. */
static inline struct erdma_dev *to_edev(struct ib_device *ibdev)
{
return container_of(ibdev, struct erdma_dev, ibdev);
}
/* 32-bit MMIO read from the function BAR at offset @reg. */
static inline u32 erdma_reg_read32(struct erdma_dev *dev, u32 reg)
{
return readl(dev->func_bar + reg);
}
/* 64-bit MMIO read from the function BAR at offset @reg. */
static inline u64 erdma_reg_read64(struct erdma_dev *dev, u32 reg)
{
return readq(dev->func_bar + reg);
}
/* 32-bit MMIO write to the function BAR at offset @reg. */
static inline void erdma_reg_write32(struct erdma_dev *dev, u32 reg, u32 value)
{
writel(value, dev->func_bar + reg);
}
/* 64-bit MMIO write to the function BAR at offset @reg. */
static inline void erdma_reg_write64(struct erdma_dev *dev, u32 reg, u64 value)
{
writeq(value, dev->func_bar + reg);
}
/*
 * Read a 32-bit register and extract one bitfield from it.
 * NOTE(review): "filed" is a typo for "field" but the spelling is part of
 * the in-tree API used by callers (e.g. erdma_cmdq_init), so it stays.
 */
static inline u32 erdma_reg_read32_filed(struct erdma_dev *dev, u32 reg,
u32 filed_mask)
{
u32 val = erdma_reg_read32(dev, reg);
return FIELD_GET(filed_mask, val);
}
/* Command-queue lifecycle and submission (erdma_cmdq.c). */
int erdma_cmdq_init(struct erdma_dev *dev);
void erdma_finish_cmdq_init(struct erdma_dev *dev);
void erdma_cmdq_destroy(struct erdma_dev *dev);
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op);
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
u64 *resp0, u64 *resp1);
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq);
/* Event-queue management and handlers (erdma_eq.c). */
int erdma_ceqs_init(struct erdma_dev *dev);
void erdma_ceqs_uninit(struct erdma_dev *dev);
void notify_eq(struct erdma_eq *eq);
void *get_next_valid_eqe(struct erdma_eq *eq);
int erdma_aeq_init(struct erdma_dev *dev);
void erdma_aeq_destroy(struct erdma_dev *dev);
void erdma_aeq_event_handler(struct erdma_dev *dev);
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,167 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Greg Joyce <greg@opengridcomputing.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
/* Copyright (c) 2017, Open Grid Computing, Inc. */
#ifndef __ERDMA_CM_H__
#define __ERDMA_CM_H__
#include <linux/tcp.h>
#include <net/sock.h>
#include <rdma/iw_cm.h>
/* iWARP MPA (Marker PDU Aligned framing) protocol definitions. */
#define MPA_REVISION_EXT_1 129 /* vendor-extended MPA revision used by ERDMA */
#define MPA_MAX_PRIVDATA RDMA_MAX_PRIVATE_DATA
#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"
#define MPA_KEY_SIZE 16
#define MPA_DEFAULT_HDR_LEN 28
/* Trailer of the MPA request/reply header; all fields big-endian on the wire. */
struct mpa_rr_params {
__be16 bits;
__be16 pd_len; /* length of private data that follows the header */
};
/*
 * MPA request/response Hdr bits & fields
 */
enum {
MPA_RR_FLAG_MARKERS = __cpu_to_be16(0x8000),
MPA_RR_FLAG_CRC = __cpu_to_be16(0x4000),
MPA_RR_FLAG_REJECT = __cpu_to_be16(0x2000),
MPA_RR_RESERVED = __cpu_to_be16(0x1f00),
MPA_RR_MASK_REVISION = __cpu_to_be16(0x00ff)
};
/*
 * MPA request/reply header
 */
struct mpa_rr {
u8 key[16]; /* MPA_KEY_REQ or MPA_KEY_REP */
struct mpa_rr_params params;
};
/* ERDMA vendor extension appended to the MPA exchange. */
struct erdma_mpa_ext {
__be32 cookie;
__be32 bits;
};
enum {
MPA_EXT_FLAG_CC = cpu_to_be32(0x0000000f), /* congestion-control negotiation bits */
};
/* Receive-side state of an in-progress MPA exchange. */
struct erdma_mpa_info {
struct mpa_rr hdr; /* peer mpa hdr in host byte order */
struct erdma_mpa_ext ext_data;
char *pdata; /* private data buffer, pd_len bytes */
int bytes_rcvd; /* progress counter for partial receives */
};
/* Saved socket callbacks so they can be restored on teardown. */
struct erdma_sk_upcalls {
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk, int bytes);
void (*sk_error_report)(struct sock *sk);
};
struct erdma_dev;
/* Connection endpoint state machine states. */
enum erdma_cep_state {
ERDMA_EPSTATE_IDLE = 1,
ERDMA_EPSTATE_LISTENING,
ERDMA_EPSTATE_CONNECTING,
ERDMA_EPSTATE_AWAIT_MPAREQ, /* passive side: waiting for peer's MPA request */
ERDMA_EPSTATE_RECVD_MPAREQ,
ERDMA_EPSTATE_AWAIT_MPAREP, /* active side: waiting for peer's MPA reply */
ERDMA_EPSTATE_RDMA_MODE, /* MPA negotiation done, QP carries RDMA traffic */
ERDMA_EPSTATE_CLOSED
};
/* Connection endpoint: one per iWARP connection (or listener). */
struct erdma_cep {
struct iw_cm_id *cm_id; /* iw_cm binding; NULL once released */
struct erdma_dev *dev;
struct list_head devq; /* link in erdma_dev::cep_list */
spinlock_t lock;
struct kref ref;
int in_use;
wait_queue_head_t waitq;
enum erdma_cep_state state;
struct list_head listenq; /* children of a listening CEP */
struct erdma_cep *listen_cep; /* parent listener, if accepted from one */
struct erdma_qp *qp;
struct socket *sock; /* underlying TCP socket */
struct erdma_cm_work *mpa_timer; /* pending MPA/connect timeout work */
struct list_head work_freelist; /* preallocated erdma_cm_work entries */
struct erdma_mpa_info mpa;
int ord;
int ird;
int pd_len;
/* hold user's private data. */
void *private_data;
/* Saved upcalls of socket llp.sock */
void (*sk_state_change)(struct sock *sk);
void (*sk_data_ready)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
};
#define MPAREQ_TIMEOUT (HZ * 20)
#define MPAREP_TIMEOUT (HZ * 10)
#define CONNECT_TIMEOUT (HZ * 10)
/* Deferred CM work item types. */
enum erdma_work_type {
ERDMA_CM_WORK_ACCEPT = 1,
ERDMA_CM_WORK_READ_MPAHDR,
ERDMA_CM_WORK_CLOSE_LLP, /* close socket */
ERDMA_CM_WORK_PEER_CLOSE, /* socket indicated peer close */
ERDMA_CM_WORK_MPATIMEOUT,
ERDMA_CM_WORK_CONNECTED,
ERDMA_CM_WORK_CONNECTTIMEOUT
};
/* One queued unit of connection-management work, bound to a CEP. */
struct erdma_cm_work {
struct delayed_work work;
struct list_head list; /* link in erdma_cep::work_freelist */
enum erdma_work_type type;
struct erdma_cep *cep;
};
#define to_sockaddr_in(a) (*(struct sockaddr_in *)(&(a)))
/* Fetch the remote address of a connected socket. */
static inline int getname_peer(struct socket *s, struct sockaddr_storage *a)
{
return s->ops->getname(s, (struct sockaddr *)a, 1);
}
/* Fetch the local address of a socket. */
static inline int getname_local(struct socket *s, struct sockaddr_storage *a)
{
return s->ops->getname(s, (struct sockaddr *)a, 0);
}
/* iw_cm operations implemented in erdma_cm.c. */
int erdma_connect(struct iw_cm_id *id, struct iw_cm_conn_param *param);
int erdma_accept(struct iw_cm_id *id, struct iw_cm_conn_param *param);
int erdma_reject(struct iw_cm_id *id, const void *pdata, u8 plen);
int erdma_create_listen(struct iw_cm_id *id, int backlog);
int erdma_destroy_listen(struct iw_cm_id *id);
/* NOTE(review): parameter name "ceq" looks like a typo for "cep" — confirm. */
void erdma_cep_get(struct erdma_cep *ceq);
void erdma_cep_put(struct erdma_cep *ceq);
int erdma_cm_queue_work(struct erdma_cep *ceq, enum erdma_work_type type);
int erdma_cm_init(void);
void erdma_cm_exit(void);
#define sk_to_cep(sk) ((struct erdma_cep *)((sk)->sk_user_data))
#endif

View File

@ -0,0 +1,493 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>
#include "erdma.h"
#include "erdma_hw.h"
#include "erdma_verbs.h"
/* Ring the CMDQ completion-queue doorbell, re-arming it for notification. */
static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
{
struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
*cmdq->cq.db_record = db_data; /* host-memory shadow for the device */
writeq(db_data, dev->func_bar + ERDMA_CMDQ_CQDB_REG);
atomic64_inc(&cmdq->cq.armed_num);
}
/* Ring the CMDQ submission-queue doorbell to publish SQEs up to sq.pi. */
static void kick_cmdq_db(struct erdma_cmdq *cmdq)
{
struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
*cmdq->sq.db_record = db_data; /* host-memory shadow for the device */
writeq(db_data, dev->func_bar + ERDMA_CMDQ_SQDB_REG);
}
/*
 * Claim a free command-wait slot from the pool.
 * Returns the slot, or ERR_PTR(-ENOMEM) if all slots are in use.
 */
static struct erdma_comp_wait *get_comp_wait(struct erdma_cmdq *cmdq)
{
	struct erdma_comp_wait *wait = ERR_PTR(-ENOMEM);
	int idx;

	spin_lock(&cmdq->lock);
	idx = find_first_zero_bit(cmdq->comp_wait_bitmap,
				  cmdq->max_outstandings);
	if (idx != cmdq->max_outstandings) {
		__set_bit(idx, cmdq->comp_wait_bitmap);
		wait = &cmdq->wait_pool[idx];
	}
	spin_unlock(&cmdq->lock);

	return wait;
}
/* Return a command-wait slot to the pool; warns if it was already free. */
static void put_comp_wait(struct erdma_cmdq *cmdq,
struct erdma_comp_wait *comp_wait)
{
int used;
cmdq->wait_pool[comp_wait->ctx_id].cmd_status = ERDMA_CMD_STATUS_INIT;
spin_lock(&cmdq->lock);
used = __test_and_clear_bit(comp_wait->ctx_id, cmdq->comp_wait_bitmap);
spin_unlock(&cmdq->lock);
WARN_ON(!used); /* double free of a wait slot */
}
/*
 * Allocate the wait-slot pool and its allocation bitmap.
 * Both are devm-managed, so no explicit teardown path is needed.
 * Returns 0 or -ENOMEM.
 */
static int erdma_cmdq_wait_res_init(struct erdma_dev *dev,
struct erdma_cmdq *cmdq)
{
int i;
cmdq->wait_pool =
devm_kcalloc(&dev->pdev->dev, cmdq->max_outstandings,
sizeof(struct erdma_comp_wait), GFP_KERNEL);
if (!cmdq->wait_pool)
return -ENOMEM;
spin_lock_init(&cmdq->lock);
cmdq->comp_wait_bitmap = devm_bitmap_zalloc(
&dev->pdev->dev, cmdq->max_outstandings, GFP_KERNEL);
if (!cmdq->comp_wait_bitmap)
return -ENOMEM;
/* ctx_id is the slot's own index; it is echoed back in each CQE. */
for (i = 0; i < cmdq->max_outstandings; i++) {
init_completion(&cmdq->wait_pool[i].wait_event);
cmdq->wait_pool[i].ctx_id = i;
}
return 0;
}
/*
 * Allocate the CMDQ submission queue and program its address registers.
 * The doorbell record lives in the extra bytes appended by
 * WARPPED_BUFSIZE() right after the WQEBB ring.
 * Returns 0 or -ENOMEM.
 */
static int erdma_cmdq_sq_init(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
struct erdma_cmdq_sq *sq = &cmdq->sq;
u32 buf_size;
sq->wqebb_cnt = SQEBB_COUNT(ERDMA_CMDQ_SQE_SIZE);
sq->depth = cmdq->max_outstandings * sq->wqebb_cnt;
buf_size = sq->depth << SQEBB_SHIFT;
sq->qbuf =
dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
&sq->qbuf_dma_addr, GFP_KERNEL);
if (!sq->qbuf)
return -ENOMEM;
sq->db_record = (u64 *)(sq->qbuf + buf_size);
spin_lock_init(&sq->lock);
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_H_REG,
upper_32_bits(sq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_SQ_ADDR_L_REG,
lower_32_bits(sq->qbuf_dma_addr));
erdma_reg_write32(dev, ERDMA_REGS_CMDQ_DEPTH_REG, sq->depth);
erdma_reg_write64(dev, ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG,
sq->qbuf_dma_addr + buf_size);
return 0;
}
/*
 * Allocate the CMDQ completion queue and program its address registers.
 * CQ depth mirrors the SQ depth; the doorbell record is carved out of the
 * extra bytes appended by WARPPED_BUFSIZE() right after the CQE ring.
 * Returns 0 or -ENOMEM.
 */
static int erdma_cmdq_cq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_cmdq_cq *cq = &cmdq->cq;
	u32 buf_size;

	cq->depth = cmdq->sq.depth;
	buf_size = cq->depth << CQE_SHIFT;

	/*
	 * dma_alloc_coherent() always returns zeroed memory, so the
	 * __GFP_ZERO flag the original code passed was redundant and has
	 * been dropped (this also matches erdma_cmdq_sq_init()).
	 */
	cq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &cq->qbuf_dma_addr, GFP_KERNEL);
	if (!cq->qbuf)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->db_record = (u64 *)(cq->qbuf + buf_size);
	atomic64_set(&cq->armed_num, 0);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_H_REG,
			  upper_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_CQ_ADDR_L_REG,
			  lower_32_bits(cq->qbuf_dma_addr));
	erdma_reg_write64(dev, ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG,
			  cq->qbuf_dma_addr + buf_size);

	return 0;
}
/*
 * Allocate the CMDQ's private event queue and program its registers.
 * The EQ doorbell is an MMIO register; the doorbell record lives after
 * the EQE ring inside the WARPPED_BUFSIZE() allocation.
 * Returns 0 or -ENOMEM.
 */
static int erdma_cmdq_eq_init(struct erdma_dev *dev)
{
	struct erdma_cmdq *cmdq = &dev->cmdq;
	struct erdma_eq *eq = &cmdq->eq;
	u32 buf_size;

	eq->depth = cmdq->max_outstandings;
	buf_size = eq->depth << EQE_SHIFT;

	/*
	 * dma_alloc_coherent() always returns zeroed memory; the
	 * __GFP_ZERO flag the original code passed was redundant and has
	 * been dropped (this also matches erdma_cmdq_sq_init()).
	 */
	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);

	eq->db_addr =
		(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG);
	eq->db_record = (u64 *)(eq->qbuf + buf_size);

	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_CMDQ_EQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG,
			  eq->qbuf_dma_addr + buf_size);

	return 0;
}
/*
 * Bring up the command queue (wait pool, SQ, CQ, EQ), then kick device
 * init and poll until the device reports ready.
 * Starts in polling mode; erdma_finish_cmdq_init() switches to event mode.
 * Returns 0, -ENOMEM on allocation failure, or -ETIMEDOUT if the device
 * never signals init-done.
 */
int erdma_cmdq_init(struct erdma_dev *dev)
{
int err, i;
struct erdma_cmdq *cmdq = &dev->cmdq;
u32 sts, ctrl;
cmdq->max_outstandings = ERDMA_CMDQ_MAX_OUTSTANDING;
cmdq->use_event = false;
sema_init(&cmdq->credits, cmdq->max_outstandings);
err = erdma_cmdq_wait_res_init(dev, cmdq);
if (err)
return err;
err = erdma_cmdq_sq_init(dev);
if (err)
return err;
err = erdma_cmdq_cq_init(dev);
if (err)
goto err_destroy_sq;
err = erdma_cmdq_eq_init(dev);
if (err)
goto err_destroy_cq;
/* Tell the device to start initialization. */
ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_INIT_MASK, 1);
erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
/* Poll the init-done bit, up to ~10s (500 * 20ms). */
for (i = 0; i < ERDMA_WAIT_DEV_DONE_CNT; i++) {
sts = erdma_reg_read32_filed(dev, ERDMA_REGS_DEV_ST_REG,
ERDMA_REG_DEV_ST_INIT_DONE_MASK);
if (sts)
break;
msleep(ERDMA_REG_ACCESS_WAIT_MS);
}
if (i == ERDMA_WAIT_DEV_DONE_CNT) {
dev_err(&dev->pdev->dev, "wait init done failed.\n");
err = -ETIMEDOUT;
goto err_destroy_eq;
}
set_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
return 0;
err_destroy_eq:
dma_free_coherent(&dev->pdev->dev,
(cmdq->eq.depth << EQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
err_destroy_cq:
dma_free_coherent(&dev->pdev->dev,
(cmdq->cq.depth << CQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
err_destroy_sq:
dma_free_coherent(&dev->pdev->dev,
(cmdq->sq.depth << SQEBB_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
return err;
}
/* Switch the CMDQ from polling to interrupt/event mode and arm its CQ. */
void erdma_finish_cmdq_init(struct erdma_dev *dev)
{
/* after device init successfully, change cmdq to event mode. */
dev->cmdq.use_event = true;
arm_cmdq_cq(&dev->cmdq);
}
/*
 * Tear down the CMDQ: mark it unusable, then free the EQ, SQ and CQ
 * rings (each sized ring + ERDMA_EXTRA_BUFFER_SIZE for the db record).
 * The wait pool and bitmap are devm-managed and freed automatically.
 */
void erdma_cmdq_destroy(struct erdma_dev *dev)
{
struct erdma_cmdq *cmdq = &dev->cmdq;
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
dma_free_coherent(&dev->pdev->dev,
(cmdq->eq.depth << EQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->eq.qbuf, cmdq->eq.qbuf_dma_addr);
dma_free_coherent(&dev->pdev->dev,
(cmdq->sq.depth << SQEBB_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->sq.qbuf, cmdq->sq.qbuf_dma_addr);
dma_free_coherent(&dev->pdev->dev,
(cmdq->cq.depth << CQE_SHIFT) +
ERDMA_EXTRA_BUFFER_SIZE,
cmdq->cq.qbuf, cmdq->cq.qbuf_dma_addr);
}
/*
 * Peek the CQE at the current consumer index; return it only if its
 * owner bit matches the current ring phase (ci & depth), i.e. HW has
 * written it.  Returns NULL when no new CQE is available.
 */
static void *get_next_valid_cmdq_cqe(struct erdma_cmdq *cmdq)
{
__be32 *cqe = get_queue_entry(cmdq->cq.qbuf, cmdq->cq.ci,
cmdq->cq.depth, CQE_SHIFT);
u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
__be32_to_cpu(READ_ONCE(*cqe)));
return owner ^ !!(cmdq->cq.ci & cmdq->cq.depth) ? cqe : NULL;
}
/*
 * Copy a command into the SQ, stamp its header with the new producer
 * index, wait-slot cookie and WQEBB count, then ring the SQ doorbell.
 * Caller holds cmdq->sq.lock.
 */
static void push_cmdq_sqe(struct erdma_cmdq *cmdq, u64 *req, size_t req_len,
struct erdma_comp_wait *comp_wait)
{
__le64 *wqe;
u64 hdr = *req;
comp_wait->cmd_status = ERDMA_CMD_STATUS_ISSUED;
reinit_completion(&comp_wait->wait_event);
comp_wait->sq_pi = cmdq->sq.pi;
wqe = get_queue_entry(cmdq->sq.qbuf, cmdq->sq.pi, cmdq->sq.depth,
SQEBB_SHIFT);
memcpy(wqe, req, req_len);
cmdq->sq.pi += cmdq->sq.wqebb_cnt;
hdr |= FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi) |
FIELD_PREP(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK,
comp_wait->ctx_id) |
FIELD_PREP(ERDMA_CMD_HDR_WQEBB_CNT_MASK, cmdq->sq.wqebb_cnt - 1);
/* Write the header last so HW sees a complete WQE. */
*wqe = cpu_to_le64(hdr);
kick_cmdq_db(cmdq);
}
/*
 * Consume one CMDQ CQE: locate the originating SQE via the index in the
 * CQE, recover the wait slot from the cookie in the SQE header, record
 * status and response data, and (in event mode) wake the waiter.
 * Caller holds cmdq->cq.lock.
 * Returns 0, -EAGAIN when no CQE is ready, or -EIO on a stale CQE.
 */
static int erdma_poll_single_cmd_completion(struct erdma_cmdq *cmdq)
{
struct erdma_comp_wait *comp_wait;
u32 hdr0, sqe_idx;
__be32 *cqe;
u16 ctx_id;
u64 *sqe;
int i;
cqe = get_next_valid_cmdq_cqe(cmdq);
if (!cqe)
return -EAGAIN;
cmdq->cq.ci++;
/* Owner bit checked; make sure the CQE body is visible before reading. */
dma_rmb();
hdr0 = __be32_to_cpu(*cqe);
sqe_idx = __be32_to_cpu(*(cqe + 1));
sqe = get_queue_entry(cmdq->sq.qbuf, sqe_idx, cmdq->sq.depth,
SQEBB_SHIFT);
ctx_id = FIELD_GET(ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK, *sqe);
comp_wait = &cmdq->wait_pool[ctx_id];
if (comp_wait->cmd_status != ERDMA_CMD_STATUS_ISSUED)
return -EIO; /* completion for a command we no longer track */
comp_wait->cmd_status = ERDMA_CMD_STATUS_FINISHED;
comp_wait->comp_status = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, hdr0);
cmdq->sq.ci += cmdq->sq.wqebb_cnt;
for (i = 0; i < 4; i++)
comp_wait->comp_data[i] = __be32_to_cpu(*(cqe + 2 + i));
if (cmdq->use_event)
complete(&comp_wait->wait_event);
return 0;
}
/* Drain all ready CMDQ CQEs under cq.lock; re-arm the CQ in event mode. */
static void erdma_polling_cmd_completions(struct erdma_cmdq *cmdq)
{
unsigned long flags;
u16 comp_num;
spin_lock_irqsave(&cmdq->cq.lock, flags);
/* We must have less than # of max_outstandings
 * completions at one time.
 */
for (comp_num = 0; comp_num < cmdq->max_outstandings; comp_num++)
if (erdma_poll_single_cmd_completion(cmdq))
break;
if (comp_num && cmdq->use_event)
arm_cmdq_cq(cmdq);
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
}
/*
 * EQ-driven entry point (event mode): consume pending EQEs, then poll
 * the CQ for the completions they announced, and re-arm the EQ.
 * No-op unless the CMDQ is healthy and in event mode.
 */
void erdma_cmdq_completion_handler(struct erdma_cmdq *cmdq)
{
int got_event = 0;
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state) ||
!cmdq->use_event)
return;
while (get_next_valid_eqe(&cmdq->eq)) {
cmdq->eq.ci++;
got_event++;
}
if (got_event) {
cmdq->cq.cmdsn++;
erdma_polling_cmd_completions(cmdq);
}
notify_eq(&cmdq->eq);
}
/*
 * Polling-mode wait: repeatedly drain the CQ until this command leaves
 * ISSUED state or @timeout (ms) elapses.  Returns 0 or -ETIME.
 */
static int erdma_poll_cmd_completion(struct erdma_comp_wait *comp_ctx,
struct erdma_cmdq *cmdq, u32 timeout)
{
unsigned long comp_timeout = jiffies + msecs_to_jiffies(timeout);
while (1) {
erdma_polling_cmd_completions(cmdq);
if (comp_ctx->cmd_status != ERDMA_CMD_STATUS_ISSUED)
break;
if (time_is_before_jiffies(comp_timeout))
return -ETIME;
msleep(20);
}
return 0;
}
/*
 * Event-mode wait: block on the slot's completion; on timeout mark the
 * slot TIMEOUT under cq.lock so a late CQE is ignored.  Returns 0 or -ETIME.
 */
static int erdma_wait_cmd_completion(struct erdma_comp_wait *comp_ctx,
struct erdma_cmdq *cmdq, u32 timeout)
{
unsigned long flags = 0;
wait_for_completion_timeout(&comp_ctx->wait_event,
msecs_to_jiffies(timeout));
if (unlikely(comp_ctx->cmd_status != ERDMA_CMD_STATUS_FINISHED)) {
spin_lock_irqsave(&cmdq->cq.lock, flags);
comp_ctx->cmd_status = ERDMA_CMD_STATUS_TIMEOUT;
spin_unlock_irqrestore(&cmdq->cq.lock, flags);
return -ETIME;
}
return 0;
}
/* Fill a command header with its submodule and opcode fields. */
void erdma_cmdq_build_reqhdr(u64 *hdr, u32 mod, u32 op)
{
*hdr = FIELD_PREP(ERDMA_CMD_HDR_SUB_MOD_MASK, mod) |
FIELD_PREP(ERDMA_CMD_HDR_OPCODE_MASK, op);
}
/*
 * Submit one command and wait (event or polling mode) for its completion.
 * @req/@req_size: command payload; first u64 is the header.
 * @resp0/@resp1: optional out-params filled from the CQE response data
 * (both must be non-NULL to receive anything).
 * Returns 0; -ENODEV if the CMDQ is down; -ETIME on timeout (the CMDQ is
 * then marked failed); -EIO if HW reported a non-zero syndrome.
 */
int erdma_post_cmd_wait(struct erdma_cmdq *cmdq, u64 *req, u32 req_size,
u64 *resp0, u64 *resp1)
{
struct erdma_comp_wait *comp_wait;
int ret;
if (!test_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state))
return -ENODEV;
down(&cmdq->credits);
comp_wait = get_comp_wait(cmdq);
if (IS_ERR(comp_wait)) {
/* Bitmap/credits out of sync: treat the CMDQ as broken. */
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
set_bit(ERDMA_CMDQ_STATE_CTX_ERR_BIT, &cmdq->state);
up(&cmdq->credits);
return PTR_ERR(comp_wait);
}
spin_lock(&cmdq->sq.lock);
push_cmdq_sqe(cmdq, req, req_size, comp_wait);
spin_unlock(&cmdq->sq.lock);
if (cmdq->use_event)
ret = erdma_wait_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
else
ret = erdma_poll_cmd_completion(comp_wait, cmdq,
ERDMA_CMDQ_TIMEOUT_MS);
if (ret) {
set_bit(ERDMA_CMDQ_STATE_TIMEOUT_BIT, &cmdq->state);
clear_bit(ERDMA_CMDQ_STATE_OK_BIT, &cmdq->state);
/*
 * NOTE(review): on timeout the wait slot is NOT returned to the
 * pool (put_comp_wait is skipped) — presumably deliberate since
 * HW may still write the completion later; confirm.
 */
goto out;
}
if (comp_wait->comp_status)
ret = -EIO;
if (resp0 && resp1) {
*resp0 = *((u64 *)&comp_wait->comp_data[0]);
*resp1 = *((u64 *)&comp_wait->comp_data[2]);
}
put_comp_wait(cmdq, comp_wait);
out:
up(&cmdq->credits);
return ret;
}

View File

@ -0,0 +1,205 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#include <rdma/ib_verbs.h>
#include "erdma_hw.h"
#include "erdma_verbs.h"
/*
 * Peek the CQE at the consumer index; return it only if its owner bit
 * matches the current ring phase (ci & depth), else NULL.
 */
static void *get_next_valid_cqe(struct erdma_cq *cq)
{
__be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
cq->depth, CQE_SHIFT);
u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
__be32_to_cpu(READ_ONCE(*cqe)));
return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
}
/*
 * Arm the CQ doorbell so the device raises a completion event;
 * @solicited limits notification to solicited completions only.
 */
static void notify_cq(struct erdma_cq *cq, u8 solicited)
{
	u64 db_data;

	db_data = FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
		  FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
		  FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
		  FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
		  FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
		  FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);

	*cq->kern_cq.db_record = db_data;
	writeq(db_data, cq->kern_cq.db);
}
/*
 * ib_device::req_notify_cq: arm the CQ for the requested event type.
 * Returns 1 if IB_CQ_REPORT_MISSED_EVENTS was requested and a CQE is
 * already pending, else 0.
 */
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct erdma_cq *cq = to_ecq(ibcq);
unsigned long irq_flags;
int ret = 0;
spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);
notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
ret = 1;
cq->kern_cq.notify_cnt++;
spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);
return ret;
}
/* Map ERDMA CQE opcodes to ib_wc opcodes, indexed by the HW opcode. */
static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
[ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
[ERDMA_OP_READ] = IB_WC_RDMA_READ,
[ERDMA_OP_SEND] = IB_WC_SEND,
[ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
[ERDMA_OP_RECEIVE] = IB_WC_RECV,
[ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
[ERDMA_OP_RECV_INV] = IB_WC_RECV,
[ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
[ERDMA_OP_INVALIDATE] = IB_WC_LOCAL_INV,
[ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
[ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
[ERDMA_OP_REG_MR] = IB_WC_REG_MR,
[ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
[ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
};
/*
 * Map HW syndromes to ib_wc status plus a vendor error code.
 * NOTE(review): erdma_poll_one_cqe() indexes this array directly by
 * syndrome value, so entry order must match enum erdma_wc_status in
 * erdma_hw.h — confirm when changing either.
 */
static const struct {
enum erdma_wc_status erdma;
enum ib_wc_status base;
enum erdma_vendor_err vendor;
} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
{ ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
{ ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
{ ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
ERDMA_WC_VENDOR_INVALID_RQE },
{ ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
ERDMA_WC_VENDOR_RQE_INVALID_STAG },
{ ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
{ ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
{ ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
ERDMA_WC_VENDOR_RQE_INVALID_PD },
{ ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
ERDMA_WC_VENDOR_RQE_WRAP_ERR },
{ ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
ERDMA_WC_VENDOR_INVALID_SQE },
{ ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
ERDMA_WC_VENDOR_ZERO_ORD },
{ ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
ERDMA_WC_VENDOR_SQE_INVALID_STAG },
{ ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
{ ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
{ ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
ERDMA_WC_VENDOR_SQE_INVALID_PD },
{ ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
ERDMA_WC_VENDOR_SQE_WARP_ERR },
{ ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
{ ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};
#define ERDMA_POLLCQ_NO_QP 1 /* CQE referenced a QP that no longer exists */
/*
 * Consume one CQE and translate it into @wc.
 * Returns 0 on success, -EAGAIN when no CQE is ready, or
 * ERDMA_POLLCQ_NO_QP when the CQE should be skipped (stale QP).
 * Caller holds cq->kern_cq.lock.
 */
static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
{
struct erdma_dev *dev = to_edev(cq->ibcq.device);
u8 opcode, syndrome, qtype;
struct erdma_kqp *kern_qp;
struct erdma_cqe *cqe;
struct erdma_qp *qp;
u16 wqe_idx, depth;
u32 qpn, cqe_hdr;
u64 *id_table;
u64 *wqe_hdr;
cqe = get_next_valid_cqe(cq);
if (!cqe)
return -EAGAIN;
cq->kern_cq.ci++;
/* cqbuf should be ready when we poll */
dma_rmb();
qpn = be32_to_cpu(cqe->qpn);
wqe_idx = be32_to_cpu(cqe->qe_idx);
cqe_hdr = be32_to_cpu(cqe->hdr);
qp = find_qp_by_qpn(dev, qpn);
if (!qp)
return ERDMA_POLLCQ_NO_QP;
kern_qp = &qp->kern_qp;
qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);
if (qtype == ERDMA_CQE_QTYPE_SQ) {
/* SQ completion: advance sq_ci past the completed WQE's WQEBBs. */
id_table = kern_qp->swr_tbl;
depth = qp->attrs.sq_size;
wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
qp->attrs.sq_size, SQEBB_SHIFT);
kern_qp->sq_ci =
FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
wqe_idx + 1;
} else {
id_table = kern_qp->rwr_tbl;
depth = qp->attrs.rq_size;
}
wc->wr_id = id_table[wqe_idx & (depth - 1)];
wc->byte_len = be32_to_cpu(cqe->size);
wc->wc_flags = 0;
wc->opcode = wc_mapping_table[opcode];
if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
wc->wc_flags |= IB_WC_WITH_IMM;
} else if (opcode == ERDMA_OP_RECV_INV) {
wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
wc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
/* Clamp unknown syndromes so the status table lookup stays in bounds. */
if (syndrome >= ERDMA_NUM_WC_STATUS)
syndrome = ERDMA_WC_GENERAL_ERR;
wc->status = map_cqe_status[syndrome].base;
wc->vendor_err = map_cqe_status[syndrome].vendor;
wc->qp = &qp->ibqp;
return 0;
}
/*
 * ib_device::poll_cq: harvest up to @num_entries completions into @wc.
 * Returns the number of entries filled.  CQEs whose QP has vanished are
 * consumed and skipped without filling a wc entry.
 */
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct erdma_cq *cq = to_ecq(ibcq);
	unsigned long flags;
	int polled = 0;

	spin_lock_irqsave(&cq->kern_cq.lock, flags);

	while (polled < num_entries) {
		int rv = erdma_poll_one_cqe(cq, wc + polled);

		if (rv == -EAGAIN) /* ring empty */
			break;
		if (rv) /* stale CQE, consume and skip */
			continue;
		polled++;
	}

	spin_unlock_irqrestore(&cq->kern_cq.lock, flags);

	return polled;
}

View File

@ -0,0 +1,329 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>
#include "erdma.h"
#include "erdma_hw.h"
#include "erdma_verbs.h"
#define MAX_POLL_CHUNK_SIZE 16
/* Ring an EQ doorbell: publish the consumer index and re-arm the EQ. */
void notify_eq(struct erdma_eq *eq)
{
u64 db_data = FIELD_PREP(ERDMA_EQDB_CI_MASK, eq->ci) |
FIELD_PREP(ERDMA_EQDB_ARM_MASK, 1);
*eq->db_record = db_data; /* host-memory shadow for the device */
writeq(db_data, eq->db_addr);
atomic64_inc(&eq->notify_num);
}
/*
 * Peek the EQE at the consumer index; return it only if its owner bit
 * matches the current ring phase (ci & depth), else NULL.
 */
void *get_next_valid_eqe(struct erdma_eq *eq)
{
u64 *eqe = get_queue_entry(eq->qbuf, eq->ci, eq->depth, EQE_SHIFT);
u32 owner = FIELD_GET(ERDMA_CEQE_HDR_O_MASK, READ_ONCE(*eqe));
return owner ^ !!(eq->ci & eq->depth) ? eqe : NULL;
}
/*
 * Drain the async event queue (bounded to MAX_POLL_CHUNK_SIZE per call)
 * and dispatch each event to the affected CQ's or QP's event handler,
 * then re-arm the AEQ.  Events for vanished CQs/QPs are dropped.
 */
void erdma_aeq_event_handler(struct erdma_dev *dev)
{
struct erdma_aeqe *aeqe;
u32 cqn, qpn;
struct erdma_qp *qp;
struct erdma_cq *cq;
struct ib_event event;
u32 poll_cnt = 0;
memset(&event, 0, sizeof(event));
while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
aeqe = get_next_valid_eqe(&dev->aeq);
if (!aeqe)
break;
/* Owner bit checked; ensure the AEQE body is visible. */
dma_rmb();
dev->aeq.ci++;
atomic64_inc(&dev->aeq.event_num);
poll_cnt++;
if (FIELD_GET(ERDMA_AEQE_HDR_TYPE_MASK,
le32_to_cpu(aeqe->hdr)) == ERDMA_AE_TYPE_CQ_ERR) {
cqn = le32_to_cpu(aeqe->event_data0);
cq = find_cq_by_cqn(dev, cqn);
if (!cq)
continue;
event.device = cq->ibcq.device;
event.element.cq = &cq->ibcq;
event.event = IB_EVENT_CQ_ERR;
if (cq->ibcq.event_handler)
cq->ibcq.event_handler(&event,
cq->ibcq.cq_context);
} else {
/* All non-CQ async events are reported as fatal QP errors. */
qpn = le32_to_cpu(aeqe->event_data0);
qp = find_qp_by_qpn(dev, qpn);
if (!qp)
continue;
event.device = qp->ibqp.device;
event.element.qp = &qp->ibqp;
event.event = IB_EVENT_QP_FATAL;
if (qp->ibqp.event_handler)
qp->ibqp.event_handler(&event,
qp->ibqp.qp_context);
}
}
notify_eq(&dev->aeq);
}
/*
 * Allocate the async event queue and program its address registers.
 * The doorbell record lives in the extra bytes appended by
 * WARPPED_BUFSIZE() right after the EQE ring.
 * Returns 0 or -ENOMEM.
 */
int erdma_aeq_init(struct erdma_dev *dev)
{
	struct erdma_eq *eq = &dev->aeq;
	u32 buf_size;

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	buf_size = eq->depth << EQE_SHIFT;

	/*
	 * dma_alloc_coherent() always returns zeroed memory; the
	 * __GFP_ZERO flag the original code passed was redundant and has
	 * been dropped.
	 */
	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->db_addr = (u64 __iomem *)(dev->func_bar + ERDMA_REGS_AEQ_DB_REG);
	eq->db_record = (u64 *)(eq->qbuf + buf_size);

	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_H_REG,
			  upper_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_ADDR_L_REG,
			  lower_32_bits(eq->qbuf_dma_addr));
	erdma_reg_write32(dev, ERDMA_REGS_AEQ_DEPTH_REG, eq->depth);
	erdma_reg_write64(dev, ERDMA_AEQ_DB_HOST_ADDR_REG,
			  eq->qbuf_dma_addr + buf_size);

	return 0;
}
/* Free the async event queue ring (including its doorbell record). */
void erdma_aeq_destroy(struct erdma_dev *dev)
{
struct erdma_eq *eq = &dev->aeq;
dma_free_coherent(&dev->pdev->dev,
WARPPED_BUFSIZE(eq->depth << EQE_SHIFT), eq->qbuf,
eq->qbuf_dma_addr);
}
/*
 * Tasklet body for one CEQ: drain up to MAX_POLL_CHUNK_SIZE CEQEs,
 * dispatch each to the owning CQ's completion handler, then re-arm.
 * No-op until the CEQ is marked ready.
 */
void erdma_ceq_completion_handler(struct erdma_eq_cb *ceq_cb)
{
struct erdma_dev *dev = ceq_cb->dev;
struct erdma_cq *cq;
u32 poll_cnt = 0;
u64 *ceqe;
int cqn;
if (!ceq_cb->ready)
return;
while (poll_cnt < MAX_POLL_CHUNK_SIZE) {
ceqe = get_next_valid_eqe(&ceq_cb->eq);
if (!ceqe)
break;
/* Owner bit checked; ensure the CEQE body is visible. */
dma_rmb();
ceq_cb->eq.ci++;
poll_cnt++;
cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));
cq = find_cq_by_cqn(dev, cqn);
if (!cq)
continue; /* CQ destroyed; drop the event */
if (rdma_is_kernel_res(&cq->ibcq.res))
cq->kern_cq.cmdsn++;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
notify_eq(&ceq_cb->eq);
}
static irqreturn_t erdma_intr_ceq_handler(int irq, void *data)
{
struct erdma_eq_cb *ceq_cb = data;
tasklet_schedule(&ceq_cb->tasklet);
return IRQ_HANDLED;
}
/* Tasklet body: run the CEQ poll loop in softirq context. */
static void erdma_intr_ceq_task(unsigned long data)
{
	erdma_ceq_completion_handler((struct erdma_eq_cb *)data);
}
/*
 * Request the MSI-X vector for one CEQ and set up its tasklet.
 *
 * Vector 0 is reserved for the CMDQ/AEQ, so CEQ @ceqn uses vector
 * ceqn + 1. An affinity hint spreads CEQ vectors across CPUs local to
 * the device's NUMA node.
 */
static int erdma_set_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];
	int err;

	snprintf(eqc->irq.name, ERDMA_IRQNAME_SIZE, "erdma-ceq%u@pci:%s", ceqn,
		 pci_name(dev->pdev));
	eqc->irq.msix_vector = pci_irq_vector(dev->pdev, ceqn + 1);

	/* Tasklet must exist before the IRQ can fire and schedule it. */
	tasklet_init(&dev->ceqs[ceqn].tasklet, erdma_intr_ceq_task,
		     (unsigned long)&dev->ceqs[ceqn]);

	cpumask_set_cpu(cpumask_local_spread(ceqn + 1, dev->attrs.numa_node),
			&eqc->irq.affinity_hint_mask);

	err = request_irq(eqc->irq.msix_vector, erdma_intr_ceq_handler, 0,
			  eqc->irq.name, eqc);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to request_irq(%d)\n", err);
		return err;
	}

	irq_set_affinity_hint(eqc->irq.msix_vector,
			      &eqc->irq.affinity_hint_mask);
	return 0;
}
/* Release one CEQ's MSI-X vector, clearing its affinity hint first. */
static void erdma_free_ceq_irq(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq_cb *eqc = &dev->ceqs[ceqn];

	irq_set_affinity_hint(eqc->irq.msix_vector, NULL);
	free_irq(eqc->irq.msix_vector, eqc);
}
/*
 * Issue a CREATE_EQ command for a CEQ over the command queue.
 *
 * @eqn: hardware EQ number (already offset by 1; 0 is the CMDQ-EQ).
 * The doorbell-record DMA address is the word following the EQEs in the
 * queue buffer. Returns the CMDQ completion status.
 */
static int create_eq_cmd(struct erdma_dev *dev, u32 eqn, struct erdma_eq *eq)
{
	struct erdma_cmdq_create_eq_req req;
	dma_addr_t db_info_dma_addr;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_CREATE_EQ);
	req.eqn = eqn;
	req.depth = ilog2(eq->depth);	/* HW takes log2 of the depth. */
	req.qbuf_addr = eq->qbuf_dma_addr;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	/* Vector index is the same as EQN. */
	req.vector_idx = eqn;
	db_info_dma_addr = eq->qbuf_dma_addr + (eq->depth << EQE_SHIFT);
	req.db_dma_addr_l = lower_32_bits(db_info_dma_addr);
	req.db_dma_addr_h = upper_32_bits(db_info_dma_addr);

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req,
				   sizeof(struct erdma_cmdq_create_eq_req),
				   NULL, NULL);
}
/*
 * Allocate one completion event queue and create it in hardware.
 *
 * The DMA buffer is over-allocated (WARPPED_BUFSIZE) so that the
 * doorbell record follows the EQ entries in the same allocation.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the CMDQ
 * error from CREATE_EQ. On CREATE_EQ failure the buffer is freed here,
 * because erdma_ceqs_init() only uninitializes CEQs that fully
 * succeeded (j < i), so leaving the buffer allocated would leak it.
 */
static int erdma_ceq_init_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	int ret;

	eq->qbuf =
		dma_alloc_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				   &eq->qbuf_dma_addr, GFP_KERNEL | __GFP_ZERO);
	if (!eq->qbuf)
		return -ENOMEM;

	spin_lock_init(&eq->lock);
	atomic64_set(&eq->event_num, 0);
	atomic64_set(&eq->notify_num, 0);

	eq->depth = ERDMA_DEFAULT_EQ_DEPTH;
	/* Per-CEQ doorbell slot in BAR0; slot 0 belongs to the CMDQ-EQ. */
	eq->db_addr =
		(u64 __iomem *)(dev->func_bar + ERDMA_REGS_CEQ_DB_BASE_REG +
				(ceqn + 1) * ERDMA_DB_SIZE);
	eq->db_record = (u64 *)(eq->qbuf + buf_size);
	eq->ci = 0;
	dev->ceqs[ceqn].dev = dev;

	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	ret = create_eq_cmd(dev, ceqn + 1, eq);
	if (ret) {
		/* Don't leak the DMA buffer if HW refused the queue. */
		dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size),
				  eq->qbuf, eq->qbuf_dma_addr);
		eq->qbuf = NULL;
		dev->ceqs[ceqn].ready = false;
		return ret;
	}

	dev->ceqs[ceqn].ready = true;

	return 0;
}
/*
 * Destroy one CEQ in hardware and free its DMA buffer.
 *
 * If the DESTROY_EQ command fails, the buffer is intentionally NOT
 * freed: hardware may still write into it, so leaking is safer than a
 * use-after-free by the device.
 */
static void erdma_ceq_uninit_one(struct erdma_dev *dev, u16 ceqn)
{
	struct erdma_eq *eq = &dev->ceqs[ceqn].eq;
	u32 buf_size = ERDMA_DEFAULT_EQ_DEPTH << EQE_SHIFT;
	struct erdma_cmdq_destroy_eq_req req;
	int err;

	dev->ceqs[ceqn].ready = 0;

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_DESTROY_EQ);
	/* CEQ indexed from 1, 0 rsvd for CMDQ-EQ. */
	req.eqn = ceqn + 1;
	req.qtype = ERDMA_EQ_TYPE_CEQ;
	req.vector_idx = ceqn + 1;

	err = erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				  NULL);
	if (err)
		return;

	dma_free_coherent(&dev->pdev->dev, WARPPED_BUFSIZE(buf_size), eq->qbuf,
			  eq->qbuf_dma_addr);
}
/*
 * Create all completion EQs and hook up their IRQs.
 *
 * One CEQ per MSI-X vector, minus vector 0 which serves the CMDQ/AEQ.
 * On failure, already-initialized CEQs (indices < i) are rolled back;
 * the CEQ that failed inside erdma_ceq_init_one() cleans up after
 * itself.
 */
int erdma_ceqs_init(struct erdma_dev *dev)
{
	u32 i, j;
	int err;

	for (i = 0; i < dev->attrs.irq_num - 1; i++) {
		err = erdma_ceq_init_one(dev, i);
		if (err)
			goto out_err;

		err = erdma_set_ceq_irq(dev, i);
		if (err) {
			erdma_ceq_uninit_one(dev, i);
			goto out_err;
		}
	}

	return 0;

out_err:
	for (j = 0; j < i; j++) {
		erdma_free_ceq_irq(dev, j);
		erdma_ceq_uninit_one(dev, j);
	}

	return err;
}
/*
 * Tear down every completion EQ: release its IRQ first so no new
 * events arrive, then destroy the queue in hardware and free its
 * buffer. Vector 0 belongs to the CMDQ/AEQ, hence irq_num - 1 CEQs.
 */
void erdma_ceqs_uninit(struct erdma_dev *dev)
{
	u32 ceqn;

	for (ceqn = 0; ceqn < dev->attrs.irq_num - 1; ceqn++) {
		erdma_free_ceq_irq(dev, ceqn);
		erdma_ceq_uninit_one(dev, ceqn);
	}
}

View File

@ -0,0 +1,508 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#ifndef __ERDMA_HW_H__
#define __ERDMA_HW_H__
#include <linux/kernel.h>
#include <linux/types.h>
/* PCIe device related definition. */
#define PCI_VENDOR_ID_ALIBABA 0x1ded
#define ERDMA_PCI_WIDTH 64
#define ERDMA_FUNC_BAR 0
#define ERDMA_MISX_BAR 2
#define ERDMA_BAR_MASK (BIT(ERDMA_FUNC_BAR) | BIT(ERDMA_MISX_BAR))
/* MSI-X related. */
#define ERDMA_NUM_MSIX_VEC 32U
#define ERDMA_MSIX_VECTOR_CMDQ 0
/* PCIe Bar0 Registers. */
#define ERDMA_REGS_VERSION_REG 0x0
#define ERDMA_REGS_DEV_CTRL_REG 0x10
#define ERDMA_REGS_DEV_ST_REG 0x14
#define ERDMA_REGS_NETDEV_MAC_L_REG 0x18
#define ERDMA_REGS_NETDEV_MAC_H_REG 0x1C
#define ERDMA_REGS_CMDQ_SQ_ADDR_L_REG 0x20
#define ERDMA_REGS_CMDQ_SQ_ADDR_H_REG 0x24
#define ERDMA_REGS_CMDQ_CQ_ADDR_L_REG 0x28
#define ERDMA_REGS_CMDQ_CQ_ADDR_H_REG 0x2C
#define ERDMA_REGS_CMDQ_DEPTH_REG 0x30
#define ERDMA_REGS_CMDQ_EQ_DEPTH_REG 0x34
#define ERDMA_REGS_CMDQ_EQ_ADDR_L_REG 0x38
#define ERDMA_REGS_CMDQ_EQ_ADDR_H_REG 0x3C
#define ERDMA_REGS_AEQ_ADDR_L_REG 0x40
#define ERDMA_REGS_AEQ_ADDR_H_REG 0x44
#define ERDMA_REGS_AEQ_DEPTH_REG 0x48
#define ERDMA_REGS_GRP_NUM_REG 0x4c
#define ERDMA_REGS_AEQ_DB_REG 0x50
#define ERDMA_CMDQ_SQ_DB_HOST_ADDR_REG 0x60
#define ERDMA_CMDQ_CQ_DB_HOST_ADDR_REG 0x68
#define ERDMA_CMDQ_EQ_DB_HOST_ADDR_REG 0x70
#define ERDMA_AEQ_DB_HOST_ADDR_REG 0x78
#define ERDMA_REGS_STATS_TSO_IN_PKTS_REG 0x80
#define ERDMA_REGS_STATS_TSO_OUT_PKTS_REG 0x88
#define ERDMA_REGS_STATS_TSO_OUT_BYTES_REG 0x90
#define ERDMA_REGS_STATS_TX_DROP_PKTS_REG 0x98
#define ERDMA_REGS_STATS_TX_BPS_METER_DROP_PKTS_REG 0xa0
#define ERDMA_REGS_STATS_TX_PPS_METER_DROP_PKTS_REG 0xa8
#define ERDMA_REGS_STATS_RX_PKTS_REG 0xc0
#define ERDMA_REGS_STATS_RX_BYTES_REG 0xc8
#define ERDMA_REGS_STATS_RX_DROP_PKTS_REG 0xd0
#define ERDMA_REGS_STATS_RX_BPS_METER_DROP_PKTS_REG 0xd8
#define ERDMA_REGS_STATS_RX_PPS_METER_DROP_PKTS_REG 0xe0
#define ERDMA_REGS_CEQ_DB_BASE_REG 0x100
#define ERDMA_CMDQ_SQDB_REG 0x200
#define ERDMA_CMDQ_CQDB_REG 0x300
/* DEV_CTRL_REG details. */
#define ERDMA_REG_DEV_CTRL_RESET_MASK 0x00000001
#define ERDMA_REG_DEV_CTRL_INIT_MASK 0x00000002
/* DEV_ST_REG details. */
#define ERDMA_REG_DEV_ST_RESET_DONE_MASK 0x00000001U
#define ERDMA_REG_DEV_ST_INIT_DONE_MASK 0x00000002U
/* eRDMA PCIe DBs definition. */
#define ERDMA_BAR_DB_SPACE_BASE 4096
#define ERDMA_BAR_SQDB_SPACE_OFFSET ERDMA_BAR_DB_SPACE_BASE
#define ERDMA_BAR_SQDB_SPACE_SIZE (384 * 1024)
#define ERDMA_BAR_RQDB_SPACE_OFFSET \
(ERDMA_BAR_SQDB_SPACE_OFFSET + ERDMA_BAR_SQDB_SPACE_SIZE)
#define ERDMA_BAR_RQDB_SPACE_SIZE (96 * 1024)
#define ERDMA_BAR_CQDB_SPACE_OFFSET \
(ERDMA_BAR_RQDB_SPACE_OFFSET + ERDMA_BAR_RQDB_SPACE_SIZE)
/* Doorbell page resources related. */
/*
* Max # of parallelly issued directSQE is 3072 per device,
* hardware organizes this into 24 group, per group has 128 credits.
*/
#define ERDMA_DWQE_MAX_GRP_CNT 24
#define ERDMA_DWQE_NUM_PER_GRP 128
#define ERDMA_DWQE_TYPE0_CNT 64
#define ERDMA_DWQE_TYPE1_CNT 496
/* type1 DB contains 2 DBs, takes 256Byte. */
#define ERDMA_DWQE_TYPE1_CNT_PER_PAGE 16
#define ERDMA_SDB_SHARED_PAGE_INDEX 95
/* Doorbell related. */
#define ERDMA_DB_SIZE 8
#define ERDMA_CQDB_IDX_MASK GENMASK_ULL(63, 56)
#define ERDMA_CQDB_CQN_MASK GENMASK_ULL(55, 32)
#define ERDMA_CQDB_ARM_MASK BIT_ULL(31)
#define ERDMA_CQDB_SOL_MASK BIT_ULL(30)
#define ERDMA_CQDB_CMDSN_MASK GENMASK_ULL(29, 28)
#define ERDMA_CQDB_CI_MASK GENMASK_ULL(23, 0)
#define ERDMA_EQDB_ARM_MASK BIT(31)
#define ERDMA_EQDB_CI_MASK GENMASK_ULL(23, 0)
#define ERDMA_PAGE_SIZE_SUPPORT 0x7FFFF000
/* WQE related. */
#define EQE_SIZE 16
#define EQE_SHIFT 4
#define RQE_SIZE 32
#define RQE_SHIFT 5
#define CQE_SIZE 32
#define CQE_SHIFT 5
#define SQEBB_SIZE 32
#define SQEBB_SHIFT 5
#define SQEBB_MASK (~(SQEBB_SIZE - 1))
#define SQEBB_ALIGN(size) ((size + SQEBB_SIZE - 1) & SQEBB_MASK)
#define SQEBB_COUNT(size) (SQEBB_ALIGN(size) >> SQEBB_SHIFT)
#define ERDMA_MAX_SQE_SIZE 128
#define ERDMA_MAX_WQEBB_PER_SQE 4
/* CMDQ related. */
#define ERDMA_CMDQ_MAX_OUTSTANDING 128
#define ERDMA_CMDQ_SQE_SIZE 64
/* cmdq sub module definition. */
enum CMDQ_WQE_SUB_MOD {
CMDQ_SUBMOD_RDMA = 0,
CMDQ_SUBMOD_COMMON = 1
};
enum CMDQ_RDMA_OPCODE {
CMDQ_OPCODE_QUERY_DEVICE = 0,
CMDQ_OPCODE_CREATE_QP = 1,
CMDQ_OPCODE_DESTROY_QP = 2,
CMDQ_OPCODE_MODIFY_QP = 3,
CMDQ_OPCODE_CREATE_CQ = 4,
CMDQ_OPCODE_DESTROY_CQ = 5,
CMDQ_OPCODE_REG_MR = 8,
CMDQ_OPCODE_DEREG_MR = 9
};
enum CMDQ_COMMON_OPCODE {
CMDQ_OPCODE_CREATE_EQ = 0,
CMDQ_OPCODE_DESTROY_EQ = 1,
CMDQ_OPCODE_QUERY_FW_INFO = 2,
};
/* cmdq-SQE HDR */
#define ERDMA_CMD_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
#define ERDMA_CMD_HDR_CONTEXT_COOKIE_MASK GENMASK_ULL(47, 32)
#define ERDMA_CMD_HDR_SUB_MOD_MASK GENMASK_ULL(25, 24)
#define ERDMA_CMD_HDR_OPCODE_MASK GENMASK_ULL(23, 16)
#define ERDMA_CMD_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)
/* CMDQ payload for CMDQ_OPCODE_DESTROY_CQ. */
struct erdma_cmdq_destroy_cq_req {
	u64 hdr;
	u32 cqn;
};

/* Queue type carried in create/destroy EQ requests. */
#define ERDMA_EQ_TYPE_AEQ 0
#define ERDMA_EQ_TYPE_CEQ 1

/* CMDQ payload for CMDQ_OPCODE_CREATE_EQ.
 * depth is log2 of the entry count; db_dma_addr_* point at the host
 * doorbell record the device updates.
 */
struct erdma_cmdq_create_eq_req {
	u64 hdr;
	u64 qbuf_addr;
	u8 vector_idx;
	u8 eqn;
	u8 depth;
	u8 qtype;
	u32 db_dma_addr_l;
	u32 db_dma_addr_h;
};

/* CMDQ payload for CMDQ_OPCODE_DESTROY_EQ. */
struct erdma_cmdq_destroy_eq_req {
	u64 hdr;
	u64 rsvd0;
	u8 vector_idx;
	u8 eqn;
	u8 rsvd1;
	u8 qtype;
};
/* create_cq cfg0 */
#define ERDMA_CMD_CREATE_CQ_DEPTH_MASK GENMASK(31, 24)
#define ERDMA_CMD_CREATE_CQ_PAGESIZE_MASK GENMASK(23, 20)
#define ERDMA_CMD_CREATE_CQ_CQN_MASK GENMASK(19, 0)

/* create_cq cfg1 */
#define ERDMA_CMD_CREATE_CQ_MTT_CNT_MASK GENMASK(31, 16)
#define ERDMA_CMD_CREATE_CQ_MTT_TYPE_MASK BIT(15)
#define ERDMA_CMD_CREATE_CQ_EQN_MASK GENMASK(9, 0)

/* CMDQ payload for CMDQ_OPCODE_CREATE_CQ; cfg0/cfg1 are packed with the
 * ERDMA_CMD_CREATE_CQ_* fields above.
 */
struct erdma_cmdq_create_cq_req {
	u64 hdr;
	u32 cfg0;
	u32 qbuf_addr_l;
	u32 qbuf_addr_h;
	u32 cfg1;
	u64 cq_db_info_addr;
	u32 first_page_offset;
};
/* regmr/deregmr cfg0 */
#define ERDMA_CMD_MR_VALID_MASK BIT(31)
#define ERDMA_CMD_MR_KEY_MASK GENMASK(27, 20)
#define ERDMA_CMD_MR_MPT_IDX_MASK GENMASK(19, 0)

/* regmr cfg1 */
#define ERDMA_CMD_REGMR_PD_MASK GENMASK(31, 12)
#define ERDMA_CMD_REGMR_TYPE_MASK GENMASK(7, 6)
#define ERDMA_CMD_REGMR_RIGHT_MASK GENMASK(5, 2)
#define ERDMA_CMD_REGMR_ACC_MODE_MASK GENMASK(1, 0)

/* regmr cfg2 */
#define ERDMA_CMD_REGMR_PAGESIZE_MASK GENMASK(31, 27)
#define ERDMA_CMD_REGMR_MTT_TYPE_MASK GENMASK(21, 20)
#define ERDMA_CMD_REGMR_MTT_CNT_MASK GENMASK(19, 0)

/* CMDQ payload for CMDQ_OPCODE_REG_MR. phy_addr holds up to 4 inline
 * page addresses (larger MTTs are referenced indirectly).
 */
struct erdma_cmdq_reg_mr_req {
	u64 hdr;
	u32 cfg0;
	u32 cfg1;
	u64 start_va;
	u32 size;
	u32 cfg2;
	u64 phy_addr[4];
};

/* CMDQ payload for CMDQ_OPCODE_DEREG_MR. */
struct erdma_cmdq_dereg_mr_req {
	u64 hdr;
	u32 cfg;
};
/* modify qp cfg */
#define ERDMA_CMD_MODIFY_QP_STATE_MASK GENMASK(31, 24)
#define ERDMA_CMD_MODIFY_QP_CC_MASK GENMASK(23, 20)
#define ERDMA_CMD_MODIFY_QP_QPN_MASK GENMASK(19, 0)

/* CMDQ payload for CMDQ_OPCODE_MODIFY_QP. Network-order address/port
 * fields plus the TCP send/receive sequence numbers the device resumes
 * the connection from.
 */
struct erdma_cmdq_modify_qp_req {
	u64 hdr;
	u32 cfg;
	u32 cookie;
	__be32 dip;
	__be32 sip;
	__be16 sport;
	__be16 dport;
	u32 send_nxt;
	u32 recv_nxt;
};
/* create qp cfg0 */
#define ERDMA_CMD_CREATE_QP_SQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_QPN_MASK GENMASK(19, 0)

/* create qp cfg1 */
#define ERDMA_CMD_CREATE_QP_RQ_DEPTH_MASK GENMASK(31, 20)
#define ERDMA_CMD_CREATE_QP_PD_MASK GENMASK(19, 0)

/* create qp cqn_mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_SIZE_MASK GENMASK(31, 28)
#define ERDMA_CMD_CREATE_QP_CQN_MASK GENMASK(23, 0)

/* create qp mtt_cfg */
#define ERDMA_CMD_CREATE_QP_PAGE_OFFSET_MASK GENMASK(31, 12)
#define ERDMA_CMD_CREATE_QP_MTT_CNT_MASK GENMASK(11, 1)
#define ERDMA_CMD_CREATE_QP_MTT_TYPE_MASK BIT(0)

#define ERDMA_CMDQ_CREATE_QP_RESP_COOKIE_MASK GENMASK_ULL(31, 0)

/* CMDQ payload for CMDQ_OPCODE_CREATE_QP; SQ and RQ each carry their
 * own buffer address, MTT configuration and doorbell-record address.
 */
struct erdma_cmdq_create_qp_req {
	u64 hdr;
	u32 cfg0;
	u32 cfg1;
	u32 sq_cqn_mtt_cfg;
	u32 rq_cqn_mtt_cfg;
	u64 sq_buf_addr;
	u64 rq_buf_addr;
	u32 sq_mtt_cfg;
	u32 rq_mtt_cfg;
	u64 sq_db_info_dma_addr;
	u64 rq_db_info_dma_addr;
};

/* CMDQ payload for CMDQ_OPCODE_DESTROY_QP. */
struct erdma_cmdq_destroy_qp_req {
	u64 hdr;
	u32 qpn;
};
/* cap qword 0 definition */
#define ERDMA_CMD_DEV_CAP_MAX_CQE_MASK GENMASK_ULL(47, 40)
#define ERDMA_CMD_DEV_CAP_MAX_RECV_WR_MASK GENMASK_ULL(23, 16)
#define ERDMA_CMD_DEV_CAP_MAX_MR_SIZE_MASK GENMASK_ULL(7, 0)
/* cap qword 1 definition */
#define ERDMA_CMD_DEV_CAP_DMA_LOCAL_KEY_MASK GENMASK_ULL(63, 32)
#define ERDMA_CMD_DEV_CAP_DEFAULT_CC_MASK GENMASK_ULL(31, 28)
#define ERDMA_CMD_DEV_CAP_QBLOCK_MASK GENMASK_ULL(27, 16)
#define ERDMA_CMD_DEV_CAP_MAX_MW_MASK GENMASK_ULL(7, 0)
#define ERDMA_NQP_PER_QBLOCK 1024
#define ERDMA_CMD_INFO0_FW_VER_MASK GENMASK_ULL(31, 0)
/* CQE hdr */
#define ERDMA_CQE_HDR_OWNER_MASK BIT(31)
#define ERDMA_CQE_HDR_OPCODE_MASK GENMASK(23, 16)
#define ERDMA_CQE_HDR_QTYPE_MASK GENMASK(15, 8)
#define ERDMA_CQE_HDR_SYNDROME_MASK GENMASK(7, 0)
#define ERDMA_CQE_QTYPE_SQ 0
#define ERDMA_CQE_QTYPE_RQ 1
#define ERDMA_CQE_QTYPE_CMDQ 2
/* Completion queue entry as written by hardware; header fields are
 * packed with the ERDMA_CQE_HDR_* masks above.
 */
struct erdma_cqe {
	__be32 hdr;
	__be32 qe_idx;
	__be32 qpn;
	union {
		__le32 imm_data;
		__be32 inv_rkey;
	};
	__be32 size;
	__be32 rsvd[3];
};

/* Local scatter/gather element: address, length, local key. */
struct erdma_sge {
	__aligned_le64 laddr;
	__le32 length;
	__le32 lkey;
};

/* Receive Queue Element */
struct erdma_rqe {
	__le16 qe_idx;
	__le16 rsvd0;
	__le32 qpn;
	__le32 rsvd1;
	__le32 rsvd2;
	__le64 to;
	__le32 length;
	__le32 stag;
};

/* SQE */
#define ERDMA_SQE_HDR_SGL_LEN_MASK GENMASK_ULL(63, 56)
#define ERDMA_SQE_HDR_WQEBB_CNT_MASK GENMASK_ULL(54, 52)
#define ERDMA_SQE_HDR_QPN_MASK GENMASK_ULL(51, 32)
#define ERDMA_SQE_HDR_OPCODE_MASK GENMASK_ULL(31, 27)
#define ERDMA_SQE_HDR_DWQE_MASK BIT_ULL(26)
#define ERDMA_SQE_HDR_INLINE_MASK BIT_ULL(25)
#define ERDMA_SQE_HDR_FENCE_MASK BIT_ULL(24)
#define ERDMA_SQE_HDR_SE_MASK BIT_ULL(23)
#define ERDMA_SQE_HDR_CE_MASK BIT_ULL(22)
#define ERDMA_SQE_HDR_WQEBB_INDEX_MASK GENMASK_ULL(15, 0)

/* REG MR attrs */
#define ERDMA_SQE_MR_MODE_MASK GENMASK(1, 0)
#define ERDMA_SQE_MR_ACCESS_MASK GENMASK(5, 2)
#define ERDMA_SQE_MR_MTT_TYPE_MASK GENMASK(7, 6)
#define ERDMA_SQE_MR_MTT_CNT_MASK GENMASK(31, 12)
/* Send-queue element for RDMA WRITE / WRITE WITH IMM; a variable-length
 * SGL follows the fixed header.
 */
struct erdma_write_sqe {
	__le64 hdr;
	__be32 imm_data;
	__le32 length;
	__le32 sink_stag;
	__le32 sink_to_l;
	__le32 sink_to_h;
	__le32 rsvd;
	/* C99 flexible array member (was GNU zero-length array sgl[0]);
	 * identical layout, standard-conforming. */
	struct erdma_sge sgl[];
};
/* Send-queue element for SEND / SEND WITH IMM / SEND WITH INV; a
 * variable-length SGL follows the fixed header.
 */
struct erdma_send_sqe {
	__le64 hdr;
	union {
		__be32 imm_data;
		__le32 invalid_stag;
	};
	__le32 length;
	/* C99 flexible array member (was GNU zero-length array sgl[0]);
	 * identical layout, standard-conforming. */
	struct erdma_sge sgl[];
};
/* Send-queue element for RDMA READ requests (fixed size, no SGL). */
struct erdma_readreq_sqe {
	__le64 hdr;
	__le32 invalid_stag;
	__le32 length;
	__le32 sink_stag;
	__le32 sink_to_l;
	__le32 sink_to_h;
	__le32 rsvd;
};

/* Send-queue element for fast MR registration (REG_MR). */
struct erdma_reg_mr_sqe {
	__le64 hdr;
	__le64 addr;
	__le32 length;
	__le32 stag;
	__le32 attrs;
	__le32 rsvd;
};

/* EQ related. */
#define ERDMA_DEFAULT_EQ_DEPTH 256

/* ceqe */
#define ERDMA_CEQE_HDR_DB_MASK BIT_ULL(63)
#define ERDMA_CEQE_HDR_PI_MASK GENMASK_ULL(55, 32)
#define ERDMA_CEQE_HDR_O_MASK BIT_ULL(31)
#define ERDMA_CEQE_HDR_CQN_MASK GENMASK_ULL(19, 0)

/* aeqe */
#define ERDMA_AEQE_HDR_O_MASK BIT(31)
#define ERDMA_AEQE_HDR_TYPE_MASK GENMASK(23, 16)
#define ERDMA_AEQE_HDR_SUBTYPE_MASK GENMASK(7, 0)

#define ERDMA_AE_TYPE_QP_FATAL_EVENT 0
#define ERDMA_AE_TYPE_QP_ERQ_ERR_EVENT 1
#define ERDMA_AE_TYPE_ACC_ERR_EVENT 2
#define ERDMA_AE_TYPE_CQ_ERR 3
#define ERDMA_AE_TYPE_OTHER_ERROR 4

/* Async event queue entry; hdr packed with ERDMA_AEQE_HDR_* masks. */
struct erdma_aeqe {
	__le32 hdr;
	__le32 event_data0;
	__le32 event_data1;
	__le32 rsvd;
};
/* Hardware WQE/CQE opcodes. */
enum erdma_opcode {
	ERDMA_OP_WRITE = 0,
	ERDMA_OP_READ = 1,
	ERDMA_OP_SEND = 2,
	ERDMA_OP_SEND_WITH_IMM = 3,
	ERDMA_OP_RECEIVE = 4,
	ERDMA_OP_RECV_IMM = 5,
	ERDMA_OP_RECV_INV = 6,
	ERDMA_OP_REQ_ERR = 7,
	ERDMA_OP_READ_RESPONSE = 8,
	ERDMA_OP_WRITE_WITH_IMM = 9,
	ERDMA_OP_RECV_ERR = 10,
	ERDMA_OP_INVALIDATE = 11,
	ERDMA_OP_RSP_SEND_IMM = 12,
	ERDMA_OP_SEND_WITH_INV = 13,
	ERDMA_OP_REG_MR = 14,
	ERDMA_OP_LOCAL_INV = 15,
	ERDMA_OP_READ_WITH_INV = 16,
	ERDMA_NUM_OPCODES = 17,
	ERDMA_OP_INVALID = ERDMA_NUM_OPCODES + 1
};

/* Completion status codes reported in the CQE syndrome field. */
enum erdma_wc_status {
	ERDMA_WC_SUCCESS = 0,
	ERDMA_WC_GENERAL_ERR = 1,
	ERDMA_WC_RECV_WQE_FORMAT_ERR = 2,
	ERDMA_WC_RECV_STAG_INVALID_ERR = 3,
	ERDMA_WC_RECV_ADDR_VIOLATION_ERR = 4,
	ERDMA_WC_RECV_RIGHT_VIOLATION_ERR = 5,
	ERDMA_WC_RECV_PDID_ERR = 6,
	ERDMA_WC_RECV_WARRPING_ERR = 7,
	ERDMA_WC_SEND_WQE_FORMAT_ERR = 8,
	ERDMA_WC_SEND_WQE_ORD_EXCEED = 9,
	ERDMA_WC_SEND_STAG_INVALID_ERR = 10,
	ERDMA_WC_SEND_ADDR_VIOLATION_ERR = 11,
	ERDMA_WC_SEND_RIGHT_VIOLATION_ERR = 12,
	ERDMA_WC_SEND_PDID_ERR = 13,
	ERDMA_WC_SEND_WARRPING_ERR = 14,
	ERDMA_WC_FLUSH_ERR = 15,
	ERDMA_WC_RETRY_EXC_ERR = 16,
	ERDMA_NUM_WC_STATUS
};

/* Vendor-specific error codes accompanying the WC status. */
enum erdma_vendor_err {
	ERDMA_WC_VENDOR_NO_ERR = 0,
	ERDMA_WC_VENDOR_INVALID_RQE = 1,
	ERDMA_WC_VENDOR_RQE_INVALID_STAG = 2,
	ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION = 3,
	ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR = 4,
	ERDMA_WC_VENDOR_RQE_INVALID_PD = 5,
	ERDMA_WC_VENDOR_RQE_WRAP_ERR = 6,
	ERDMA_WC_VENDOR_INVALID_SQE = 0x20,
	ERDMA_WC_VENDOR_ZERO_ORD = 0x21,
	ERDMA_WC_VENDOR_SQE_INVALID_STAG = 0x30,
	ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION = 0x31,
	ERDMA_WC_VENDOR_SQE_ACCESS_ERR = 0x32,
	ERDMA_WC_VENDOR_SQE_INVALID_PD = 0x33,
	ERDMA_WC_VENDOR_SQE_WARP_ERR = 0x34
};
#endif

View File

@ -0,0 +1,608 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <net/addrconf.h>
#include <rdma/erdma-abi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_hw.h"
#include "erdma_verbs.h"
MODULE_AUTHOR("Cheng Xu <chengyou@linux.alibaba.com>");
MODULE_DESCRIPTION("Alibaba elasticRDMA adapter driver");
MODULE_LICENSE("Dual BSD/GPL");
/*
 * Netdevice notifier: mirror link state of our paired netdev onto the
 * IB port (ACTIVE on NETDEV_UP, DOWN on NETDEV_DOWN). Events for other
 * netdevs are ignored.
 */
static int erdma_netdev_event(struct notifier_block *nb, unsigned long event,
			      void *arg)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(arg);
	struct erdma_dev *dev = container_of(nb, struct erdma_dev, netdev_nb);

	/* The notifier never passes a NULL netdev, so a single inequality
	 * test also covers the "not paired yet" (dev->netdev == NULL)
	 * case that was previously checked explicitly. */
	if (dev->netdev != netdev)
		goto done;

	switch (event) {
	case NETDEV_UP:
		dev->state = IB_PORT_ACTIVE;
		erdma_port_event(dev, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_DOWN:
		dev->state = IB_PORT_DOWN;
		erdma_port_event(dev, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_REGISTER:
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGE:
	default:
		break;
	}

done:
	return NOTIFY_OK;
}
/*
 * Find the net_device paired with this RDMA device and link them.
 *
 * Returns 0 on success (or if already paired), -ENODEV when no netdev
 * with a matching permanent MAC exists, or the ib_device_set_netdev()
 * error.
 */
static int erdma_enum_and_get_netdev(struct erdma_dev *dev)
{
	struct net_device *netdev;
	int ret = -ENODEV;

	/* Already bound to a net_device, so we skip. */
	if (dev->netdev)
		return 0;

	rtnl_lock();
	for_each_netdev(&init_net, netdev) {
		/*
		 * In erdma, the paired netdev and ibdev should have the same
		 * MAC address. erdma can get the value from its PCIe bar
		 * registers. Since erdma can not get the paired netdev
		 * reference directly, we do a traverse here to get the paired
		 * netdev.
		 */
		if (ether_addr_equal_unaligned(netdev->perm_addr,
					       dev->attrs.peer_addr)) {
			ret = ib_device_set_netdev(&dev->ibdev, netdev, 1);
			if (ret) {
				rtnl_unlock();
				ibdev_warn(&dev->ibdev,
					   "failed (%d) to link netdev", ret);
				return ret;
			}

			dev->netdev = netdev;
			break;
		}
	}

	rtnl_unlock();
	return ret;
}
/*
 * Register the IB device with the RDMA core and subscribe to netdev
 * events. Requires a paired netdev (its MAC seeds the node GUID).
 * On notifier registration failure the IB device is unregistered again.
 */
static int erdma_device_register(struct erdma_dev *dev)
{
	struct ib_device *ibdev = &dev->ibdev;
	int ret;

	ret = erdma_enum_and_get_netdev(dev);
	if (ret)
		return ret;

	addrconf_addr_eui48((u8 *)&ibdev->node_guid, dev->netdev->dev_addr);

	ret = ib_register_device(ibdev, "erdma_%d", &dev->pdev->dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"ib_register_device failed: ret = %d\n", ret);
		return ret;
	}

	dev->netdev_nb.notifier_call = erdma_netdev_event;
	ret = register_netdevice_notifier(&dev->netdev_nb);
	if (ret) {
		ibdev_err(&dev->ibdev, "failed to register notifier.\n");
		ib_unregister_device(ibdev);
	}

	return ret;
}
/* Shared vector-0 IRQ handler: services both the CMDQ and the AEQ. */
static irqreturn_t erdma_comm_irq_handler(int irq, void *data)
{
	struct erdma_dev *dev = data;

	erdma_cmdq_completion_handler(&dev->cmdq);
	erdma_aeq_event_handler(dev);

	return IRQ_HANDLED;
}
/*
 * Read how many directSQE doorbell groups the device exposes and split
 * them into type0 (whole-page) and type1 (shared-page) doorbell pools.
 * Fewer than 4 groups disables direct WQE entirely.
 */
static void erdma_dwqe_resource_init(struct erdma_dev *dev)
{
	int total_pages, type0, type1;

	dev->attrs.grp_num = erdma_reg_read32(dev, ERDMA_REGS_GRP_NUM_REG);

	if (dev->attrs.grp_num < 4)
		dev->attrs.disable_dwqe = true;
	else
		dev->attrs.disable_dwqe = false;

	/* One page contains 4 groups. */
	total_pages = dev->attrs.grp_num * 4;

	if (dev->attrs.grp_num >= ERDMA_DWQE_MAX_GRP_CNT) {
		dev->attrs.grp_num = ERDMA_DWQE_MAX_GRP_CNT;
		type0 = ERDMA_DWQE_TYPE0_CNT;
		type1 = ERDMA_DWQE_TYPE1_CNT / ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
	} else {
		/* One page reserved; remainder split 1/3 type1, 2/3 type0. */
		type1 = total_pages / 3;
		type0 = total_pages - type1 - 1;
	}

	dev->attrs.dwqe_pages = type0;
	dev->attrs.dwqe_entries = type1 * ERDMA_DWQE_TYPE1_CNT_PER_PAGE;
}
/*
 * Allocate MSI-X vectors: ideally one per CPU for CEQs plus one for
 * CMDQ/AEQ, capped at ERDMA_NUM_MSIX_VEC. Accepts fewer vectors; the
 * granted count is stored in attrs.irq_num.
 */
static int erdma_request_vectors(struct erdma_dev *dev)
{
	int expect_irq_num = min(num_possible_cpus() + 1, ERDMA_NUM_MSIX_VEC);
	int ret;

	ret = pci_alloc_irq_vectors(dev->pdev, 1, expect_irq_num, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(&dev->pdev->dev, "request irq vectors failed(%d)\n",
			ret);
		return ret;
	}
	dev->attrs.irq_num = ret;

	return 0;
}
/*
 * Request the shared CMDQ/AEQ interrupt on MSI-X vector 0, hinting its
 * affinity to a CPU close to the PCI bus.
 */
static int erdma_comm_irq_init(struct erdma_dev *dev)
{
	snprintf(dev->comm_irq.name, ERDMA_IRQNAME_SIZE, "erdma-common@pci:%s",
		 pci_name(dev->pdev));
	dev->comm_irq.msix_vector =
		pci_irq_vector(dev->pdev, ERDMA_MSIX_VECTOR_CMDQ);

	cpumask_set_cpu(cpumask_first(cpumask_of_pcibus(dev->pdev->bus)),
			&dev->comm_irq.affinity_hint_mask);
	irq_set_affinity_hint(dev->comm_irq.msix_vector,
			      &dev->comm_irq.affinity_hint_mask);

	return request_irq(dev->comm_irq.msix_vector, erdma_comm_irq_handler, 0,
			   dev->comm_irq.name, dev);
}
/* Release the shared CMDQ/AEQ interrupt and clear its affinity hint. */
static void erdma_comm_irq_uninit(struct erdma_dev *dev)
{
	irq_set_affinity_hint(dev->comm_irq.msix_vector, NULL);
	free_irq(dev->comm_irq.msix_vector, dev);
}
/*
 * One-time device setup: size the direct-WQE doorbell pools and
 * configure 64-bit DMA addressing.
 */
static int erdma_device_init(struct erdma_dev *dev, struct pci_dev *pdev)
{
	int ret;

	erdma_dwqe_resource_init(dev);

	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ERDMA_PCI_WIDTH));
	if (ret)
		return ret;

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	return 0;
}
/* Request a device-level reset via the control register. */
static void erdma_device_uninit(struct erdma_dev *dev)
{
	u32 ctrl = FIELD_PREP(ERDMA_REG_DEV_CTRL_RESET_MASK, 1);

	erdma_reg_write32(dev, ERDMA_REGS_DEV_CTRL_REG, ctrl);
}
/* PCI IDs this driver binds to (Alibaba ERDMA adapter). */
static const struct pci_device_id erdma_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ALIBABA, 0x107f) },
	{}
};
/*
 * Low-level PCI probe: map BARs, set up DMA, interrupts, the AEQ, the
 * command queue and the CEQs. The error path unwinds in strict reverse
 * order of construction.
 */
static int erdma_probe_dev(struct pci_dev *pdev)
{
	struct erdma_dev *dev;
	int bars, err;
	u32 version;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed(%d)\n", err);
		return err;
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(erdma_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "ib_alloc_device failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	dev->pdev = pdev;
	dev->attrs.numa_node = dev_to_node(&pdev->dev);

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	/* Both BAR0 (registers) and BAR2 (MSI-X) must be present. */
	if (bars != ERDMA_BAR_MASK || err) {
		err = err ? err : -EINVAL;
		goto err_ib_device_release;
	}

	dev->func_bar_addr = pci_resource_start(pdev, ERDMA_FUNC_BAR);
	dev->func_bar_len = pci_resource_len(pdev, ERDMA_FUNC_BAR);

	dev->func_bar =
		devm_ioremap(&pdev->dev, dev->func_bar_addr, dev->func_bar_len);
	if (!dev->func_bar) {
		dev_err(&pdev->dev, "devm_ioremap failed.\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	version = erdma_reg_read32(dev, ERDMA_REGS_VERSION_REG);
	if (version == 0) {
		/* we knows that it is a non-functional function. */
		err = -ENODEV;
		goto err_iounmap_func_bar;
	}

	err = erdma_device_init(dev, pdev);
	if (err)
		goto err_iounmap_func_bar;

	err = erdma_request_vectors(dev);
	if (err)
		goto err_iounmap_func_bar;

	err = erdma_comm_irq_init(dev);
	if (err)
		goto err_free_vectors;

	err = erdma_aeq_init(dev);
	if (err)
		goto err_uninit_comm_irq;

	err = erdma_cmdq_init(dev);
	if (err)
		goto err_uninit_aeq;

	err = erdma_ceqs_init(dev);
	if (err)
		goto err_uninit_cmdq;

	erdma_finish_cmdq_init(dev);

	return 0;

err_uninit_cmdq:
	erdma_device_uninit(dev);
	erdma_cmdq_destroy(dev);

err_uninit_aeq:
	erdma_aeq_destroy(dev);

err_uninit_comm_irq:
	erdma_comm_irq_uninit(dev);

err_free_vectors:
	pci_free_irq_vectors(dev->pdev);

err_iounmap_func_bar:
	devm_iounmap(&pdev->dev, dev->func_bar);

err_release_bars:
	pci_release_selected_regions(pdev, bars);

err_ib_device_release:
	ib_dealloc_device(&dev->ibdev);

err_disable_device:
	pci_disable_device(pdev);

	return err;
}
/* Full teardown mirroring erdma_probe_dev(), in reverse order. */
static void erdma_remove_dev(struct pci_dev *pdev)
{
	struct erdma_dev *dev = pci_get_drvdata(pdev);

	erdma_ceqs_uninit(dev);

	erdma_device_uninit(dev);

	erdma_cmdq_destroy(dev);
	erdma_aeq_destroy(dev);
	erdma_comm_irq_uninit(dev);
	pci_free_irq_vectors(dev->pdev);

	devm_iounmap(&pdev->dev, dev->func_bar);
	pci_release_selected_regions(pdev, ERDMA_BAR_MASK);

	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}
/* Extract one capability field from a QUERY_DEVICE response qword. */
#define ERDMA_GET_CAP(name, cap) FIELD_GET(ERDMA_CMD_DEV_CAP_##name##_MASK, cap)

/*
 * Query device capabilities and firmware version over the CMDQ and
 * fill in dev->attrs plus the resource-bitmap capacities.
 */
static int erdma_dev_attrs_init(struct erdma_dev *dev)
{
	int err;
	u64 req_hdr, cap0, cap1;

	erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_QUERY_DEVICE);

	err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
				  &cap1);
	if (err)
		return err;

	/* HW reports most limits as log2 values. */
	dev->attrs.max_cqe = 1 << ERDMA_GET_CAP(MAX_CQE, cap0);
	dev->attrs.max_mr_size = 1ULL << ERDMA_GET_CAP(MAX_MR_SIZE, cap0);
	dev->attrs.max_mw = 1 << ERDMA_GET_CAP(MAX_MW, cap1);
	dev->attrs.max_recv_wr = 1 << ERDMA_GET_CAP(MAX_RECV_WR, cap0);
	dev->attrs.local_dma_key = ERDMA_GET_CAP(DMA_LOCAL_KEY, cap1);
	dev->attrs.cc = ERDMA_GET_CAP(DEFAULT_CC, cap1);
	dev->attrs.max_qp = ERDMA_NQP_PER_QBLOCK * ERDMA_GET_CAP(QBLOCK, cap1);
	dev->attrs.max_mr = dev->attrs.max_qp << 1;
	dev->attrs.max_cq = dev->attrs.max_qp << 1;

	dev->attrs.max_send_wr = ERDMA_MAX_SEND_WR;
	dev->attrs.max_ord = ERDMA_MAX_ORD;
	dev->attrs.max_ird = ERDMA_MAX_IRD;
	dev->attrs.max_send_sge = ERDMA_MAX_SEND_SGE;
	dev->attrs.max_recv_sge = ERDMA_MAX_RECV_SGE;
	dev->attrs.max_sge_rd = ERDMA_MAX_SGE_RD;
	dev->attrs.max_pd = ERDMA_MAX_PD;

	dev->res_cb[ERDMA_RES_TYPE_PD].max_cap = ERDMA_MAX_PD;
	dev->res_cb[ERDMA_RES_TYPE_STAG_IDX].max_cap = dev->attrs.max_mr;

	erdma_cmdq_build_reqhdr(&req_hdr, CMDQ_SUBMOD_COMMON,
				CMDQ_OPCODE_QUERY_FW_INFO);

	err = erdma_post_cmd_wait(&dev->cmdq, &req_hdr, sizeof(req_hdr), &cap0,
				  &cap1);
	if (!err)
		dev->attrs.fw_version =
			FIELD_GET(ERDMA_CMD_INFO0_FW_VER_MASK, cap0);

	return err;
}
/*
 * Allocate the per-resource-type allocation bitmaps (PDs, STags, ...).
 * Index 0 of each bitmap is reserved (next_alloc_idx starts at 1).
 * On failure, bitmaps allocated so far are freed.
 */
static int erdma_res_cb_init(struct erdma_dev *dev)
{
	int i, j;

	for (i = 0; i < ERDMA_RES_CNT; i++) {
		dev->res_cb[i].next_alloc_idx = 1;
		spin_lock_init(&dev->res_cb[i].lock);
		dev->res_cb[i].bitmap =
			bitmap_zalloc(dev->res_cb[i].max_cap, GFP_KERNEL);
		if (!dev->res_cb[i].bitmap)
			goto err;
	}

	return 0;

err:
	for (j = 0; j < i; j++)
		bitmap_free(dev->res_cb[j].bitmap);

	return -ENOMEM;
}
/* Release the allocation bitmap of every resource type. */
static void erdma_res_cb_free(struct erdma_dev *dev)
{
	int type;

	for (type = 0; type < ERDMA_RES_CNT; type++)
		bitmap_free(dev->res_cb[type].bitmap);
}
/* Verb entry points exported to the RDMA core, including the iWarp CM
 * callbacks (iw_*) and per-object size declarations.
 */
static const struct ib_device_ops erdma_device_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_ERDMA,
	.uverbs_abi_ver = ERDMA_ABI_VERSION,

	.alloc_mr = erdma_ib_alloc_mr,
	.alloc_pd = erdma_alloc_pd,
	.alloc_ucontext = erdma_alloc_ucontext,
	.create_cq = erdma_create_cq,
	.create_qp = erdma_create_qp,
	.dealloc_pd = erdma_dealloc_pd,
	.dealloc_ucontext = erdma_dealloc_ucontext,
	.dereg_mr = erdma_dereg_mr,
	.destroy_cq = erdma_destroy_cq,
	.destroy_qp = erdma_destroy_qp,
	.get_dma_mr = erdma_get_dma_mr,
	.get_port_immutable = erdma_get_port_immutable,
	.iw_accept = erdma_accept,
	.iw_add_ref = erdma_qp_get_ref,
	.iw_connect = erdma_connect,
	.iw_create_listen = erdma_create_listen,
	.iw_destroy_listen = erdma_destroy_listen,
	.iw_get_qp = erdma_get_ibqp,
	.iw_reject = erdma_reject,
	.iw_rem_ref = erdma_qp_put_ref,
	.map_mr_sg = erdma_map_mr_sg,
	.mmap = erdma_mmap,
	.mmap_free = erdma_mmap_free,
	.modify_qp = erdma_modify_qp,
	.post_recv = erdma_post_recv,
	.post_send = erdma_post_send,
	.poll_cq = erdma_poll_cq,
	.query_device = erdma_query_device,
	.query_gid = erdma_query_gid,
	.query_port = erdma_query_port,
	.query_qp = erdma_query_qp,
	.req_notify_cq = erdma_req_notify_cq,
	.reg_user_mr = erdma_reg_user_mr,

	INIT_RDMA_OBJ_SIZE(ib_cq, erdma_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, erdma_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, erdma_ucontext, ibucontext),
	INIT_RDMA_OBJ_SIZE(ib_qp, erdma_qp, ibqp),
};
/*
 * Upper-half probe: query capabilities, initialize software resource
 * tracking (xarrays, bitmaps, doorbell pools), read the device MAC and
 * register the IB device.
 */
static int erdma_ib_device_add(struct pci_dev *pdev)
{
	struct erdma_dev *dev = pci_get_drvdata(pdev);
	struct ib_device *ibdev = &dev->ibdev;
	u64 mac;
	int ret;

	ret = erdma_dev_attrs_init(dev);
	if (ret)
		return ret;

	ibdev->node_type = RDMA_NODE_RNIC;
	memcpy(ibdev->node_desc, ERDMA_NODE_DESC, sizeof(ERDMA_NODE_DESC));

	/*
	 * Current model (one-to-one device association):
	 * One ERDMA device per net_device or, equivalently,
	 * per physical port.
	 */
	ibdev->phys_port_cnt = 1;
	/* Vector 0 serves CMDQ/AEQ; the rest are completion vectors. */
	ibdev->num_comp_vectors = dev->attrs.irq_num - 1;

	ib_set_device_ops(ibdev, &erdma_device_ops);

	INIT_LIST_HEAD(&dev->cep_list);

	spin_lock_init(&dev->lock);
	xa_init_flags(&dev->qp_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&dev->cq_xa, XA_FLAGS_ALLOC1);
	dev->next_alloc_cqn = 1;
	dev->next_alloc_qpn = 1;

	ret = erdma_res_cb_init(dev);
	if (ret)
		return ret;

	spin_lock_init(&dev->db_bitmap_lock);
	bitmap_zero(dev->sdb_page, ERDMA_DWQE_TYPE0_CNT);
	bitmap_zero(dev->sdb_entry, ERDMA_DWQE_TYPE1_CNT);

	atomic_set(&dev->num_ctx, 0);

	/* Permanent MAC is exposed via BAR registers, low dword first. */
	mac = erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_L_REG);
	mac |= (u64)erdma_reg_read32(dev, ERDMA_REGS_NETDEV_MAC_H_REG) << 32;

	u64_to_ether_addr(mac, dev->attrs.peer_addr);

	ret = erdma_device_register(dev);
	if (ret)
		goto err_out;

	return 0;

err_out:
	xa_destroy(&dev->qp_xa);
	xa_destroy(&dev->cq_xa);

	erdma_res_cb_free(dev);

	return ret;
}
/* Undo erdma_ib_device_add(): unhook the notifier, unregister the IB
 * device, then free software resource tracking.
 */
static void erdma_ib_device_remove(struct pci_dev *pdev)
{
	struct erdma_dev *dev = pci_get_drvdata(pdev);

	unregister_netdevice_notifier(&dev->netdev_nb);
	ib_unregister_device(&dev->ibdev);

	erdma_res_cb_free(dev);
	xa_destroy(&dev->qp_xa);
	xa_destroy(&dev->cq_xa);
}
/*
 * PCI probe entry point: bring up the low-level device first, then the
 * IB layer on top; tear the low-level device down again if the IB
 * registration fails.
 */
static int erdma_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = erdma_probe_dev(pdev);

	if (err)
		return err;

	err = erdma_ib_device_add(pdev);
	if (err)
		erdma_remove_dev(pdev);

	return err;
}
/* PCI remove entry point: IB layer first, then the low-level device. */
static void erdma_remove(struct pci_dev *pdev)
{
	erdma_ib_device_remove(pdev);
	erdma_remove_dev(pdev);
}
/* PCI driver glue binding probe/remove to the device-ID table. */
static struct pci_driver erdma_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = erdma_pci_tbl,
	.probe = erdma_probe,
	.remove = erdma_remove
};
MODULE_DEVICE_TABLE(pci, erdma_pci_tbl);
/*
 * Module init: bring up the connection-manager infrastructure, then
 * register the PCI driver; roll the CM back if registration fails.
 */
static __init int erdma_init_module(void)
{
	int rc = erdma_cm_init();

	if (rc)
		return rc;

	rc = pci_register_driver(&erdma_pci_driver);
	if (rc)
		erdma_cm_exit();

	return rc;
}
/* Module exit: unregister the PCI driver, then tear down the CM. */
static void __exit erdma_exit_module(void)
{
	pci_unregister_driver(&erdma_pci_driver);

	erdma_cm_exit();
}
module_init(erdma_init_module);
module_exit(erdma_exit_module);

View File

@ -0,0 +1,566 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2021, Alibaba Group */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_verbs.h"
/*
 * React to the underlying TCP (LLP) connection going away: move an
 * active QP toward CLOSING, complete an in-progress close to IDLE, and
 * drop the QP's reference on its connection endpoint.
 */
void erdma_qp_llp_close(struct erdma_qp *qp)
{
	struct erdma_qp_attrs qp_attrs;

	down_write(&qp->state_lock);

	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_RTS:
	case ERDMA_QP_STATE_RTR:
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_TERMINATE:
		qp_attrs.state = ERDMA_QP_STATE_CLOSING;
		erdma_modify_qp_internal(qp, &qp_attrs, ERDMA_QP_ATTR_STATE);
		break;
	case ERDMA_QP_STATE_CLOSING:
		/* Close was already in flight; the LLP teardown finishes it. */
		qp->attrs.state = ERDMA_QP_STATE_IDLE;
		break;
	default:
		break;
	}

	if (qp->cep) {
		erdma_cep_put(qp->cep);
		qp->cep = NULL;
	}

	up_write(&qp->state_lock);
}
/* iWarp CM callback: translate a QP number into its ib_qp, or NULL. */
struct ib_qp *erdma_get_ibqp(struct ib_device *ibdev, int id)
{
	struct erdma_qp *qp = find_qp_by_qpn(to_edev(ibdev), id);

	return qp ? &qp->ibqp : NULL;
}
/*
 * Transition a QP to RTS: gather the connection 4-tuple and TCP
 * sequence numbers from the CM socket and hand them to hardware via a
 * MODIFY_QP command, so the device can take over the TCP stream.
 *
 * Requires both the LLP handle and MPA attributes in @mask.
 */
static int erdma_modify_qp_state_to_rts(struct erdma_qp *qp,
					struct erdma_qp_attrs *attrs,
					enum erdma_qp_attr_mask mask)
{
	int ret;
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;
	struct tcp_sock *tp;
	struct erdma_cep *cep = qp->cep;
	struct sockaddr_storage local_addr, remote_addr;

	if (!(mask & ERDMA_QP_ATTR_LLP_HANDLE))
		return -EINVAL;

	if (!(mask & ERDMA_QP_ATTR_MPA))
		return -EINVAL;

	ret = getname_local(cep->sock, &local_addr);
	if (ret < 0)
		return ret;

	ret = getname_peer(cep->sock, &remote_addr);
	if (ret < 0)
		return ret;

	qp->attrs.state = ERDMA_QP_STATE_RTS;

	tp = tcp_sk(qp->cep->sock->sk);

	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);

	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, qp->attrs.state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_CC_MASK, qp->attrs.cc) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));

	req.cookie = be32_to_cpu(qp->cep->mpa.ext_data.cookie);
	req.dip = to_sockaddr_in(remote_addr).sin_addr.s_addr;
	req.sip = to_sockaddr_in(local_addr).sin_addr.s_addr;
	req.dport = to_sockaddr_in(remote_addr).sin_port;
	req.sport = to_sockaddr_in(local_addr).sin_port;

	req.send_nxt = tp->snd_nxt;
	/* rsvd tcp seq for mpa-rsp in server. */
	if (qp->attrs.qp_type == ERDMA_QP_PASSIVE)
		req.send_nxt += MPA_DEFAULT_HDR_LEN + qp->attrs.pd_len;
	req.recv_nxt = tp->rcv_nxt;

	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}
/* Tell HW to move the QP into a non-operational state (CLOSING, TERMINATE
 * or ERROR, per attrs->state); the software state is updated first.
 */
static int erdma_modify_qp_state_to_stop(struct erdma_qp *qp,
					 struct erdma_qp_attrs *attrs,
					 enum erdma_qp_attr_mask mask)
{
	struct erdma_dev *dev = qp->dev;
	struct erdma_cmdq_modify_qp_req req;
	qp->attrs.state = attrs->state;
	erdma_cmdq_build_reqhdr(&req.hdr, CMDQ_SUBMOD_RDMA,
				CMDQ_OPCODE_MODIFY_QP);
	req.cfg = FIELD_PREP(ERDMA_CMD_MODIFY_QP_STATE_MASK, attrs->state) |
		  FIELD_PREP(ERDMA_CMD_MODIFY_QP_QPN_MASK, QP_ID(qp));
	return erdma_post_cmd_wait(&dev->cmdq, (u64 *)&req, sizeof(req), NULL,
				   NULL);
}
/* Central QP state machine. Only ERDMA_QP_ATTR_STATE transitions are acted
 * on here; other mask bits are consumed by the helpers above.
 * NOTE(review): presumably called with qp->state_lock held (see the
 * up_write() in the caller above) — confirm at all call sites.
 */
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
			     enum erdma_qp_attr_mask mask)
{
	int drop_conn, ret = 0;
	if (!mask)
		return 0;
	if (!(mask & ERDMA_QP_ATTR_STATE))
		return 0;
	switch (qp->attrs.state) {
	case ERDMA_QP_STATE_IDLE:
	case ERDMA_QP_STATE_RTR:
		if (attrs->state == ERDMA_QP_STATE_RTS) {
			ret = erdma_modify_qp_state_to_rts(qp, attrs, mask);
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			/* Drop the connection endpoint reference before
			 * stopping the QP in HW.
			 */
			if (qp->cep) {
				erdma_cep_put(qp->cep);
				qp->cep = NULL;
			}
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
		}
		break;
	case ERDMA_QP_STATE_RTS:
		drop_conn = 0;
		if (attrs->state == ERDMA_QP_STATE_CLOSING) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_TERMINATE) {
			qp->attrs.state = ERDMA_QP_STATE_TERMINATE;
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			drop_conn = 1;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
			drop_conn = 1;
		}
		/* Any transition out of RTS tears down the iWarp CM side. */
		if (drop_conn)
			erdma_qp_cm_drop(qp);
		break;
	case ERDMA_QP_STATE_TERMINATE:
		if (attrs->state == ERDMA_QP_STATE_ERROR)
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		break;
	case ERDMA_QP_STATE_CLOSING:
		if (attrs->state == ERDMA_QP_STATE_IDLE) {
			qp->attrs.state = ERDMA_QP_STATE_IDLE;
		} else if (attrs->state == ERDMA_QP_STATE_ERROR) {
			ret = erdma_modify_qp_state_to_stop(qp, attrs, mask);
			qp->attrs.state = ERDMA_QP_STATE_ERROR;
		} else if (attrs->state != ERDMA_QP_STATE_CLOSING) {
			return -ECONNABORTED;
		}
		break;
	default:
		break;
	}
	return ret;
}
/* kref release callback: wake up the destroyer waiting on qp->safe_free. */
static void erdma_qp_safe_free(struct kref *ref)
{
	struct erdma_qp *qp = container_of(ref, struct erdma_qp, ref);
	complete(&qp->safe_free);
}
/* Drop a QP reference; the final put completes qp->safe_free. */
void erdma_qp_put(struct erdma_qp *qp)
{
	WARN_ON(kref_read(&qp->ref) < 1);
	kref_put(&qp->ref, erdma_qp_safe_free);
}
/* Take an additional reference on the QP. */
void erdma_qp_get(struct erdma_qp *qp)
{
	kref_get(&qp->ref);
}
/* Copy the SGEs' payload directly into the SQ ring starting at
 * (wqe_idx, sgl_offset), wrapping across SQEBB-sized slots as needed.
 * Returns the number of bytes copied, or -EINVAL if the total exceeds
 * ERDMA_MAX_INLINE. *length_field receives the byte count (LE32).
 */
static int fill_inline_data(struct erdma_qp *qp,
			    const struct ib_send_wr *send_wr, u16 wqe_idx,
			    u32 sgl_offset, __le32 *length_field)
{
	u32 remain_size, copy_size, data_off, bytes = 0;
	char *data;
	int i = 0;
	/* Normalize the starting position to a slot index + in-slot offset. */
	wqe_idx += (sgl_offset >> SQEBB_SHIFT);
	sgl_offset &= (SQEBB_SIZE - 1);
	data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx, qp->attrs.sq_size,
			       SQEBB_SHIFT);
	while (i < send_wr->num_sge) {
		bytes += send_wr->sg_list[i].length;
		if (bytes > (int)ERDMA_MAX_INLINE)
			return -EINVAL;
		remain_size = send_wr->sg_list[i].length;
		data_off = 0;
		while (1) {
			/* Copy at most to the end of the current SQEBB slot. */
			copy_size = min(remain_size, SQEBB_SIZE - sgl_offset);
			memcpy(data + sgl_offset,
			       (void *)(uintptr_t)send_wr->sg_list[i].addr +
				       data_off,
			       copy_size);
			remain_size -= copy_size;
			data_off += copy_size;
			sgl_offset += copy_size;
			/* Advance to the next slot when the offset wraps. */
			wqe_idx += (sgl_offset >> SQEBB_SHIFT);
			sgl_offset &= (SQEBB_SIZE - 1);
			data = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
					       qp->attrs.sq_size, SQEBB_SHIFT);
			if (!remain_size)
				break;
		}
		i++;
	}
	*length_field = cpu_to_le32(bytes);
	return bytes;
}
/* Copy the WR's SGE descriptors (not the payload) into the SQ ring starting
 * at (wqe_idx, sgl_offset), wrapping across SQEBB slots. sgl_offset must be
 * 16-byte aligned so each ib_sge lands fully inside a slot.
 * *length_field receives the total data length (LE32).
 */
static int fill_sgl(struct erdma_qp *qp, const struct ib_send_wr *send_wr,
		    u16 wqe_idx, u32 sgl_offset, __le32 *length_field)
{
	int i = 0;
	u32 bytes = 0;
	char *sgl;
	if (send_wr->num_sge > qp->dev->attrs.max_send_sge)
		return -EINVAL;
	if (sgl_offset & 0xF)
		return -EINVAL;
	while (i < send_wr->num_sge) {
		wqe_idx += (sgl_offset >> SQEBB_SHIFT);
		sgl_offset &= (SQEBB_SIZE - 1);
		sgl = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
				      qp->attrs.sq_size, SQEBB_SHIFT);
		bytes += send_wr->sg_list[i].length;
		memcpy(sgl + sgl_offset, &send_wr->sg_list[i],
		       sizeof(struct ib_sge));
		sgl_offset += sizeof(struct ib_sge);
		i++;
	}
	*length_field = cpu_to_le32(bytes);
	return 0;
}
/* Build one hardware SQE from a single ib_send_wr at producer index *pi.
 * On success *pi is advanced past all SQEBBs consumed by the WQE.
 * Returns 0 or a negative errno (unsupported opcode, bad SGL, etc.).
 * NOTE(review): caller presumably holds qp->lock and has checked for SQ
 * space — confirm against erdma_post_send().
 */
static int erdma_push_one_sqe(struct erdma_qp *qp, u16 *pi,
			      const struct ib_send_wr *send_wr)
{
	u32 wqe_size, wqebb_cnt, hw_op, flags, sgl_offset;
	u32 idx = *pi & (qp->attrs.sq_size - 1);
	enum ib_wr_opcode op = send_wr->opcode;
	struct erdma_readreq_sqe *read_sqe;
	struct erdma_reg_mr_sqe *regmr_sge;
	struct erdma_write_sqe *write_sqe;
	struct erdma_send_sqe *send_sqe;
	struct ib_rdma_wr *rdma_wr;
	struct erdma_mr *mr;
	__le32 *length_field;
	u64 wqe_hdr, *entry;
	struct ib_sge *sge;
	u32 attrs;
	int ret;
	entry = get_queue_entry(qp->kern_qp.sq_buf, idx, qp->attrs.sq_size,
				SQEBB_SHIFT);
	/* Clear the SQE header section. */
	*entry = 0;
	/* Remember wr_id so the CQ poller can report the right completion. */
	qp->kern_qp.swr_tbl[idx] = send_wr->wr_id;
	flags = send_wr->send_flags;
	/* Common header bits: completion, solicited, fence, inline, QPN. */
	wqe_hdr = FIELD_PREP(
		ERDMA_SQE_HDR_CE_MASK,
		((flags & IB_SEND_SIGNALED) || qp->kern_qp.sig_all) ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SE_MASK,
			      flags & IB_SEND_SOLICITED ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_FENCE_MASK,
			      flags & IB_SEND_FENCE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_INLINE_MASK,
			      flags & IB_SEND_INLINE ? 1 : 0);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp));
	switch (op) {
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		hw_op = ERDMA_OP_WRITE;
		if (op == IB_WR_RDMA_WRITE_WITH_IMM)
			hw_op = ERDMA_OP_WRITE_WITH_IMM;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		write_sqe = (struct erdma_write_sqe *)entry;
		write_sqe->imm_data = send_wr->ex.imm_data;
		write_sqe->sink_stag = cpu_to_le32(rdma_wr->rkey);
		write_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(rdma_wr->remote_addr));
		write_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(rdma_wr->remote_addr));
		length_field = &write_sqe->length;
		wqe_size = sizeof(struct erdma_write_sqe);
		sgl_offset = wqe_size;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		read_sqe = (struct erdma_readreq_sqe *)entry;
		/* HW read request carries exactly one local sink SGE. */
		if (unlikely(send_wr->num_sge != 1))
			return -EINVAL;
		hw_op = ERDMA_OP_READ;
		if (op == IB_WR_RDMA_READ_WITH_INV) {
			hw_op = ERDMA_OP_READ_WITH_INV;
			read_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		rdma_wr = container_of(send_wr, struct ib_rdma_wr, wr);
		read_sqe->length = cpu_to_le32(send_wr->sg_list[0].length);
		read_sqe->sink_stag = cpu_to_le32(send_wr->sg_list[0].lkey);
		read_sqe->sink_to_l =
			cpu_to_le32(lower_32_bits(send_wr->sg_list[0].addr));
		read_sqe->sink_to_h =
			cpu_to_le32(upper_32_bits(send_wr->sg_list[0].addr));
		/* The remote source SGE lives in the next SQEBB slot. */
		sge = get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
				      qp->attrs.sq_size, SQEBB_SHIFT);
		sge->addr = rdma_wr->remote_addr;
		sge->lkey = rdma_wr->rkey;
		sge->length = send_wr->sg_list[0].length;
		wqe_size = sizeof(struct erdma_readreq_sqe) +
			   send_wr->num_sge * sizeof(struct ib_sge);
		goto out;
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_SEND_WITH_INV:
		send_sqe = (struct erdma_send_sqe *)entry;
		hw_op = ERDMA_OP_SEND;
		if (op == IB_WR_SEND_WITH_IMM) {
			hw_op = ERDMA_OP_SEND_WITH_IMM;
			send_sqe->imm_data = send_wr->ex.imm_data;
		} else if (op == IB_WR_SEND_WITH_INV) {
			hw_op = ERDMA_OP_SEND_WITH_INV;
			send_sqe->invalid_stag =
				cpu_to_le32(send_wr->ex.invalidate_rkey);
		}
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, hw_op);
		length_field = &send_sqe->length;
		wqe_size = sizeof(struct erdma_send_sqe);
		sgl_offset = wqe_size;
		break;
	case IB_WR_REG_MR:
		wqe_hdr |=
			FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK, ERDMA_OP_REG_MR);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		mr = to_emr(reg_wr(send_wr)->mr);
		mr->access = ERDMA_MR_ACC_LR |
			     to_erdma_access_flags(reg_wr(send_wr)->access);
		regmr_sge->addr = cpu_to_le64(mr->ibmr.iova);
		regmr_sge->length = cpu_to_le32(mr->ibmr.length);
		regmr_sge->stag = cpu_to_le32(reg_wr(send_wr)->key);
		attrs = FIELD_PREP(ERDMA_SQE_MR_MODE_MASK, 0) |
			FIELD_PREP(ERDMA_SQE_MR_ACCESS_MASK, mr->access) |
			FIELD_PREP(ERDMA_SQE_MR_MTT_CNT_MASK,
				   mr->mem.mtt_nents);
		if (mr->mem.mtt_nents < ERDMA_MAX_INLINE_MTT_ENTRIES) {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 0);
			/* Copy SGLs to SQE content to accelerate */
			memcpy(get_queue_entry(qp->kern_qp.sq_buf, idx + 1,
					       qp->attrs.sq_size, SQEBB_SHIFT),
			       mr->mem.mtt_buf, MTT_SIZE(mr->mem.mtt_nents));
			wqe_size = sizeof(struct erdma_reg_mr_sqe) +
				   MTT_SIZE(mr->mem.mtt_nents);
		} else {
			attrs |= FIELD_PREP(ERDMA_SQE_MR_MTT_TYPE_MASK, 1);
			wqe_size = sizeof(struct erdma_reg_mr_sqe);
		}
		regmr_sge->attrs = cpu_to_le32(attrs);
		goto out;
	case IB_WR_LOCAL_INV:
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_OPCODE_MASK,
				      ERDMA_OP_LOCAL_INV);
		regmr_sge = (struct erdma_reg_mr_sqe *)entry;
		regmr_sge->stag = cpu_to_le32(send_wr->ex.invalidate_rkey);
		wqe_size = sizeof(struct erdma_reg_mr_sqe);
		goto out;
	default:
		return -EOPNOTSUPP;
	}
	/* WRITE/SEND opcodes: append either inline payload or SGE list. */
	if (flags & IB_SEND_INLINE) {
		ret = fill_inline_data(qp, send_wr, idx, sgl_offset,
				       length_field);
		if (ret < 0)
			return -EINVAL;
		wqe_size += ret;
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK, ret);
	} else {
		ret = fill_sgl(qp, send_wr, idx, sgl_offset, length_field);
		if (ret)
			return -EINVAL;
		wqe_size += send_wr->num_sge * sizeof(struct ib_sge);
		wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_SGL_LEN_MASK,
				      send_wr->num_sge);
	}
out:
	wqebb_cnt = SQEBB_COUNT(wqe_size);
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_CNT_MASK, wqebb_cnt - 1);
	*pi += wqebb_cnt;
	wqe_hdr |= FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, *pi);
	/* Publish the header last, after the WQE body is fully written. */
	*entry = wqe_hdr;
	return 0;
}
/* Ring the SQ doorbell for producer index @pi: mirror the doorbell value
 * into the in-memory db_info record, then write it to the HW register.
 */
static void kick_sq_db(struct erdma_qp *qp, u16 pi)
{
	u64 db_data = FIELD_PREP(ERDMA_SQE_HDR_QPN_MASK, QP_ID(qp)) |
		      FIELD_PREP(ERDMA_SQE_HDR_WQEBB_INDEX_MASK, pi);
	*(u64 *)qp->kern_qp.sq_db_info = db_data;
	writeq(db_data, qp->kern_qp.hw_sq_db);
}
/* Post a chain of send work requests onto the kernel SQ.
 *
 * @ibqp: the QP to post to.
 * @send_wr: head of the WR chain; must be non-NULL.
 * @bad_send_wr: on failure, set to the first WR that was NOT posted.
 *
 * Returns 0 on success, -ENOMEM when the SQ is full, or the error from
 * building an SQE. WRs preceding *bad_send_wr have already been handed
 * to hardware (the doorbell is rung per-WR).
 *
 * Fix: on the SQ-full path the original code set *bad_send_wr to the chain
 * head (send_wr) instead of the failing WR (wr), so callers could not tell
 * which requests had already been posted. The ib_verbs contract is that
 * bad_wr points at the first unposted WR.
 */
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
		    const struct ib_send_wr **bad_send_wr)
{
	struct erdma_qp *qp = to_eqp(ibqp);
	int ret = 0;
	const struct ib_send_wr *wr = send_wr;
	unsigned long flags;
	u16 sq_pi;
	if (!send_wr)
		return -EINVAL;
	spin_lock_irqsave(&qp->lock, flags);
	sq_pi = qp->kern_qp.sq_pi;
	while (wr) {
		/* Ring-distance check: stop when the SQ has no free WQEBBs. */
		if ((u16)(sq_pi - qp->kern_qp.sq_ci) >= qp->attrs.sq_size) {
			ret = -ENOMEM;
			*bad_send_wr = wr;
			break;
		}
		ret = erdma_push_one_sqe(qp, &sq_pi, wr);
		if (ret) {
			*bad_send_wr = wr;
			break;
		}
		/* Commit the new producer index and ring the doorbell so the
		 * WQEs built so far reach HW even if a later WR fails.
		 */
		qp->kern_qp.sq_pi = sq_pi;
		kick_sq_db(qp, sq_pi);
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->lock, flags);
	return ret;
}
/* Build and submit one RQE for a single recv WR. HW supports at most one
 * receive SGE per RQE; num_sge == 0 posts a zero-length receive.
 * The RQE doubles as the doorbell payload.
 */
static int erdma_post_recv_one(struct erdma_qp *qp,
			       const struct ib_recv_wr *recv_wr)
{
	struct erdma_rqe *rqe =
		get_queue_entry(qp->kern_qp.rq_buf, qp->kern_qp.rq_pi,
				qp->attrs.rq_size, RQE_SHIFT);
	rqe->qe_idx = cpu_to_le16(qp->kern_qp.rq_pi + 1);
	rqe->qpn = cpu_to_le32(QP_ID(qp));
	if (recv_wr->num_sge == 0) {
		rqe->length = 0;
	} else if (recv_wr->num_sge == 1) {
		rqe->stag = cpu_to_le32(recv_wr->sg_list[0].lkey);
		rqe->to = cpu_to_le64(recv_wr->sg_list[0].addr);
		rqe->length = cpu_to_le32(recv_wr->sg_list[0].length);
	} else {
		return -EINVAL;
	}
	/* Mirror the RQE head into db_info, then ring the HW doorbell. */
	*(u64 *)qp->kern_qp.rq_db_info = *(u64 *)rqe;
	writeq(*(u64 *)rqe, qp->kern_qp.hw_rq_db);
	/* Record wr_id for completion reporting, then advance the producer. */
	qp->kern_qp.rwr_tbl[qp->kern_qp.rq_pi & (qp->attrs.rq_size - 1)] =
		recv_wr->wr_id;
	qp->kern_qp.rq_pi++;
	return 0;
}
/* Post a chain of receive work requests onto the kernel RQ.
 *
 * @ibqp: the QP to post to.
 * @recv_wr: head of the WR chain.
 * @bad_recv_wr: on failure, set to the first WR that was not posted.
 *
 * Returns 0 on success or a negative errno from erdma_post_recv_one().
 *
 * Fix: `ret` was uninitialized; a NULL/empty WR chain skipped the loop and
 * returned an indeterminate value (undefined behavior). Initialize to 0 so
 * an empty chain is a successful no-op.
 */
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
		    const struct ib_recv_wr **bad_recv_wr)
{
	const struct ib_recv_wr *wr = recv_wr;
	struct erdma_qp *qp = to_eqp(ibqp);
	unsigned long flags;
	int ret = 0;
	spin_lock_irqsave(&qp->lock, flags);
	while (wr) {
		ret = erdma_post_recv_one(qp, wr);
		if (ret) {
			*bad_recv_wr = wr;
			break;
		}
		wr = wr->next;
	}
	spin_unlock_irqrestore(&qp->lock, flags);
	return ret;
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,342 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Authors: Cheng Xu <chengyou@linux.alibaba.com> */
/* Kai Shen <kaishen@linux.alibaba.com> */
/* Copyright (c) 2020-2022, Alibaba Group. */
#ifndef __ERDMA_VERBS_H__
#define __ERDMA_VERBS_H__
#include <linux/errno.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include "erdma.h"
#include "erdma_cm.h"
#include "erdma_hw.h"
/* RDMA Capability. */
#define ERDMA_MAX_PD (128 * 1024)
#define ERDMA_MAX_SEND_WR 4096
#define ERDMA_MAX_ORD 128
#define ERDMA_MAX_IRD 128
#define ERDMA_MAX_SGE_RD 1
#define ERDMA_MAX_CONTEXT (128 * 1024)
#define ERDMA_MAX_SEND_SGE 6
#define ERDMA_MAX_RECV_SGE 1
#define ERDMA_MAX_INLINE (sizeof(struct erdma_sge) * (ERDMA_MAX_SEND_SGE))
#define ERDMA_MAX_FRMR_PA 512
enum {
ERDMA_MMAP_IO_NC = 0, /* no cache */
};
struct erdma_user_mmap_entry {
struct rdma_user_mmap_entry rdma_entry;
u64 address;
u8 mmap_flag;
};
struct erdma_ucontext {
struct ib_ucontext ibucontext;
u32 sdb_type;
u32 sdb_idx;
u32 sdb_page_idx;
u32 sdb_page_off;
u64 sdb;
u64 rdb;
u64 cdb;
struct rdma_user_mmap_entry *sq_db_mmap_entry;
struct rdma_user_mmap_entry *rq_db_mmap_entry;
struct rdma_user_mmap_entry *cq_db_mmap_entry;
/* doorbell records */
struct list_head dbrecords_page_list;
struct mutex dbrecords_page_mutex;
};
struct erdma_pd {
struct ib_pd ibpd;
u32 pdn;
};
/*
* MemoryRegion definition.
*/
#define ERDMA_MAX_INLINE_MTT_ENTRIES 4
#define MTT_SIZE(mtt_cnt) (mtt_cnt << 3) /* per mtt takes 8 Bytes. */
#define ERDMA_MR_MAX_MTT_CNT 524288
#define ERDMA_MTT_ENTRY_SIZE 8
#define ERDMA_MR_TYPE_NORMAL 0
#define ERDMA_MR_TYPE_FRMR 1
#define ERDMA_MR_TYPE_DMA 2
#define ERDMA_MR_INLINE_MTT 0
#define ERDMA_MR_INDIRECT_MTT 1
#define ERDMA_MR_ACC_LR BIT(0)
#define ERDMA_MR_ACC_LW BIT(1)
#define ERDMA_MR_ACC_RR BIT(2)
#define ERDMA_MR_ACC_RW BIT(3)
/* Translate IB access flags into the device's MR access bits. Local read
 * is implied by the hardware and is not mapped here.
 */
static inline u8 to_erdma_access_flags(int access)
{
	u8 hw_flags = 0;

	if (access & IB_ACCESS_REMOTE_READ)
		hw_flags |= ERDMA_MR_ACC_RR;
	if (access & IB_ACCESS_LOCAL_WRITE)
		hw_flags |= ERDMA_MR_ACC_LW;
	if (access & IB_ACCESS_REMOTE_WRITE)
		hw_flags |= ERDMA_MR_ACC_RW;

	return hw_flags;
}
struct erdma_mem {
struct ib_umem *umem;
void *mtt_buf;
u32 mtt_type;
u32 page_size;
u32 page_offset;
u32 page_cnt;
u32 mtt_nents;
u64 va;
u64 len;
u64 mtt_entry[ERDMA_MAX_INLINE_MTT_ENTRIES];
};
struct erdma_mr {
struct ib_mr ibmr;
struct erdma_mem mem;
u8 type;
u8 access;
u8 valid;
};
struct erdma_user_dbrecords_page {
struct list_head list;
struct ib_umem *umem;
u64 va;
int refcnt;
};
struct erdma_uqp {
struct erdma_mem sq_mtt;
struct erdma_mem rq_mtt;
dma_addr_t sq_db_info_dma_addr;
dma_addr_t rq_db_info_dma_addr;
struct erdma_user_dbrecords_page *user_dbr_page;
u32 rq_offset;
};
struct erdma_kqp {
u16 sq_pi;
u16 sq_ci;
u16 rq_pi;
u16 rq_ci;
u64 *swr_tbl;
u64 *rwr_tbl;
void __iomem *hw_sq_db;
void __iomem *hw_rq_db;
void *sq_buf;
dma_addr_t sq_buf_dma_addr;
void *rq_buf;
dma_addr_t rq_buf_dma_addr;
void *sq_db_info;
void *rq_db_info;
u8 sig_all;
};
enum erdma_qp_state {
ERDMA_QP_STATE_IDLE = 0,
ERDMA_QP_STATE_RTR = 1,
ERDMA_QP_STATE_RTS = 2,
ERDMA_QP_STATE_CLOSING = 3,
ERDMA_QP_STATE_TERMINATE = 4,
ERDMA_QP_STATE_ERROR = 5,
ERDMA_QP_STATE_UNDEF = 7,
ERDMA_QP_STATE_COUNT = 8
};
enum erdma_qp_attr_mask {
ERDMA_QP_ATTR_STATE = (1 << 0),
ERDMA_QP_ATTR_LLP_HANDLE = (1 << 2),
ERDMA_QP_ATTR_ORD = (1 << 3),
ERDMA_QP_ATTR_IRD = (1 << 4),
ERDMA_QP_ATTR_SQ_SIZE = (1 << 5),
ERDMA_QP_ATTR_RQ_SIZE = (1 << 6),
ERDMA_QP_ATTR_MPA = (1 << 7)
};
struct erdma_qp_attrs {
enum erdma_qp_state state;
enum erdma_cc_alg cc; /* Congestion control algorithm */
u32 sq_size;
u32 rq_size;
u32 orq_size;
u32 irq_size;
u32 max_send_sge;
u32 max_recv_sge;
u32 cookie;
#define ERDMA_QP_ACTIVE 0
#define ERDMA_QP_PASSIVE 1
u8 qp_type;
u8 pd_len;
};
struct erdma_qp {
struct ib_qp ibqp;
struct kref ref;
struct completion safe_free;
struct erdma_dev *dev;
struct erdma_cep *cep;
struct rw_semaphore state_lock;
union {
struct erdma_kqp kern_qp;
struct erdma_uqp user_qp;
};
struct erdma_cq *scq;
struct erdma_cq *rcq;
struct erdma_qp_attrs attrs;
spinlock_t lock;
};
struct erdma_kcq_info {
void *qbuf;
dma_addr_t qbuf_dma_addr;
u32 ci;
u32 cmdsn;
u32 notify_cnt;
spinlock_t lock;
u8 __iomem *db;
u64 *db_record;
};
struct erdma_ucq_info {
struct erdma_mem qbuf_mtt;
struct erdma_user_dbrecords_page *user_dbr_page;
dma_addr_t db_info_dma_addr;
};
struct erdma_cq {
struct ib_cq ibcq;
u32 cqn;
u32 depth;
u32 assoc_eqn;
union {
struct erdma_kcq_info kern_cq;
struct erdma_ucq_info user_cq;
};
};
#define QP_ID(qp) ((qp)->ibqp.qp_num)
/* Look up a QP by number in the device's xarray; NULL if absent. */
static inline struct erdma_qp *find_qp_by_qpn(struct erdma_dev *dev, int id)
{
	return (struct erdma_qp *)xa_load(&dev->qp_xa, id);
}
/* Look up a CQ by number in the device's xarray; NULL if absent. */
static inline struct erdma_cq *find_cq_by_cqn(struct erdma_dev *dev, int id)
{
	return (struct erdma_cq *)xa_load(&dev->cq_xa, id);
}
void erdma_qp_get(struct erdma_qp *qp);
void erdma_qp_put(struct erdma_qp *qp);
int erdma_modify_qp_internal(struct erdma_qp *qp, struct erdma_qp_attrs *attrs,
enum erdma_qp_attr_mask mask);
void erdma_qp_llp_close(struct erdma_qp *qp);
void erdma_qp_cm_drop(struct erdma_qp *qp);
/* container_of() converters from the generic ib_* objects embedded in the
 * erdma structures back to their enclosing driver objects.
 */
static inline struct erdma_ucontext *to_ectx(struct ib_ucontext *ibctx)
{
	return container_of(ibctx, struct erdma_ucontext, ibucontext);
}
static inline struct erdma_pd *to_epd(struct ib_pd *pd)
{
	return container_of(pd, struct erdma_pd, ibpd);
}
static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct erdma_mr, ibmr);
}
static inline struct erdma_qp *to_eqp(struct ib_qp *qp)
{
	return container_of(qp, struct erdma_qp, ibqp);
}
static inline struct erdma_cq *to_ecq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct erdma_cq, ibcq);
}
static inline struct erdma_user_mmap_entry *
to_emmap(struct rdma_user_mmap_entry *ibmmap)
{
	return container_of(ibmmap, struct erdma_user_mmap_entry, rdma_entry);
}
int erdma_alloc_ucontext(struct ib_ucontext *ibctx, struct ib_udata *data);
void erdma_dealloc_ucontext(struct ib_ucontext *ibctx);
int erdma_query_device(struct ib_device *dev, struct ib_device_attr *attr,
struct ib_udata *data);
int erdma_get_port_immutable(struct ib_device *dev, u32 port,
struct ib_port_immutable *ib_port_immutable);
int erdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *data);
int erdma_query_port(struct ib_device *dev, u32 port,
struct ib_port_attr *attr);
int erdma_query_gid(struct ib_device *dev, u32 port, int idx,
union ib_gid *gid);
int erdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *data);
int erdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attr,
struct ib_udata *data);
int erdma_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_qp_init_attr *init_attr);
int erdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int mask,
struct ib_udata *data);
int erdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
int erdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
struct ib_mr *erdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 virt, int access, struct ib_udata *udata);
struct ib_mr *erdma_get_dma_mr(struct ib_pd *ibpd, int rights);
int erdma_dereg_mr(struct ib_mr *ibmr, struct ib_udata *data);
int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma);
void erdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
void erdma_qp_get_ref(struct ib_qp *ibqp);
void erdma_qp_put_ref(struct ib_qp *ibqp);
struct ib_qp *erdma_get_ibqp(struct ib_device *dev, int id);
int erdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *send_wr,
const struct ib_send_wr **bad_send_wr);
int erdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *recv_wr,
const struct ib_recv_wr **bad_recv_wr);
int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
struct ib_mr *erdma_ib_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
u32 max_num_sg);
int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
unsigned int *sg_offset);
void erdma_port_event(struct erdma_dev *dev, enum ib_event_type reason);
#endif

View File

@ -0,0 +1,758 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
#include <rdma/ib_umem_odp.h>
#include "mlx5_ib.h"
#include "umr.h"
#include "wr.h"
/*
* We can't use an array for xlt_emergency_page because dma_map_single doesn't
* work on kernel modules memory
*/
void *xlt_emergency_page;
static DEFINE_MUTEX(xlt_emergency_page_mutex);
/* Mkey mask bits that must be updatable when (re)enabling an MR:
 * the key variant and the free flag.
 */
static __be64 get_umr_enable_mr_mask(void)
{
	return cpu_to_be64(MLX5_MKEY_MASK_KEY | MLX5_MKEY_MASK_FREE);
}
/* Mkey mask for disabling an MR: only the free flag changes. */
static __be64 get_umr_disable_mr_mask(void)
{
	return cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
/* Mkey mask for updating the address translation: length, page size and
 * start address.
 */
static __be64 get_umr_update_translation_mask(void)
{
	return cpu_to_be64(MLX5_MKEY_MASK_LEN |
			   MLX5_MKEY_MASK_PAGE_SIZE |
			   MLX5_MKEY_MASK_START_ADDR);
}
/* Mkey mask for updating access rights. Atomic and relaxed-ordering bits
 * are included only when the device firmware advertises the capability.
 */
static __be64 get_umr_update_access_mask(struct mlx5_ib_dev *dev)
{
	u64 result;
	result = MLX5_MKEY_MASK_LR |
		 MLX5_MKEY_MASK_LW |
		 MLX5_MKEY_MASK_RR |
		 MLX5_MKEY_MASK_RW;
	if (MLX5_CAP_GEN(dev->mdev, atomic))
		result |= MLX5_MKEY_MASK_A;
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE;
	if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		result |= MLX5_MKEY_MASK_RELAXED_ORDERING_READ;
	return cpu_to_be64(result);
}
/* Mkey mask for re-parenting an MR to a different PD. */
static __be64 get_umr_update_pd_mask(void)
{
	return cpu_to_be64(MLX5_MKEY_MASK_PD);
}
/* Reject a UMR mkey mask that requests modifications the firmware has
 * explicitly disabled for UMR operations. Returns 0 or -EPERM.
 */
static int umr_check_mkey_mask(struct mlx5_ib_dev *dev, u64 mask)
{
	if (mask & MLX5_MKEY_MASK_PAGE_SIZE &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return -EPERM;
	if (mask & MLX5_MKEY_MASK_A &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return -EPERM;
	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return -EPERM;
	if (mask & MLX5_MKEY_MASK_RELAXED_ORDERING_READ &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return -EPERM;
	return 0;
}
enum {
MAX_UMR_WR = 128,
};
/* Drive the UMR QP from RESET through INIT -> RTR -> RTS. Returns 0 or the
 * first ib_modify_qp() error encountered.
 */
static int mlx5r_umr_qp_rst2rts(struct mlx5_ib_dev *dev, struct ib_qp *qp)
{
	struct ib_qp_attr attr = {};
	int ret;
	attr.qp_state = IB_QPS_INIT;
	attr.port_num = 1;
	ret = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		return ret;
	}
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		return ret;
	}
	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		return ret;
	}
	return 0;
}
/* Allocate the private PD, CQ and QP used to issue UMR work requests, bring
 * the QP to RTS, and initialize the submission semaphore/lock. Resources are
 * unwound in reverse order on failure (goto cleanup chain).
 */
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	int ret;
	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		return PTR_ERR(pd);
	}
	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto destroy_pd;
	}
	init_attr.send_cq = cq;
	init_attr.recv_cq = cq;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_send_wr = MAX_UMR_WR;
	init_attr.cap.max_send_sge = 1;
	init_attr.qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr.port_num = 1;
	qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto destroy_cq;
	}
	ret = mlx5r_umr_qp_rst2rts(dev, qp);
	if (ret)
		goto destroy_qp;
	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;
	/* The semaphore bounds concurrent outstanding UMR WRs. */
	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	mutex_init(&dev->umrc.lock);
	return 0;
destroy_qp:
	ib_destroy_qp(qp);
destroy_cq:
	ib_free_cq(cq);
destroy_pd:
	ib_dealloc_pd(pd);
	return ret;
}
/* Tear down the UMR QP, CQ and PD created by mlx5r_umr_resource_init(). */
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
	ib_destroy_qp(dev->umrc.qp);
	ib_free_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}
/* Attempt to recover the UMR QP after a fatal completion: reset it and walk
 * it back to RTS. Sets umrc->state to ACTIVE on success or ERR on failure.
 * NOTE(review): presumably called with umrc->lock held — confirm at call
 * sites (see mlx5r_umr_post_send_wait()).
 */
static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_qp_attr attr;
	int err;
	/* Only qp_state is read for the IB_QP_STATE-only mask below. */
	attr.qp_state = IB_QPS_RESET;
	err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
	if (err) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto err;
	}
	err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
	if (err)
		goto err;
	umrc->state = MLX5_UMR_STATE_ACTIVE;
	return 0;
err:
	umrc->state = MLX5_UMR_STATE_ERR;
	return err;
}
/* Build and post a single UMR WQE on the UMR QP. When @with_data is false
 * the trailing data segment is omitted from the copied WQE. @cqe is stashed
 * as the wr_id so the completion handler can find its context.
 */
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
			       struct mlx5r_umr_wqe *wqe, bool with_data)
{
	unsigned int wqe_size =
		with_data ? sizeof(struct mlx5r_umr_wqe) :
			    sizeof(struct mlx5r_umr_wqe) -
				    sizeof(struct mlx5_wqe_data_seg);
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_ctrl_seg *ctrl;
	union {
		struct ib_cqe *ib_cqe;
		u64 wr_id;
	} id;
	void *cur_edge, *seg;
	unsigned long flags;
	unsigned int idx;
	int size, err;
	/* Refuse to touch the SQ when the device is in internal error. */
	if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR))
		return -EIO;
	spin_lock_irqsave(&qp->sq.lock, flags);
	err = mlx5r_begin_wqe(qp, &seg, &ctrl, &idx, &size, &cur_edge, 0,
			      cpu_to_be32(mkey), false, false);
	if (WARN_ON(err))
		goto out;
	qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
	mlx5r_memcpy_send_wqe(&qp->sq, &cur_edge, &seg, &size, wqe, wqe_size);
	id.ib_cqe = cqe;
	mlx5r_finish_wqe(qp, ctrl, seg, size, cur_edge, idx, id.wr_id, 0,
			 MLX5_FENCE_MODE_INITIATOR_SMALL, MLX5_OPCODE_UMR);
	mlx5r_ring_db(qp, 1, ctrl);
out:
	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
/* UMR completion handler: record the WC status and wake the waiter. */
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
	context->status = wc->status;
	complete(&context->done);
}
/* Prepare a per-request UMR context for synchronous wait. */
static inline void mlx5r_umr_init_context(struct mlx5r_umr_context *context)
{
	context->cqe.done = mlx5r_umr_done;
	init_completion(&context->done);
}
/* Synchronously execute one UMR WQE: post it and wait for its completion.
 * Flushed WQEs (IB_WC_WR_FLUSH_ERR) are resubmitted; while another thread
 * holds the QP in RECOVER state we back off and retry. Any other failure
 * triggers a QP recovery attempt and returns -EFAULT.
 */
static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
				    struct mlx5r_umr_wqe *wqe, bool with_data)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5r_umr_context umr_context;
	int err;
	err = umr_check_mkey_mask(dev, be64_to_cpu(wqe->ctrl_seg.mkey_mask));
	if (WARN_ON(err))
		return err;
	mlx5r_umr_init_context(&umr_context);
	/* Bound the number of in-flight UMR WRs (see sema_init above). */
	down(&umrc->sem);
	while (true) {
		mutex_lock(&umrc->lock);
		if (umrc->state == MLX5_UMR_STATE_ERR) {
			mutex_unlock(&umrc->lock);
			err = -EFAULT;
			break;
		}
		if (umrc->state == MLX5_UMR_STATE_RECOVER) {
			/* Another thread is recovering the QP; retry later. */
			mutex_unlock(&umrc->lock);
			usleep_range(3000, 5000);
			continue;
		}
		err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context.cqe, wqe,
					  with_data);
		mutex_unlock(&umrc->lock);
		if (err) {
			mlx5_ib_warn(dev, "UMR post send failed, err %d\n",
				     err);
			break;
		}
		wait_for_completion(&umr_context.done);
		if (umr_context.status == IB_WC_SUCCESS)
			break;
		/* Flushed by a reset: resubmit once the QP is back to RTS. */
		if (umr_context.status == IB_WC_WR_FLUSH_ERR)
			continue;
		WARN_ON_ONCE(1);
		mlx5_ib_warn(dev,
			"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs\n",
			umr_context.status);
		mutex_lock(&umrc->lock);
		err = mlx5r_umr_recover(dev);
		mutex_unlock(&umrc->lock);
		if (err)
			mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
				     err);
		err = -EFAULT;
		break;
	}
	up(&umrc->sem);
	return err;
}
/**
* mlx5r_umr_revoke_mr - Fence all DMA on the MR
* @mr: The MR to fence
*
* Upon return the NIC will not be doing any DMA to the pages under the MR,
* and any DMA in progress will be completed. Failure of this function
* indicates the HW has failed catastrophically.
*/
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};
	/* In internal-error state the HW is already fenced; nothing to do. */
	if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;
	/* Mark the mkey free and re-parent it to the UMR PD. */
	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
	MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));
	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}
/* Fill the mkey segment's access bits from IB access flags. Local read is
 * always granted; relaxed ordering maps to both the read and write bits.
 */
static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
				       struct mlx5_mkey_seg *seg,
				       unsigned int access_flags)
{
	MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, seg, lr, 1);
	MLX5_SET(mkc, seg, relaxed_ordering_write,
		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
	MLX5_SET(mkc, seg, relaxed_ordering_read,
		 !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
}
/* Re-register an MR's PD and access rights in place via UMR. The mkey must
 * not be free (MLX5_UMR_CHECK_FREE). mr->access_flags is updated only after
 * the HW operation succeeds.
 */
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
			      int access_flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct mlx5r_umr_wqe wqe = {};
	int err;
	wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
	mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
		 mlx5_mkey_variant(mr->mmkey.key));
	err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
	if (err)
		return err;
	mr->access_flags = access_flags;
	return 0;
}
#define MLX5_MAX_UMR_CHUNK \
((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
/*
* Allocate a temporary buffer to hold the per-page information to transfer to
* HW. For efficiency this should be as large as it can be, but buffer
* allocation failure is not allowed, so try smaller sizes.
*/
/* Allocate a temporary XLT buffer, shrinking the request on failure:
 * high-order pages -> spare chunk -> single page -> the shared emergency
 * page (returned with xlt_emergency_page_mutex held; see the matching
 * unlock in mlx5r_umr_free_xlt()). Never returns NULL. *nents is updated
 * to the entry count actually provided.
 */
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
	const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
	size_t size;
	void *res = NULL;
	static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
	/*
	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
	 * allocation can't trigger any kind of reclaim.
	 */
	might_sleep();
	gfp_mask |= __GFP_ZERO | __GFP_NORETRY;
	/*
	 * If the system already has a suitable high order page then just use
	 * that, but don't try hard to create one. This max is about 1M, so a
	 * free x86 huge page will satisfy it.
	 */
	size = min_t(size_t, ent_size * ALIGN(*nents, xlt_chunk_align),
		     MLX5_MAX_UMR_CHUNK);
	*nents = size / ent_size;
	res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
				       get_order(size));
	if (res)
		return res;
	if (size > MLX5_SPARE_UMR_CHUNK) {
		size = MLX5_SPARE_UMR_CHUNK;
		*nents = size / ent_size;
		res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
					       get_order(size));
		if (res)
			return res;
	}
	*nents = PAGE_SIZE / ent_size;
	res = (void *)__get_free_page(gfp_mask);
	if (res)
		return res;
	/* Last resort: the preallocated emergency page, serialized by mutex. */
	mutex_lock(&xlt_emergency_page_mutex);
	memset(xlt_emergency_page, 0, PAGE_SIZE);
	return xlt_emergency_page;
}
/* Release an XLT buffer from mlx5r_umr_alloc_xlt(); the emergency page is
 * not freed, only its serializing mutex is dropped.
 */
static void mlx5r_umr_free_xlt(void *xlt, size_t length)
{
	if (xlt == xlt_emergency_page) {
		mutex_unlock(&xlt_emergency_page_mutex);
		return;
	}
	free_pages((unsigned long)xlt, get_order(length));
}
/* DMA-unmap an XLT buffer and release it. */
static void mlx5r_umr_unmap_free_xlt(struct mlx5_ib_dev *dev, void *xlt,
				     struct ib_sge *sg)
{
	struct device *ddev = &dev->mdev->pdev->dev;
	dma_unmap_single(ddev, sg->addr, sg->length, DMA_TO_DEVICE);
	mlx5r_umr_free_xlt(xlt, sg->length);
}
/*
* Create an XLT buffer ready for submission.
*/
/* Allocate an XLT buffer and DMA-map it, filling @sg (addr/length/lkey) for
 * the UMR data segment. Returns the CPU pointer, or NULL on mapping failure.
 */
static void *mlx5r_umr_create_xlt(struct mlx5_ib_dev *dev, struct ib_sge *sg,
				  size_t nents, size_t ent_size,
				  unsigned int flags)
{
	struct device *ddev = &dev->mdev->pdev->dev;
	dma_addr_t dma;
	void *xlt;
	xlt = mlx5r_umr_alloc_xlt(&nents, ent_size,
				  flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC :
								   GFP_KERNEL);
	sg->length = nents * ent_size;
	dma = dma_map_single(ddev, xlt, sg->length, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		mlx5r_umr_free_xlt(xlt, sg->length);
		return NULL;
	}
	sg->addr = dma;
	sg->lkey = dev->umrc.pd->local_dma_lkey;
	return xlt;
}
static void
mlx5r_umr_set_update_xlt_ctrl_seg(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
unsigned int flags, struct ib_sge *sg)
{
if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
/* fail if free */
ctrl_seg->flags = MLX5_UMR_CHECK_FREE;
else
/* fail if not free */
ctrl_seg->flags = MLX5_UMR_CHECK_NOT_FREE;
ctrl_seg->xlt_octowords =
cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
}
/*
 * Fill the mkey context segment of an XLT-update UMR WQE from the MR's
 * current attributes: access flags, PD, IOVA, length and page size.
 */
static void mlx5r_umr_set_update_xlt_mkey_seg(struct mlx5_ib_dev *dev,
					      struct mlx5_mkey_seg *mkey_seg,
					      struct mlx5_ib_mr *mr,
					      unsigned int page_shift)
{
	mlx5r_umr_set_access_flags(dev, mkey_seg, mr->access_flags);
	MLX5_SET(mkc, mkey_seg, pd, to_mpd(mr->ibmr.pd)->pdn);
	MLX5_SET64(mkc, mkey_seg, start_addr, mr->ibmr.iova);
	MLX5_SET64(mkc, mkey_seg, len, mr->ibmr.length);
	MLX5_SET(mkc, mkey_seg, log_page_size, page_shift);
	/* 0xffffff: presumably "not bound to a specific QP" — TODO confirm */
	MLX5_SET(mkc, mkey_seg, qpn, 0xffffff);
	/* Low byte of the mkey carries the variant (key tag). */
	MLX5_SET(mkc, mkey_seg, mkey_7_0, mlx5_mkey_variant(mr->mmkey.key));
}
/* Mirror the ib_sge into the wire-format (big-endian) data segment. */
static void
mlx5r_umr_set_update_xlt_data_seg(struct mlx5_wqe_data_seg *data_seg,
				  struct ib_sge *sg)
{
	data_seg->addr = cpu_to_be64(sg->addr);
	data_seg->lkey = cpu_to_be32(sg->lkey);
	data_seg->byte_count = cpu_to_be32(sg->length);
}
/* Program the XLT octoword offset for the next chunk of a multi-WQE update. */
static void mlx5r_umr_update_offset(struct mlx5_wqe_umr_ctrl_seg *ctrl_seg,
				    u64 offset)
{
	u64 octos = mlx5r_umr_get_xlt_octo(offset);

	/* The 48-bit octoword offset is split over two control-seg fields. */
	ctrl_seg->xlt_offset = cpu_to_be16(octos & 0xffff);
	ctrl_seg->xlt_offset_47_16 = cpu_to_be32(octos >> 16);
	ctrl_seg->flags |= MLX5_UMR_TRANSLATION_OFFSET_EN;
}
/*
 * Finalise the WQE for the last chunk of an XLT update: select which mkey
 * fields HW must update based on @flags and set the tail chunk's length.
 */
static void mlx5r_umr_final_update_xlt(struct mlx5_ib_dev *dev,
				       struct mlx5r_umr_wqe *wqe,
				       struct mlx5_ib_mr *mr, struct ib_sge *sg,
				       unsigned int flags)
{
	if (flags & MLX5_IB_UPD_XLT_ENABLE)
		wqe->ctrl_seg.mkey_mask |= get_umr_enable_mr_mask();

	if (flags & (MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_PD |
		     MLX5_IB_UPD_XLT_ACCESS)) {
		wqe->ctrl_seg.mkey_mask |= get_umr_update_access_mask(dev);
		wqe->ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
	}

	if (flags & (MLX5_IB_UPD_XLT_ENABLE | MLX5_IB_UPD_XLT_ADDR)) {
		wqe->ctrl_seg.mkey_mask |= get_umr_update_translation_mask();
		/* A zero-length MR is expressed via the length64 bit. */
		if (!mr->ibmr.length)
			MLX5_SET(mkc, &wqe->mkey_seg, length64, 1);
	}

	wqe->ctrl_seg.xlt_octowords =
		cpu_to_be16(mlx5r_umr_get_xlt_octo(sg->length));
	wqe->data_seg.byte_count = cpu_to_be32(sg->length);
}
/*
 * Send the DMA list to the HW for a normal MR using UMR.
 * Dmabuf MR is handled in a similar way, except that the MLX5_IB_UPD_XLT_ZAP
 * flag may be used.
 */
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
{
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	struct mlx5r_umr_wqe wqe = {};
	struct ib_block_iter biter;
	struct mlx5_mtt *cur_mtt;
	size_t orig_sg_length;
	struct mlx5_mtt *mtt;
	size_t final_size;
	struct ib_sge sg;
	u64 offset = 0;
	int err = 0;

	/* ODP MRs are populated through mlx5r_umr_update_xlt() instead. */
	if (WARN_ON(mr->umem->is_odp))
		return -EINVAL;

	mtt = mlx5r_umr_create_xlt(
		dev, &sg, ib_umem_num_dma_blocks(mr->umem, 1 << mr->page_shift),
		sizeof(*mtt), flags);
	if (!mtt)
		return -ENOMEM;

	/* sg.length shrinks per chunk below; keep it for the final unmap. */
	orig_sg_length = sg.length;

	mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
	mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr,
					  mr->page_shift);
	mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

	cur_mtt = mtt;
	rdma_for_each_block(mr->umem->sgt_append.sgt.sgl, &biter,
			    mr->umem->sgt_append.sgt.nents,
			    BIT(mr->page_shift)) {
		/* Buffer full: flush this chunk to HW, then refill from the top. */
		if (cur_mtt == (void *)mtt + sg.length) {
			dma_sync_single_for_device(ddev, sg.addr, sg.length,
						   DMA_TO_DEVICE);
			err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe,
						       true);
			if (err)
				goto err;
			dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
						DMA_TO_DEVICE);
			offset += sg.length;
			mlx5r_umr_update_offset(&wqe.ctrl_seg, offset);
			cur_mtt = mtt;
		}
		cur_mtt->ptag =
			cpu_to_be64(rdma_block_iter_dma_address(&biter) |
				    MLX5_IB_MTT_PRESENT);
		/* For ZAP on a dmabuf MR, write invalid (zero) entries. */
		if (mr->umem->is_dmabuf && (flags & MLX5_IB_UPD_XLT_ZAP))
			cur_mtt->ptag = 0;
		cur_mtt++;
	}
	final_size = (void *)cur_mtt - (void *)mtt;
	/* Zero-pad the tail chunk up to the HW alignment requirement. */
	sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
	memset(cur_mtt, 0, sg.length - final_size);
	mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
	dma_sync_single_for_device(ddev, sg.addr, sg.length, DMA_TO_DEVICE);
	err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
err:
	/* Restore the full length so the whole buffer is unmapped. */
	sg.length = orig_sg_length;
	mlx5r_umr_unmap_free_xlt(dev, mtt, &sg);
	return err;
}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
/*
 * Update @npages XLT entries of an ODP MR starting at entry @idx, posting
 * one UMR WQE per buffer-sized chunk.  With MLX5_IB_UPD_XLT_INDIRECT the
 * entries are KLMs rather than MTTs.  Returns 0 or a negative errno.
 */
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
			 int page_shift, int flags)
{
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
	struct device *ddev = &dev->mdev->pdev->dev;
	const int page_mask = page_align - 1;
	struct mlx5r_umr_wqe wqe = {};
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t size_to_map = 0;
	size_t orig_sg_length;
	size_t pages_iter;
	struct ib_sge sg;
	int err = 0;
	void *xlt;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* Non-ODP MRs are populated through mlx5r_umr_update_mr_pas(). */
	if (WARN_ON(!mr->umem->is_odp))
		return -EINVAL;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}
	pages_to_map = ALIGN(npages, page_align);

	xlt = mlx5r_umr_create_xlt(dev, &sg, npages, desc_size, flags);
	if (!xlt)
		return -ENOMEM;

	/* How many entries fit in one buffer = one WQE chunk. */
	pages_iter = sg.length / desc_size;
	orig_sg_length = sg.length;

	if (!(flags & MLX5_IB_UPD_XLT_INDIRECT)) {
		struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
		size_t max_pages = ib_umem_odp_num_pages(odp) - idx;

		/* Never map past the end of the ODP umem. */
		pages_to_map = min_t(size_t, pages_to_map, max_pages);
	}

	mlx5r_umr_set_update_xlt_ctrl_seg(&wqe.ctrl_seg, flags, &sg);
	mlx5r_umr_set_update_xlt_mkey_seg(dev, &wqe.mkey_seg, mr, page_shift);
	mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		size_to_map = npages * desc_size;
		/* Hand the buffer to the CPU, fill it, hand it back to HW. */
		dma_sync_single_for_cpu(ddev, sg.addr, sg.length,
					DMA_TO_DEVICE);
		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
		dma_sync_single_for_device(ddev, sg.addr, sg.length,
					   DMA_TO_DEVICE);
		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);

		/* The last chunk also carries the final mkey updates. */
		if (pages_mapped + pages_iter >= pages_to_map)
			mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
		mlx5r_umr_update_offset(&wqe.ctrl_seg, idx * desc_size);

		err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, true);
	}
	/* Restore the full length so the whole buffer is unmapped. */
	sg.length = orig_sg_length;
	mlx5r_umr_unmap_free_xlt(dev, xlt, &sg);
	return err;
}

View File

@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. */
#ifndef _MLX5_IB_UMR_H
#define _MLX5_IB_UMR_H
#include "mlx5_ib.h"
#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)
#define MLX5_IB_UMR_OCTOWORD 16
#define MLX5_IB_UMR_XLT_ALIGNMENT 64
int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);
/* Can this device load a PAS (page address) list via UMR at all? */
static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
					  size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE, which is
	 * always set whenever MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set, so a
	 * mkey can never be enabled without that capability.  Treat such
	 * quirky hardware as simply unable to use PAS lists with UMR.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * Without extended translation offsets only MLX5_MAX_UMR_PAGES worth
	 * of bytes is addressable; length is the MR size in bytes when
	 * mlx5_ib_update_xlt() is used.
	 */
	return MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) ||
	       length < MLX5_MAX_UMR_PAGES * PAGE_SIZE;
}
/*
 * true if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * get_umr_update_access_mask() and umr_check_mkey_mask().
 */
static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
					  unsigned int current_access_flags,
					  unsigned int target_access_flags)
{
	unsigned int changed = current_access_flags ^ target_access_flags;

	if (changed & IB_ACCESS_REMOTE_ATOMIC) {
		if (MLX5_CAP_GEN(dev->mdev, atomic) &&
		    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
			return false;
	}

	if (changed & IB_ACCESS_RELAXED_ORDERING) {
		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
		    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
			return false;

		if (MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
		    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
			return false;
	}

	return true;
}
/* Convert a byte count to UMR octowords, rounding up to XLT alignment. */
static inline u64 mlx5r_umr_get_xlt_octo(u64 bytes)
{
	u64 aligned = ALIGN(bytes, MLX5_IB_UMR_XLT_ALIGNMENT);

	return aligned / MLX5_IB_UMR_OCTOWORD;
}
/* Tracks completion of a single UMR work request. */
struct mlx5r_umr_context {
	struct ib_cqe cqe;		/* completion-handler hook */
	enum ib_wc_status status;	/* status reported by the completion */
	struct completion done;		/* signalled when the WR completes */
};

/* In-memory layout of the UMR WQE segments posted to the UMR QP. */
struct mlx5r_umr_wqe {
	struct mlx5_wqe_umr_ctrl_seg ctrl_seg;
	struct mlx5_mkey_seg mkey_seg;
	struct mlx5_wqe_data_seg data_seg;
};
int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
int access_flags);
int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int page_shift, int flags);
#endif /* _MLX5_IB_UMR_H */

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _INPUT_CORE_PRIVATE_H
#define _INPUT_CORE_PRIVATE_H
/*
 * Functions and definitions that are private to input core,
 * should not be used by input drivers or handlers.
 */
struct input_dev;
/* NOTE(review): presumably releases active MT slots on @dev — confirm
 * against the input-mt implementation. */
void input_mt_release_slots(struct input_dev *dev);
/* Core event-injection path; takes type/code/value like input_event(). */
void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value);
#endif /* _INPUT_CORE_PRIVATE_H */

View File

@ -0,0 +1,135 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Raspberry Pi Sense HAT joystick driver
* http://raspberrypi.org
*
* Copyright (C) 2015 Raspberry Pi
* Copyright (C) 2021 Charles Mirabile, Mwesigwa Guma, Joel Savitz
*
* Original Author: Serge Schneider
* Revised for upstream Linux by: Charles Mirabile, Mwesigwa Guma, Joel Savitz
*/
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/property.h>
#define JOYSTICK_SMB_REG 0xf2
/* Per-device state for the Sense HAT joystick function. */
struct sensehat_joystick {
	struct platform_device *pdev;
	struct input_dev *keys_dev;
	/* key-state bits seen at the previous interrupt (for edge detect) */
	unsigned long prev_states;
	struct regmap *regmap;
};

/* Bit position in the JOYSTICK_SMB_REG value -> reported input keycode. */
static const unsigned int keymap[] = {
	BTN_DPAD_DOWN, BTN_DPAD_RIGHT, BTN_DPAD_UP, BTN_SELECT, BTN_DPAD_LEFT,
};
/*
 * Threaded IRQ handler: read the current key state via regmap and report
 * only the keys whose state changed since the previous interrupt.
 */
static irqreturn_t sensehat_joystick_report(int irq, void *cookie)
{
	struct sensehat_joystick *sensehat_joystick = cookie;
	unsigned long curr_states, changes;
	unsigned int keys;
	int error;
	int i;

	error = regmap_read(sensehat_joystick->regmap, JOYSTICK_SMB_REG, &keys);
	if (error < 0) {
		dev_err(&sensehat_joystick->pdev->dev,
			"Failed to read joystick state: %d", error);
		return IRQ_NONE;
	}
	curr_states = keys;
	/* XOR against the previous snapshot to find flipped button bits. */
	bitmap_xor(&changes, &curr_states, &sensehat_joystick->prev_states,
		   ARRAY_SIZE(keymap));
	for_each_set_bit(i, &changes, ARRAY_SIZE(keymap))
		input_report_key(sensehat_joystick->keys_dev, keymap[i],
				 curr_states & BIT(i));
	input_sync(sensehat_joystick->keys_dev);
	sensehat_joystick->prev_states = keys;
	return IRQ_HANDLED;
}
/*
 * Bind the joystick function of the Sense HAT: allocate and register an
 * input device, then hook the interrupt that signals key-state changes.
 * All resources are devm-managed, so error paths simply return.
 */
static int sensehat_joystick_probe(struct platform_device *pdev)
{
	struct sensehat_joystick *sensehat_joystick;
	int error, i, irq;

	sensehat_joystick = devm_kzalloc(&pdev->dev, sizeof(*sensehat_joystick),
					 GFP_KERNEL);
	if (!sensehat_joystick)
		return -ENOMEM;

	sensehat_joystick->pdev = pdev;

	/* The regmap is owned by the parent (MFD) device. */
	sensehat_joystick->regmap = dev_get_regmap(pdev->dev.parent, NULL);
	if (!sensehat_joystick->regmap) {
		dev_err(&pdev->dev, "unable to get sensehat regmap");
		return -ENODEV;
	}

	sensehat_joystick->keys_dev = devm_input_allocate_device(&pdev->dev);
	if (!sensehat_joystick->keys_dev) {
		dev_err(&pdev->dev, "Could not allocate input device");
		return -ENOMEM;
	}

	sensehat_joystick->keys_dev->name = "Raspberry Pi Sense HAT Joystick";
	sensehat_joystick->keys_dev->phys = "sensehat-joystick/input0";
	sensehat_joystick->keys_dev->id.bustype = BUS_I2C;

	/* Key events with auto-repeat; one capability bit per mapped button. */
	__set_bit(EV_KEY, sensehat_joystick->keys_dev->evbit);
	__set_bit(EV_REP, sensehat_joystick->keys_dev->evbit);
	for (i = 0; i < ARRAY_SIZE(keymap); i++)
		__set_bit(keymap[i], sensehat_joystick->keys_dev->keybit);

	error = input_register_device(sensehat_joystick->keys_dev);
	if (error) {
		dev_err(&pdev->dev, "Could not register input device");
		return error;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Threaded + oneshot: the handler sleeps in regmap_read(). */
	error = devm_request_threaded_irq(&pdev->dev, irq,
					  NULL, sensehat_joystick_report,
					  IRQF_ONESHOT, "keys",
					  sensehat_joystick);
	if (error) {
		dev_err(&pdev->dev, "IRQ request failed");
		return error;
	}
	return 0;
}
/* Devicetree match table for the joystick sub-function. */
static const struct of_device_id sensehat_joystick_device_id[] = {
	{ .compatible = "raspberrypi,sensehat-joystick" },
	{},
};
MODULE_DEVICE_TABLE(of, sensehat_joystick_device_id);

static struct platform_driver sensehat_joystick_driver = {
	.probe = sensehat_joystick_probe,
	.driver = {
		.name = "sensehat-joystick",
		.of_match_table = sensehat_joystick_device_id,
	},
};
/* No remove callback: everything in probe is devm-managed. */
module_platform_driver(sensehat_joystick_driver);

MODULE_DESCRIPTION("Raspberry Pi Sense HAT joystick driver");
MODULE_AUTHOR("Charles Mirabile <cmirabil@redhat.com>");
MODULE_AUTHOR("Serge Schneider <serge@raspberrypi.org>");
MODULE_LICENSE("GPL");

2500
drivers/input/misc/iqs7222.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,259 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Interconnect framework driver for i.MX8MP SoC
*
* Copyright 2022 NXP
* Peng Fan <peng.fan@nxp.com>
*/
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <dt-bindings/interconnect/fsl,imx8mp.h>
#include "imx.h"
/* Bandwidth adjustment for the main NOC: requests scaled by bw_mul/bw_div
 * (1/16); applied by the shared imx icc core. */
static const struct imx_icc_node_adj_desc imx8mp_noc_adj = {
	.bw_mul = 1,
	.bw_div = 16,
	.main_noc = true,
};
static struct imx_icc_noc_setting noc_setting_nodes[] = {
[IMX8MP_ICM_MLMIX] = {
.reg = 0x180,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_DSP] = {
.reg = 0x200,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_SDMA2PER] = {
.reg = 0x280,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA2BURST] = {
.reg = 0x300,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA3PER] = {
.reg = 0x380,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_SDMA3BURST] = {
.reg = 0x400,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_EDMA] = {
.reg = 0x480,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 4,
},
[IMX8MP_ICM_GPU3D] = {
.reg = 0x500,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_GPU2D] = {
.reg = 0x580,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_HRV] = {
.reg = 0x600,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_LCDIF_HDMI] = {
.reg = 0x680,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_HDCP] = {
.reg = 0x700,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 5,
},
[IMX8MP_ICM_NOC_PCIE] = {
.reg = 0x780,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_USB1] = {
.reg = 0x800,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_USB2] = {
.reg = 0x880,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_PCIE] = {
.reg = 0x900,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_LCDIF_RD] = {
.reg = 0x980,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_LCDIF_WR] = {
.reg = 0xa00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI0] = {
.reg = 0xa80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI1] = {
.reg = 0xb00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISI2] = {
.reg = 0xb80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 2,
.ext_control = 1,
},
[IMX8MP_ICM_ISP0] = {
.reg = 0xc00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_ISP1] = {
.reg = 0xc80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_DWE] = {
.reg = 0xd00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 7,
},
[IMX8MP_ICM_VPU_G1] = {
.reg = 0xd80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_VPU_G2] = {
.reg = 0xe00,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICM_VPU_H1] = {
.reg = 0xe80,
.mode = IMX_NOC_MODE_FIXED,
.prio_level = 3,
},
[IMX8MP_ICN_MEDIA] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_VIDEO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_AUDIO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_HDMI] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_GPU] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
[IMX8MP_ICN_HSIO] = {
.mode = IMX_NOC_MODE_UNCONFIGURED,
},
};
/* Describe bus masters, slaves and connections between them */
static struct imx_icc_node_desc nodes[] = {
DEFINE_BUS_INTERCONNECT("NOC", IMX8MP_ICN_NOC, &imx8mp_noc_adj,
IMX8MP_ICS_DRAM, IMX8MP_ICN_MAIN),
DEFINE_BUS_SLAVE("OCRAM", IMX8MP_ICS_OCRAM, NULL),
DEFINE_BUS_SLAVE("DRAM", IMX8MP_ICS_DRAM, NULL),
DEFINE_BUS_MASTER("A53", IMX8MP_ICM_A53, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("SUPERMIX", IMX8MP_ICM_SUPERMIX, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("GIC", IMX8MP_ICM_GIC, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("MLMIX", IMX8MP_ICM_MLMIX, IMX8MP_ICN_NOC),
DEFINE_BUS_INTERCONNECT("NOC_AUDIO", IMX8MP_ICN_AUDIO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("DSP", IMX8MP_ICM_DSP, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA2PER", IMX8MP_ICM_SDMA2PER, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA2BURST", IMX8MP_ICM_SDMA2BURST, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3PER", IMX8MP_ICM_SDMA3PER, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("SDMA3BURST", IMX8MP_ICM_SDMA3BURST, IMX8MP_ICN_AUDIO),
DEFINE_BUS_MASTER("EDMA", IMX8MP_ICM_EDMA, IMX8MP_ICN_AUDIO),
DEFINE_BUS_INTERCONNECT("NOC_GPU", IMX8MP_ICN_GPU, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("GPU 2D", IMX8MP_ICM_GPU2D, IMX8MP_ICN_GPU),
DEFINE_BUS_MASTER("GPU 3D", IMX8MP_ICM_GPU3D, IMX8MP_ICN_GPU),
DEFINE_BUS_INTERCONNECT("NOC_HDMI", IMX8MP_ICN_HDMI, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("HRV", IMX8MP_ICM_HRV, IMX8MP_ICN_HDMI),
DEFINE_BUS_MASTER("LCDIF_HDMI", IMX8MP_ICM_LCDIF_HDMI, IMX8MP_ICN_HDMI),
DEFINE_BUS_MASTER("HDCP", IMX8MP_ICM_HDCP, IMX8MP_ICN_HDMI),
DEFINE_BUS_INTERCONNECT("NOC_HSIO", IMX8MP_ICN_HSIO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("NOC_PCIE", IMX8MP_ICM_NOC_PCIE, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("USB1", IMX8MP_ICM_USB1, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("USB2", IMX8MP_ICM_USB2, IMX8MP_ICN_HSIO),
DEFINE_BUS_MASTER("PCIE", IMX8MP_ICM_PCIE, IMX8MP_ICN_HSIO),
DEFINE_BUS_INTERCONNECT("NOC_MEDIA", IMX8MP_ICN_MEDIA, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("LCDIF_RD", IMX8MP_ICM_LCDIF_RD, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("LCDIF_WR", IMX8MP_ICM_LCDIF_WR, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI0", IMX8MP_ICM_ISI0, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI1", IMX8MP_ICM_ISI1, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISI2", IMX8MP_ICM_ISI2, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISP0", IMX8MP_ICM_ISP0, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("ISP1", IMX8MP_ICM_ISP1, IMX8MP_ICN_MEDIA),
DEFINE_BUS_MASTER("DWE", IMX8MP_ICM_DWE, IMX8MP_ICN_MEDIA),
DEFINE_BUS_INTERCONNECT("NOC_VIDEO", IMX8MP_ICN_VIDEO, NULL, IMX8MP_ICN_NOC),
DEFINE_BUS_MASTER("VPU G1", IMX8MP_ICM_VPU_G1, IMX8MP_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU G2", IMX8MP_ICM_VPU_G2, IMX8MP_ICN_VIDEO),
DEFINE_BUS_MASTER("VPU H1", IMX8MP_ICM_VPU_H1, IMX8MP_ICN_VIDEO),
DEFINE_BUS_INTERCONNECT("PL301_MAIN", IMX8MP_ICN_MAIN, NULL,
IMX8MP_ICN_NOC, IMX8MP_ICS_OCRAM),
};
/* Register the i.MX8MP node graph and NOC settings with the imx icc core. */
static int imx8mp_icc_probe(struct platform_device *pdev)
{
	return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes);
}

/* Tear down everything imx_icc_register() set up. */
static int imx8mp_icc_remove(struct platform_device *pdev)
{
	return imx_icc_unregister(pdev);
}
/* Platform glue; the device is expected to be created by the NOC parent. */
static struct platform_driver imx8mp_icc_driver = {
	.probe = imx8mp_icc_probe,
	.remove = imx8mp_icc_remove,
	.driver = {
		.name = "imx8mp-interconnect",
	},
};

module_platform_driver(imx8mp_icc_driver);
MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:imx8mp-interconnect");

View File

@ -0,0 +1,34 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#include <linux/of.h>
#include <linux/slab.h>
#include "icc-common.h"
/*
 * Extended DT translation: args[0] selects the node (onecell lookup) and an
 * optional args[1] carries a path tag.  Returns a freshly allocated
 * icc_node_data (owned by the caller) or an ERR_PTR.
 */
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
{
	struct icc_node *node;
	struct icc_node_data *node_data;

	node = of_icc_xlate_onecell(spec, data);
	if (IS_ERR(node))
		return ERR_CAST(node);

	node_data = kzalloc(sizeof(*node_data), GFP_KERNEL);
	if (!node_data)
		return ERR_PTR(-ENOMEM);

	node_data->node = node;

	if (spec->args_count > 2)
		pr_warn("%pOF: Too many arguments, path tag is not parsed\n", spec->np);
	else if (spec->args_count == 2)
		node_data->tag = spec->args[1];

	return node_data;
}
EXPORT_SYMBOL_GPL(qcom_icc_xlate_extended);

View File

@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2022 Linaro Ltd.
*/
#ifndef __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
#define __DRIVERS_INTERCONNECT_QCOM_ICC_COMMON_H__
#include <linux/interconnect-provider.h>
struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
#endif

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,209 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#ifndef __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
#define __DRIVERS_INTERCONNECT_QCOM_SC8280XP_H
#define SC8280XP_MASTER_GPU_TCU 0
#define SC8280XP_MASTER_PCIE_TCU 1
#define SC8280XP_MASTER_SYS_TCU 2
#define SC8280XP_MASTER_APPSS_PROC 3
#define SC8280XP_MASTER_IPA_CORE 4
#define SC8280XP_MASTER_LLCC 5
#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6
#define SC8280XP_MASTER_CDSP_NOC_CFG 7
#define SC8280XP_MASTER_CDSPB_NOC_CFG 8
#define SC8280XP_MASTER_QDSS_BAM 9
#define SC8280XP_MASTER_QSPI_0 10
#define SC8280XP_MASTER_QUP_0 11
#define SC8280XP_MASTER_QUP_1 12
#define SC8280XP_MASTER_QUP_2 13
#define SC8280XP_MASTER_A1NOC_CFG 14
#define SC8280XP_MASTER_A2NOC_CFG 15
#define SC8280XP_MASTER_A1NOC_SNOC 16
#define SC8280XP_MASTER_A2NOC_SNOC 17
#define SC8280XP_MASTER_USB_NOC_SNOC 18
#define SC8280XP_MASTER_CAMNOC_HF 19
#define SC8280XP_MASTER_COMPUTE_NOC 20
#define SC8280XP_MASTER_COMPUTE_NOC_1 21
#define SC8280XP_MASTER_CNOC_DC_NOC 22
#define SC8280XP_MASTER_GEM_NOC_CFG 23
#define SC8280XP_MASTER_GEM_NOC_CNOC 24
#define SC8280XP_MASTER_GEM_NOC_PCIE_SNOC 25
#define SC8280XP_MASTER_GFX3D 26
#define SC8280XP_MASTER_LPASS_ANOC 27
#define SC8280XP_MASTER_MDP0 28
#define SC8280XP_MASTER_MDP1 29
#define SC8280XP_MASTER_MDP_CORE1_0 30
#define SC8280XP_MASTER_MDP_CORE1_1 31
#define SC8280XP_MASTER_CNOC_MNOC_CFG 32
#define SC8280XP_MASTER_MNOC_HF_MEM_NOC 33
#define SC8280XP_MASTER_MNOC_SF_MEM_NOC 34
#define SC8280XP_MASTER_ANOC_PCIE_GEM_NOC 35
#define SC8280XP_MASTER_ROTATOR 36
#define SC8280XP_MASTER_ROTATOR_1 37
#define SC8280XP_MASTER_SNOC_CFG 38
#define SC8280XP_MASTER_SNOC_GC_MEM_NOC 39
#define SC8280XP_MASTER_SNOC_SF_MEM_NOC 40
#define SC8280XP_MASTER_VIDEO_P0 41
#define SC8280XP_MASTER_VIDEO_P1 42
#define SC8280XP_MASTER_VIDEO_PROC 43
#define SC8280XP_MASTER_QUP_CORE_0 44
#define SC8280XP_MASTER_QUP_CORE_1 45
#define SC8280XP_MASTER_QUP_CORE_2 46
#define SC8280XP_MASTER_CAMNOC_ICP 47
#define SC8280XP_MASTER_CAMNOC_SF 48
#define SC8280XP_MASTER_CRYPTO 49
#define SC8280XP_MASTER_IPA 50
#define SC8280XP_MASTER_LPASS_PROC 51
#define SC8280XP_MASTER_CDSP_PROC 52
#define SC8280XP_MASTER_CDSP_PROC_B 53
#define SC8280XP_MASTER_PIMEM 54
#define SC8280XP_MASTER_SENSORS_PROC 55
#define SC8280XP_MASTER_SP 56
#define SC8280XP_MASTER_EMAC 57
#define SC8280XP_MASTER_EMAC_1 58
#define SC8280XP_MASTER_GIC 59
#define SC8280XP_MASTER_PCIE_0 60
#define SC8280XP_MASTER_PCIE_1 61
#define SC8280XP_MASTER_PCIE_2A 62
#define SC8280XP_MASTER_PCIE_2B 63
#define SC8280XP_MASTER_PCIE_3A 64
#define SC8280XP_MASTER_PCIE_3B 65
#define SC8280XP_MASTER_PCIE_4 66
#define SC8280XP_MASTER_QDSS_ETR 67
#define SC8280XP_MASTER_SDCC_2 68
#define SC8280XP_MASTER_SDCC_4 69
#define SC8280XP_MASTER_UFS_CARD 70
#define SC8280XP_MASTER_UFS_MEM 71
#define SC8280XP_MASTER_USB3_0 72
#define SC8280XP_MASTER_USB3_1 73
#define SC8280XP_MASTER_USB3_MP 74
#define SC8280XP_MASTER_USB4_0 75
#define SC8280XP_MASTER_USB4_1 76
#define SC8280XP_SLAVE_EBI1 512
#define SC8280XP_SLAVE_IPA_CORE 513
#define SC8280XP_SLAVE_AHB2PHY_0 514
#define SC8280XP_SLAVE_AHB2PHY_1 515
#define SC8280XP_SLAVE_AHB2PHY_2 516
#define SC8280XP_SLAVE_AOSS 517
#define SC8280XP_SLAVE_APPSS 518
#define SC8280XP_SLAVE_CAMERA_CFG 519
#define SC8280XP_SLAVE_CLK_CTL 520
#define SC8280XP_SLAVE_CDSP_CFG 521
#define SC8280XP_SLAVE_CDSP1_CFG 522
#define SC8280XP_SLAVE_RBCPR_CX_CFG 523
#define SC8280XP_SLAVE_RBCPR_MMCX_CFG 524
#define SC8280XP_SLAVE_RBCPR_MX_CFG 525
#define SC8280XP_SLAVE_CPR_NSPCX 526
#define SC8280XP_SLAVE_CRYPTO_0_CFG 527
#define SC8280XP_SLAVE_CX_RDPM 528
#define SC8280XP_SLAVE_DCC_CFG 529
#define SC8280XP_SLAVE_DISPLAY_CFG 530
#define SC8280XP_SLAVE_DISPLAY1_CFG 531
#define SC8280XP_SLAVE_EMAC_CFG 532
#define SC8280XP_SLAVE_EMAC1_CFG 533
#define SC8280XP_SLAVE_GFX3D_CFG 534
#define SC8280XP_SLAVE_HWKM 535
#define SC8280XP_SLAVE_IMEM_CFG 536
#define SC8280XP_SLAVE_IPA_CFG 537
#define SC8280XP_SLAVE_IPC_ROUTER_CFG 538
#define SC8280XP_SLAVE_LLCC_CFG 539
#define SC8280XP_SLAVE_LPASS 540
#define SC8280XP_SLAVE_LPASS_CORE_CFG 541
#define SC8280XP_SLAVE_LPASS_LPI_CFG 542
#define SC8280XP_SLAVE_LPASS_MPU_CFG 543
#define SC8280XP_SLAVE_LPASS_TOP_CFG 544
#define SC8280XP_SLAVE_MX_RDPM 545
#define SC8280XP_SLAVE_MXC_RDPM 546
#define SC8280XP_SLAVE_PCIE_0_CFG 547
#define SC8280XP_SLAVE_PCIE_1_CFG 548
#define SC8280XP_SLAVE_PCIE_2A_CFG 549
#define SC8280XP_SLAVE_PCIE_2B_CFG 550
#define SC8280XP_SLAVE_PCIE_3A_CFG 551
#define SC8280XP_SLAVE_PCIE_3B_CFG 552
#define SC8280XP_SLAVE_PCIE_4_CFG 553
#define SC8280XP_SLAVE_PCIE_RSC_CFG 554
#define SC8280XP_SLAVE_PDM 555
#define SC8280XP_SLAVE_PIMEM_CFG 556
#define SC8280XP_SLAVE_PKA_WRAPPER_CFG 557
#define SC8280XP_SLAVE_PMU_WRAPPER_CFG 558
#define SC8280XP_SLAVE_QDSS_CFG 559
#define SC8280XP_SLAVE_QSPI_0 560
#define SC8280XP_SLAVE_QUP_0 561
#define SC8280XP_SLAVE_QUP_1 562
#define SC8280XP_SLAVE_QUP_2 563
#define SC8280XP_SLAVE_SDCC_2 564
#define SC8280XP_SLAVE_SDCC_4 565
#define SC8280XP_SLAVE_SECURITY 566
#define SC8280XP_SLAVE_SMMUV3_CFG 567
#define SC8280XP_SLAVE_SMSS_CFG 568
#define SC8280XP_SLAVE_SPSS_CFG 569
#define SC8280XP_SLAVE_TCSR 570
#define SC8280XP_SLAVE_TLMM 571
#define SC8280XP_SLAVE_UFS_CARD_CFG 572
#define SC8280XP_SLAVE_UFS_MEM_CFG 573
#define SC8280XP_SLAVE_USB3_0 574
#define SC8280XP_SLAVE_USB3_1 575
#define SC8280XP_SLAVE_USB3_MP 576
#define SC8280XP_SLAVE_USB4_0 577
#define SC8280XP_SLAVE_USB4_1 578
#define SC8280XP_SLAVE_VENUS_CFG 579
#define SC8280XP_SLAVE_VSENSE_CTRL_CFG 580
#define SC8280XP_SLAVE_VSENSE_CTRL_R_CFG 581
#define SC8280XP_SLAVE_A1NOC_CFG 582
#define SC8280XP_SLAVE_A1NOC_SNOC 583
#define SC8280XP_SLAVE_A2NOC_CFG 584
#define SC8280XP_SLAVE_A2NOC_SNOC 585
#define SC8280XP_SLAVE_USB_NOC_SNOC 586
#define SC8280XP_SLAVE_ANOC_PCIE_BRIDGE_CFG 587
#define SC8280XP_SLAVE_DDRSS_CFG 588
#define SC8280XP_SLAVE_GEM_NOC_CNOC 589
#define SC8280XP_SLAVE_GEM_NOC_CFG 590
#define SC8280XP_SLAVE_SNOC_GEM_NOC_GC 591
#define SC8280XP_SLAVE_SNOC_GEM_NOC_SF 592
#define SC8280XP_SLAVE_LLCC 593
#define SC8280XP_SLAVE_MNOC_HF_MEM_NOC 594
#define SC8280XP_SLAVE_MNOC_SF_MEM_NOC 595
#define SC8280XP_SLAVE_CNOC_MNOC_CFG 596
#define SC8280XP_SLAVE_CDSP_MEM_NOC 597
#define SC8280XP_SLAVE_CDSPB_MEM_NOC 598
#define SC8280XP_SLAVE_GEM_NOC_PCIE_CNOC 599
#define SC8280XP_SLAVE_ANOC_PCIE_GEM_NOC 600
#define SC8280XP_SLAVE_SNOC_CFG 601
#define SC8280XP_SLAVE_SNOC_SF_BRIDGE_CFG 602
#define SC8280XP_SLAVE_LPASS_SNOC 603
#define SC8280XP_SLAVE_QUP_CORE_0 604
#define SC8280XP_SLAVE_QUP_CORE_1 605
#define SC8280XP_SLAVE_QUP_CORE_2 606
#define SC8280XP_SLAVE_IMEM 607
#define SC8280XP_SLAVE_NSP_XFR 608
#define SC8280XP_SLAVE_NSPB_XFR 609
#define SC8280XP_SLAVE_PIMEM 610
#define SC8280XP_SLAVE_SERVICE_NSP_NOC 611
#define SC8280XP_SLAVE_SERVICE_NSPB_NOC 612
#define SC8280XP_SLAVE_SERVICE_A1NOC 613
#define SC8280XP_SLAVE_SERVICE_A2NOC 614
#define SC8280XP_SLAVE_SERVICE_CNOC 615
#define SC8280XP_SLAVE_SERVICE_GEM_NOC_1 616
#define SC8280XP_SLAVE_SERVICE_MNOC 617
#define SC8280XP_SLAVE_SERVICES_LPASS_AML_NOC 618
#define SC8280XP_SLAVE_SERVICE_LPASS_AG_NOC 619
#define SC8280XP_SLAVE_SERVICE_GEM_NOC_2 620
#define SC8280XP_SLAVE_SERVICE_SNOC 621
#define SC8280XP_SLAVE_SERVICE_GEM_NOC 622
#define SC8280XP_SLAVE_PCIE_0 623
#define SC8280XP_SLAVE_PCIE_1 624
#define SC8280XP_SLAVE_PCIE_2A 625
#define SC8280XP_SLAVE_PCIE_2B 626
#define SC8280XP_SLAVE_PCIE_3A 627
#define SC8280XP_SLAVE_PCIE_3B 628
#define SC8280XP_SLAVE_PCIE_4 629
#define SC8280XP_SLAVE_QDSS_STM 630
#define SC8280XP_SLAVE_SMSS 631
#define SC8280XP_SLAVE_TCU 632
#endif

View File

@ -0,0 +1,231 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <dt-bindings/interconnect/qcom,sdx65.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sdx65.h"
/*
 * SDX65 interconnect node table. DEFINE_QNODE() comes from icc-rpmh.h;
 * each entry names a node, gives its DT-binding id (sdx65.h) and two
 * numeric parameters (presumably channel count and bus width in bytes —
 * confirm against the macro definition in icc-rpmh.h), followed by the
 * ids of the nodes it links towards.
 */
DEFINE_QNODE(llcc_mc, SDX65_MASTER_LLCC, 1, 4, SDX65_SLAVE_EBI1);
DEFINE_QNODE(acm_tcu, SDX65_MASTER_TCU_0, 1, 8, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
DEFINE_QNODE(qnm_snoc_gc, SDX65_MASTER_SNOC_GC_MEM_NOC, 1, 16, SDX65_SLAVE_LLCC);
DEFINE_QNODE(xm_apps_rdwr, SDX65_MASTER_APPSS_PROC, 1, 16, SDX65_SLAVE_LLCC, SDX65_SLAVE_MEM_NOC_SNOC, SDX65_SLAVE_MEM_NOC_PCIE_SNOC);
DEFINE_QNODE(qhm_audio, SDX65_MASTER_AUDIO, 1, 4, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(qhm_blsp1, SDX65_MASTER_BLSP_1, 1, 4, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(qhm_qdss_bam, SDX65_MASTER_QDSS_BAM, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
DEFINE_QNODE(qhm_qpic, SDX65_MASTER_QPIC, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(qhm_snoc_cfg, SDX65_MASTER_SNOC_CFG, 1, 4, SDX65_SLAVE_SERVICE_SNOC);
DEFINE_QNODE(qhm_spmi_fetcher1, SDX65_MASTER_SPMI_FETCHER, 1, 4, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(qnm_aggre_noc, SDX65_MASTER_ANOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
DEFINE_QNODE(qnm_ipa, SDX65_MASTER_IPA, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_PCIE_0, SDX65_SLAVE_QDSS_STM);
DEFINE_QNODE(qnm_memnoc, SDX65_MASTER_MEM_NOC_SNOC, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_APPSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_IMEM, SDX65_SLAVE_QDSS_STM, SDX65_SLAVE_TCU);
DEFINE_QNODE(qnm_memnoc_pcie, SDX65_MASTER_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_SLAVE_PCIE_0);
DEFINE_QNODE(qxm_crypto, SDX65_MASTER_CRYPTO, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(xm_ipa2pcie_slv, SDX65_MASTER_IPA_PCIE, 1, 8, SDX65_SLAVE_PCIE_0);
DEFINE_QNODE(xm_pcie, SDX65_MASTER_PCIE_0, 1, 8, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(xm_qdss_etr, SDX65_MASTER_QDSS_ETR, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_BLSP_1, SDX65_SLAVE_CLK_CTL, SDX65_SLAVE_CRYPTO_0_CFG, SDX65_SLAVE_CNOC_DDRSS, SDX65_SLAVE_ECC_CFG, SDX65_SLAVE_IMEM_CFG, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_CNOC_MSS, SDX65_SLAVE_PCIE_PARF, SDX65_SLAVE_PDM, SDX65_SLAVE_PRNG, SDX65_SLAVE_QDSS_CFG, SDX65_SLAVE_QPIC, SDX65_SLAVE_SDCC_1, SDX65_SLAVE_SNOC_CFG, SDX65_SLAVE_SPMI_FETCHER, SDX65_SLAVE_SPMI_VGI_COEX, SDX65_SLAVE_TCSR, SDX65_SLAVE_TLMM, SDX65_SLAVE_USB3, SDX65_SLAVE_USB3_PHY_CFG, SDX65_SLAVE_SNOC_MEM_NOC_GC, SDX65_SLAVE_IMEM, SDX65_SLAVE_TCU);
DEFINE_QNODE(xm_sdc1, SDX65_MASTER_SDCC_1, 1, 8, SDX65_SLAVE_AOSS, SDX65_SLAVE_AUDIO, SDX65_SLAVE_IPA_CFG, SDX65_SLAVE_ANOC_SNOC);
DEFINE_QNODE(xm_usb3, SDX65_MASTER_USB3, 1, 8, SDX65_SLAVE_ANOC_SNOC);
/* Slave-side nodes; entries with trailing ids forward onto further nodes. */
DEFINE_QNODE(ebi, SDX65_SLAVE_EBI1, 1, 4);
DEFINE_QNODE(qns_llcc, SDX65_SLAVE_LLCC, 1, 16, SDX65_MASTER_LLCC);
DEFINE_QNODE(qns_memnoc_snoc, SDX65_SLAVE_MEM_NOC_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_SNOC);
DEFINE_QNODE(qns_sys_pcie, SDX65_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SDX65_MASTER_MEM_NOC_PCIE_SNOC);
DEFINE_QNODE(qhs_aoss, SDX65_SLAVE_AOSS, 1, 4);
DEFINE_QNODE(qhs_apss, SDX65_SLAVE_APPSS, 1, 4);
DEFINE_QNODE(qhs_audio, SDX65_SLAVE_AUDIO, 1, 4);
DEFINE_QNODE(qhs_blsp1, SDX65_SLAVE_BLSP_1, 1, 4);
DEFINE_QNODE(qhs_clk_ctl, SDX65_SLAVE_CLK_CTL, 1, 4);
DEFINE_QNODE(qhs_crypto0_cfg, SDX65_SLAVE_CRYPTO_0_CFG, 1, 4);
DEFINE_QNODE(qhs_ddrss_cfg, SDX65_SLAVE_CNOC_DDRSS, 1, 4);
DEFINE_QNODE(qhs_ecc_cfg, SDX65_SLAVE_ECC_CFG, 1, 4);
DEFINE_QNODE(qhs_imem_cfg, SDX65_SLAVE_IMEM_CFG, 1, 4);
DEFINE_QNODE(qhs_ipa, SDX65_SLAVE_IPA_CFG, 1, 4);
DEFINE_QNODE(qhs_mss_cfg, SDX65_SLAVE_CNOC_MSS, 1, 4);
DEFINE_QNODE(qhs_pcie_parf, SDX65_SLAVE_PCIE_PARF, 1, 4);
DEFINE_QNODE(qhs_pdm, SDX65_SLAVE_PDM, 1, 4);
DEFINE_QNODE(qhs_prng, SDX65_SLAVE_PRNG, 1, 4);
DEFINE_QNODE(qhs_qdss_cfg, SDX65_SLAVE_QDSS_CFG, 1, 4);
DEFINE_QNODE(qhs_qpic, SDX65_SLAVE_QPIC, 1, 4);
DEFINE_QNODE(qhs_sdc1, SDX65_SLAVE_SDCC_1, 1, 4);
DEFINE_QNODE(qhs_snoc_cfg, SDX65_SLAVE_SNOC_CFG, 1, 4, SDX65_MASTER_SNOC_CFG);
DEFINE_QNODE(qhs_spmi_fetcher, SDX65_SLAVE_SPMI_FETCHER, 1, 4);
DEFINE_QNODE(qhs_spmi_vgi_coex, SDX65_SLAVE_SPMI_VGI_COEX, 1, 4);
DEFINE_QNODE(qhs_tcsr, SDX65_SLAVE_TCSR, 1, 4);
DEFINE_QNODE(qhs_tlmm, SDX65_SLAVE_TLMM, 1, 4);
DEFINE_QNODE(qhs_usb3, SDX65_SLAVE_USB3, 1, 4);
DEFINE_QNODE(qhs_usb3_phy, SDX65_SLAVE_USB3_PHY_CFG, 1, 4);
DEFINE_QNODE(qns_aggre_noc, SDX65_SLAVE_ANOC_SNOC, 1, 8, SDX65_MASTER_ANOC_SNOC);
DEFINE_QNODE(qns_snoc_memnoc, SDX65_SLAVE_SNOC_MEM_NOC_GC, 1, 16, SDX65_MASTER_SNOC_GC_MEM_NOC);
DEFINE_QNODE(qxs_imem, SDX65_SLAVE_IMEM, 1, 8);
DEFINE_QNODE(srvc_snoc, SDX65_SLAVE_SERVICE_SNOC, 1, 4);
DEFINE_QNODE(xs_pcie, SDX65_SLAVE_PCIE_0, 1, 8);
DEFINE_QNODE(xs_qdss_stm, SDX65_SLAVE_QDSS_STM, 1, 4);
DEFINE_QNODE(xs_sys_tcu_cfg, SDX65_SLAVE_TCU, 1, 8);
/*
 * BCM table. DEFINE_QBCM() comes from icc-rpmh.h; each entry ties a
 * BCM name string to the nodes it aggregates votes for. The boolean
 * presumably flags keepalive/always-on BCMs — confirm against the
 * macro definition in icc-rpmh.h.
 */
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_pn0, "PN0", true, &qhm_snoc_cfg, &qhs_aoss, &qhs_apss, &qhs_audio, &qhs_blsp1, &qhs_clk_ctl, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_ecc_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mss_cfg, &qhs_pcie_parf, &qhs_pdm, &qhs_prng, &qhs_qdss_cfg, &qhs_qpic, &qhs_sdc1, &qhs_snoc_cfg, &qhs_spmi_fetcher, &qhs_spmi_vgi_coex, &qhs_tcsr, &qhs_tlmm, &qhs_usb3, &qhs_usb3_phy, &srvc_snoc);
DEFINE_QBCM(bcm_pn1, "PN1", false, &xm_sdc1);
DEFINE_QBCM(bcm_pn2, "PN2", false, &qhm_audio, &qhm_spmi_fetcher1);
DEFINE_QBCM(bcm_pn3, "PN3", false, &qhm_blsp1, &qhm_qpic);
DEFINE_QBCM(bcm_pn4, "PN4", false, &qxm_crypto);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_memnoc_snoc);
DEFINE_QBCM(bcm_sh3, "SH3", false, &xm_apps_rdwr);
DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_snoc_memnoc);
DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
DEFINE_QBCM(bcm_sn2, "SN2", false, &xs_qdss_stm);
DEFINE_QBCM(bcm_sn3, "SN3", false, &xs_sys_tcu_cfg);
DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_pcie);
DEFINE_QBCM(bcm_sn6, "SN6", false, &qhm_qdss_bam, &xm_qdss_etr);
DEFINE_QBCM(bcm_sn7, "SN7", false, &qnm_aggre_noc, &xm_pcie, &xm_usb3, &qns_aggre_noc);
DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_memnoc);
DEFINE_QBCM(bcm_sn9, "SN9", false, &qnm_memnoc_pcie);
DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_ipa, &xm_ipa2pcie_slv);
/* BCMs voted on by the mc_virt provider. */
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
	&bcm_mc0,
};
/* mc_virt nodes, indexed by the ids from dt-bindings/interconnect/qcom,sdx65.h. */
static struct qcom_icc_node * const mc_virt_nodes[] = {
	[MASTER_LLCC] = &llcc_mc,
	[SLAVE_EBI1] = &ebi,
};
/* Provider descriptor matched from the "qcom,sdx65-mc-virt" compatible. */
static const struct qcom_icc_desc sdx65_mc_virt = {
	.nodes = mc_virt_nodes,
	.num_nodes = ARRAY_SIZE(mc_virt_nodes),
	.bcms = mc_virt_bcms,
	.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
/* BCMs voted on by the mem_noc provider. */
static struct qcom_icc_bcm * const mem_noc_bcms[] = {
	&bcm_sh0,
	&bcm_sh1,
	&bcm_sh3,
};
/* mem_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sdx65.h. */
static struct qcom_icc_node * const mem_noc_nodes[] = {
	[MASTER_TCU_0] = &acm_tcu,
	[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
	[MASTER_APPSS_PROC] = &xm_apps_rdwr,
	[SLAVE_LLCC] = &qns_llcc,
	[SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
	[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_sys_pcie,
};
/* Provider descriptor matched from the "qcom,sdx65-mem-noc" compatible. */
static const struct qcom_icc_desc sdx65_mem_noc = {
	.nodes = mem_noc_nodes,
	.num_nodes = ARRAY_SIZE(mem_noc_nodes),
	.bcms = mem_noc_bcms,
	.num_bcms = ARRAY_SIZE(mem_noc_bcms),
};
/* BCMs voted on by the system_noc provider. */
static struct qcom_icc_bcm * const system_noc_bcms[] = {
	&bcm_ce0,
	&bcm_pn0,
	&bcm_pn1,
	&bcm_pn2,
	&bcm_pn3,
	&bcm_pn4,
	&bcm_sn0,
	&bcm_sn1,
	&bcm_sn2,
	&bcm_sn3,
	&bcm_sn5,
	&bcm_sn6,
	&bcm_sn7,
	&bcm_sn8,
	&bcm_sn9,
	&bcm_sn10,
};
/* system_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sdx65.h. */
static struct qcom_icc_node * const system_noc_nodes[] = {
	[MASTER_AUDIO] = &qhm_audio,
	[MASTER_BLSP_1] = &qhm_blsp1,
	[MASTER_QDSS_BAM] = &qhm_qdss_bam,
	[MASTER_QPIC] = &qhm_qpic,
	[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
	[MASTER_SPMI_FETCHER] = &qhm_spmi_fetcher1,
	[MASTER_ANOC_SNOC] = &qnm_aggre_noc,
	[MASTER_IPA] = &qnm_ipa,
	[MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
	[MASTER_MEM_NOC_PCIE_SNOC] = &qnm_memnoc_pcie,
	[MASTER_CRYPTO] = &qxm_crypto,
	[MASTER_IPA_PCIE] = &xm_ipa2pcie_slv,
	[MASTER_PCIE_0] = &xm_pcie,
	[MASTER_QDSS_ETR] = &xm_qdss_etr,
	[MASTER_SDCC_1] = &xm_sdc1,
	[MASTER_USB3] = &xm_usb3,
	[SLAVE_AOSS] = &qhs_aoss,
	[SLAVE_APPSS] = &qhs_apss,
	[SLAVE_AUDIO] = &qhs_audio,
	[SLAVE_BLSP_1] = &qhs_blsp1,
	[SLAVE_CLK_CTL] = &qhs_clk_ctl,
	[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
	[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
	[SLAVE_ECC_CFG] = &qhs_ecc_cfg,
	[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
	[SLAVE_IPA_CFG] = &qhs_ipa,
	[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
	[SLAVE_PCIE_PARF] = &qhs_pcie_parf,
	[SLAVE_PDM] = &qhs_pdm,
	[SLAVE_PRNG] = &qhs_prng,
	[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
	[SLAVE_QPIC] = &qhs_qpic,
	[SLAVE_SDCC_1] = &qhs_sdc1,
	[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
	[SLAVE_SPMI_FETCHER] = &qhs_spmi_fetcher,
	[SLAVE_SPMI_VGI_COEX] = &qhs_spmi_vgi_coex,
	[SLAVE_TCSR] = &qhs_tcsr,
	[SLAVE_TLMM] = &qhs_tlmm,
	[SLAVE_USB3] = &qhs_usb3,
	[SLAVE_USB3_PHY_CFG] = &qhs_usb3_phy,
	[SLAVE_ANOC_SNOC] = &qns_aggre_noc,
	[SLAVE_SNOC_MEM_NOC_GC] = &qns_snoc_memnoc,
	[SLAVE_IMEM] = &qxs_imem,
	[SLAVE_SERVICE_SNOC] = &srvc_snoc,
	[SLAVE_PCIE_0] = &xs_pcie,
	[SLAVE_QDSS_STM] = &xs_qdss_stm,
	[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
/* Provider descriptor matched from the "qcom,sdx65-system-noc" compatible. */
static const struct qcom_icc_desc sdx65_system_noc = {
	.nodes = system_noc_nodes,
	.num_nodes = ARRAY_SIZE(system_noc_nodes),
	.bcms = system_noc_bcms,
	.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
/* Map each DT compatible string to the matching NoC provider descriptor. */
static const struct of_device_id qnoc_of_match[] = {
	{ .compatible = "qcom,sdx65-mc-virt",
	  .data = &sdx65_mc_virt},
	{ .compatible = "qcom,sdx65-mem-noc",
	  .data = &sdx65_mem_noc},
	{ .compatible = "qcom,sdx65-system-noc",
	  .data = &sdx65_system_noc},
	{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
/*
 * Platform driver glue: probe/remove are the shared icc-rpmh helpers
 * (icc-rpmh.h); icc_sync_state is the interconnect framework's
 * sync_state handler.
 */
static struct platform_driver qnoc_driver = {
	.probe = qcom_icc_rpmh_probe,
	.remove = qcom_icc_rpmh_remove,
	.driver = {
		.name = "qnoc-sdx65",
		.of_match_table = qnoc_of_match,
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SDX65 NoC driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
 */
/* Provider-local node ids for the SDX65 interconnect topology. */
#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX65_H
#define __DRIVERS_INTERCONNECT_QCOM_SDX65_H
/* Master node ids. */
#define SDX65_MASTER_TCU_0 0
#define SDX65_MASTER_LLCC 1
#define SDX65_MASTER_AUDIO 2
#define SDX65_MASTER_BLSP_1 3
#define SDX65_MASTER_QDSS_BAM 4
#define SDX65_MASTER_QPIC 5
#define SDX65_MASTER_SNOC_CFG 6
#define SDX65_MASTER_SPMI_FETCHER 7
#define SDX65_MASTER_ANOC_SNOC 8
#define SDX65_MASTER_IPA 9
#define SDX65_MASTER_MEM_NOC_SNOC 10
#define SDX65_MASTER_MEM_NOC_PCIE_SNOC 11
#define SDX65_MASTER_SNOC_GC_MEM_NOC 12
#define SDX65_MASTER_CRYPTO 13
#define SDX65_MASTER_APPSS_PROC 14
#define SDX65_MASTER_IPA_PCIE 15
#define SDX65_MASTER_PCIE_0 16
#define SDX65_MASTER_QDSS_ETR 17
#define SDX65_MASTER_SDCC_1 18
#define SDX65_MASTER_USB3 19
/* Slave node ids (numbered from 512). */
#define SDX65_SLAVE_EBI1 512
#define SDX65_SLAVE_AOSS 513
#define SDX65_SLAVE_APPSS 514
#define SDX65_SLAVE_AUDIO 515
#define SDX65_SLAVE_BLSP_1 516
#define SDX65_SLAVE_CLK_CTL 517
#define SDX65_SLAVE_CRYPTO_0_CFG 518
#define SDX65_SLAVE_CNOC_DDRSS 519
#define SDX65_SLAVE_ECC_CFG 520
#define SDX65_SLAVE_IMEM_CFG 521
#define SDX65_SLAVE_IPA_CFG 522
#define SDX65_SLAVE_CNOC_MSS 523
#define SDX65_SLAVE_PCIE_PARF 524
#define SDX65_SLAVE_PDM 525
#define SDX65_SLAVE_PRNG 526
#define SDX65_SLAVE_QDSS_CFG 527
#define SDX65_SLAVE_QPIC 528
#define SDX65_SLAVE_SDCC_1 529
#define SDX65_SLAVE_SNOC_CFG 530
#define SDX65_SLAVE_SPMI_FETCHER 531
#define SDX65_SLAVE_SPMI_VGI_COEX 532
#define SDX65_SLAVE_TCSR 533
#define SDX65_SLAVE_TLMM 534
#define SDX65_SLAVE_USB3 535
#define SDX65_SLAVE_USB3_PHY_CFG 536
#define SDX65_SLAVE_ANOC_SNOC 537
#define SDX65_SLAVE_LLCC 538
#define SDX65_SLAVE_MEM_NOC_SNOC 539
#define SDX65_SLAVE_SNOC_MEM_NOC_GC 540
#define SDX65_SLAVE_MEM_NOC_PCIE_SNOC 541
#define SDX65_SLAVE_IMEM 542
#define SDX65_SLAVE_SERVICE_SNOC 543
#define SDX65_SLAVE_PCIE_0 544
#define SDX65_SLAVE_QDSS_STM 545
#define SDX65_SLAVE_TCU 546
#endif /* __DRIVERS_INTERCONNECT_QCOM_SDX65_H */

View File

@ -0,0 +1,493 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
*/
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/interconnect-provider.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <dt-bindings/interconnect/qcom,sm6350.h>
#include "bcm-voter.h"
#include "icc-rpmh.h"
#include "sm6350.h"
/*
 * SM6350 interconnect node table. DEFINE_QNODE() comes from icc-rpmh.h;
 * each entry names a node, gives its DT-binding id (sm6350.h) and two
 * numeric parameters (presumably channel count and bus width in bytes —
 * confirm against the macro definition in icc-rpmh.h), followed by the
 * ids of the nodes it links towards.
 */
DEFINE_QNODE(qhm_a1noc_cfg, SM6350_MASTER_A1NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A1NOC);
DEFINE_QNODE(qhm_qup_0, SM6350_MASTER_QUP_0, 1, 4, SM6350_A1NOC_SNOC_SLV);
DEFINE_QNODE(xm_emmc, SM6350_MASTER_EMMC, 1, 8, SM6350_A1NOC_SNOC_SLV);
DEFINE_QNODE(xm_ufs_mem, SM6350_MASTER_UFS_MEM, 1, 8, SM6350_A1NOC_SNOC_SLV);
DEFINE_QNODE(qhm_a2noc_cfg, SM6350_MASTER_A2NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_A2NOC);
DEFINE_QNODE(qhm_qdss_bam, SM6350_MASTER_QDSS_BAM, 1, 4, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(qhm_qup_1, SM6350_MASTER_QUP_1, 1, 4, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(qxm_crypto, SM6350_MASTER_CRYPTO_CORE_0, 1, 8, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(qxm_ipa, SM6350_MASTER_IPA, 1, 8, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(xm_qdss_etr, SM6350_MASTER_QDSS_ETR, 1, 8, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(xm_sdc2, SM6350_MASTER_SDCC_2, 1, 8, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(xm_usb3_0, SM6350_MASTER_USB3, 1, 8, SM6350_A2NOC_SNOC_SLV);
DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SM6350_MASTER_CAMNOC_HF0_UNCOMP, 2, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qxm_camnoc_icp_uncomp, SM6350_MASTER_CAMNOC_ICP_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qxm_camnoc_sf_uncomp, SM6350_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SM6350_SLAVE_CAMNOC_UNCOMP);
DEFINE_QNODE(qup0_core_master, SM6350_MASTER_QUP_CORE_0, 1, 4, SM6350_SLAVE_QUP_CORE_0);
DEFINE_QNODE(qup1_core_master, SM6350_MASTER_QUP_CORE_1, 1, 4, SM6350_SLAVE_QUP_CORE_1);
DEFINE_QNODE(qnm_npu, SM6350_MASTER_NPU, 2, 32, SM6350_SLAVE_CDSP_GEM_NOC);
DEFINE_QNODE(qxm_npu_dsp, SM6350_MASTER_NPU_PROC, 1, 8, SM6350_SLAVE_CDSP_GEM_NOC);
DEFINE_QNODE(qnm_snoc, SM6350_SNOC_CNOC_MAS, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
DEFINE_QNODE(xm_qdss_dap, SM6350_MASTER_QDSS_DAP, 1, 8, SM6350_SLAVE_CAMERA_CFG, SM6350_SLAVE_SDCC_2, SM6350_SLAVE_CNOC_MNOC_CFG, SM6350_SLAVE_UFS_MEM_CFG, SM6350_SLAVE_QM_CFG, SM6350_SLAVE_SNOC_CFG, SM6350_SLAVE_QM_MPU_CFG, SM6350_SLAVE_GLM, SM6350_SLAVE_PDM, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, SM6350_SLAVE_A2NOC_CFG, SM6350_SLAVE_QDSS_CFG, SM6350_SLAVE_VSENSE_CTRL_CFG, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, SM6350_SLAVE_DISPLAY_CFG, SM6350_SLAVE_TCSR, SM6350_SLAVE_DCC_CFG, SM6350_SLAVE_CNOC_DDRSS, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, SM6350_SLAVE_NPU_CFG, SM6350_SLAVE_AHB2PHY, SM6350_SLAVE_GRAPHICS_3D_CFG, SM6350_SLAVE_BOOT_ROM, SM6350_SLAVE_VENUS_CFG, SM6350_SLAVE_IPA_CFG, SM6350_SLAVE_SECURITY, SM6350_SLAVE_IMEM_CFG, SM6350_SLAVE_CNOC_MSS, SM6350_SLAVE_SERVICE_CNOC, SM6350_SLAVE_USB3, SM6350_SLAVE_VENUS_THROTTLE_CFG, SM6350_SLAVE_RBCPR_CX_CFG, SM6350_SLAVE_A1NOC_CFG, SM6350_SLAVE_AOSS, SM6350_SLAVE_PRNG, SM6350_SLAVE_EMMC_CFG, SM6350_SLAVE_CRYPTO_0_CFG, SM6350_SLAVE_PIMEM_CFG, SM6350_SLAVE_RBCPR_MX_CFG, SM6350_SLAVE_QUP_0, SM6350_SLAVE_QUP_1, SM6350_SLAVE_CLK_CTL);
DEFINE_QNODE(qhm_cnoc_dc_noc, SM6350_MASTER_CNOC_DC_NOC, 1, 4, SM6350_SLAVE_LLCC_CFG, SM6350_SLAVE_GEM_NOC_CFG);
DEFINE_QNODE(acm_apps, SM6350_MASTER_AMPSS_M0, 1, 16, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(acm_sys_tcu, SM6350_MASTER_SYS_TCU, 1, 8, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(qhm_gemnoc_cfg, SM6350_MASTER_GEM_NOC_CFG, 1, 4, SM6350_SLAVE_MCDMA_MS_MPU_CFG, SM6350_SLAVE_SERVICE_GEM_NOC, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG);
DEFINE_QNODE(qnm_cmpnoc, SM6350_MASTER_COMPUTE_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(qnm_mnoc_hf, SM6350_MASTER_MNOC_HF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(qnm_mnoc_sf, SM6350_MASTER_MNOC_SF_MEM_NOC, 1, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(qnm_snoc_gc, SM6350_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM6350_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SM6350_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM6350_SLAVE_LLCC);
DEFINE_QNODE(qxm_gpu, SM6350_MASTER_GRAPHICS_3D, 2, 32, SM6350_SLAVE_LLCC, SM6350_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(llcc_mc, SM6350_MASTER_LLCC, 2, 4, SM6350_SLAVE_EBI_CH0);
DEFINE_QNODE(qhm_mnoc_cfg, SM6350_MASTER_CNOC_MNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qnm_video0, SM6350_MASTER_VIDEO_P0, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qnm_video_cvp, SM6350_MASTER_VIDEO_PROC, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_camnoc_hf, SM6350_MASTER_CAMNOC_HF, 2, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qxm_camnoc_icp, SM6350_MASTER_CAMNOC_ICP, 1, 8, SM6350_SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_camnoc_sf, SM6350_MASTER_CAMNOC_SF, 1, 32, SM6350_SLAVE_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qxm_mdp0, SM6350_MASTER_MDP_PORT0, 1, 32, SM6350_SLAVE_MNOC_HF_MEM_NOC);
DEFINE_QNODE(amm_npu_sys, SM6350_MASTER_NPU_SYS, 2, 32, SM6350_SLAVE_NPU_COMPUTE_NOC);
DEFINE_QNODE(qhm_npu_cfg, SM6350_MASTER_NPU_NOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_NPU_NOC, SM6350_SLAVE_ISENSE_CFG, SM6350_SLAVE_NPU_LLM_CFG, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, SM6350_SLAVE_NPU_CP, SM6350_SLAVE_NPU_TCM, SM6350_SLAVE_NPU_CAL_DP0, SM6350_SLAVE_NPU_DPM);
DEFINE_QNODE(qhm_snoc_cfg, SM6350_MASTER_SNOC_CFG, 1, 4, SM6350_SLAVE_SERVICE_SNOC);
DEFINE_QNODE(qnm_aggre1_noc, SM6350_A1NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_QDSS_STM);
DEFINE_QNODE(qnm_aggre2_noc, SM6350_A2NOC_SNOC_MAS, 1, 16, SM6350_SLAVE_SNOC_GEM_NOC_SF, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
DEFINE_QNODE(qnm_gemnoc, SM6350_MASTER_GEM_NOC_SNOC, 1, 8, SM6350_SLAVE_PIMEM, SM6350_SLAVE_OCIMEM, SM6350_SLAVE_APPSS, SM6350_SNOC_CNOC_SLV, SM6350_SLAVE_TCU, SM6350_SLAVE_QDSS_STM);
DEFINE_QNODE(qxm_pimem, SM6350_MASTER_PIMEM, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC, SM6350_SLAVE_OCIMEM);
DEFINE_QNODE(xm_gic, SM6350_MASTER_GIC, 1, 8, SM6350_SLAVE_SNOC_GEM_NOC_GC);
/* Slave-side nodes; entries with trailing ids forward onto further nodes. */
DEFINE_QNODE(qns_a1noc_snoc, SM6350_A1NOC_SNOC_SLV, 1, 16, SM6350_A1NOC_SNOC_MAS);
DEFINE_QNODE(srvc_aggre1_noc, SM6350_SLAVE_SERVICE_A1NOC, 1, 4);
DEFINE_QNODE(qns_a2noc_snoc, SM6350_A2NOC_SNOC_SLV, 1, 16, SM6350_A2NOC_SNOC_MAS);
DEFINE_QNODE(srvc_aggre2_noc, SM6350_SLAVE_SERVICE_A2NOC, 1, 4);
DEFINE_QNODE(qns_camnoc_uncomp, SM6350_SLAVE_CAMNOC_UNCOMP, 1, 32);
DEFINE_QNODE(qup0_core_slave, SM6350_SLAVE_QUP_CORE_0, 1, 4);
DEFINE_QNODE(qup1_core_slave, SM6350_SLAVE_QUP_CORE_1, 1, 4);
DEFINE_QNODE(qns_cdsp_gemnoc, SM6350_SLAVE_CDSP_GEM_NOC, 1, 32, SM6350_MASTER_COMPUTE_NOC);
DEFINE_QNODE(qhs_a1_noc_cfg, SM6350_SLAVE_A1NOC_CFG, 1, 4, SM6350_MASTER_A1NOC_CFG);
DEFINE_QNODE(qhs_a2_noc_cfg, SM6350_SLAVE_A2NOC_CFG, 1, 4, SM6350_MASTER_A2NOC_CFG);
DEFINE_QNODE(qhs_ahb2phy0, SM6350_SLAVE_AHB2PHY, 1, 4);
DEFINE_QNODE(qhs_ahb2phy2, SM6350_SLAVE_AHB2PHY_2, 1, 4);
DEFINE_QNODE(qhs_aoss, SM6350_SLAVE_AOSS, 1, 4);
DEFINE_QNODE(qhs_boot_rom, SM6350_SLAVE_BOOT_ROM, 1, 4);
DEFINE_QNODE(qhs_camera_cfg, SM6350_SLAVE_CAMERA_CFG, 1, 4);
DEFINE_QNODE(qhs_camera_nrt_thrott_cfg, SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG, 1, 4);
DEFINE_QNODE(qhs_camera_rt_throttle_cfg, SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG, 1, 4);
DEFINE_QNODE(qhs_clk_ctl, SM6350_SLAVE_CLK_CTL, 1, 4);
DEFINE_QNODE(qhs_cpr_cx, SM6350_SLAVE_RBCPR_CX_CFG, 1, 4);
DEFINE_QNODE(qhs_cpr_mx, SM6350_SLAVE_RBCPR_MX_CFG, 1, 4);
DEFINE_QNODE(qhs_crypto0_cfg, SM6350_SLAVE_CRYPTO_0_CFG, 1, 4);
DEFINE_QNODE(qhs_dcc_cfg, SM6350_SLAVE_DCC_CFG, 1, 4);
DEFINE_QNODE(qhs_ddrss_cfg, SM6350_SLAVE_CNOC_DDRSS, 1, 4, SM6350_MASTER_CNOC_DC_NOC);
DEFINE_QNODE(qhs_display_cfg, SM6350_SLAVE_DISPLAY_CFG, 1, 4);
DEFINE_QNODE(qhs_display_throttle_cfg, SM6350_SLAVE_DISPLAY_THROTTLE_CFG, 1, 4);
DEFINE_QNODE(qhs_emmc_cfg, SM6350_SLAVE_EMMC_CFG, 1, 4);
DEFINE_QNODE(qhs_glm, SM6350_SLAVE_GLM, 1, 4);
DEFINE_QNODE(qhs_gpuss_cfg, SM6350_SLAVE_GRAPHICS_3D_CFG, 1, 8);
DEFINE_QNODE(qhs_imem_cfg, SM6350_SLAVE_IMEM_CFG, 1, 4);
DEFINE_QNODE(qhs_ipa, SM6350_SLAVE_IPA_CFG, 1, 4);
DEFINE_QNODE(qhs_mnoc_cfg, SM6350_SLAVE_CNOC_MNOC_CFG, 1, 4, SM6350_MASTER_CNOC_MNOC_CFG);
DEFINE_QNODE(qhs_mss_cfg, SM6350_SLAVE_CNOC_MSS, 1, 4);
DEFINE_QNODE(qhs_npu_cfg, SM6350_SLAVE_NPU_CFG, 1, 4, SM6350_MASTER_NPU_NOC_CFG);
DEFINE_QNODE(qhs_pdm, SM6350_SLAVE_PDM, 1, 4);
DEFINE_QNODE(qhs_pimem_cfg, SM6350_SLAVE_PIMEM_CFG, 1, 4);
DEFINE_QNODE(qhs_prng, SM6350_SLAVE_PRNG, 1, 4);
DEFINE_QNODE(qhs_qdss_cfg, SM6350_SLAVE_QDSS_CFG, 1, 4);
DEFINE_QNODE(qhs_qm_cfg, SM6350_SLAVE_QM_CFG, 1, 4);
DEFINE_QNODE(qhs_qm_mpu_cfg, SM6350_SLAVE_QM_MPU_CFG, 1, 4);
DEFINE_QNODE(qhs_qup0, SM6350_SLAVE_QUP_0, 1, 4);
DEFINE_QNODE(qhs_qup1, SM6350_SLAVE_QUP_1, 1, 4);
DEFINE_QNODE(qhs_sdc2, SM6350_SLAVE_SDCC_2, 1, 4);
DEFINE_QNODE(qhs_security, SM6350_SLAVE_SECURITY, 1, 4);
DEFINE_QNODE(qhs_snoc_cfg, SM6350_SLAVE_SNOC_CFG, 1, 4, SM6350_MASTER_SNOC_CFG);
DEFINE_QNODE(qhs_tcsr, SM6350_SLAVE_TCSR, 1, 4);
DEFINE_QNODE(qhs_ufs_mem_cfg, SM6350_SLAVE_UFS_MEM_CFG, 1, 4);
DEFINE_QNODE(qhs_usb3_0, SM6350_SLAVE_USB3, 1, 4);
DEFINE_QNODE(qhs_venus_cfg, SM6350_SLAVE_VENUS_CFG, 1, 4);
DEFINE_QNODE(qhs_venus_throttle_cfg, SM6350_SLAVE_VENUS_THROTTLE_CFG, 1, 4);
DEFINE_QNODE(qhs_vsense_ctrl_cfg, SM6350_SLAVE_VSENSE_CTRL_CFG, 1, 4);
DEFINE_QNODE(srvc_cnoc, SM6350_SLAVE_SERVICE_CNOC, 1, 4);
DEFINE_QNODE(qhs_gemnoc, SM6350_SLAVE_GEM_NOC_CFG, 1, 4, SM6350_MASTER_GEM_NOC_CFG);
DEFINE_QNODE(qhs_llcc, SM6350_SLAVE_LLCC_CFG, 1, 4);
DEFINE_QNODE(qhs_mcdma_ms_mpu_cfg, SM6350_SLAVE_MCDMA_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SM6350_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
DEFINE_QNODE(qns_gem_noc_snoc, SM6350_SLAVE_GEM_NOC_SNOC, 1, 8, SM6350_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SM6350_SLAVE_LLCC, 1, 16, SM6350_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SM6350_SLAVE_SERVICE_GEM_NOC, 1, 4);
DEFINE_QNODE(ebi, SM6350_SLAVE_EBI_CH0, 2, 4);
DEFINE_QNODE(qns_mem_noc_hf, SM6350_SLAVE_MNOC_HF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SM6350_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM6350_MASTER_MNOC_SF_MEM_NOC);
DEFINE_QNODE(srvc_mnoc, SM6350_SLAVE_SERVICE_MNOC, 1, 4);
DEFINE_QNODE(qhs_cal_dp0, SM6350_SLAVE_NPU_CAL_DP0, 1, 4);
DEFINE_QNODE(qhs_cp, SM6350_SLAVE_NPU_CP, 1, 4);
DEFINE_QNODE(qhs_dma_bwmon, SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG, 1, 4);
DEFINE_QNODE(qhs_dpm, SM6350_SLAVE_NPU_DPM, 1, 4);
DEFINE_QNODE(qhs_isense, SM6350_SLAVE_ISENSE_CFG, 1, 4);
DEFINE_QNODE(qhs_llm, SM6350_SLAVE_NPU_LLM_CFG, 1, 4);
DEFINE_QNODE(qhs_tcm, SM6350_SLAVE_NPU_TCM, 1, 4);
DEFINE_QNODE(qns_npu_sys, SM6350_SLAVE_NPU_COMPUTE_NOC, 2, 32);
DEFINE_QNODE(srvc_noc, SM6350_SLAVE_SERVICE_NPU_NOC, 1, 4);
DEFINE_QNODE(qhs_apss, SM6350_SLAVE_APPSS, 1, 8);
DEFINE_QNODE(qns_cnoc, SM6350_SNOC_CNOC_SLV, 1, 8, SM6350_SNOC_CNOC_MAS);
DEFINE_QNODE(qns_gemnoc_gc, SM6350_SLAVE_SNOC_GEM_NOC_GC, 1, 8, SM6350_MASTER_SNOC_GC_MEM_NOC);
DEFINE_QNODE(qns_gemnoc_sf, SM6350_SLAVE_SNOC_GEM_NOC_SF, 1, 16, SM6350_MASTER_SNOC_SF_MEM_NOC);
DEFINE_QNODE(qxs_imem, SM6350_SLAVE_OCIMEM, 1, 8);
DEFINE_QNODE(qxs_pimem, SM6350_SLAVE_PIMEM, 1, 8);
DEFINE_QNODE(srvc_snoc, SM6350_SLAVE_SERVICE_SNOC, 1, 4);
DEFINE_QNODE(xs_qdss_stm, SM6350_SLAVE_QDSS_STM, 1, 4);
DEFINE_QNODE(xs_sys_tcu_cfg, SM6350_SLAVE_TCU, 1, 8);
/*
 * BCM table. DEFINE_QBCM() comes from icc-rpmh.h; each entry ties a
 * BCM name string to the nodes it aggregates votes for. The boolean
 * presumably flags keepalive/always-on BCMs — confirm against the
 * macro definition in icc-rpmh.h.
 */
DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy0, &qhs_aoss, &qhs_boot_rom, &qhs_camera_cfg, &qhs_camera_nrt_thrott_cfg, &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl, &qhs_cpr_cx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_display_throttle_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_mss_cfg, &qhs_npu_cfg, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qm_cfg, &qhs_qm_mpu_cfg, &qhs_qup0, &qhs_qup1, &qhs_security, &qhs_snoc_cfg, &qhs_tcsr, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_venus_throttle_cfg, &qhs_vsense_ctrl_cfg, &srvc_cnoc);
DEFINE_QBCM(bcm_cn1, "CN1", false, &xm_emmc, &xm_sdc2, &qhs_ahb2phy2, &qhs_emmc_cfg, &qhs_pdm, &qhs_sdc2);
DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_gemnoc);
DEFINE_QBCM(bcm_co2, "CO2", false, &qnm_npu);
DEFINE_QBCM(bcm_co3, "CO3", false, &qxm_npu_dsp);
DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_icp_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf, &qxm_mdp0);
DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
DEFINE_QBCM(bcm_mm3, "MM3", false, &qhm_mnoc_cfg, &qnm_video0, &qnm_video_cvp, &qxm_camnoc_sf);
DEFINE_QBCM(bcm_qup0, "QUP0", false, &qup0_core_master, &qup1_core_master, &qup0_core_slave, &qup1_core_slave);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_sh2, "SH2", false, &acm_sys_tcu);
DEFINE_QBCM(bcm_sh3, "SH3", false, &qnm_cmpnoc);
DEFINE_QBCM(bcm_sh4, "SH4", false, &acm_apps);
DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_gemnoc_sf);
DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
DEFINE_QBCM(bcm_sn3, "SN3", false, &qxs_pimem);
DEFINE_QBCM(bcm_sn4, "SN4", false, &xs_qdss_stm);
DEFINE_QBCM(bcm_sn5, "SN5", false, &qnm_aggre1_noc);
DEFINE_QBCM(bcm_sn6, "SN6", false, &qnm_aggre2_noc);
DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_gemnoc);
/* BCMs voted on by the aggre1_noc provider. */
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
	&bcm_cn1,
};
/* aggre1_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sm6350.h. */
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
	[MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
	[MASTER_QUP_0] = &qhm_qup_0,
	[MASTER_EMMC] = &xm_emmc,
	[MASTER_UFS_MEM] = &xm_ufs_mem,
	[A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
	[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
};
/* Provider descriptor for the SM6350 aggre1 NoC. */
static const struct qcom_icc_desc sm6350_aggre1_noc = {
	.nodes = aggre1_noc_nodes,
	.num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
	.bcms = aggre1_noc_bcms,
	.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
/* BCMs voted on by the aggre2_noc provider. */
static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
	&bcm_ce0,
	&bcm_cn1,
};
/* aggre2_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sm6350.h. */
static struct qcom_icc_node * const aggre2_noc_nodes[] = {
	[MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
	[MASTER_QDSS_BAM] = &qhm_qdss_bam,
	[MASTER_QUP_1] = &qhm_qup_1,
	[MASTER_CRYPTO_CORE_0] = &qxm_crypto,
	[MASTER_IPA] = &qxm_ipa,
	[MASTER_QDSS_ETR] = &xm_qdss_etr,
	[MASTER_SDCC_2] = &xm_sdc2,
	[MASTER_USB3] = &xm_usb3_0,
	[A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
	[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
};
/* Provider descriptor for the SM6350 aggre2 NoC. */
static const struct qcom_icc_desc sm6350_aggre2_noc = {
	.nodes = aggre2_noc_nodes,
	.num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
	.bcms = aggre2_noc_bcms,
	.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
/* BCMs voted on by the clk_virt provider. */
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
	&bcm_acv,
	&bcm_mc0,
	&bcm_mm1,
	&bcm_qup0,
};
/* clk_virt nodes, indexed by the ids from dt-bindings/interconnect/qcom,sm6350.h. */
static struct qcom_icc_node * const clk_virt_nodes[] = {
	[MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
	[MASTER_CAMNOC_ICP_UNCOMP] = &qxm_camnoc_icp_uncomp,
	[MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
	[MASTER_QUP_CORE_0] = &qup0_core_master,
	[MASTER_QUP_CORE_1] = &qup1_core_master,
	[MASTER_LLCC] = &llcc_mc,
	[SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
	[SLAVE_QUP_CORE_0] = &qup0_core_slave,
	[SLAVE_QUP_CORE_1] = &qup1_core_slave,
	[SLAVE_EBI_CH0] = &ebi,
};
/* Provider descriptor for the SM6350 clk_virt (clock virtual) NoC. */
static const struct qcom_icc_desc sm6350_clk_virt = {
	.nodes = clk_virt_nodes,
	.num_nodes = ARRAY_SIZE(clk_virt_nodes),
	.bcms = clk_virt_bcms,
	.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
/* BCMs voted on by the compute_noc provider. */
static struct qcom_icc_bcm * const compute_noc_bcms[] = {
	&bcm_co0,
	&bcm_co2,
	&bcm_co3,
};
/* compute_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sm6350.h. */
static struct qcom_icc_node * const compute_noc_nodes[] = {
	[MASTER_NPU] = &qnm_npu,
	[MASTER_NPU_PROC] = &qxm_npu_dsp,
	[SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
};
/* Provider descriptor for the SM6350 compute NoC. */
static const struct qcom_icc_desc sm6350_compute_noc = {
	.nodes = compute_noc_nodes,
	.num_nodes = ARRAY_SIZE(compute_noc_nodes),
	.bcms = compute_noc_bcms,
	.num_bcms = ARRAY_SIZE(compute_noc_bcms),
};
/* BCMs voted on by the config_noc provider. */
static struct qcom_icc_bcm * const config_noc_bcms[] = {
	&bcm_cn0,
	&bcm_cn1,
};
/* config_noc nodes, indexed by the ids from dt-bindings/interconnect/qcom,sm6350.h. */
static struct qcom_icc_node * const config_noc_nodes[] = {
	[SNOC_CNOC_MAS] = &qnm_snoc,
	[MASTER_QDSS_DAP] = &xm_qdss_dap,
	[SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
	[SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
	[SLAVE_AHB2PHY] = &qhs_ahb2phy0,
	[SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
	[SLAVE_AOSS] = &qhs_aoss,
	[SLAVE_BOOT_ROM] = &qhs_boot_rom,
	[SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
	[SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_thrott_cfg,
	[SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
	[SLAVE_CLK_CTL] = &qhs_clk_ctl,
	[SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
	[SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
	[SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
	[SLAVE_DCC_CFG] = &qhs_dcc_cfg,
	[SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
	[SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
	[SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
	[SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
	[SLAVE_GLM] = &qhs_glm,
	[SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
	[SLAVE_IMEM_CFG] = &qhs_imem_cfg,
	[SLAVE_IPA_CFG] = &qhs_ipa,
	[SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
	[SLAVE_CNOC_MSS] = &qhs_mss_cfg,
	[SLAVE_NPU_CFG] = &qhs_npu_cfg,
	[SLAVE_PDM] = &qhs_pdm,
	[SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
	[SLAVE_PRNG] = &qhs_prng,
	[SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
	[SLAVE_QM_CFG] = &qhs_qm_cfg,
	[SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
	[SLAVE_QUP_0] = &qhs_qup0,
	[SLAVE_QUP_1] = &qhs_qup1,
	[SLAVE_SDCC_2] = &qhs_sdc2,
	[SLAVE_SECURITY] = &qhs_security,
	[SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
	[SLAVE_TCSR] = &qhs_tcsr,
	[SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
	[SLAVE_USB3] = &qhs_usb3_0,
	[SLAVE_VENUS_CFG] = &qhs_venus_cfg,
	[SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
	[SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
	[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
};
/* Provider descriptor for the SM6350 config NoC. */
static const struct qcom_icc_desc sm6350_config_noc = {
	.nodes = config_noc_nodes,
	.num_nodes = ARRAY_SIZE(config_noc_nodes),
	.bcms = config_noc_bcms,
	.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
/* The DC NoC votes no BCMs of its own; the array is intentionally empty. */
static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
/* DC NoC topology: config path from CNOC into GEM NoC / LLCC config. */
static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
[SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
};
/* Provider descriptor for the SM6350 DC NoC. */
static const struct qcom_icc_desc sm6350_dc_noc = {
.nodes = dc_noc_nodes,
.num_nodes = ARRAY_SIZE(dc_noc_nodes),
.bcms = dc_noc_bcms,
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
/* Bus Clock Managers voted on by GEM NoC paths. */
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
&bcm_sh3,
&bcm_sh4,
};
/* GEM NoC topology: CPU/GPU/multimedia masters toward LLCC and SNoC. */
static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_AMPSS_M0] = &acm_apps,
[MASTER_SYS_TCU] = &acm_sys_tcu,
[MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
[MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
[MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
[MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
[MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
[MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
[MASTER_GRAPHICS_3D] = &qxm_gpu,
[SLAVE_MCDMA_MS_MPU_CFG] = &qhs_mcdma_ms_mpu_cfg,
[SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
[SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
};
/* Provider descriptor for the SM6350 GEM NoC (memory NoC). */
static const struct qcom_icc_desc sm6350_gem_noc = {
.nodes = gem_noc_nodes,
.num_nodes = ARRAY_SIZE(gem_noc_nodes),
.bcms = gem_noc_bcms,
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
/* Bus Clock Managers voted on by MMSS NoC paths. */
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
&bcm_mm2,
&bcm_mm3,
};
/* MMSS NoC topology: camera/video/display masters toward the memory NoC. */
static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
[MASTER_VIDEO_P0] = &qnm_video0,
[MASTER_VIDEO_PROC] = &qnm_video_cvp,
[MASTER_CAMNOC_HF] = &qxm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qxm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
};
/* Provider descriptor for the SM6350 MMSS (multimedia) NoC. */
static const struct qcom_icc_desc sm6350_mmss_noc = {
.nodes = mmss_noc_nodes,
.num_nodes = ARRAY_SIZE(mmss_noc_nodes),
.bcms = mmss_noc_bcms,
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
/* The NPU NoC votes no BCMs of its own; the array is intentionally empty. */
static struct qcom_icc_bcm * const npu_noc_bcms[] = {
};
/* NPU NoC topology: NPU system master and its local configuration slaves. */
static struct qcom_icc_node * const npu_noc_nodes[] = {
[MASTER_NPU_SYS] = &amm_npu_sys,
[MASTER_NPU_NOC_CFG] = &qhm_npu_cfg,
[SLAVE_NPU_CAL_DP0] = &qhs_cal_dp0,
[SLAVE_NPU_CP] = &qhs_cp,
[SLAVE_NPU_INT_DMA_BWMON_CFG] = &qhs_dma_bwmon,
[SLAVE_NPU_DPM] = &qhs_dpm,
[SLAVE_ISENSE_CFG] = &qhs_isense,
[SLAVE_NPU_LLM_CFG] = &qhs_llm,
[SLAVE_NPU_TCM] = &qhs_tcm,
[SLAVE_NPU_COMPUTE_NOC] = &qns_npu_sys,
[SLAVE_SERVICE_NPU_NOC] = &srvc_noc,
};
/* Provider descriptor for the SM6350 NPU NoC. */
static const struct qcom_icc_desc sm6350_npu_noc = {
.nodes = npu_noc_nodes,
.num_nodes = ARRAY_SIZE(npu_noc_nodes),
.bcms = npu_noc_bcms,
.num_bcms = ARRAY_SIZE(npu_noc_bcms),
};
/* Bus Clock Managers voted on by System NoC paths. */
static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn10,
&bcm_sn2,
&bcm_sn3,
&bcm_sn4,
&bcm_sn5,
&bcm_sn6,
};
/* System NoC topology: aggre NoC/GEM NoC bridges, PIMEM, GIC and misc slaves. */
static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
[A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
[MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
[MASTER_PIMEM] = &qxm_pimem,
[MASTER_GIC] = &xm_gic,
[SLAVE_APPSS] = &qhs_apss,
[SNOC_CNOC_SLV] = &qns_cnoc,
[SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
[SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
[SLAVE_OCIMEM] = &qxs_imem,
[SLAVE_PIMEM] = &qxs_pimem,
[SLAVE_SERVICE_SNOC] = &srvc_snoc,
[SLAVE_QDSS_STM] = &xs_qdss_stm,
[SLAVE_TCU] = &xs_sys_tcu_cfg,
};
/* Provider descriptor for the SM6350 System NoC. */
static const struct qcom_icc_desc sm6350_system_noc = {
.nodes = system_noc_nodes,
.num_nodes = ARRAY_SIZE(system_noc_nodes),
.bcms = system_noc_bcms,
.num_bcms = ARRAY_SIZE(system_noc_bcms),
};
/*
 * Device-tree match table: one compatible per NoC provider, each carrying
 * its qcom_icc_desc so the shared RPMh probe can register the right topology.
 */
static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sm6350-aggre1-noc",
.data = &sm6350_aggre1_noc},
{ .compatible = "qcom,sm6350-aggre2-noc",
.data = &sm6350_aggre2_noc},
{ .compatible = "qcom,sm6350-clk-virt",
.data = &sm6350_clk_virt},
{ .compatible = "qcom,sm6350-compute-noc",
.data = &sm6350_compute_noc},
{ .compatible = "qcom,sm6350-config-noc",
.data = &sm6350_config_noc},
{ .compatible = "qcom,sm6350-dc-noc",
.data = &sm6350_dc_noc},
{ .compatible = "qcom,sm6350-gem-noc",
.data = &sm6350_gem_noc},
{ .compatible = "qcom,sm6350-mmss-noc",
.data = &sm6350_mmss_noc},
{ .compatible = "qcom,sm6350-npu-noc",
.data = &sm6350_npu_noc},
{ .compatible = "qcom,sm6350-system-noc",
.data = &sm6350_system_noc},
{ }
};
MODULE_DEVICE_TABLE(of, qnoc_of_match);
/*
 * Platform driver glue: probe/remove come from the common Qualcomm RPMh
 * interconnect code; icc_sync_state drops boot-time bandwidth votes once
 * all consumers have voted.
 */
static struct platform_driver qnoc_driver = {
.probe = qcom_icc_rpmh_probe,
.remove = qcom_icc_rpmh_remove,
.driver = {
.name = "qnoc-sm6350",
.of_match_table = qnoc_of_match,
.sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
MODULE_DESCRIPTION("Qualcomm SM6350 NoC driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,139 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Qualcomm SM6350 interconnect IDs
*
* Copyright (C) 2022 Luca Weiss <luca.weiss@fairphone.com>
*/
#ifndef __DRIVERS_INTERCONNECT_QCOM_SM6350_H
#define __DRIVERS_INTERCONNECT_QCOM_SM6350_H
#define SM6350_A1NOC_SNOC_MAS 0
#define SM6350_A1NOC_SNOC_SLV 1
#define SM6350_A2NOC_SNOC_MAS 2
#define SM6350_A2NOC_SNOC_SLV 3
#define SM6350_MASTER_A1NOC_CFG 4
#define SM6350_MASTER_A2NOC_CFG 5
#define SM6350_MASTER_AMPSS_M0 6
#define SM6350_MASTER_CAMNOC_HF 7
#define SM6350_MASTER_CAMNOC_HF0_UNCOMP 8
#define SM6350_MASTER_CAMNOC_ICP 9
#define SM6350_MASTER_CAMNOC_ICP_UNCOMP 10
#define SM6350_MASTER_CAMNOC_SF 11
#define SM6350_MASTER_CAMNOC_SF_UNCOMP 12
#define SM6350_MASTER_CNOC_DC_NOC 13
#define SM6350_MASTER_CNOC_MNOC_CFG 14
#define SM6350_MASTER_COMPUTE_NOC 15
#define SM6350_MASTER_CRYPTO_CORE_0 16
#define SM6350_MASTER_EMMC 17
#define SM6350_MASTER_GEM_NOC_CFG 18
#define SM6350_MASTER_GEM_NOC_SNOC 19
#define SM6350_MASTER_GIC 20
#define SM6350_MASTER_GRAPHICS_3D 21
#define SM6350_MASTER_IPA 22
#define SM6350_MASTER_LLCC 23
#define SM6350_MASTER_MDP_PORT0 24
#define SM6350_MASTER_MNOC_HF_MEM_NOC 25
#define SM6350_MASTER_MNOC_SF_MEM_NOC 26
#define SM6350_MASTER_NPU 27
#define SM6350_MASTER_NPU_NOC_CFG 28
#define SM6350_MASTER_NPU_PROC 29
#define SM6350_MASTER_NPU_SYS 30
#define SM6350_MASTER_PIMEM 31
#define SM6350_MASTER_QDSS_BAM 32
#define SM6350_MASTER_QDSS_DAP 33
#define SM6350_MASTER_QDSS_ETR 34
#define SM6350_MASTER_QUP_0 35
#define SM6350_MASTER_QUP_1 36
#define SM6350_MASTER_QUP_CORE_0 37
#define SM6350_MASTER_QUP_CORE_1 38
#define SM6350_MASTER_SDCC_2 39
#define SM6350_MASTER_SNOC_CFG 40
#define SM6350_MASTER_SNOC_GC_MEM_NOC 41
#define SM6350_MASTER_SNOC_SF_MEM_NOC 42
#define SM6350_MASTER_SYS_TCU 43
#define SM6350_MASTER_UFS_MEM 44
#define SM6350_MASTER_USB3 45
#define SM6350_MASTER_VIDEO_P0 46
#define SM6350_MASTER_VIDEO_PROC 47
#define SM6350_SLAVE_A1NOC_CFG 48
#define SM6350_SLAVE_A2NOC_CFG 49
#define SM6350_SLAVE_AHB2PHY 50
#define SM6350_SLAVE_AHB2PHY_2 51
#define SM6350_SLAVE_AOSS 52
#define SM6350_SLAVE_APPSS 53
#define SM6350_SLAVE_BOOT_ROM 54
#define SM6350_SLAVE_CAMERA_CFG 55
#define SM6350_SLAVE_CAMERA_NRT_THROTTLE_CFG 56
#define SM6350_SLAVE_CAMERA_RT_THROTTLE_CFG 57
#define SM6350_SLAVE_CAMNOC_UNCOMP 58
#define SM6350_SLAVE_CDSP_GEM_NOC 59
#define SM6350_SLAVE_CLK_CTL 60
#define SM6350_SLAVE_CNOC_DDRSS 61
#define SM6350_SLAVE_CNOC_MNOC_CFG 62
#define SM6350_SLAVE_CNOC_MSS 63
#define SM6350_SLAVE_CRYPTO_0_CFG 64
#define SM6350_SLAVE_DCC_CFG 65
#define SM6350_SLAVE_DISPLAY_CFG 66
#define SM6350_SLAVE_DISPLAY_THROTTLE_CFG 67
#define SM6350_SLAVE_EBI_CH0 68
#define SM6350_SLAVE_EMMC_CFG 69
#define SM6350_SLAVE_GEM_NOC_CFG 70
#define SM6350_SLAVE_GEM_NOC_SNOC 71
#define SM6350_SLAVE_GLM 72
#define SM6350_SLAVE_GRAPHICS_3D_CFG 73
#define SM6350_SLAVE_IMEM_CFG 74
#define SM6350_SLAVE_IPA_CFG 75
#define SM6350_SLAVE_ISENSE_CFG 76
#define SM6350_SLAVE_LLCC 77
#define SM6350_SLAVE_LLCC_CFG 78
#define SM6350_SLAVE_MCDMA_MS_MPU_CFG 79
#define SM6350_SLAVE_MNOC_HF_MEM_NOC 80
#define SM6350_SLAVE_MNOC_SF_MEM_NOC 81
#define SM6350_SLAVE_MSS_PROC_MS_MPU_CFG 82
#define SM6350_SLAVE_NPU_CAL_DP0 83
#define SM6350_SLAVE_NPU_CFG 84
#define SM6350_SLAVE_NPU_COMPUTE_NOC 85
#define SM6350_SLAVE_NPU_CP 86
#define SM6350_SLAVE_NPU_DPM 87
#define SM6350_SLAVE_NPU_INT_DMA_BWMON_CFG 88
#define SM6350_SLAVE_NPU_LLM_CFG 89
#define SM6350_SLAVE_NPU_TCM 90
#define SM6350_SLAVE_OCIMEM 91
#define SM6350_SLAVE_PDM 92
#define SM6350_SLAVE_PIMEM 93
#define SM6350_SLAVE_PIMEM_CFG 94
#define SM6350_SLAVE_PRNG 95
#define SM6350_SLAVE_QDSS_CFG 96
#define SM6350_SLAVE_QDSS_STM 97
#define SM6350_SLAVE_QM_CFG 98
#define SM6350_SLAVE_QM_MPU_CFG 99
#define SM6350_SLAVE_QUP_0 100
#define SM6350_SLAVE_QUP_1 101
#define SM6350_SLAVE_QUP_CORE_0 102
#define SM6350_SLAVE_QUP_CORE_1 103
#define SM6350_SLAVE_RBCPR_CX_CFG 104
#define SM6350_SLAVE_RBCPR_MX_CFG 105
#define SM6350_SLAVE_SDCC_2 106
#define SM6350_SLAVE_SECURITY 107
#define SM6350_SLAVE_SERVICE_A1NOC 108
#define SM6350_SLAVE_SERVICE_A2NOC 109
#define SM6350_SLAVE_SERVICE_CNOC 110
#define SM6350_SLAVE_SERVICE_GEM_NOC 111
#define SM6350_SLAVE_SERVICE_MNOC 112
#define SM6350_SLAVE_SERVICE_NPU_NOC 113
#define SM6350_SLAVE_SERVICE_SNOC 114
#define SM6350_SLAVE_SNOC_CFG 115
#define SM6350_SLAVE_SNOC_GEM_NOC_GC 116
#define SM6350_SLAVE_SNOC_GEM_NOC_SF 117
#define SM6350_SLAVE_TCSR 118
#define SM6350_SLAVE_TCU 119
#define SM6350_SLAVE_UFS_MEM_CFG 120
#define SM6350_SLAVE_USB3 121
#define SM6350_SLAVE_VENUS_CFG 122
#define SM6350_SLAVE_VENUS_THROTTLE_CFG 123
#define SM6350_SLAVE_VSENSE_CTRL_CFG 124
#define SM6350_SNOC_CNOC_MAS 125
#define SM6350_SNOC_CNOC_SLV 126
#endif

View File

@ -0,0 +1,142 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_device.h>
#include <linux/qcom_scm.h>
#include <linux/ratelimit.h>
#include "arm-smmu.h"
#include "arm-smmu-qcom.h"
/* Indices into qcom_smmu_config::reg_offset for the debug registers read below. */
enum qcom_smmu_impl_reg_offset {
QCOM_SMMU_TBU_PWR_STATUS,
QCOM_SMMU_STATS_SYNC_INV_TBU_ACK,
QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR,
};
/* Per-SoC table of debug register offsets, indexed by enum qcom_smmu_impl_reg_offset. */
struct qcom_smmu_config {
const u32 *reg_offset;
};
/*
 * qcom_smmu_tlb_sync_debug() - dump TBU/TCU state after a TLB sync timeout.
 * @smmu: the SMMU instance whose sync timed out.
 *
 * Reads three implementation-defined debug registers through the SCM
 * firmware interface and logs them (rate-limited) to help diagnose a
 * potentially deadlocked SMMU. Bails out silently if the SoC has no
 * debug register config.
 */
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu)
{
	int ret;
	/*
	 * Zero-initialize so the summary dev_err below never prints
	 * uninitialized stack data when an SCM read fails.
	 */
	u32 tbu_pwr_status = 0, sync_inv_ack = 0, sync_inv_progress = 0;
	struct qcom_smmu *qsmmu = container_of(smmu, struct qcom_smmu, smmu);
	const struct qcom_smmu_config *cfg;
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&rs)) {
		dev_err(smmu->dev, "TLB sync timed out -- SMMU may be deadlocked\n");

		cfg = qsmmu->cfg;
		if (!cfg)
			return;

		ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_TBU_PWR_STATUS],
					&tbu_pwr_status);
		if (ret)
			dev_err(smmu->dev,
				"Failed to read TBU power status: %d\n", ret);

		ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK],
					&sync_inv_ack);
		if (ret)
			dev_err(smmu->dev,
				"Failed to read TBU sync/inv ack status: %d\n", ret);

		ret = qcom_scm_io_readl(smmu->ioaddr + cfg->reg_offset[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR],
					&sync_inv_progress);
		if (ret)
			dev_err(smmu->dev,
				"Failed to read TCU sync/inv progress: %d\n", ret);

		dev_err(smmu->dev,
			"TBU: power_status %#x sync_inv_ack %#x sync_inv_progress %#x\n",
			tbu_pwr_status, sync_inv_ack, sync_inv_progress);
	}
}
/* Implementation Defined Register Space 0 register offsets */
static const u32 qcom_smmu_impl0_reg_offset[] = {
[QCOM_SMMU_TBU_PWR_STATUS] = 0x2204,
[QCOM_SMMU_STATS_SYNC_INV_TBU_ACK] = 0x25dc,
[QCOM_SMMU_MMU2QSS_AND_SAFE_WAIT_CNTR] = 0x2670,
};
/*
 * Per-SoC configs below all share the impl0 offsets today; they stay
 * separate so a SoC with a different register layout only needs its own
 * offset table, not new match-table plumbing.
 */
static const struct qcom_smmu_config qcm2290_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sc7180_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sc7280_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sc8180x_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sc8280xp_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm6125_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm6350_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm8150_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm8250_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm8350_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
static const struct qcom_smmu_config sm8450_smmu_cfg = {
.reg_offset = qcom_smmu_impl0_reg_offset,
};
/*
 * Match table mapping SMMU compatibles to their debug register configs.
 * Entries without .data (e.g. msm8998, sdm630, sdm845) have no known
 * debug register layout, so qcom_smmu_tlb_sync_debug() skips the dump.
 * Initializer spacing normalized (" }" everywhere) for consistency.
 */
static const struct of_device_id __maybe_unused qcom_smmu_impl_debug_match[] = {
	{ .compatible = "qcom,msm8998-smmu-v2" },
	{ .compatible = "qcom,qcm2290-smmu-500", .data = &qcm2290_smmu_cfg },
	{ .compatible = "qcom,sc7180-smmu-500", .data = &sc7180_smmu_cfg },
	{ .compatible = "qcom,sc7280-smmu-500", .data = &sc7280_smmu_cfg },
	{ .compatible = "qcom,sc8180x-smmu-500", .data = &sc8180x_smmu_cfg },
	{ .compatible = "qcom,sc8280xp-smmu-500", .data = &sc8280xp_smmu_cfg },
	{ .compatible = "qcom,sdm630-smmu-v2" },
	{ .compatible = "qcom,sdm845-smmu-500" },
	{ .compatible = "qcom,sm6125-smmu-500", .data = &sm6125_smmu_cfg },
	{ .compatible = "qcom,sm6350-smmu-500", .data = &sm6350_smmu_cfg },
	{ .compatible = "qcom,sm8150-smmu-500", .data = &sm8150_smmu_cfg },
	{ .compatible = "qcom,sm8250-smmu-500", .data = &sm8250_smmu_cfg },
	{ .compatible = "qcom,sm8350-smmu-500", .data = &sm8350_smmu_cfg },
	{ .compatible = "qcom,sm8450-smmu-500", .data = &sm8450_smmu_cfg },
	{ }
};
/*
 * qcom_smmu_impl_data() - look up the per-SoC debug config for @smmu.
 * @smmu: the SMMU instance whose device node is matched.
 *
 * Returns the qcom_smmu_config from qcom_smmu_impl_debug_match for this
 * SMMU's compatible, or NULL when the SoC has no entry (or no .data).
 */
const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
{
	const struct of_device_id *id =
		of_match_node(qcom_smmu_impl_debug_match, smmu->dev->of_node);

	return id ? id->data : NULL;
}

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ARM_SMMU_QCOM_H
#define _ARM_SMMU_QCOM_H
/* Qualcomm-specific wrapper around the generic ARM SMMU instance. */
struct qcom_smmu {
struct arm_smmu_device smmu; /* embedded generic device; container_of() target */
const struct qcom_smmu_config *cfg; /* per-SoC debug register offsets; may be NULL */
bool bypass_quirk;
u8 bypass_cbndx;
u32 stall_enabled;
};
/*
 * Debug helpers live in arm-smmu-qcom-debug.c when the debug option is
 * enabled; otherwise they compile away to no-op stubs.
 */
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu);
#else
static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
static inline const void *qcom_smmu_impl_data(struct arm_smmu_device *smmu)
{
return NULL;
}
#endif
#endif /* _ARM_SMMU_QCOM_H */

839
drivers/iommu/intel/iommu.h Normal file
View File

@ -0,0 +1,839 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2006-2015, Intel Corporation.
*
* Authors: Ashok Raj <ashok.raj@intel.com>
* Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
* David Woodhouse <David.Woodhouse@intel.com>
*/
#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_
#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/ioasid.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
/*
* VT-d hardware uses 4KiB page size regardless of host page size.
*/
#define VTD_PAGE_SHIFT (12)
#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
#define DMA_PTE_READ BIT_ULL(0)
#define DMA_PTE_WRITE BIT_ULL(1)
#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
#define DMA_PTE_SNP BIT_ULL(11)
#define DMA_FL_PTE_PRESENT BIT_ULL(0)
#define DMA_FL_PTE_US BIT_ULL(2)
#define DMA_FL_PTE_ACCESS BIT_ULL(5)
#define DMA_FL_PTE_DIRTY BIT_ULL(6)
#define DMA_FL_PTE_XD BIT_ULL(63)
#define ADDR_WIDTH_5LEVEL (57)
#define ADDR_WIDTH_4LEVEL (48)
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
#define CONTEXT_TT_PASS_THROUGH 2
#define CONTEXT_PASIDE BIT_ULL(3)
/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
#define DMAR_GCMD_REG 0x18 /* Global command register */
#define DMAR_GSTS_REG 0x1c /* Global status register */
#define DMAR_RTADDR_REG 0x20 /* Root entry table */
#define DMAR_CCMD_REG 0x28 /* Context command reg */
#define DMAR_FSTS_REG 0x34 /* Fault Status register */
#define DMAR_FECTL_REG 0x38 /* Fault control register */
#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
#define DMAR_IQ_SHIFT 4 /* Invalidation queue head/tail shift */
#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
#define DMAR_ICS_REG 0x9c /* Invalidation complete status register */
#define DMAR_IQER_REG 0xb0 /* Invalidation queue error record register */
#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
#define DMAR_PQH_REG 0xc0 /* Page request queue head register */
#define DMAR_PQT_REG 0xc8 /* Page request queue tail register */
#define DMAR_PQA_REG 0xd0 /* Page request queue address register */
#define DMAR_PRS_REG 0xdc /* Page request status register */
#define DMAR_PECTL_REG 0xe0 /* Page request event control register */
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
#define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg)
#define DMAR_IQER_REG_ICESID(reg) FIELD_GET(GENMASK_ULL(63, 48), reg)
#define OFFSET_STRIDE (9)
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
/*
* Decoding Capability Register
*/
#define cap_5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
#define cap_read_drain(c) (((c) >> 55) & 1)
#define cap_write_drain(c) (((c) >> 54) & 1)
#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c) (((c) >> 39) & 1)
#define cap_super_page_val(c) (((c) >> 34) & 0xf)
#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
* OFFSET_STRIDE) + 21)
#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
#define cap_zlr(c) (((c) >> 22) & 1)
#define cap_isoch(c) (((c) >> 23) & 1)
#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c) (((c) >> 8) & 0x1f)
#define cap_caching_mode(c) (((c) >> 7) & 1)
#define cap_phmr(c) (((c) >> 6) & 1)
#define cap_plmr(c) (((c) >> 5) & 1)
#define cap_rwbf(c) (((c) >> 4) & 1)
#define cap_afl(c) (((c) >> 3) & 1)
#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
/*
* Extended Capability Register
*/
#define ecap_rps(e) (((e) >> 49) & 0x1)
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
#define ecap_slads(e) (((e) >> 45) & 0x1)
#define ecap_vcs(e) (((e) >> 44) & 0x1)
#define ecap_smts(e) (((e) >> 43) & 0x1)
#define ecap_dit(e) (((e) >> 41) & 0x1)
#define ecap_pds(e) (((e) >> 42) & 0x1)
#define ecap_pasid(e) (((e) >> 40) & 0x1)
#define ecap_pss(e) (((e) >> 35) & 0x1f)
#define ecap_eafs(e) (((e) >> 34) & 0x1)
#define ecap_nwfs(e) (((e) >> 33) & 0x1)
#define ecap_srs(e) (((e) >> 31) & 0x1)
#define ecap_ers(e) (((e) >> 30) & 0x1)
#define ecap_prs(e) (((e) >> 29) & 0x1)
#define ecap_broken_pasid(e) (((e) >> 28) & 0x1)
#define ecap_dis(e) (((e) >> 27) & 0x1)
#define ecap_nest(e) (((e) >> 26) & 0x1)
#define ecap_mts(e) (((e) >> 25) & 0x1)
#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
#define ecap_qis(e) ((e) & 0x2)
#define ecap_pass_through(e) (((e) >> 6) & 0x1)
#define ecap_eim_support(e) (((e) >> 4) & 0x1)
#define ecap_ir_support(e) (((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e) (((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
/* Virtual command interface capability */
#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET 60
#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
#define DMA_TLB_IIRG(type) ((type >> 60) & 3)
#define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT (((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_TLB_MAX_SIZE (0x3f)
/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET 61
#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr) (addr)
#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
/* PMEN_REG */
#define DMA_PMEN_EPM (((u32)1)<<31)
#define DMA_PMEN_PRS (((u32)1)<<0)
/* GCMD_REG */
#define DMA_GCMD_TE (((u32)1) << 31)
#define DMA_GCMD_SRTP (((u32)1) << 30)
#define DMA_GCMD_SFL (((u32)1) << 29)
#define DMA_GCMD_EAFL (((u32)1) << 28)
#define DMA_GCMD_WBF (((u32)1) << 27)
#define DMA_GCMD_QIE (((u32)1) << 26)
#define DMA_GCMD_SIRTP (((u32)1) << 24)
#define DMA_GCMD_IRE (((u32) 1) << 25)
#define DMA_GCMD_CFI (((u32) 1) << 23)
/* GSTS_REG */
#define DMA_GSTS_TES (((u32)1) << 31)
#define DMA_GSTS_RTPS (((u32)1) << 30)
#define DMA_GSTS_FLS (((u32)1) << 29)
#define DMA_GSTS_AFLS (((u32)1) << 28)
#define DMA_GSTS_WBFS (((u32)1) << 27)
#define DMA_GSTS_QIES (((u32)1) << 26)
#define DMA_GSTS_IRTPS (((u32)1) << 24)
#define DMA_GSTS_IRES (((u32)1) << 25)
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
#define DMA_CCMD_ICC (((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT 0
#define DMA_CCMD_MASK_1BIT 1
#define DMA_CCMD_MASK_2BIT 2
#define DMA_CCMD_MASK_3BIT 3
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)
/* FSTS_REG */
#define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
#define DMA_FRCD_F (((u32)1) << 31)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
/* PRS_REG */
#define DMA_PRS_PPR ((u32)1)
#define DMA_PRS_PRO ((u32)2)
#define DMA_VCS_PAS ((u64)1)
/*
 * Busy-poll an IOMMU register: repeatedly read (iommu->reg + offset) with
 * accessor @op into @sts until @cond becomes true. Panics if the hardware
 * does not respond within DMAR_OPERATION_TIMEOUT cycles. The do/while(0)
 * wrapper keeps the macro safe as a single statement.
 */
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
while (1) { \
sts = op(iommu->reg + offset); \
if (cond) \
break; \
if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
panic("DMAR hardware is malfunctioning\n"); \
cpu_relax(); \
} \
} while (0)
#define QI_LENGTH 256 /* queue length */
/* Lifecycle states of an invalidation queue descriptor slot. */
enum {
QI_FREE, /* slot available for a new descriptor */
QI_IN_USE, /* descriptor submitted, completion pending */
QI_DONE, /* hardware completed this descriptor */
QI_ABORT /* descriptor abandoned after an error/timeout */
};
#define QI_CC_TYPE 0x1
#define QI_IOTLB_TYPE 0x2
#define QI_DIOTLB_TYPE 0x3
#define QI_IEC_TYPE 0x4
#define QI_IWD_TYPE 0x5
#define QI_EIOTLB_TYPE 0x6
#define QI_PC_TYPE 0x7
#define QI_DEIOTLB_TYPE 0x8
#define QI_PGRP_RESP_TYPE 0x9
#define QI_PSTRM_RESP_TYPE 0xa
#define QI_IEC_SELECTIVE (((u64)1) << 4)
#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
#define QI_IWD_FENCE (((u64)1) << 6)
#define QI_IWD_PRQ_DRAIN (((u64)1) << 7)
#define QI_IOTLB_DID(did) (((u64)did) << 16)
#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
#define QI_IOTLB_AM(am) (((u8)am) & 0x3f)
#define QI_CC_FM(fm) (((u64)fm) << 48)
#define QI_CC_SID(sid) (((u64)sid) << 32)
#define QI_CC_DID(did) (((u64)did) << 16)
#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
#define QI_PC_PASID(pasid) (((u64)pasid) << 32)
#define QI_PC_DID(did) (((u64)did) << 16)
#define QI_PC_GRAN(gran) (((u64)gran) << 4)
/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS 0
#define QI_PC_PASID_SEL 1
#define QI_PC_GLOBAL 3
#define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
#define QI_EIOTLB_AM(am) (((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32)
#define QI_EIOTLB_DID(did) (((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran) (((u64)gran) << 4)
/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL 1
#define QI_DEV_IOTLB_GRAN_PASID_SEL 0
#define QI_DEV_EIOTLB_ADDR(a) ((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE (((u64)1) << 11)
#define QI_DEV_EIOTLB_PASID(p) ((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p) (((u64)(p)) << 4)
#define QI_PGRP_PDP(p) (((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12)
#define QI_PGRP_DID(rid) (((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32)
/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x) (((u64)(x)) << 2)
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 3)
#define QI_RESP_SUCCESS 0x0
#define QI_RESP_INVALID 0x1
#define QI_RESP_FAILURE 0xf
#define QI_GRAN_NONG_PASID 2
#define QI_GRAN_PSI_PASID 3
#define qi_shift(iommu) (DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
/* 256-bit queued-invalidation descriptor: four raw 64-bit quadwords. */
struct qi_desc {
u64 qw0;
u64 qw1;
u64 qw2;
u64 qw3;
};
/* Per-IOMMU invalidation queue bookkeeping. */
struct q_inval {
raw_spinlock_t q_lock;
void *desc; /* invalidation queue */
int *desc_status; /* desc status */
int free_head; /* first free entry */
int free_tail; /* last free entry */
int free_cnt; /* number of free entries */
};
struct dmar_pci_notify_info;
/* Interrupt remapping support; stubbed out when CONFIG_IRQ_REMAP is off. */
#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER 8
#define INTR_REMAP_TABLE_REG_SIZE 0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK 0xf
#define INTR_REMAP_TABLE_ENTRIES 65536
struct irq_domain;
/* Interrupt remapping table: IRTE array plus allocation bitmap. */
struct ir_table {
struct irte *base;
unsigned long *bitmap;
};
void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif
/* Indirect hooks for context-cache and IOTLB invalidation on one IOMMU. */
struct iommu_flush {
void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
};
/*
 * Indices of fault-event registers in the save/restore array
 * (NOTE(review): "SR" presumably save/restore across suspend — confirm).
 */
enum {
SR_DMAR_FECTL_REG,
SR_DMAR_FEDATA_REG,
SR_DMAR_FEADDR_REG,
SR_DMAR_FEUADDR_REG,
MAX_SR_DMAR_REGS
};
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
#define VTD_FLAG_SVM_CAPABLE (1 << 2)
extern int intel_iommu_sm;
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
ecap_pasid((iommu)->ecap))
struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
/*
 * Root-table entry (128 bits). Bit layout per VT-d spec:
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
u64 lo;
u64 hi;
};
/*
 * Context-table entry (128 bits). Bit layout per VT-d spec:
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
u64 lo;
u64 hi;
};
/*
 * When VT-d works in the scalable mode, it allows DMA translation to
 * happen through either first level or second level page table. This
 * bit marks that the DMA translation for the domain goes through the
 * first level page table, otherwise, it goes through the second level.
 */
#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
/* Per-IOMMU attachment state of a domain (one entry in dmar_domain::iommu_array). */
struct iommu_domain_info {
struct intel_iommu *iommu;
unsigned int refcnt; /* Refcount of devices per iommu */
u16 did; /* Domain ids per IOMMU. Use u16 since
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
};
/* A VT-d DMA-remapping domain: page tables plus attached devices/IOMMUs. */
struct dmar_domain {
int nid; /* node id */
struct xarray iommu_array; /* Attached IOMMU array */
u8 has_iotlb_device: 1;
u8 iommu_coherency: 1; /* indicate coherency of iommu access */
u8 force_snooping : 1; /* Create IOPTEs with snoop control */
u8 set_pte_snp:1;
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
struct dma_pte *pgd; /* virtual address */
int gaw; /* max guest address width */
/* adjusted guest address width, 0 is level 2 30-bit */
int agaw;
int flags; /* flags to find out type of domain */
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
u64 max_addr; /* maximum mapped address */
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
/* One hardware DMA-remapping unit (DRHD) and all its driver state. */
struct intel_iommu {
	void __iomem *reg; /* Pointer to hardware regs, virtual addr */
	u64 reg_phys; /* physical address of hw register set */
	u64 reg_size; /* size of hw register set */
	u64 cap;
	u64 ecap;
	u64 vccap;
	u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t register_lock; /* protect register handling */
	int seq_id; /* sequence id of the iommu */
	int agaw; /* agaw of this iommu */
	int msagaw; /* max sagaw of this iommu */
	unsigned int irq, pr_irq; /* fault and page-request IRQs */
	u16 segment; /* PCI segment# */
	unsigned char name[13]; /* Device Name */
#ifdef CONFIG_INTEL_IOMMU
	unsigned long *domain_ids; /* bitmap of domains */
	spinlock_t lock; /* protect context, domain ids */
	struct root_entry *root_entry; /* virtual address */
	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	struct page_req_dsc *prq; /* page request queue */
	unsigned char prq_name[16]; /* Name for PRQ interrupt */
	struct completion prq_complete;
	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
	struct iopf_queue *iopf_queue;
	unsigned char iopfq_name[16];
	struct q_inval *qi; /* Queued invalidation info */
	u32 *iommu_state; /* Store iommu states between suspend and resume.*/
#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table; /* Interrupt remapping info */
	struct irq_domain *ir_domain;
	struct irq_domain *ir_msi_domain;
#endif
	struct iommu_device iommu; /* IOMMU core code handle */
	int node;
	u32 flags; /* Software defined flags */
	struct dmar_drhd_unit *drhd;
	void *perf_statistic;
};
/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link; /* link to domain siblings */
	u32 segment; /* PCI segment number */
	u8 bus; /* PCI bus number */
	u8 devfn; /* PCI devfn number */
	u16 pfsid; /* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 ats_qdep; /* ATS invalidation queue depth */
	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};
/* Flush CPU caches for @addr/@size unless the IOMMU snoops them itself. */
static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (ecap_coherent(iommu->ecap))
		return;

	clflush_cache_range(addr, size);
}
/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}
/* Retrieve the domain ID which has allocated to the domain */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	/*
	 * NOTE(review): xa_load() returns NULL when the domain was never
	 * attached to @iommu; callers are presumed to guarantee attachment,
	 * otherwise this dereferences NULL — confirm at call sites.
	 */
	struct iommu_domain_info *info =
		xa_load(&domain->iommu_array, iommu->seq_id);
	return info->did;
}
/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};
/* Zero the (single u64) PTE, i.e. mark it not-present. */
static inline void dma_clear_pte(struct dma_pte *pte)
{
	*pte = (struct dma_pte) { 0 };
}
/*
 * Extract the host physical address from a PTE, masking out the low
 * attribute bits and the first-level execute-disable bit.
 */
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#endif
}
/* A PTE is present when either the read (bit 0) or write (bit 1) bit is set. */
static inline bool dma_pte_present(struct dma_pte *pte)
{
	return !!(pte->val & 3);
}
/* True when the PTE maps a superpage (large-page bit set). */
static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return !!(pte->val & DMA_PTE_LARGE_PAGE);
}
/* True when @pte is the first entry of its page-table page. */
static inline bool first_pte_in_page(struct dma_pte *pte)
{
	return !((unsigned long)pte & (VTD_PAGE_SIZE - 1));
}
/*
 * Number of PTE slots from @pte to the start of the next page-table page.
 * A page-aligned @pte is a full page away (2^VTD_STRIDE_SHIFT entries);
 * otherwise pointer-difference arithmetic yields the remaining entries.
 */
static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);
extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask);
void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
unsigned long npages, bool ih);
void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u32 pasid, u16 qdep, u64 addr,
unsigned int size_order);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
u32 pasid);
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
unsigned int count, unsigned long options);
/*
* Options used in qi_submit_sync:
* QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
*/
#define QI_OPT_WAIT_DRAIN BIT(0)
extern int dmar_ir_support(void);
void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
u32 intel_svm_get_pasid(struct iommu_sva *handle);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
struct iommu_page_response *msg);
/* One device bound to a shared-virtual-memory (SVM) context. */
struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct intel_iommu *iommu;
	struct iommu_sva sva;
	unsigned long prq_seq_number;
	u32 pasid;
	int users; /* bind refcount for this device */
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};
/* One SVM context: an mm shared with devices, identified by a PASID. */
struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;
	unsigned int flags;
	u32 pasid;
	struct list_head devs; /* list of intel_svm_dev */
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
#endif
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
#else
static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
extern const struct iommu_ops intel_iommu_ops;
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
/* Stubs when the Intel IOMMU driver is compiled out. */
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled (1)
#define intel_iommu_enabled (0)
#endif
/*
 * Format a page-request-queue descriptor (dw0..dw3) into @str for trace
 * output.  Returns @str so the call can be used directly as a printf
 * argument.
 */
static inline const char *decode_prq_descriptor(char *str, size_t size,
		u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
	char *buf = str;
	int bytes;

	bytes = snprintf(buf, size,
			"rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
			FIELD_GET(GENMASK_ULL(31, 16), dw0),
			FIELD_GET(GENMASK_ULL(63, 12), dw1),
			dw1 & BIT_ULL(0) ? 'r' : '-',
			dw1 & BIT_ULL(1) ? 'w' : '-',
			dw0 & BIT_ULL(52) ? 'x' : '-',
			dw0 & BIT_ULL(53) ? 'p' : '-',
			dw1 & BIT_ULL(2) ? 'l' : '-',
			FIELD_GET(GENMASK_ULL(51, 32), dw0),
			FIELD_GET(GENMASK_ULL(11, 3), dw1));

	/* Private Data */
	if (dw0 & BIT_ULL(9)) {
		/*
		 * snprintf() returns the would-be length, which may exceed
		 * @size on truncation.  Clamp before adjusting, otherwise
		 * "size -= bytes" underflows the size_t and the second
		 * snprintf() is handed a huge bogus buffer size.
		 */
		if (bytes < 0 || (size_t)bytes >= size)
			return str;
		size -= bytes;
		buf += bytes;
		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
	}

	return str;
}
#endif

View File

@ -0,0 +1,99 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Intel IOMMU trace support
*
* Copyright (C) 2019 Intel Corporation
*
* Author: Lu Baolu <baolu.lu@linux.intel.com>
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu
#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H
#include <linux/tracepoint.h>
#include "iommu.h"
#define MSG_MAX 256
/*
 * Trace a queued-invalidation descriptor submission.  The descriptor type
 * (low nibble of qw0) is decoded symbolically; all four quadwords are
 * recorded verbatim.
 */
TRACE_EVENT(qi_submit,
	TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),
	TP_ARGS(iommu, qw0, qw1, qw2, qw3),
	TP_STRUCT__entry(
		__field(u64, qw0)
		__field(u64, qw1)
		__field(u64, qw2)
		__field(u64, qw3)
		__string(iommu, iommu->name)
	),
	TP_fast_assign(
		__assign_str(iommu, iommu->name);
		__entry->qw0 = qw0;
		__entry->qw1 = qw1;
		__entry->qw2 = qw2;
		__entry->qw3 = qw3;
	),
	TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
		__print_symbolic(__entry->qw0 & 0xf,
			{ QI_CC_TYPE, "cc_inv" },
			{ QI_IOTLB_TYPE, "iotlb_inv" },
			{ QI_DIOTLB_TYPE, "dev_tlb_inv" },
			{ QI_IEC_TYPE, "iec_inv" },
			{ QI_IWD_TYPE, "inv_wait" },
			{ QI_EIOTLB_TYPE, "p_iotlb_inv" },
			{ QI_PC_TYPE, "pc_inv" },
			{ QI_DEIOTLB_TYPE, "p_dev_tlb_inv" },
			{ QI_PGRP_RESP_TYPE, "page_grp_resp" }),
		__get_str(iommu),
		__entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
	)
);
/*
 * Trace a reported page request.  The raw descriptor words are stored and
 * decoded lazily at print time via decode_prq_descriptor(); @seq orders
 * requests per IOMMU.
 */
TRACE_EVENT(prq_report,
	TP_PROTO(struct intel_iommu *iommu, struct device *dev,
		u64 dw0, u64 dw1, u64 dw2, u64 dw3,
		unsigned long seq),
	TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),
	TP_STRUCT__entry(
		__field(u64, dw0)
		__field(u64, dw1)
		__field(u64, dw2)
		__field(u64, dw3)
		__field(unsigned long, seq)
		__string(iommu, iommu->name)
		__string(dev, dev_name(dev))
		__dynamic_array(char, buff, MSG_MAX)
	),
	TP_fast_assign(
		__entry->dw0 = dw0;
		__entry->dw1 = dw1;
		__entry->dw2 = dw2;
		__entry->dw3 = dw3;
		__entry->seq = seq;
		__assign_str(iommu, iommu->name);
		__assign_str(dev, dev_name(dev));
	),
	TP_printk("%s/%s seq# %ld: %s",
		__get_str(iommu), __get_str(dev), __entry->seq,
		decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
			__entry->dw1, __entry->dw2, __entry->dw3)
	)
);
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH ../../drivers/iommu/intel/
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

View File

@ -0,0 +1,148 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/loongarch.h>
#include <asm/setup.h>
static struct irq_domain *irq_domain;
struct fwnode_handle *cpuintc_handle;
/* Map a GSI to a Linux IRQ; only PCH-routed GSIs need translation here. */
static u32 lpic_gsi_to_irq(u32 gsi)
{
	/* Only pch irqdomain transferring is required for LoongArch. */
	if (gsi < GSI_MIN_PCH_IRQ || gsi > GSI_MAX_PCH_IRQ)
		return 0;

	return acpi_register_gsi(NULL, gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_HIGH);
}
/*
 * Resolve the fwnode of the irqdomain owning @gsi.  The CPU, LPC and PCH
 * GSI windows are disjoint; returns NULL when the matching controller has
 * not been probed (its handle is still NULL) or @gsi is out of range.
 */
static struct fwnode_handle *lpic_get_gsi_domain_id(u32 gsi)
{
	int id;

	if (gsi >= GSI_MIN_CPU_IRQ && gsi <= GSI_MAX_CPU_IRQ)
		return liointc_handle;

	if (gsi >= GSI_MIN_LPC_IRQ && gsi <= GSI_MAX_LPC_IRQ)
		return pch_lpc_handle;

	if (gsi >= GSI_MIN_PCH_IRQ && gsi <= GSI_MAX_PCH_IRQ) {
		id = find_pch_pic(gsi);
		if (id >= 0)
			return pch_pic_handle[id];
	}

	return NULL;
}
/* Mask a CPU interrupt by clearing its bit in the exception config (ECFG). */
static void mask_loongarch_irq(struct irq_data *d)
{
	clear_csr_ecfg(ECFGF(d->hwirq));
}
/* Unmask a CPU interrupt by setting its bit in ECFG. */
static void unmask_loongarch_irq(struct irq_data *d)
{
	set_csr_ecfg(ECFGF(d->hwirq));
}
static struct irq_chip cpu_irq_controller = {
	.name = "CPUINTC",
	.irq_mask = mask_loongarch_irq,
	.irq_unmask = unmask_loongarch_irq,
};
/*
 * Top-level CPU interrupt entry: walk the pending bits in ESTAT (lowest
 * first) and dispatch each through the CPU INTC irqdomain.
 */
static void handle_cpu_irq(struct pt_regs *regs)
{
	unsigned int pending = read_csr_estat() & CSR_ESTAT_IS;
	int bit;

	while ((bit = ffs(pending))) {
		pending &= ~BIT(bit - 1);
		generic_handle_domain_irq(irq_domain, bit - 1);
	}
}
/* Bind a CPU INTC hwirq to a virq: per-CPU handler, no autoprobe. */
static int loongarch_cpu_intc_map(struct irq_domain *d, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_noprobe(irq);
	irq_set_chip_and_handler(irq, &cpu_irq_controller, handle_percpu_irq);
	return 0;
}
static const struct irq_domain_ops loongarch_cpu_intc_irq_domain_ops = {
	.map = loongarch_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};
/* MADT callback: initialize one legacy I/O interrupt controller (LIOINTC). */
static int __init
liointc_parse_madt(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_lio_pic *liointc_entry = (struct acpi_madt_lio_pic *)header;
	return liointc_acpi_init(irq_domain, liointc_entry);
}
/* MADT callback: initialize one extended I/O interrupt controller (EIOINTC). */
static int __init
eiointc_parse_madt(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_eio_pic *eiointc_entry = (struct acpi_madt_eio_pic *)header;
	return eiointc_acpi_init(irq_domain, eiointc_entry);
}
/*
 * Probe the cascaded LIOINTC/EIOINTC controllers from the MADT.
 * Returns 0 on success or a negative errno from the table walk.
 * Fix: the original discarded acpi_table_parse_madt() return values,
 * so parse/init failures were silently reported as success.
 */
static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_LIO_PIC, liointc_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC, eiointc_parse_madt, 0);
	if (r < 0)
		return r;

	return 0;
}
/*
 * MADT CORE_PIC callback: create the root CPU interrupt controller domain,
 * install the top-level IRQ handler, register the ACPI GSI model and then
 * probe the cascaded controllers.  Idempotent: later CORE_PIC entries are
 * ignored once the domain exists.
 */
static int __init cpuintc_acpi_init(union acpi_subtable_headers *header,
			const unsigned long end)
{
	if (irq_domain)
		return 0;
	/* Mask interrupts. */
	clear_csr_ecfg(ECFG0_IM);
	clear_csr_estat(ESTATF_IP);
	/*
	 * NOTE(review): cpuintc_handle is not checked for allocation failure
	 * before use — confirm whether a NULL fwnode is acceptable here.
	 */
	cpuintc_handle = irq_domain_alloc_named_fwnode("CPUINTC");
	irq_domain = irq_domain_create_linear(cpuintc_handle, EXCCODE_INT_NUM,
			&loongarch_cpu_intc_irq_domain_ops, NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for LoongArch CPU");
	set_handle_irq(&handle_cpu_irq);
	acpi_set_irq_model(ACPI_IRQ_MODEL_LPIC, lpic_get_gsi_domain_id);
	acpi_set_gsi_to_irq_fallback(lpic_gsi_to_irq);
	acpi_cascade_irqdomain_init();
	return 0;
}
IRQCHIP_ACPI_DECLARE(cpuintc_v1, ACPI_MADT_TYPE_CORE_PIC,
NULL, ACPI_MADT_CORE_PIC_VERSION_V1, cpuintc_acpi_init);

View File

@ -0,0 +1,400 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Loongson Extend I/O Interrupt Controller support
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "eiointc: " fmt
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#define EIOINTC_REG_NODEMAP 0x14a0
#define EIOINTC_REG_IPMAP 0x14c0
#define EIOINTC_REG_ENABLE 0x1600
#define EIOINTC_REG_BOUNCE 0x1680
#define EIOINTC_REG_ISR 0x1800
#define EIOINTC_REG_ROUTE 0x1c00
#define VEC_REG_COUNT 4
#define VEC_COUNT_PER_REG 64
#define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id) ((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id) ((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE 0xffffffff
#define MAX_EIO_NODES (NR_CPUS / CORES_PER_EIO_NODE)
/* Number of EIOINTC instances registered so far (index into eiointc_priv). */
static int nr_pics;
/* Per-controller state for one extended I/O interrupt controller. */
struct eiointc_priv {
	u32 node; /* home NUMA/EIO node of this controller */
	nodemask_t node_map; /* EIO nodes served by this controller */
	cpumask_t cpuspan_map; /* CPUs living on those nodes */
	struct fwnode_handle *domain_handle;
	struct irq_domain *eiointc_domain;
};
static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
/* Turn on the extended-IOI feature bit in the per-core MISC IOCSR. */
static void eiointc_enable(void)
{
	iocsr_write64(iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_EXT_IOI_EN,
		      LOONGARCH_IOCSR_MISC_FUNC);
}
/* EIO node of a CPU: physical CPU id divided by cores per EIO node. */
static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}
/*
 * Program the per-vector route register so that vector @pos is delivered
 * to @cpu.  The route registers are byte-granular within 32-bit words
 * (pos_off/data_byte/data_mask pick out the right byte), and the same
 * byte is written on every node in @node_map via csr_any_send().
 */
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;
	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;
	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);
	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;
		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}
/* Serializes route reprogramming across all EIOINTC instances. */
static DEFINE_RAW_SPINLOCK(affinity_lock);
/*
 * Move an interrupt to a CPU in @affinity.  The target set is restricted
 * to online CPUs that this controller spans; the vector is masked while
 * its route register is rewritten, then unmasked again.
 */
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;
	raw_spin_lock_irqsave(&affinity_lock, flags);
	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);
	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);
	vector = d->hwirq;
	/* Each 32-bit enable register covers 32 vectors. */
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);
	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
			0x0, priv->node * CORES_PER_EIO_NODE);
	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);
	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
			0x0, priv->node * CORES_PER_EIO_NODE);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	raw_spin_unlock_irqrestore(&affinity_lock, flags);
	return IRQ_SET_MASK_OK;
}
/*
 * Find which registered EIOINTC instance serves @node.
 * Returns its index in eiointc_priv[], or -1 when no instance matches.
 */
static int eiointc_index(int node)
{
	int idx = 0;

	while (idx < nr_pics) {
		if (node_isset(node, eiointc_priv[idx]->node_map))
			return idx;
		idx++;
	}

	return -1;
}
/*
 * Per-CPU hotplug/boot hook: on the first core of each EIO node, enable
 * extended IOI and program the node map, IP map, default routes and enable
 * registers of the owning controller.
 */
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	int index;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);

	/*
	 * Fix: @index must be signed.  It was declared uint32_t, which made
	 * the "index < 0" error check below dead code, so an unmatched node
	 * would have indexed eiointc_priv[] with (u32)-1.
	 */
	index = eiointc_index(node);
	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	/* Only the first core of each EIO node programs the controller. */
	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < VEC_COUNT / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}
/*
 * Chained handler on the parent CPU interrupt: read-and-ack each 64-bit
 * ISR word, then dispatch every pending vector through this controller's
 * irqdomain.  Reports a spurious interrupt if nothing was pending.
 */
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);
	chained_irq_enter(chip, desc);
	for (i = 0; i < VEC_REG_COUNT; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
		/* Write-1-to-clear: ack everything we are about to handle. */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;
			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}
	if (!handled)
		spurious_interrupt();
	chained_irq_exit(chip, desc);
}
/*
 * Ack/mask/unmask are intentionally empty: vectors are acked in the
 * dispatch loop (ISR write-1-to-clear) and masking is only done around
 * route changes in eiointc_set_irq_affinity().
 */
static void eiointc_ack_irq(struct irq_data *d)
{
}
static void eiointc_mask_irq(struct irq_data *d)
{
}
static void eiointc_unmask_irq(struct irq_data *d)
{
}
static struct irq_chip eiointc_irq_chip = {
	.name = "EIOINTC",
	.irq_ack = eiointc_ack_irq,
	.irq_mask = eiointc_mask_irq,
	.irq_unmask = eiointc_unmask_irq,
	.irq_set_affinity = eiointc_set_irq_affinity,
};
static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
int ret;
unsigned int i, type;
unsigned long hwirq = 0;
struct eiointc *priv = domain->host_data;
ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
if (ret)
return ret;
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
priv, handle_edge_irq, NULL, NULL);
}
return 0;
}
/* irqdomain .free: undo eiointc_domain_alloc() for each descriptor. */
static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	unsigned int i;

	for (i = 0; i < nr_irqs; i++) {
		unsigned int cur = virq + i;

		irq_set_handler(cur, NULL);
		irq_domain_reset_irq_data(irq_domain_get_irq_data(domain, cur));
	}
}
static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};
/*
 * Record @parent as the irqdomain parent for the vector group belonging to
 * @node.  With flat memory mode the EIO node is translated to its NUMA
 * node first.  Only the first matching slot is written.
 */
static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int slot;

	if (cpu_has_flatmode)
		node = cpu_to_node(node * CORES_PER_EIO_NODE);

	for (slot = 0; slot < MAX_IO_PICS; slot++) {
		if (vec_group[slot].node != node)
			continue;
		vec_group[slot].parent = parent;
		return;
	}
}
/*
 * Look up the parent irqdomain previously recorded for @node's vector
 * group; NULL when no slot matches.
 */
static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int slot;

	for (slot = 0; slot < MAX_IO_PICS; slot++)
		if (vec_group[slot].node == node)
			return vec_group[slot].parent;

	return NULL;
}
/*
 * MADT callback: initialize one PCH PIC.  Bits 47:44 of the register base
 * address encode the node the controller lives on, which selects the
 * parent EIOINTC domain.
 */
static int __init
pch_pic_parse_madt(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);
	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);
	return -EINVAL;
}
/*
 * MADT callback: initialize the PCH MSI controller, parented to the most
 * recently registered EIOINTC instance's node.
 */
static int __init
pch_msi_parse_madt(union acpi_subtable_headers *header,
			const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	struct irq_domain *parent = acpi_get_vec_parent(eiointc_priv[nr_pics - 1]->node, msi_group);
	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);
	return -EINVAL;
}
/*
 * Probe the PCH PIC and PCH MSI controllers cascaded below this EIOINTC.
 * Returns 0 on success or a negative errno from the MADT walk.
 * Fix: the original ignored acpi_table_parse_madt() return values, hiding
 * parse/init failures.  (Only one MSI_PIC entry is consumed, as before.)
 */
static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}
/*
 * Register one EIOINTC instance described by a MADT EIO_PIC entry: build
 * its node/CPU span from the node_map, create the irqdomain, hook the
 * dispatcher onto the parent CPU interrupt, install the CPU-hotplug
 * router callback and probe the cascaded PCH controllers.
 * Returns 0 on success, -ENOMEM on any setup failure.
 */
int __init eiointc_acpi_init(struct irq_domain *parent,
				struct acpi_madt_eio_pic *acpi_eiointc)
{
	int i, parent_irq;
	unsigned long node_map;
	struct eiointc_priv *priv;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
					acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}
	priv->node = acpi_eiointc->node;
	/* A zero node_map means "all nodes". */
	node_map = acpi_eiointc->node_map ? : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << cpu_to_eio_node(i))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map, cpumask_of(i));
		}
	}
	/* Setup IRQ domain */
	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle, VEC_COUNT,
					&eiointc_domain_ops, priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-eiointc: cannot add IRQ domain\n");
		goto out_free_handle;
	}
	eiointc_priv[nr_pics++] = priv;
	/* Program the boot CPU's routing immediately. */
	eiointc_router_init(0);
	/*
	 * NOTE(review): parent_irq is not checked for failure, and the
	 * cpuhp state is (re)registered once per instance — confirm both
	 * are acceptable for multi-instance systems.
	 */
	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
				"irqchip/loongarch/intc:starting",
				eiointc_router_init, NULL);
	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(acpi_eiointc->node, priv->eiointc_domain, msi_group);
	acpi_cascade_irqdomain_init();
	return 0;
out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);
	return -ENOMEM;
}

View File

@ -0,0 +1,205 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Loongson LPC Interrupt Controller support
*
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#define pr_fmt(fmt) "lpc: " fmt
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
/* Registers */
#define LPC_INT_CTL 0x00
#define LPC_INT_ENA 0x04
#define LPC_INT_STS 0x08
#define LPC_INT_CLR 0x0c
#define LPC_INT_POL 0x10
#define LPC_COUNT 16
/* LPC_INT_CTL */
#define LPC_INT_CTL_EN BIT(31)
/* Driver state for the PCH LPC interrupt controller. */
struct pch_lpc {
	void __iomem *base; /* mapped register window */
	struct irq_domain *lpc_domain;
	raw_spinlock_t lpc_lock; /* serializes register read-modify-write */
	u32 saved_reg_ctl; /* register save area (suspend/resume) */
	u32 saved_reg_ena;
	u32 saved_reg_pol;
};
/* Exposed for the ACPI GSI -> domain lookup in the CPU INTC driver. */
struct fwnode_handle *pch_lpc_handle;
/* Ack an LPC interrupt: write-1-to-clear its bit in LPC_INT_CLR. */
static void lpc_irq_ack(struct irq_data *d)
{
	struct pch_lpc *priv = d->domain->host_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&priv->lpc_lock, flags);
	writel(0x1 << d->hwirq, priv->base + LPC_INT_CLR);
	raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
static void lpc_irq_mask(struct irq_data *d)
{
unsigned long flags;
struct pch_lpc *priv = d->domain->host_data;
raw_spin_lock_irqsave(&priv->lpc_lock, flags);
writel(readl(priv->base + LPC_INT_ENA) & (~(0x1 << (d->hwirq))),
priv->base + LPC_INT_ENA);
raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
static void lpc_irq_unmask(struct irq_data *d)
{
unsigned long flags;
struct pch_lpc *priv = d->domain->host_data;
raw_spin_lock_irqsave(&priv->lpc_lock, flags);
writel(readl(priv->base + LPC_INT_ENA) | (0x1 << (d->hwirq)),
priv->base + LPC_INT_ENA);
raw_spin_unlock_irqrestore(&priv->lpc_lock, flags);
}
/*
 * Configure trigger polarity for a level-type interrupt via LPC_INT_POL
 * (1 = active-high).  Non-level types are accepted but ignored.
 * NOTE(review): returning 0 for edge types silently leaves the old
 * polarity in place — confirm this is intentional vs. returning -EINVAL.
 */
static int lpc_irq_set_type(struct irq_data *d, unsigned int type)
{
	u32 val;
	u32 mask = 0x1 << (d->hwirq);
	struct pch_lpc *priv = d->domain->host_data;
	if (!(type & IRQ_TYPE_LEVEL_MASK))
		return 0;
	val = readl(priv->base + LPC_INT_POL);
	if (type == IRQ_TYPE_LEVEL_HIGH)
		val |= mask;
	else
		val &= ~mask;
	writel(val, priv->base + LPC_INT_POL);
	return 0;
}
static const struct irq_chip pch_lpc_irq_chip = {
	.name = "PCH LPC",
	.irq_mask = lpc_irq_mask,
	.irq_unmask = lpc_irq_unmask,
	.irq_ack = lpc_irq_ack,
	.irq_set_type = lpc_irq_set_type,
	.flags = IRQCHIP_SKIP_SET_WAKE,
};
static void lpc_irq_dispatch(struct irq_desc *desc)
{
u32 pending, bit;
struct irq_chip *chip = irq_desc_get_chip(desc);
struct pch_lpc *priv = irq_desc_get_handler_data(desc);
chained_irq_enter(chip, desc);
pending = readl(priv->base + LPC_INT_ENA);
pending &= readl(priv->base + LPC_INT_STS);
if (!pending)
spurious_interrupt();
while (pending) {
bit = __ffs(pending);
generic_handle_domain_irq(priv->lpc_domain, bit);
pending &= ~BIT(bit);
}
chained_irq_exit(chip, desc);
}
/* irqdomain .map: level-triggered handler with the LPC irq_chip. */
static int pch_lpc_map(struct irq_domain *d, unsigned int irq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(irq, &pch_lpc_irq_chip, handle_level_irq);
	return 0;
}
static const struct irq_domain_ops pch_lpc_domain_ops = {
	.map = pch_lpc_map,
	.translate = irq_domain_translate_twocell,
};
/* Bring the controller to a known state: enabled, all sources masked and acked. */
static void pch_lpc_reset(struct pch_lpc *priv)
{
	/* Enable the LPC interrupt, bit31: en bit30: edge */
	writel(LPC_INT_CTL_EN, priv->base + LPC_INT_CTL);
	writel(0, priv->base + LPC_INT_ENA);
	/* Clear all 18-bit interrupt status bits */
	writel(GENMASK(17, 0), priv->base + LPC_INT_CLR);
}
/*
 * Heuristic presence check: an absent/powered-off LPC block reads back
 * all-ones from both the enable and status registers.
 */
static int pch_lpc_disabled(struct pch_lpc *priv)
{
	if (readl(priv->base + LPC_INT_ENA) != 0xffffffff)
		return 0;

	return readl(priv->base + LPC_INT_STS) == 0xffffffff;
}
/*
 * Probe the PCH LPC controller from its MADT entry: map registers, verify
 * the block is alive, create the irqdomain, reset the hardware and chain
 * it onto the parent PCH PIC interrupt.  Returns 0 on success, -ENOMEM on
 * any failure (goto-based unwind releases partial state).
 */
int __init pch_lpc_acpi_init(struct irq_domain *parent,
				struct acpi_madt_lpc_pic *acpi_pchlpc)
{
	int parent_irq;
	struct pch_lpc *priv;
	struct irq_fwspec fwspec;
	struct fwnode_handle *irq_handle;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	raw_spin_lock_init(&priv->lpc_lock);
	priv->base = ioremap(acpi_pchlpc->address, acpi_pchlpc->size);
	if (!priv->base)
		goto free_priv;
	if (pch_lpc_disabled(priv)) {
		pr_err("Failed to get LPC status\n");
		goto iounmap_base;
	}
	irq_handle = irq_domain_alloc_named_fwnode("lpcintc");
	if (!irq_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto iounmap_base;
	}
	priv->lpc_domain = irq_domain_create_linear(irq_handle, LPC_COUNT,
					&pch_lpc_domain_ops, priv);
	if (!priv->lpc_domain) {
		pr_err("Failed to create IRQ domain\n");
		goto free_irq_handle;
	}
	pch_lpc_reset(priv);
	/* Chain onto the parent PCH interrupt (level high). */
	fwspec.fwnode = parent->fwnode;
	fwspec.param[0] = acpi_pchlpc->cascade + GSI_MIN_PCH_IRQ;
	fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
	fwspec.param_count = 2;
	/* NOTE(review): parent_irq is not checked for mapping failure. */
	parent_irq = irq_create_fwspec_mapping(&fwspec);
	irq_set_chained_handler_and_data(parent_irq, lpc_irq_dispatch, priv);
	pch_lpc_handle = irq_handle;
	return 0;
free_irq_handle:
	irq_domain_free_fwnode(irq_handle);
iounmap_base:
	iounmap(priv->base);
free_priv:
	kfree(priv);
	return -ENOMEM;
}

View File

@ -0,0 +1,393 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2L IRQC Driver
*
* Copyright (C) 2022 Renesas Electronics Corporation.
*
* Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#define IRQC_IRQ_START 1
#define IRQC_IRQ_COUNT 8
#define IRQC_TINT_START (IRQC_IRQ_START + IRQC_IRQ_COUNT)
#define IRQC_TINT_COUNT 32
#define IRQC_NUM_IRQ (IRQC_TINT_START + IRQC_TINT_COUNT)
#define ISCR 0x10
#define IITSR 0x14
#define TSCR 0x20
#define TITSR0 0x24
#define TITSR1 0x28
#define TITSR0_MAX_INT 16
#define TITSEL_WIDTH 0x2
#define TSSR(n) (0x30 + ((n) * 4))
#define TIEN BIT(7)
#define TSSEL_SHIFT(n) (8 * (n))
#define TSSEL_MASK GENMASK(7, 0)
#define IRQ_MASK 0x3
#define TSSR_OFFSET(n) ((n) % 4)
#define TSSR_INDEX(n) ((n) / 4)
#define TITSR_TITSEL_EDGE_RISING 0
#define TITSR_TITSEL_EDGE_FALLING 1
#define TITSR_TITSEL_LEVEL_HIGH 2
#define TITSR_TITSEL_LEVEL_LOW 3
#define IITSR_IITSEL(n, sense) ((sense) << ((n) * 2))
#define IITSR_IITSEL_LEVEL_LOW 0
#define IITSR_IITSEL_EDGE_FALLING 1
#define IITSR_IITSEL_EDGE_RISING 2
#define IITSR_IITSEL_EDGE_BOTH 3
#define IITSR_IITSEL_MASK(n) IITSR_IITSEL((n), 3)
#define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
#define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
/* Driver state for the RZ/G2L interrupt controller. */
struct rzg2l_irqc_priv {
	void __iomem *base; /* mapped register window */
	struct irq_fwspec fwspec[IRQC_NUM_IRQ]; /* parent (GIC) fwspec per hwirq */
	raw_spinlock_t lock; /* serializes register read-modify-write */
};
/* Recover the driver instance stored as the irqdomain's host_data. */
static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	struct rzg2l_irqc_priv *priv = data->domain->host_data;

	return priv;
}
/* Clear the IRQ status bit in ISCR, but only when it is actually set. */
static void rzg2l_irq_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
	u32 bit = BIT(hw_irq);
	u32 iscr = readl_relaxed(priv->base + ISCR);

	if (iscr & bit)
		writel_relaxed(iscr & ~bit, priv->base + ISCR);
}
/* Clear the TINT status bit in TSCR, but only when it is actually set. */
static void rzg2l_tint_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
	u32 bit = BIT(hw_irq);
	u32 tscr = readl_relaxed(priv->base + TSCR);

	if (tscr & bit)
		writel_relaxed(tscr & ~bit, priv->base + TSCR);
}
/*
 * EOI callback: ack the local status bit (IRQ 1..8 via ISCR, TINT via
 * TSCR) under the driver lock, then EOI the parent (GIC) interrupt.
 */
static void rzg2l_irqc_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);
	raw_spin_lock(&priv->lock);
	/* IRQ hwirqs are 1-based, so "<= IRQC_IRQ_COUNT" covers 1..8. */
	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		rzg2l_irq_eoi(d);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		rzg2l_tint_eoi(d);
	raw_spin_unlock(&priv->lock);
	irq_chip_eoi_parent(d);
}
/*
 * Disable callback: for TINT sources, clear the source-select byte (which
 * also clears TIEN) in the relevant TSSR register, then disable the parent.
 * Plain IRQ sources only need the parent disabled.
 */
static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
	unsigned int hw_irq = irqd_to_hwirq(d);
	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset); /* byte lane within the word */
		u8 tssr_index = TSSR_INDEX(offset); /* which TSSR register */
		u32 reg;
		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		reg &= ~(TSSEL_MASK << tssr_offset);
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
	irq_chip_disable_parent(d);
}
/*
 * Unmask a line.  For TINT lines program the timer-input source number
 * (stashed in chip_data at allocation time) together with the TIEN
 * enable bit into the line's TSSEL byte, then enable the parent.
 */
static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		unsigned long tint = (uintptr_t)d->chip_data;
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		/* TSSEL_SHIFT() positions the byte for this line (8 bits each). */
		reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
	irq_chip_enable_parent(d);
}
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_LEVEL_LOW:
sense = IITSR_IITSEL_LEVEL_LOW;
break;
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
break;
default:
return -EINVAL;
}
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
tmp &= ~IITSR_IITSEL_MASK(hw_irq);
tmp |= IITSR_IITSEL(hw_irq, sense);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
/*
 * Program the edge sense of a TINT line.  Each line uses a TITSEL_WIDTH
 * bit field; lines 0..TITSR0_MAX_INT-1 live in TITSR0, the rest in
 * TITSR1.  Only edge triggers are supported for TINT.
 *
 * Returns 0 on success or -EINVAL for level (or unknown) trigger types.
 */
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 titseln = hwirq - IRQC_TINT_START;
	u32 offset;
	u8 sense;
	u32 reg;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		sense = TITSR_TITSEL_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		sense = TITSR_TITSEL_EDGE_FALLING;
		break;
	default:
		return -EINVAL;
	}
	/* Select the register bank and rebase the field index into it. */
	offset = TITSR0;
	if (titseln >= TITSR0_MAX_INT) {
		titseln -= TITSR0_MAX_INT;
		offset = TITSR1;
	}
	raw_spin_lock(&priv->lock);
	reg = readl_relaxed(priv->base + offset);
	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
	reg |= sense << (titseln * TITSEL_WIDTH);
	writel_relaxed(reg, priv->base + offset);
	raw_spin_unlock(&priv->lock);
	return 0;
}
/*
 * Dispatch trigger configuration to the IRQ or TINT helper depending on
 * the hwirq range, then tell the parent the line is always level-high
 * (the IRQC normalizes the sense before forwarding).
 */
static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int hwirq = irqd_to_hwirq(d);
	int ret;

	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		ret = rzg2l_irq_set_type(d, type);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		ret = rzg2l_tint_set_edge(d, type);
	else
		ret = -EINVAL;

	if (ret)
		return ret;

	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
static const struct irq_chip irqc_chip = {
.name = "rzg2l-irqc",
.irq_eoi = rzg2l_irqc_eoi,
.irq_mask = irq_chip_mask_parent,
.irq_unmask = irq_chip_unmask_parent,
.irq_disable = rzg2l_irqc_irq_disable,
.irq_enable = rzg2l_irqc_irq_enable,
.irq_get_irqchip_state = irq_chip_get_parent_state,
.irq_set_irqchip_state = irq_chip_set_parent_state,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_type = rzg2l_irqc_set_type,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE,
};
/*
 * Allocate IRQs in this hierarchical domain.  Decodes the (possibly
 * TINT-encoded) hwirq, stashes the TINT number in chip_data for the
 * enable path, and forwards the cached parent fwspec upward.
 *
 * Returns 0 on success or a negative errno.
 */
static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct rzg2l_irqc_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;
	/*
	 * For TINT interrupts ie where pinctrl driver is child of irqc domain
	 * the hwirq and TINT are encoded in fwspec->param[0].
	 * hwirq for TINT range from 9-40, hwirq is embedded 0-15 bits and TINT
	 * from 16-31 bits. TINT from the pinctrl driver needs to be programmed
	 * in IRQC registers to enable a given gpio pin as interrupt.
	 */
	if (hwirq > IRQC_IRQ_COUNT) {
		tint = TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = TINT_EXTRACT_HWIRQ(hwirq);
		if (hwirq < IRQC_TINT_START)
			return -EINVAL;
	}
	if (hwirq > (IRQC_NUM_IRQ - 1))
		return -EINVAL;
	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &irqc_chip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}
static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
.alloc = rzg2l_irqc_alloc,
.free = irq_domain_free_irqs_common,
.translate = irq_domain_translate_twocell,
};
/*
 * Cache the parent-domain fwspec for each of this controller's
 * interrupts, parsed from the "interrupts" DT property, so that
 * rzg2l_irqc_alloc() can forward allocations without re-parsing.
 *
 * Returns 0 on success or the of_irq_parse_one() error.
 */
static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
				       struct device_node *np)
{
	struct of_phandle_args map;
	unsigned int i;
	int ret;

	for (i = 0; i < IRQC_NUM_IRQ; i++) {
		ret = of_irq_parse_one(np, i, &map);
		if (ret)
			return ret;
		of_phandle_args_to_fwspec(np, map.args, map.args_count,
					  &priv->fwspec[i]);
	}
	return 0;
}
/*
 * Probe-time initialization: map registers, cache parent fwspecs,
 * deassert the reset line, enable runtime PM and register the
 * hierarchical IRQ domain beneath the parent (GIC) domain.
 *
 * Returns 0 on success or a negative errno; on failure after the reset
 * was deasserted, runtime PM is torn down and the reset re-asserted.
 */
static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
{
	struct irq_domain *irq_domain, *parent_domain;
	struct platform_device *pdev;
	struct reset_control *resetn;
	struct rzg2l_irqc_priv *priv;
	int ret;

	/*
	 * NOTE(review): of_find_device_by_node() takes a device reference
	 * that is never dropped with put_device() on any path — confirm
	 * whether this is an intentional lifetime pin or a refcount leak.
	 */
	pdev = of_find_device_by_node(node);
	if (!pdev)
		return -ENODEV;
	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(&pdev->dev, "cannot find parent domain\n");
		return -ENODEV;
	}
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);
	ret = rzg2l_irqc_parse_interrupts(priv, node);
	if (ret) {
		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
		return ret;
	}
	resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(resetn))
		return PTR_ERR(resetn);
	ret = reset_control_deassert(resetn);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
		return ret;
	}
	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
		goto pm_disable;
	}
	raw_spin_lock_init(&priv->lock);
	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
					      node, &rzg2l_irqc_domain_ops,
					      priv);
	if (!irq_domain) {
		dev_err(&pdev->dev, "failed to add irq domain\n");
		ret = -ENOMEM;
		goto pm_put;
	}
	return 0;

	/* Unwind in reverse order of acquisition. */
pm_put:
	pm_runtime_put(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(resetn);
	return ret;
}
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,278 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* Copyright (C) Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define SP_INTC_HWIRQ_MIN 0
#define SP_INTC_HWIRQ_MAX 223
#define SP_INTC_NR_IRQS (SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1)
#define SP_INTC_NR_GROUPS DIV_ROUND_UP(SP_INTC_NR_IRQS, 32)
#define SP_INTC_REG_SIZE (SP_INTC_NR_GROUPS * 4)
/* REG_GROUP_0 regs */
#define REG_INTR_TYPE (sp_intc.g0)
#define REG_INTR_POLARITY (REG_INTR_TYPE + SP_INTC_REG_SIZE)
#define REG_INTR_PRIORITY (REG_INTR_POLARITY + SP_INTC_REG_SIZE)
#define REG_INTR_MASK (REG_INTR_PRIORITY + SP_INTC_REG_SIZE)
/* REG_GROUP_1 regs */
#define REG_INTR_CLEAR (sp_intc.g1)
#define REG_MASKED_EXT1 (REG_INTR_CLEAR + SP_INTC_REG_SIZE)
#define REG_MASKED_EXT0 (REG_MASKED_EXT1 + SP_INTC_REG_SIZE)
#define REG_INTR_GROUP (REG_INTR_CLEAR + 31 * 4)
#define GROUP_MASK (BIT(SP_INTC_NR_GROUPS) - 1)
#define GROUP_SHIFT_EXT1 (0)
#define GROUP_SHIFT_EXT0 (8)
/*
* When GPIO_INT0~7 set to edge trigger, doesn't work properly.
* WORKAROUND: change it to level trigger, and toggle the polarity
* at ACK/Handler to make the HW work.
*/
#define GPIO_INT0_HWIRQ 120
#define GPIO_INT7_HWIRQ 127
#define IS_GPIO_INT(irq) \
({ \
u32 i = irq; \
(i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ); \
})
/* index of states */
enum {
_IS_EDGE = 0,
_IS_LOW,
_IS_ACTIVE
};
#define STATE_BIT(irq, idx) (((irq) - GPIO_INT0_HWIRQ) * 3 + (idx))
#define ASSIGN_STATE(irq, idx, v) assign_bit(STATE_BIT(irq, idx), sp_intc.states, v)
#define TEST_STATE(irq, idx) test_bit(STATE_BIT(irq, idx), sp_intc.states)
static struct sp_intctl {
/*
* REG_GROUP_0: include type/polarity/priority/mask regs.
* REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs.
*/
void __iomem *g0; // REG_GROUP_0 base
void __iomem *g1; // REG_GROUP_1 base
struct irq_domain *domain;
raw_spinlock_t lock;
/*
* store GPIO_INT states
* each interrupt has 3 states: is_edge, is_low, is_active
*/
DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3);
} sp_intc;
static struct irq_chip sp_intc_chip;
/*
 * Set or clear the bit corresponding to @hwirq in the 32-bit register
 * bank starting at @base, with a locked read-modify-write.
 */
static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value)
{
	u32 offset, mask;
	unsigned long flags;
	void __iomem *reg;

	/* One 32-bit register covers 32 consecutive hwirqs. */
	offset = (hwirq / 32) * 4;
	reg = base + offset;

	raw_spin_lock_irqsave(&sp_intc.lock, flags);
	mask = readl_relaxed(reg);
	if (value)
		mask |= BIT(hwirq % 32);
	else
		mask &= ~BIT(hwirq % 32);
	writel_relaxed(mask, reg);
	raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
}
/*
 * Acknowledge an interrupt.  For GPIO_INT0-7 configured as edge (see the
 * workaround note above the state macros): the line actually runs in
 * level mode, so flip the polarity here and mark the line active; the
 * cascade handler restores the polarity when the fake level fires.
 */
static void sp_intc_ack_irq(struct irq_data *d)
{
	u32 hwirq = d->hwirq;

	if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND
		sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW));
		ASSIGN_STATE(hwirq, _IS_ACTIVE, true);
	}
	sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1);
}
static void sp_intc_mask_irq(struct irq_data *d)
{
sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0);
}
static void sp_intc_unmask_irq(struct irq_data *d)
{
sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1);
}
/*
 * Configure trigger type.  Edge-triggered GPIO_INT0-7 lines are silently
 * converted to level mode in hardware (see workaround note), with the
 * requested edge/polarity remembered in the software state bitmap.
 *
 * Always returns 0.
 */
static int sp_intc_set_type(struct irq_data *d, unsigned int type)
{
	u32 hwirq = d->hwirq;
	bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK);
	bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING);

	irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq);

	if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND
		/* store states */
		ASSIGN_STATE(hwirq, _IS_EDGE, is_edge);
		ASSIGN_STATE(hwirq, _IS_LOW, is_low);
		ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
		/* change to level */
		is_edge = false;
	}
	sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge);
	sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low);
	return 0;
}
/*
 * Find the highest-numbered pending hwirq routed to EXT_INT@ext_num.
 * First locate the highest pending register group from REG_INTR_GROUP,
 * then the highest pending bit within that group's masked-status word.
 *
 * Returns the hwirq number, or -1 if nothing is pending.
 */
static int sp_intc_get_ext_irq(int ext_num)
{
	void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0;
	u32 shift = ext_num ? GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0;
	u32 groups;
	u32 pending_group;
	u32 group;
	u32 pending_irq;

	groups = readl_relaxed(REG_INTR_GROUP);
	pending_group = (groups >> shift) & GROUP_MASK;
	if (!pending_group)
		return -1;

	group = fls(pending_group) - 1;
	pending_irq = readl_relaxed(base + group * 4);
	if (!pending_irq)
		return -1;

	return (group * 32) + fls(pending_irq) - 1;
}
/*
 * Chained handler for the two EXT_INT parent lines.  Drains every
 * pending hwirq; a GPIO_INT line flagged active by the edge workaround
 * only has its polarity restored (the "interrupt" was the synthetic
 * level from the ACK-time polarity flip), all others are dispatched
 * into our domain.
 */
static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int ext_num = (uintptr_t)irq_desc_get_handler_data(desc);
	int hwirq;

	chained_irq_enter(chip, desc);
	while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) {
		if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND
			ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
			sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW));
		} else {
			generic_handle_domain_irq(sp_intc.domain, hwirq);
		}
	}
	chained_irq_exit(chip, desc);
}
static struct irq_chip sp_intc_chip = {
.name = "sp_intc",
.irq_ack = sp_intc_ack_irq,
.irq_mask = sp_intc_mask_irq,
.irq_unmask = sp_intc_unmask_irq,
.irq_set_type = sp_intc_set_type,
};
/* Bind a freshly mapped virq to our chip with a level handler default. */
static int sp_intc_irq_domain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, &sp_intc_chip);
	irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
	irq_set_noprobe(irq);

	return 0;
}
static const struct irq_domain_ops sp_intc_dm_ops = {
.xlate = irq_domain_xlate_twocell,
.map = sp_intc_irq_domain_map,
};
/*
 * Hook parent interrupt @i (EXT_INT0/1) up to the cascade handler.
 * Returns 0 on success, -ENOENT if the DT interrupt cannot be mapped.
 */
static int sp_intc_irq_map(struct device_node *node, int i)
{
	unsigned int virq = irq_of_parse_and_map(node, i);

	if (!virq)
		return -ENOENT;

	irq_set_chained_handler_and_data(virq, sp_intc_handle_ext_cascaded,
					 (void *)(uintptr_t)i);
	return 0;
}
/*
 * DT init: map the two register groups, cascade both EXT_INT parents,
 * reset all per-line registers to a known state (masked, edge, high-
 * active, routed to EXT_INT0, cleared) and register the linear domain.
 *
 * Returns 0 on success or a negative errno, unmapping on failure.
 */
static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
{
	int i, ret;

	sp_intc.g0 = of_iomap(node, 0);
	if (!sp_intc.g0)
		return -ENXIO;
	sp_intc.g1 = of_iomap(node, 1);
	if (!sp_intc.g1) {
		ret = -ENXIO;
		goto out_unmap0;
	}
	ret = sp_intc_irq_map(node, 0); // EXT_INT0
	if (ret)
		goto out_unmap1;
	ret = sp_intc_irq_map(node, 1); // EXT_INT1
	if (ret)
		goto out_unmap1;
	/* initial regs */
	for (i = 0; i < SP_INTC_NR_GROUPS; i++) {
		/* all mask */
		writel_relaxed(0, REG_INTR_MASK + i * 4);
		/* all edge */
		writel_relaxed(~0, REG_INTR_TYPE + i * 4);
		/* all high-active */
		writel_relaxed(0, REG_INTR_POLARITY + i * 4);
		/* all EXT_INT0 */
		writel_relaxed(~0, REG_INTR_PRIORITY + i * 4);
		/* all clear */
		writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
	}
	sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
					       &sp_intc_dm_ops, &sp_intc);
	if (!sp_intc.domain) {
		ret = -ENOMEM;
		goto out_unmap1;
	}
	raw_spin_lock_init(&sp_intc.lock);
	return 0;

out_unmap1:
	iounmap(sp_intc.g1);
out_unmap0:
	iounmap(sp_intc.g0);
	return ret;
}
IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);

View File

@ -0,0 +1,307 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#define BCM63138_MAX_LEDS 32
#define BCM63138_MAX_BRIGHTNESS 9
#define BCM63138_LED_BITS 4 /* how many bits control a single LED */
#define BCM63138_LED_MASK ((1 << BCM63138_LED_BITS) - 1) /* 0xf */
#define BCM63138_LEDS_PER_REG (32 / BCM63138_LED_BITS) /* 8 */
#define BCM63138_GLB_CTRL 0x00
#define BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL 0x00000002
#define BCM63138_GLB_CTRL_SERIAL_LED_EN_POL 0x00000008
#define BCM63138_MASK 0x04
#define BCM63138_HW_LED_EN 0x08
#define BCM63138_SERIAL_LED_SHIFT_SEL 0x0c
#define BCM63138_FLASH_RATE_CTRL1 0x10
#define BCM63138_FLASH_RATE_CTRL2 0x14
#define BCM63138_FLASH_RATE_CTRL3 0x18
#define BCM63138_FLASH_RATE_CTRL4 0x1c
#define BCM63138_BRIGHT_CTRL1 0x20
#define BCM63138_BRIGHT_CTRL2 0x24
#define BCM63138_BRIGHT_CTRL3 0x28
#define BCM63138_BRIGHT_CTRL4 0x2c
#define BCM63138_POWER_LED_CFG 0x30
#define BCM63138_HW_POLARITY 0xb4
#define BCM63138_SW_DATA 0xb8
#define BCM63138_SW_POLARITY 0xbc
#define BCM63138_PARALLEL_LED_POLARITY 0xc0
#define BCM63138_SERIAL_LED_POLARITY 0xc4
#define BCM63138_HW_LED_STATUS 0xc8
#define BCM63138_FLASH_CTRL_STATUS 0xcc
#define BCM63138_FLASH_BRT_CTRL 0xd0
#define BCM63138_FLASH_P_LED_OUT_STATUS 0xd4
#define BCM63138_FLASH_S_LED_OUT_STATUS 0xd8
struct bcm63138_leds {
struct device *dev;
void __iomem *base;
spinlock_t lock;
};
struct bcm63138_led {
struct bcm63138_leds *leds;
struct led_classdev cdev;
u32 pin;
bool active_low;
};
/*
* I/O access
*/
/* Write a 32-bit value to a LED controller register. */
static void bcm63138_leds_write(struct bcm63138_leds *leds, unsigned int reg,
				u32 data)
{
	void __iomem *addr = leds->base + reg;

	writel(data, addr);
}
static unsigned long bcm63138_leds_read(struct bcm63138_leds *leds,
unsigned int reg)
{
return readl(leds->base + reg);
}
/* Read-modify-write the bits selected by @mask in register @reg. */
static void bcm63138_leds_update_bits(struct bcm63138_leds *leds,
				      unsigned int reg, u32 mask, u32 val)
{
	u32 cur, tmp;

	WARN_ON(val & ~mask);

	cur = bcm63138_leds_read(leds, reg);
	tmp = (cur & ~mask) | (val & mask);
	bcm63138_leds_write(leds, reg, tmp);
}
/*
* Helpers
*/
/*
 * Program the 4-bit flash-rate field for one LED.  Eight LEDs share each
 * FLASH_RATE_CTRL register; "pin >> fls(7)" is pin / 8 selecting the
 * register, and the low 3 pin bits select the nibble within it.
 */
static void bcm63138_leds_set_flash_rate(struct bcm63138_leds *leds,
					 struct bcm63138_led *led,
					 u8 value)
{
	int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
	int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;

	bcm63138_leds_update_bits(leds, BCM63138_FLASH_RATE_CTRL1 + reg_offset,
				  BCM63138_LED_MASK << shift, value << shift);
}
/*
 * Program the 4-bit brightness field for one LED in the BRIGHT_CTRL
 * registers, using the same register/nibble addressing as the flash-rate
 * helper above.
 */
static void bcm63138_leds_set_bright(struct bcm63138_leds *leds,
				     struct bcm63138_led *led,
				     u8 value)
{
	int reg_offset = (led->pin >> fls((BCM63138_LEDS_PER_REG - 1))) * 4;
	int shift = (led->pin & (BCM63138_LEDS_PER_REG - 1)) * BCM63138_LED_BITS;

	bcm63138_leds_update_bits(leds, BCM63138_BRIGHT_CTRL1 + reg_offset,
				  BCM63138_LED_MASK << shift, value << shift);
}
/* Drive the LED's software-data bit: on for any non-zero brightness. */
static void bcm63138_leds_enable_led(struct bcm63138_leds *leds,
				     struct bcm63138_led *led,
				     enum led_brightness value)
{
	u32 pin_bit = BIT(led->pin);

	bcm63138_leds_update_bits(leds, BCM63138_SW_DATA, pin_bit,
				  value ? pin_bit : 0);
}
/*
* API callbacks
*/
/*
 * led_classdev brightness_set callback.  Turning the LED off also
 * cancels any hardware blinking (flash rate 0); turning it on programs
 * the requested brightness level.
 */
static void bcm63138_leds_brightness_set(struct led_classdev *led_cdev,
					 enum led_brightness value)
{
	struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
	struct bcm63138_leds *leds = led->leds;
	unsigned long flags;

	spin_lock_irqsave(&leds->lock, flags);
	bcm63138_leds_enable_led(leds, led, value);
	if (!value)
		bcm63138_leds_set_flash_rate(leds, led, 0);
	else
		bcm63138_leds_set_bright(leds, led, value);
	spin_unlock_irqrestore(&leds->lock, flags);
}
/*
 * led_classdev blink_set callback using the hardware flash-rate engine.
 * The hardware only supports symmetric on/off periods from a fixed set;
 * requested delays are matched within a ±10% window.
 *
 * Returns 0 on success or -EINVAL for unsupported delay combinations.
 */
static int bcm63138_leds_blink_set(struct led_classdev *led_cdev,
				   unsigned long *delay_on,
				   unsigned long *delay_off)
{
	struct bcm63138_led *led = container_of(led_cdev, struct bcm63138_led, cdev);
	struct bcm63138_leds *leds = led->leds;
	unsigned long flags;
	u8 value;

	/* 0/0 means "pick a default" per the LED core contract. */
	if (!*delay_on && !*delay_off) {
		*delay_on = 640;
		*delay_off = 640;
	}

	if (*delay_on != *delay_off) {
		dev_dbg(led_cdev->dev, "Blinking at unequal delays is not supported\n");
		return -EINVAL;
	}

	/* Map the delay onto the closest supported hardware flash rate. */
	switch (*delay_on) {
	case 1152 ... 1408: /* 1280 ms ± 10% */
		value = 0x7;
		break;
	case 576 ... 704: /* 640 ms ± 10% */
		value = 0x6;
		break;
	case 288 ... 352: /* 320 ms ± 10% */
		value = 0x5;
		break;
	case 126 ... 154: /* 140 ms ± 10% */
		value = 0x4;
		break;
	case 59 ... 72: /* 65 ms ± 10% */
		value = 0x3;
		break;
	default:
		dev_dbg(led_cdev->dev, "Blinking delay value %lu is unsupported\n",
			*delay_on);
		return -EINVAL;
	}

	spin_lock_irqsave(&leds->lock, flags);
	bcm63138_leds_enable_led(leds, led, BCM63138_MAX_BRIGHTNESS);
	bcm63138_leds_set_flash_rate(leds, led, value);
	spin_unlock_irqrestore(&leds->lock, flags);

	return 0;
}
/*
* LED driver
*/
/*
 * Register one LED described by DT child node @np: validate its "reg"
 * pin, register the classdev, select pinctrl, program polarity, force
 * software control (HW_LED_EN off) and apply the initial state.
 * Errors are logged and the LED skipped; probing continues.
 */
static void bcm63138_leds_create_led(struct bcm63138_leds *leds,
				     struct device_node *np)
{
	struct led_init_data init_data = {
		.fwnode = of_fwnode_handle(np),
	};
	struct device *dev = leds->dev;
	struct bcm63138_led *led;
	struct pinctrl *pinctrl;
	u32 bit;
	int err;

	led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
	if (!led) {
		dev_err(dev, "Failed to alloc LED\n");
		return;
	}

	led->leds = leds;

	if (of_property_read_u32(np, "reg", &led->pin)) {
		dev_err(dev, "Missing \"reg\" property in %pOF\n", np);
		goto err_free;
	}

	if (led->pin >= BCM63138_MAX_LEDS) {
		dev_err(dev, "Invalid \"reg\" value %d\n", led->pin);
		goto err_free;
	}

	led->active_low = of_property_read_bool(np, "active-low");

	led->cdev.max_brightness = BCM63138_MAX_BRIGHTNESS;
	led->cdev.brightness_set = bcm63138_leds_brightness_set;
	led->cdev.blink_set = bcm63138_leds_blink_set;

	err = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
	if (err) {
		dev_err(dev, "Failed to register LED %pOF: %d\n", np, err);
		goto err_free;
	}

	/* Pinctrl is optional: -ENODEV (no pinctrl node) is not a warning. */
	pinctrl = devm_pinctrl_get_select_default(led->cdev.dev);
	if (IS_ERR(pinctrl) && PTR_ERR(pinctrl) != -ENODEV) {
		dev_warn(led->cdev.dev, "Failed to select %pOF pinctrl: %ld\n",
			 np, PTR_ERR(pinctrl));
	}

	bit = BIT(led->pin);
	bcm63138_leds_update_bits(leds, BCM63138_PARALLEL_LED_POLARITY, bit,
				  led->active_low ? 0 : bit);
	/* Take the pin out of hardware control; we drive it via SW_DATA. */
	bcm63138_leds_update_bits(leds, BCM63138_HW_LED_EN, bit, 0);
	bcm63138_leds_set_flash_rate(leds, led, 0);
	bcm63138_leds_enable_led(leds, led, led->cdev.brightness);

	return;

err_free:
	devm_kfree(dev, led);
}
/*
 * Platform probe: map registers, put the block into a known global
 * state (serial-LED polarity config, all pins software-controlled and
 * active-high), then register one LED per DT child node.
 *
 * Returns 0 on success or a negative errno.
 */
static int bcm63138_leds_probe(struct platform_device *pdev)
{
	struct device_node *np = dev_of_node(&pdev->dev);
	struct device *dev = &pdev->dev;
	struct bcm63138_leds *leds;
	struct device_node *child;

	leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
	if (!leds)
		return -ENOMEM;

	leds->dev = dev;

	leds->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(leds->base))
		return PTR_ERR(leds->base);

	spin_lock_init(&leds->lock);

	bcm63138_leds_write(leds, BCM63138_GLB_CTRL,
			    BCM63138_GLB_CTRL_SERIAL_LED_DATA_PPOL |
			    BCM63138_GLB_CTRL_SERIAL_LED_EN_POL);
	bcm63138_leds_write(leds, BCM63138_HW_LED_EN, 0);
	bcm63138_leds_write(leds, BCM63138_SERIAL_LED_POLARITY, 0);
	bcm63138_leds_write(leds, BCM63138_PARALLEL_LED_POLARITY, 0);

	for_each_available_child_of_node(np, child) {
		bcm63138_leds_create_led(leds, child);
	}

	return 0;
}
static const struct of_device_id bcm63138_leds_of_match_table[] = {
{ .compatible = "brcm,bcm63138-leds", },
{ },
};
static struct platform_driver bcm63138_leds_driver = {
.probe = bcm63138_leds_probe,
.driver = {
.name = "leds-bcm63xxx",
.of_match_table = bcm63138_leds_of_match_table,
},
};
module_platform_driver(bcm63138_leds_driver);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, bcm63138_leds_of_match_table);

29
drivers/leds/rgb/Kconfig Normal file
View File

@ -0,0 +1,29 @@
# SPDX-License-Identifier: GPL-2.0
if LEDS_CLASS_MULTICOLOR
config LEDS_PWM_MULTICOLOR
tristate "PWM driven multi-color LED Support"
depends on PWM
help
This option enables support for PWM driven monochrome LEDs that are
grouped into multicolor LEDs.
To compile this driver as a module, choose M here: the module
will be called leds-pwm-multicolor.
config LEDS_QCOM_LPG
tristate "LED support for Qualcomm LPG"
depends on OF
depends on PWM
depends on SPMI
help
This option enables support for the Light Pulse Generator found in a
wide variety of Qualcomm PMICs. The LPG consists of a number of PWM
channels and typically a shared pattern lookup table and a current
sink, intended to drive RGB LEDs. Each channel can either be used as
a LED, grouped to represent a RGB LED or exposed as PWM channels.
If compiled as a module, the module will be named leds-qcom-lpg.
endif # LEDS_CLASS_MULTICOLOR

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_LEDS_PWM_MULTICOLOR) += leds-pwm-multicolor.o
obj-$(CONFIG_LEDS_QCOM_LPG) += leds-qcom-lpg.o

View File

@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* PWM-based multi-color LED control
*
* Copyright 2022 Sven Schwermer <sven.schwermer@disruptive-technologies.com>
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/led-class-multicolor.h>
#include <linux/leds.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/pwm.h>
struct pwm_led {
struct pwm_device *pwm;
struct pwm_state state;
bool active_low;
};
struct pwm_mc_led {
struct led_classdev_mc mc_cdev;
struct mutex lock;
struct pwm_led leds[];
};
/*
 * brightness_set_blocking callback: scale each sub-LED's color component
 * to a PWM duty cycle (inverted for active-low outputs) and apply all
 * channels under the mutex.
 *
 * Returns 0, or the first pwm_apply_state() error (remaining channels
 * are then left unchanged).
 */
static int led_pwm_mc_set(struct led_classdev *cdev,
			  enum led_brightness brightness)
{
	struct led_classdev_mc *mc_cdev = lcdev_to_mccdev(cdev);
	struct pwm_mc_led *priv = container_of(mc_cdev, struct pwm_mc_led, mc_cdev);
	unsigned long long duty;
	int ret = 0;
	int i;

	/* Split the overall brightness into per-color component values. */
	led_mc_calc_color_components(mc_cdev, brightness);

	mutex_lock(&priv->lock);

	for (i = 0; i < mc_cdev->num_colors; i++) {
		/* duty = period * component / max, computed in 64 bits. */
		duty = priv->leds[i].state.period;
		duty *= mc_cdev->subled_info[i].brightness;
		do_div(duty, cdev->max_brightness);

		if (priv->leds[i].active_low)
			duty = priv->leds[i].state.period - duty;

		priv->leds[i].state.duty_cycle = duty;
		priv->leds[i].state.enabled = duty > 0;
		ret = pwm_apply_state(priv->leds[i].pwm,
				      &priv->leds[i].state);
		if (ret)
			break;
	}

	mutex_unlock(&priv->lock);

	return ret;
}
/*
 * Walk the children of the "multi-led" node, claiming one PWM per child
 * and recording its color index; increments priv->mc_cdev.num_colors as
 * each channel is filled in.
 *
 * Returns 0 on success or a negative errno (the child fwnode reference
 * is dropped on the error paths).
 */
static int iterate_subleds(struct device *dev, struct pwm_mc_led *priv,
			   struct fwnode_handle *mcnode)
{
	struct mc_subled *subled = priv->mc_cdev.subled_info;
	struct fwnode_handle *fwnode;
	struct pwm_led *pwmled;
	u32 color;
	int ret;

	/* iterate over the nodes inside the multi-led node */
	fwnode_for_each_child_node(mcnode, fwnode) {
		pwmled = &priv->leds[priv->mc_cdev.num_colors];
		pwmled->pwm = devm_fwnode_pwm_get(dev, fwnode, NULL);
		if (IS_ERR(pwmled->pwm)) {
			ret = dev_err_probe(dev, PTR_ERR(pwmled->pwm), "unable to request PWM\n");
			goto release_fwnode;
		}
		pwm_init_state(pwmled->pwm, &pwmled->state);
		pwmled->active_low = fwnode_property_read_bool(fwnode, "active-low");

		ret = fwnode_property_read_u32(fwnode, "color", &color);
		if (ret) {
			dev_err(dev, "cannot read color: %d\n", ret);
			goto release_fwnode;
		}

		subled[priv->mc_cdev.num_colors].color_index = color;
		priv->mc_cdev.num_colors++;
	}

	return 0;

release_fwnode:
	fwnode_handle_put(fwnode);
	return ret;
}
/*
 * Platform probe: locate the "multi-led" child node, allocate one
 * pwm_led slot per sub-node, register the multicolor classdev and apply
 * the initial brightness.
 *
 * Returns 0 on success or a negative errno.  The fwnode reference taken
 * by device_get_named_child_node() is dropped on every path, including
 * success — the original leaked it after a successful registration and
 * on the initial-brightness failure path.
 */
static int led_pwm_mc_probe(struct platform_device *pdev)
{
	struct fwnode_handle *mcnode, *fwnode;
	struct led_init_data init_data = {};
	struct led_classdev *cdev;
	struct mc_subled *subled;
	struct pwm_mc_led *priv;
	int count = 0;
	int ret = 0;

	mcnode = device_get_named_child_node(&pdev->dev, "multi-led");
	if (!mcnode)
		return dev_err_probe(&pdev->dev, -ENODEV,
				     "expected multi-led node\n");

	/* count the nodes inside the multi-led node */
	fwnode_for_each_child_node(mcnode, fwnode)
		count++;

	priv = devm_kzalloc(&pdev->dev, struct_size(priv, leds, count),
			    GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto release_mcnode;
	}
	mutex_init(&priv->lock);

	subled = devm_kcalloc(&pdev->dev, count, sizeof(*subled), GFP_KERNEL);
	if (!subled) {
		ret = -ENOMEM;
		goto release_mcnode;
	}
	priv->mc_cdev.subled_info = subled;

	/* init the multicolor's LED class device */
	cdev = &priv->mc_cdev.led_cdev;
	fwnode_property_read_u32(mcnode, "max-brightness",
				 &cdev->max_brightness);
	cdev->flags = LED_CORE_SUSPENDRESUME;
	cdev->brightness_set_blocking = led_pwm_mc_set;

	ret = iterate_subleds(&pdev->dev, priv, mcnode);
	if (ret)
		goto release_mcnode;

	init_data.fwnode = mcnode;
	ret = devm_led_classdev_multicolor_register_ext(&pdev->dev,
							&priv->mc_cdev,
							&init_data);
	if (ret) {
		dev_err(&pdev->dev,
			"failed to register multicolor PWM led for %s: %d\n",
			cdev->name, ret);
		goto release_mcnode;
	}

	ret = led_pwm_mc_set(cdev, cdev->brightness);
	if (ret) {
		dev_err_probe(&pdev->dev, ret,
			      "failed to set led PWM value for %s: %d",
			      cdev->name, ret);
		goto release_mcnode;
	}

	platform_set_drvdata(pdev, priv);
	fwnode_handle_put(mcnode);
	return 0;

release_mcnode:
	fwnode_handle_put(mcnode);
	return ret;
}
static const struct of_device_id of_pwm_leds_mc_match[] = {
{ .compatible = "pwm-leds-multicolor", },
{}
};
MODULE_DEVICE_TABLE(of, of_pwm_leds_mc_match);
static struct platform_driver led_pwm_mc_driver = {
.probe = led_pwm_mc_probe,
.driver = {
.name = "leds_pwm_multicolor",
.of_match_table = of_pwm_leds_mc_match,
},
};
module_platform_driver(led_pwm_mc_driver);
MODULE_AUTHOR("Sven Schwermer <sven.schwermer@disruptive-technologies.com>");
MODULE_DESCRIPTION("multi-color PWM LED driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:leds-pwm-multicolor");

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,105 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Siemens SIMATIC IPC driver for GPIO based LEDs
*
* Copyright (c) Siemens AG, 2022
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
*/
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
static struct gpiod_lookup_table simatic_ipc_led_gpio_table = {
.dev_id = "leds-gpio",
.table = {
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW),
GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH),
},
};
static const struct gpio_led simatic_ipc_gpio_leds[] = {
{ .name = "green:" LED_FUNCTION_STATUS "-3" },
{ .name = "red:" LED_FUNCTION_STATUS "-1" },
{ .name = "green:" LED_FUNCTION_STATUS "-1" },
{ .name = "red:" LED_FUNCTION_STATUS "-2" },
{ .name = "green:" LED_FUNCTION_STATUS "-2" },
{ .name = "red:" LED_FUNCTION_STATUS "-3" },
};
static const struct gpio_led_platform_data simatic_ipc_gpio_leds_pdata = {
.num_leds = ARRAY_SIZE(simatic_ipc_gpio_leds),
.leds = simatic_ipc_gpio_leds,
};
static struct platform_device *simatic_leds_pdev;
/*
 * Platform remove (also used as probe error cleanup): drop the GPIO
 * lookup table and unregister the child leds-gpio device.
 */
static int simatic_ipc_leds_gpio_remove(struct platform_device *pdev)
{
	gpiod_remove_lookup_table(&simatic_ipc_led_gpio_table);
	platform_device_unregister(simatic_leds_pdev);
	return 0;
}
/*
 * Platform probe: install the GPIO lookup table, spawn a child
 * "leds-gpio" device for the status LEDs, and pulse the two control
 * lines (PM_BIOS_BOOT_N, PM_WDT_OUT) low once.
 *
 * Returns 0 on success or a negative errno; on failure the lookup table
 * and any registered child device are cleaned up.
 */
static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
{
	struct gpio_desc *gpiod;
	int err;

	gpiod_add_lookup_table(&simatic_ipc_led_gpio_table);
	simatic_leds_pdev = platform_device_register_resndata(NULL,
		"leds-gpio", PLATFORM_DEVID_NONE, NULL, 0,
		&simatic_ipc_gpio_leds_pdata,
		sizeof(simatic_ipc_gpio_leds_pdata));
	if (IS_ERR(simatic_leds_pdev)) {
		err = PTR_ERR(simatic_leds_pdev);
		/*
		 * Clear the global before the cleanup path runs: the remove
		 * helper passes it to platform_device_unregister(), which
		 * must not receive an ERR_PTR (NULL is handled safely).
		 */
		simatic_leds_pdev = NULL;
		goto out;
	}

	/* PM_BIOS_BOOT_N */
	gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 6, GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		err = PTR_ERR(gpiod);
		goto out;
	}
	gpiod_put(gpiod);

	/* PM_WDT_OUT */
	gpiod = gpiod_get_index(&simatic_leds_pdev->dev, NULL, 7, GPIOD_OUT_LOW);
	if (IS_ERR(gpiod)) {
		err = PTR_ERR(gpiod);
		goto out;
	}
	gpiod_put(gpiod);

	return 0;
out:
	simatic_ipc_leds_gpio_remove(pdev);
	return err;
}
static struct platform_driver simatic_ipc_led_gpio_driver = {
.probe = simatic_ipc_leds_gpio_probe,
.remove = simatic_ipc_leds_gpio_remove,
.driver = {
.name = KBUILD_MODNAME,
}
};
module_platform_driver(simatic_ipc_led_gpio_driver);
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_SOFTDEP("pre: platform:leds-gpio");
MODULE_AUTHOR("Henning Schild <henning.schild@siemens.com>");

166
drivers/md/dm-io-rewind.c Normal file
View File

@ -0,0 +1,166 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2022 Red Hat, Inc.
*/
#include <linux/bio.h>
#include <linux/blk-crypto.h>
#include <linux/blk-integrity.h>
#include "dm-core.h"
/*
 * Rewind @iter by @bytes over the bio_vec array @bv — the inverse of
 * bvec_iter_advance().  Returns true on success; returns false and
 * resets the iterator to the start of the bvec array if asked to rewind
 * past the beginning (with a one-time warning).
 */
static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv,
				       struct bvec_iter *iter,
				       unsigned int bytes)
{
	int idx;

	iter->bi_size += bytes;
	/* Fast path: the rewind stays within the current bvec. */
	if (bytes <= iter->bi_bvec_done) {
		iter->bi_bvec_done -= bytes;
		return true;
	}

	bytes -= iter->bi_bvec_done;
	idx = iter->bi_idx - 1;
	/* Step backwards over whole bvecs the rewind crosses. */
	while (idx >= 0 && bytes && bytes > bv[idx].bv_len) {
		bytes -= bv[idx].bv_len;
		idx--;
	}

	if (WARN_ONCE(idx < 0 && bytes,
		      "Attempted to rewind iter beyond bvec's boundaries\n")) {
		iter->bi_size -= bytes;
		iter->bi_bvec_done = 0;
		iter->bi_idx = 0;
		return false;
	}

	iter->bi_idx = idx;
	/* Land partway into bvec idx, bytes short of its end. */
	iter->bi_bvec_done = bv[idx].bv_len - bytes;
	return true;
}
#if defined(CONFIG_BLK_DEV_INTEGRITY)
/**
* dm_bio_integrity_rewind - Rewind integrity vector
* @bio: bio whose integrity vector to update
* @bytes_done: number of data bytes to rewind
*
* Description: This function calculates how many integrity bytes the
* number of completed data bytes correspond to and rewind the
* integrity vector accordingly.
*/
static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	/* Convert rewound data bytes (in 512B sectors) to integrity bytes. */
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	/* Move the integrity iterator's sector and byte position back in step. */
	bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
	dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
}
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void dm_bio_integrity_rewind(struct bio *bio,
unsigned int bytes_done)
{
return;
}
#endif
#if defined(CONFIG_BLK_INLINE_ENCRYPTION)
/* Decrements @dun by @dec, treating @dun as a multi-limb integer. */
/* Decrements @dun by @dec, treating @dun as a little-endian multi-limb integer. */
static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
				       unsigned int dec)
{
	unsigned int borrow = dec;
	int i = 0;

	/* Propagate the subtraction limb by limb until no borrow remains. */
	while (borrow && i < BLK_CRYPTO_DUN_ARRAY_SIZE) {
		u64 before = dun[i];

		dun[i] -= borrow;
		/* Unsigned wrap-around means we borrowed from the next limb. */
		borrow = dun[i] > before ? 1 : 0;
		i++;
	}
}
static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;
	unsigned int nr_units = bytes >> bc->bc_key->data_unit_size_bits;

	/* Rewinding @bytes of data steps the DUN back by that many data units. */
	dm_bio_crypt_dun_decrement(bc->bc_dun, nr_units);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
	/* No-op when inline encryption support is compiled out. */
}
#endif
/*
 * Rewind @iter by @bytes, keeping bi_sector in step with the data position.
 */
static inline void dm_bio_rewind_iter(const struct bio *bio,
				      struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector -= bytes >> 9;

	/* No advance means no rewind */
	if (bio_no_advance_iter(bio))
		iter->bi_size += bytes;
	else
		dm_bvec_iter_rewind(bio->bi_io_vec, iter, bytes);
}
/**
* dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes.
* @bio: bio to rewind
* @bytes: how many bytes to rewind
*
* WARNING:
* Caller must ensure that @bio has a fixed end sector, to allow
* rewinding from end of bio and restoring its original position.
* Caller is also responsibile for restoring bio's size.
*/
static void dm_bio_rewind(struct bio *bio, unsigned bytes)
{
	/* Keep the integrity metadata position in sync with the data rewind. */
	if (bio_integrity(bio))
		dm_bio_integrity_rewind(bio, bytes);

	/* Likewise for the inline-encryption data unit number, if any. */
	if (bio_has_crypt_ctx(bio))
		dm_bio_crypt_rewind(bio, bytes);

	dm_bio_rewind_iter(bio, &bio->bi_iter, bytes);
}
/*
 * Re-clone io->orig_bio, rewind the clone back to the region this dm_io
 * covers, trim it to io->sectors, and chain it in front of the original
 * bio so completions propagate correctly.
 */
void dm_io_rewind(struct dm_io *io, struct bio_set *bs)
{
	struct bio *orig = io->orig_bio;
	struct bio *new_orig = bio_alloc_clone(orig->bi_bdev, orig,
					       GFP_NOIO, bs);

	/*
	 * dm_bio_rewind can restore to previous position since the
	 * end sector is fixed for original bio, but we still need
	 * to restore bio's size manually (using io->sectors).
	 */
	dm_bio_rewind(new_orig, ((io->sector_offset << 9) -
				 orig->bi_iter.bi_size));
	bio_trim(new_orig, 0, io->sectors);
	bio_chain(new_orig, orig);

	/*
	 * __bi_remaining was increased (by dm_split_and_process_bio),
	 * so must drop the one added in bio_chain.
	 */
	atomic_dec(&orig->__bi_remaining);
	io->orig_bio = new_orig;
}

View File

@ -0,0 +1,75 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/dm-verity-loadpin.h>
#include "dm.h"
#include "dm-core.h"
#include "dm-verity.h"
#define DM_MSG_PREFIX "verity-loadpin"
LIST_HEAD(dm_verity_loadpin_trusted_root_digests);
/*
 * Check whether @ti is a verity target whose root digest appears on
 * LoadPin's list of trusted root digests.
 */
static bool is_trusted_verity_target(struct dm_target *ti)
{
	struct dm_verity_loadpin_trusted_root_digest *trd;
	unsigned int digest_size;
	u8 *root_digest;
	bool match = false;

	if (!dm_is_verity_target(ti))
		return false;

	if (dm_verity_get_root_digest(ti, &root_digest, &digest_size))
		return false;

	list_for_each_entry(trd, &dm_verity_loadpin_trusted_root_digests, node) {
		match = trd->len == digest_size &&
			memcmp(trd->data, root_digest, digest_size) == 0;
		if (match)
			break;
	}

	/* dm_verity_get_root_digest() allocated the digest on our behalf. */
	kfree(root_digest);

	return match;
}
/*
* Determines whether the file system of a superblock is located on
* a verity device that is trusted by LoadPin.
*/
bool dm_verity_loadpin_is_bdev_trusted(struct block_device *bdev)
{
	struct mapped_device *md;
	struct dm_table *table;
	struct dm_target *ti;
	int srcu_idx;
	bool trusted = false;

	/* Nothing can be trusted if no root digests were registered. */
	if (list_empty(&dm_verity_loadpin_trusted_root_digests))
		return false;

	md = dm_get_md(bdev->bd_dev);
	if (!md)
		return false;

	table = dm_get_live_table(md, &srcu_idx);

	/*
	 * dm_get_live_table() may return NULL when no table has been
	 * loaded yet; only a single-target table can be trusted.
	 */
	if (!table || table->num_targets != 1)
		goto out;

	ti = dm_table_get_target(table, 0);

	if (is_trusted_verity_target(ti))
		trusted = true;

out:
	dm_put_live_table(md, srcu_idx);
	dm_put(md);

	return trusted;
}

1061
drivers/media/i2c/ar0521.c Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARDUCAM_PIVARIETY_H_
#define _ARDUCAM_PIVARIETY_H_
#define DEVICE_REG_BASE 0x0100
#define PIXFORMAT_REG_BASE 0x0200
#define FORMAT_REG_BASE 0x0300
#define CTRL_REG_BASE 0x0400
#define IPC_REG_BASE 0x0600
#define ARDUCAM_MODE_STANDBY 0x00
#define ARDUCAM_MODE_STREAMING 0x01
#define MODE_SELECT_REG (DEVICE_REG_BASE | 0x0000)
#define DEVICE_VERSION_REG (DEVICE_REG_BASE | 0x0001)
#define SENSOR_ID_REG (DEVICE_REG_BASE | 0x0002)
#define DEVICE_ID_REG (DEVICE_REG_BASE | 0x0003)
#define SYSTEM_IDLE_REG (DEVICE_REG_BASE | 0x0007)
#define PIXFORMAT_INDEX_REG (PIXFORMAT_REG_BASE | 0x0000)
#define PIXFORMAT_TYPE_REG (PIXFORMAT_REG_BASE | 0x0001)
#define PIXFORMAT_ORDER_REG (PIXFORMAT_REG_BASE | 0x0002)
#define MIPI_LANES_REG (PIXFORMAT_REG_BASE | 0x0003)
#define FLIPS_DONT_CHANGE_ORDER_REG (PIXFORMAT_REG_BASE | 0x0004)
#define RESOLUTION_INDEX_REG (FORMAT_REG_BASE | 0x0000)
#define FORMAT_WIDTH_REG (FORMAT_REG_BASE | 0x0001)
#define FORMAT_HEIGHT_REG (FORMAT_REG_BASE | 0x0002)
#define CTRL_INDEX_REG (CTRL_REG_BASE | 0x0000)
#define CTRL_ID_REG (CTRL_REG_BASE | 0x0001)
#define CTRL_MIN_REG (CTRL_REG_BASE | 0x0002)
#define CTRL_MAX_REG (CTRL_REG_BASE | 0x0003)
#define CTRL_STEP_REG (CTRL_REG_BASE | 0x0004)
#define CTRL_DEF_REG (CTRL_REG_BASE | 0x0005)
#define CTRL_VALUE_REG (CTRL_REG_BASE | 0x0006)
#define IPC_SEL_TARGET_REG (IPC_REG_BASE | 0x0000)
#define IPC_SEL_TOP_REG (IPC_REG_BASE | 0x0001)
#define IPC_SEL_LEFT_REG (IPC_REG_BASE | 0x0002)
#define IPC_SEL_WIDTH_REG (IPC_REG_BASE | 0x0003)
#define IPC_SEL_HEIGHT_REG (IPC_REG_BASE | 0x0004)
#define IPC_DELAY_REG (IPC_REG_BASE | 0x0005)
#define NO_DATA_AVAILABLE 0xFFFFFFFE
#define DEVICE_ID 0x0030
#define I2C_READ_RETRY_COUNT 3
#define I2C_WRITE_RETRY_COUNT 2
#define V4L2_CID_ARDUCAM_BASE (V4L2_CID_USER_BASE + 0x1000)
#define V4L2_CID_ARDUCAM_EXT_TRI (V4L2_CID_ARDUCAM_BASE + 1)
#define V4L2_CID_ARDUCAM_IRCUT (V4L2_CID_ARDUCAM_BASE + 8)
#define V4L2_CID_ARDUCAM_STROBE_SHIFT (V4L2_CID_ARDUCAM_BASE + 14)
#define V4L2_CID_ARDUCAM_STROBE_WIDTH (V4L2_CID_ARDUCAM_BASE + 15)
#define V4L2_CID_ARDUCAM_MODE (V4L2_CID_ARDUCAM_BASE + 16)
enum image_dt {
IMAGE_DT_YUV420_8 = 0x18,
IMAGE_DT_YUV420_10,
IMAGE_DT_YUV420CSPS_8 = 0x1C,
IMAGE_DT_YUV420CSPS_10,
IMAGE_DT_YUV422_8,
IMAGE_DT_YUV422_10,
IMAGE_DT_RGB444,
IMAGE_DT_RGB555,
IMAGE_DT_RGB565,
IMAGE_DT_RGB666,
IMAGE_DT_RGB888,
IMAGE_DT_RAW6 = 0x28,
IMAGE_DT_RAW7,
IMAGE_DT_RAW8,
IMAGE_DT_RAW10,
IMAGE_DT_RAW12,
IMAGE_DT_RAW14,
};
enum bayer_order {
BAYER_ORDER_BGGR = 0,
BAYER_ORDER_GBRG = 1,
BAYER_ORDER_GRBG = 2,
BAYER_ORDER_RGGB = 3,
BAYER_ORDER_GRAY = 4,
};
enum yuv_order {
YUV_ORDER_YUYV = 0,
YUV_ORDER_YVYU = 1,
YUV_ORDER_UYVY = 2,
YUV_ORDER_VYUY = 3,
};
struct arducam_resolution {
u32 width;
u32 height;
};
struct arducam_format {
u32 index;
u32 mbus_code;
u32 bayer_order;
u32 data_type;
u32 num_resolution_set;
struct arducam_resolution *resolution_set;
};
#endif

File diff suppressed because it is too large Load Diff

1295
drivers/media/i2c/imx296.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,322 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Inc.
* Author: Yunfei Dong <yunfei.dong@mediatek.com>
*/
#include "vdec_h264_req_common.h"
/* get used parameters for sps/pps */
#define GET_MTK_VDEC_FLAG(cond, flag) \
{ dst_param->cond = ((src_param->flags & flag) ? (1) : (0)); }
#define GET_MTK_VDEC_PARAM(param) \
{ dst_param->param = src_param->param; }
void mtk_vdec_h264_get_ref_list(u8 *ref_list,
				const struct v4l2_h264_reference *v4l2_ref_list,
				int num_valid)
{
	int i;

	/*
	 * The firmware expects unused reflist entries to hold 0x20, so
	 * pre-fill the whole 32-entry list before copying valid indices.
	 */
	memset(ref_list, 0x20, 32);

	/*
	 * TODO The firmware does not support field decoding. Future
	 * implementation must use v4l2_ref_list[i].fields to obtain
	 * the reference field parity.
	 */
	for (i = 0; i < num_valid; i++)
		ref_list[i] = v4l2_ref_list[i].index;
}
/* Return the current payload of control @id, or an ERR_PTR if it is absent. */
void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);

	/* Callers test the result with IS_ERR() rather than against NULL. */
	return ctrl ? ctrl->p_cur.p : ERR_PTR(-EINVAL);
}
/*
 * Translate the DPB in @decode_params into the firmware's per-entry
 * layout, resolving each active entry's CAPTURE buffer by timestamp to
 * obtain its luma/chroma DMA addresses.
 */
void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
				 struct slice_api_h264_decode_param *decode_params,
				 struct mtk_h264_dpb_info *h264_dpb_info)
{
	const struct slice_h264_dpb_entry *dpb;
	struct vb2_queue *vq;
	struct vb2_buffer *vb;
	struct vb2_v4l2_buffer *vb2_v4l2;
	int index;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);

	for (index = 0; index < V4L2_H264_NUM_DPB_ENTRIES; index++) {
		dpb = &decode_params->dpb[index];
		/* Inactive entries are marked unreferenced and skipped. */
		if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) {
			h264_dpb_info[index].reference_flag = 0;
			continue;
		}

		/* Locate the CAPTURE buffer carrying this reference frame. */
		vb = vb2_find_buffer(vq, dpb->reference_ts);
		if (!vb) {
			dev_err(&ctx->dev->plat_dev->dev,
				"Reference invalid: dpb_index(%d) reference_ts(%lld)",
				index, dpb->reference_ts);
			continue;
		}

		/* 1 for short term reference, 2 for long term reference */
		if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
			h264_dpb_info[index].reference_flag = 1;
		else
			h264_dpb_info[index].reference_flag = 2;

		vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
		h264_dpb_info[index].field = vb2_v4l2->field;

		h264_dpb_info[index].y_dma_addr =
			vb2_dma_contig_plane_dma_addr(vb, 0);
		/* Chroma lives in a second plane, or follows luma in plane 0. */
		if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
			h264_dpb_info[index].c_dma_addr =
				vb2_dma_contig_plane_dma_addr(vb, 1);
		else
			h264_dpb_info[index].c_dma_addr =
				h264_dpb_info[index].y_dma_addr +
				ctx->picinfo.fb_sz[0];
	}
}
/*
 * Copy the V4L2 SPS control into the firmware's SPS layout.  The
 * GET_MTK_VDEC_PARAM/GET_MTK_VDEC_FLAG macros implicitly reference the
 * dst_param/src_param parameter names.
 */
void mtk_vdec_h264_copy_sps_params(struct mtk_h264_sps_param *dst_param,
				   const struct v4l2_ctrl_h264_sps *src_param)
{
	GET_MTK_VDEC_PARAM(chroma_format_idc);
	GET_MTK_VDEC_PARAM(bit_depth_luma_minus8);
	GET_MTK_VDEC_PARAM(bit_depth_chroma_minus8);
	GET_MTK_VDEC_PARAM(log2_max_frame_num_minus4);
	GET_MTK_VDEC_PARAM(pic_order_cnt_type);
	GET_MTK_VDEC_PARAM(log2_max_pic_order_cnt_lsb_minus4);
	GET_MTK_VDEC_PARAM(max_num_ref_frames);
	GET_MTK_VDEC_PARAM(pic_width_in_mbs_minus1);
	GET_MTK_VDEC_PARAM(pic_height_in_map_units_minus1);

	/* Single-bit SPS flags are unpacked from src_param->flags. */
	GET_MTK_VDEC_FLAG(separate_colour_plane_flag,
			  V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
	GET_MTK_VDEC_FLAG(qpprime_y_zero_transform_bypass_flag,
			  V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
	GET_MTK_VDEC_FLAG(delta_pic_order_always_zero_flag,
			  V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
	GET_MTK_VDEC_FLAG(frame_mbs_only_flag,
			  V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
	GET_MTK_VDEC_FLAG(mb_adaptive_frame_field_flag,
			  V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
	GET_MTK_VDEC_FLAG(direct_8x8_inference_flag,
			  V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
}
/*
 * Copy the V4L2 PPS control into the firmware's PPS layout.  The
 * GET_MTK_VDEC_PARAM/GET_MTK_VDEC_FLAG macros implicitly reference the
 * dst_param/src_param parameter names.
 */
void mtk_vdec_h264_copy_pps_params(struct mtk_h264_pps_param *dst_param,
				   const struct v4l2_ctrl_h264_pps *src_param)
{
	GET_MTK_VDEC_PARAM(num_ref_idx_l0_default_active_minus1);
	GET_MTK_VDEC_PARAM(num_ref_idx_l1_default_active_minus1);
	GET_MTK_VDEC_PARAM(weighted_bipred_idc);
	GET_MTK_VDEC_PARAM(pic_init_qp_minus26);
	GET_MTK_VDEC_PARAM(chroma_qp_index_offset);
	GET_MTK_VDEC_PARAM(second_chroma_qp_index_offset);

	/* Single-bit PPS flags are unpacked from src_param->flags. */
	GET_MTK_VDEC_FLAG(entropy_coding_mode_flag,
			  V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
	GET_MTK_VDEC_FLAG(pic_order_present_flag,
			  V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
	GET_MTK_VDEC_FLAG(weighted_pred_flag,
			  V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
	GET_MTK_VDEC_FLAG(deblocking_filter_control_present_flag,
			  V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
	GET_MTK_VDEC_FLAG(constrained_intra_pred_flag,
			  V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
	GET_MTK_VDEC_FLAG(redundant_pic_cnt_present_flag,
			  V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
	GET_MTK_VDEC_FLAG(transform_8x8_mode_flag,
			  V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
	GET_MTK_VDEC_FLAG(scaling_matrix_present_flag,
			  V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT);
}
/*
 * Build the firmware slice-header structure from the V4L2 slice and
 * decode-params controls.  The GET_MTK_VDEC_* macros implicitly use the
 * dst_param/src_param parameter names; fields not present in the slice
 * control are taken from @dec_param.
 */
void mtk_vdec_h264_copy_slice_hd_params(struct mtk_h264_slice_hd_param *dst_param,
					const struct v4l2_ctrl_h264_slice_params *src_param,
					const struct v4l2_ctrl_h264_decode_params *dec_param)
{
	int temp;

	GET_MTK_VDEC_PARAM(first_mb_in_slice);
	GET_MTK_VDEC_PARAM(slice_type);
	GET_MTK_VDEC_PARAM(cabac_init_idc);
	GET_MTK_VDEC_PARAM(slice_qp_delta);
	GET_MTK_VDEC_PARAM(disable_deblocking_filter_idc);
	GET_MTK_VDEC_PARAM(slice_alpha_c0_offset_div2);
	GET_MTK_VDEC_PARAM(slice_beta_offset_div2);
	GET_MTK_VDEC_PARAM(num_ref_idx_l0_active_minus1);
	GET_MTK_VDEC_PARAM(num_ref_idx_l1_active_minus1);

	/* Picture-level values come from the decode-params control. */
	dst_param->frame_num = dec_param->frame_num;
	dst_param->pic_order_cnt_lsb = dec_param->pic_order_cnt_lsb;
	dst_param->delta_pic_order_cnt_bottom =
		dec_param->delta_pic_order_cnt_bottom;
	dst_param->delta_pic_order_cnt0 =
		dec_param->delta_pic_order_cnt0;
	dst_param->delta_pic_order_cnt1 =
		dec_param->delta_pic_order_cnt1;

	/* Field flags live in dec_param->flags, not the slice control. */
	temp = dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC;
	dst_param->field_pic_flag = temp ? 1 : 0;
	temp = dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD;
	dst_param->bottom_field_flag = temp ? 1 : 0;

	GET_MTK_VDEC_FLAG(direct_spatial_mv_pred_flag,
			  V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED);
}
/*
 * Copy the V4L2 scaling-matrix control into the firmware layout; both
 * sides carry six 4x4 lists and six 8x8 lists, so a wholesale copy of
 * each array suffices.
 */
void mtk_vdec_h264_copy_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
				       const struct v4l2_ctrl_h264_scaling_matrix *src_matrix)
{
	memcpy(dst_matrix->scaling_list_4x4, src_matrix->scaling_list_4x4,
	       sizeof(dst_matrix->scaling_list_4x4));
	memcpy(dst_matrix->scaling_list_8x8, src_matrix->scaling_list_8x8,
	       sizeof(dst_matrix->scaling_list_8x8));
}
/*
 * Copy the V4L2 decode-params control and the driver-maintained DPB
 * into the firmware decode-parameter structure.
 */
void
mtk_vdec_h264_copy_decode_params(struct slice_api_h264_decode_param *dst_params,
				 const struct v4l2_ctrl_h264_decode_params *src_params,
				 const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
{
	struct slice_h264_dpb_entry *dst_entry;
	const struct v4l2_h264_dpb_entry *src_entry;
	int i;

	/* Copy the DPB field-by-field; the two entry layouts differ. */
	for (i = 0; i < ARRAY_SIZE(dst_params->dpb); i++) {
		dst_entry = &dst_params->dpb[i];
		src_entry = &dpb[i];

		dst_entry->reference_ts = src_entry->reference_ts;
		dst_entry->frame_num = src_entry->frame_num;
		dst_entry->pic_num = src_entry->pic_num;
		dst_entry->top_field_order_cnt = src_entry->top_field_order_cnt;
		dst_entry->bottom_field_order_cnt =
			src_entry->bottom_field_order_cnt;
		dst_entry->flags = src_entry->flags;
	}

	/* num_slices is a leftover from the old H.264 support and is ignored
	 * by the firmware.
	 */
	dst_params->num_slices = 0;
	dst_params->nal_ref_idc = src_params->nal_ref_idc;
	dst_params->top_field_order_cnt = src_params->top_field_order_cnt;
	dst_params->bottom_field_order_cnt = src_params->bottom_field_order_cnt;
	dst_params->flags = src_params->flags;
}
static bool mtk_vdec_h264_dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
const struct v4l2_h264_dpb_entry *b)
{
return a->top_field_order_cnt == b->top_field_order_cnt &&
a->bottom_field_order_cnt == b->bottom_field_order_cnt;
}
/*
* Move DPB entries of dec_param that refer to a frame already existing in dpb
* into the already existing slot in dpb, and move other entries into new slots.
*
* This function is an adaptation of the similarly-named function in
* hantro_h264.c.
*/
void mtk_vdec_h264_update_dpb(const struct v4l2_ctrl_h264_decode_params *dec_param,
			      struct v4l2_h264_dpb_entry *dpb)
{
	/* new:    incoming entries that found no POC match in @dpb */
	/* in_use: target slots that held an active entry on entry */
	/* used:   target slots already claimed during this update */
	DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
	DECLARE_BITMAP(in_use, ARRAY_SIZE(dec_param->dpb)) = { 0, };
	DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
	unsigned int i, j;

	/* Disable all entries by default, and mark the ones in use. */
	for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
		if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
			set_bit(i, in_use);
		dpb[i].flags &= ~V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
	}

	/* Try to match new DPB entries with existing ones by their POCs. */
	for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
		const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];

		if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
			continue;

		/*
		 * To cut off some comparisons, iterate only on target DPB
		 * entries were already used.
		 */
		for_each_set_bit(j, in_use, ARRAY_SIZE(dec_param->dpb)) {
			struct v4l2_h264_dpb_entry *cdpb;

			cdpb = &dpb[j];
			if (!mtk_vdec_h264_dpb_entry_match(cdpb, ndpb))
				continue;

			*cdpb = *ndpb;
			set_bit(j, used);
			/* Don't reiterate on this one. */
			clear_bit(j, in_use);
			break;
		}

		/* Loop ran to completion: no existing slot matched. */
		if (j == ARRAY_SIZE(dec_param->dpb))
			set_bit(i, new);
	}

	/* For entries that could not be matched, use remaining free slots. */
	for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
		const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
		struct v4l2_h264_dpb_entry *cdpb;

		/*
		 * Both arrays are of the same sizes, so there is no way
		 * we can end up with no space in target array, unless
		 * something is buggy.
		 */
		j = find_first_zero_bit(used, ARRAY_SIZE(dec_param->dpb));
		if (WARN_ON(j >= ARRAY_SIZE(dec_param->dpb)))
			return;

		cdpb = &dpb[j];
		*cdpb = *ndpb;
		set_bit(j, used);
	}
}
unsigned int mtk_vdec_h264_get_mv_buf_size(unsigned int width, unsigned int height)
{
	unsigned int mb_cols = width / MB_UNIT_LEN;
	unsigned int mb_rows = height / MB_UNIT_LEN;

	/* 8 spare units beyond the macroblock count, HW_MB_STORE_SZ bytes each. */
	return (mb_cols * mb_rows + 8) * HW_MB_STORE_SZ;
}
/*
 * Scan the start of @data for an Annex B start code.  Returns the
 * prefix length (3 or 4) when one is present, -1 otherwise.
 */
int mtk_vdec_h264_find_start_code(unsigned char *data, unsigned int data_sz)
{
	if (data_sz > 3) {
		bool two_zeros = data[0] == 0 && data[1] == 0;

		/* 3-byte prefix: 00 00 01 */
		if (two_zeros && data[2] == 1)
			return 3;

		/* 4-byte prefix: 00 00 00 01 */
		if (data_sz > 4 && two_zeros && data[2] == 0 && data[3] == 1)
			return 4;
	}

	return -1;
}

View File

@ -0,0 +1,277 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2022 MediaTek Inc.
* Author: Yunfei Dong <yunfei.dong@mediatek.com>
*/
#ifndef _VDEC_H264_REQ_COMMON_H_
#define _VDEC_H264_REQ_COMMON_H_
#include <linux/module.h>
#include <linux/slab.h>
#include <media/v4l2-h264.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "../mtk_vcodec_drv.h"
#define NAL_NON_IDR_SLICE 0x01
#define NAL_IDR_SLICE 0x05
#define NAL_TYPE(value) ((value) & 0x1F)
#define BUF_PREDICTION_SZ (64 * 4096)
#define MB_UNIT_LEN 16
/* motion vector size (bytes) for every macro block */
#define HW_MB_STORE_SZ 64
#define H264_MAX_MV_NUM 32
/**
* struct mtk_h264_dpb_info - h264 dpb information
*
* @y_dma_addr: Y bitstream physical address
* @c_dma_addr: CbCr bitstream physical address
* @reference_flag: reference picture flag (short/long term reference picture)
* @field: field picture flag
*/
struct mtk_h264_dpb_info {
dma_addr_t y_dma_addr;
dma_addr_t c_dma_addr;
int reference_flag;
int field;
};
/*
* struct mtk_h264_sps_param - parameters for sps
*/
struct mtk_h264_sps_param {
unsigned char chroma_format_idc;
unsigned char bit_depth_luma_minus8;
unsigned char bit_depth_chroma_minus8;
unsigned char log2_max_frame_num_minus4;
unsigned char pic_order_cnt_type;
unsigned char log2_max_pic_order_cnt_lsb_minus4;
unsigned char max_num_ref_frames;
unsigned char separate_colour_plane_flag;
unsigned short pic_width_in_mbs_minus1;
unsigned short pic_height_in_map_units_minus1;
unsigned int max_frame_nums;
unsigned char qpprime_y_zero_transform_bypass_flag;
unsigned char delta_pic_order_always_zero_flag;
unsigned char frame_mbs_only_flag;
unsigned char mb_adaptive_frame_field_flag;
unsigned char direct_8x8_inference_flag;
unsigned char reserved[3];
};
/*
* struct mtk_h264_pps_param - parameters for pps
*/
struct mtk_h264_pps_param {
unsigned char num_ref_idx_l0_default_active_minus1;
unsigned char num_ref_idx_l1_default_active_minus1;
unsigned char weighted_bipred_idc;
char pic_init_qp_minus26;
char chroma_qp_index_offset;
char second_chroma_qp_index_offset;
unsigned char entropy_coding_mode_flag;
unsigned char pic_order_present_flag;
unsigned char deblocking_filter_control_present_flag;
unsigned char constrained_intra_pred_flag;
unsigned char weighted_pred_flag;
unsigned char redundant_pic_cnt_present_flag;
unsigned char transform_8x8_mode_flag;
unsigned char scaling_matrix_present_flag;
unsigned char reserved[2];
};
/*
* struct mtk_h264_slice_hd_param - parameters for slice header
*/
struct mtk_h264_slice_hd_param {
unsigned int first_mb_in_slice;
unsigned int field_pic_flag;
unsigned int slice_type;
unsigned int frame_num;
int pic_order_cnt_lsb;
int delta_pic_order_cnt_bottom;
unsigned int bottom_field_flag;
unsigned int direct_spatial_mv_pred_flag;
int delta_pic_order_cnt0;
int delta_pic_order_cnt1;
unsigned int cabac_init_idc;
int slice_qp_delta;
unsigned int disable_deblocking_filter_idc;
int slice_alpha_c0_offset_div2;
int slice_beta_offset_div2;
unsigned int num_ref_idx_l0_active_minus1;
unsigned int num_ref_idx_l1_active_minus1;
unsigned int reserved;
};
/*
* struct slice_api_h264_scaling_matrix - parameters for scaling list
*/
struct slice_api_h264_scaling_matrix {
unsigned char scaling_list_4x4[6][16];
unsigned char scaling_list_8x8[6][64];
};
/*
* struct slice_h264_dpb_entry - each dpb information
*/
struct slice_h264_dpb_entry {
unsigned long long reference_ts;
unsigned short frame_num;
unsigned short pic_num;
/* Note that field is indicated by v4l2_buffer.field */
int top_field_order_cnt;
int bottom_field_order_cnt;
unsigned int flags;
};
/*
* struct slice_api_h264_decode_param - parameters for decode.
*/
struct slice_api_h264_decode_param {
struct slice_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES];
unsigned short num_slices;
unsigned short nal_ref_idc;
unsigned char ref_pic_list_p0[32];
unsigned char ref_pic_list_b0[32];
unsigned char ref_pic_list_b1[32];
int top_field_order_cnt;
int bottom_field_order_cnt;
unsigned int flags;
};
/**
* struct h264_fb - h264 decode frame buffer information
*
* @vdec_fb_va: virtual address of struct vdec_fb
* @y_fb_dma: dma address of Y frame buffer (luma)
* @c_fb_dma: dma address of C frame buffer (chroma)
* @poc: picture order count of frame buffer
* @reserved: for 8 bytes alignment
*/
struct h264_fb {
u64 vdec_fb_va;
u64 y_fb_dma;
u64 c_fb_dma;
s32 poc;
u32 reserved;
};
/**
* mtk_vdec_h264_get_ref_list - translate V4L2 reference list
*
* @ref_list: Mediatek reference picture list
* @v4l2_ref_list: V4L2 reference picture list
* @num_valid: used reference number
*/
void mtk_vdec_h264_get_ref_list(u8 *ref_list,
const struct v4l2_h264_reference *v4l2_ref_list,
int num_valid);
/**
* mtk_vdec_h264_get_ctrl_ptr - get each CID contrl address.
*
* @ctx: v4l2 ctx
* @id: CID control ID
*
* Return: returns CID ctrl address.
*/
void *mtk_vdec_h264_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id);
/**
* mtk_vdec_h264_fill_dpb_info - get each CID contrl address.
*
* @ctx: v4l2 ctx
* @decode_params: slice decode params
* @h264_dpb_info: dpb buffer information
*/
void mtk_vdec_h264_fill_dpb_info(struct mtk_vcodec_ctx *ctx,
struct slice_api_h264_decode_param *decode_params,
struct mtk_h264_dpb_info *h264_dpb_info);
/**
* mtk_vdec_h264_copy_sps_params - get sps params.
*
* @dst_param: sps params for hw decoder
* @src_param: sps params from user driver
*/
void mtk_vdec_h264_copy_sps_params(struct mtk_h264_sps_param *dst_param,
const struct v4l2_ctrl_h264_sps *src_param);
/**
* mtk_vdec_h264_copy_pps_params - get pps params.
*
* @dst_param: pps params for hw decoder
* @src_param: pps params from user driver
*/
void mtk_vdec_h264_copy_pps_params(struct mtk_h264_pps_param *dst_param,
const struct v4l2_ctrl_h264_pps *src_param);
/**
* mtk_vdec_h264_copy_slice_hd_params - get slice header params.
*
* @dst_param: slice params for hw decoder
* @src_param: slice params from user driver
* @dec_param: decode params from user driver
*/
void mtk_vdec_h264_copy_slice_hd_params(struct mtk_h264_slice_hd_param *dst_param,
const struct v4l2_ctrl_h264_slice_params *src_param,
const struct v4l2_ctrl_h264_decode_params *dec_param);
/**
* mtk_vdec_h264_copy_scaling_matrix - get each CID contrl address.
*
* @dst_matrix: scaling list params for hw decoder
* @src_matrix: scaling list params from user driver
*/
void mtk_vdec_h264_copy_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
const struct v4l2_ctrl_h264_scaling_matrix *src_matrix);
/**
* mtk_vdec_h264_copy_decode_params - get decode params.
*
* @dst_params: dst params for hw decoder
* @src_params: decode params from user driver
* @dpb: dpb information
*/
void
mtk_vdec_h264_copy_decode_params(struct slice_api_h264_decode_param *dst_params,
const struct v4l2_ctrl_h264_decode_params *src_params,
const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]);
/**
* mtk_vdec_h264_update_dpb - updata dpb list.
*
* @dec_param: v4l2 control decode params
* @dpb: dpb entry informaton
*/
void mtk_vdec_h264_update_dpb(const struct v4l2_ctrl_h264_decode_params *dec_param,
struct v4l2_h264_dpb_entry *dpb);
/**
* mtk_vdec_h264_find_start_code - find h264 start code using sofeware.
*
* @data: input buffer address
* @data_sz: input buffer size
*
* Return: returns start code position.
*/
int mtk_vdec_h264_find_start_code(unsigned char *data, unsigned int data_sz);
/**
* mtk_vdec_h264_get_mv_buf_size - get mv buffer size.
*
* @width: picture width
* @height: picture height
*
* Return: returns mv buffer size.
*/
unsigned int mtk_vdec_h264_get_mv_buf_size(unsigned int width, unsigned int height);
#endif

View File

@ -0,0 +1,823 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2022 MediaTek Inc.
* Author: Yunfei Dong <yunfei.dong@mediatek.com>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <media/v4l2-h264.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
#include "../mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
#include "vdec_h264_req_common.h"
/**
* enum vdec_h264_core_dec_err_type - core decode error type
*
* @TRANS_BUFFER_FULL: trans buffer is full
* @SLICE_HEADER_FULL: slice header buffer is full
*/
enum vdec_h264_core_dec_err_type {
TRANS_BUFFER_FULL = 1,
SLICE_HEADER_FULL,
};
/**
* struct vdec_h264_slice_lat_dec_param - parameters for decode current frame
*
* @sps: h264 sps syntax parameters
* @pps: h264 pps syntax parameters
* @slice_header: h264 slice header syntax parameters
* @scaling_matrix: h264 scaling list parameters
* @decode_params: decoder parameters of each frame used for hardware decode
* @h264_dpb_info: dpb reference list
*/
struct vdec_h264_slice_lat_dec_param {
struct mtk_h264_sps_param sps;
struct mtk_h264_pps_param pps;
struct mtk_h264_slice_hd_param slice_header;
struct slice_api_h264_scaling_matrix scaling_matrix;
struct slice_api_h264_decode_param decode_params;
struct mtk_h264_dpb_info h264_dpb_info[V4L2_H264_NUM_DPB_ENTRIES];
};
/**
* struct vdec_h264_slice_info - decode information
*
* @nal_info: nal info of current picture
* @timeout: Decode timeout: 1 timeout, 0 no timeount
* @bs_buf_size: bitstream size
* @bs_buf_addr: bitstream buffer dma address
* @y_fb_dma: Y frame buffer dma address
* @c_fb_dma: C frame buffer dma address
* @vdec_fb_va: VDEC frame buffer struct virtual address
* @crc: Used to check whether hardware's status is right
*/
struct vdec_h264_slice_info {
u16 nal_info;
u16 timeout;
u32 bs_buf_size;
u64 bs_buf_addr;
u64 y_fb_dma;
u64 c_fb_dma;
u64 vdec_fb_va;
u32 crc[8];
};
/**
* struct vdec_h264_slice_vsi - shared memory for decode information exchange
* between SCP and Host.
*
* @wdma_err_addr: wdma error dma address
* @wdma_start_addr: wdma start dma address
* @wdma_end_addr: wdma end dma address
* @slice_bc_start_addr: slice bc start dma address
* @slice_bc_end_addr: slice bc end dma address
* @row_info_start_addr: row info start dma address
* @row_info_end_addr: row info end dma address
* @trans_start: trans start dma address
* @trans_end: trans end dma address
* @wdma_end_addr_offset: wdma end address offset
*
* @mv_buf_dma: HW working motion vector buffer
* dma address (AP-W, VPU-R)
* @dec: decode information (AP-R, VPU-W)
* @h264_slice_params: decode parameters for hw used
*/
struct vdec_h264_slice_vsi {
/* LAT dec addr */
u64 wdma_err_addr;
u64 wdma_start_addr;
u64 wdma_end_addr;
u64 slice_bc_start_addr;
u64 slice_bc_end_addr;
u64 row_info_start_addr;
u64 row_info_end_addr;
u64 trans_start;
u64 trans_end;
u64 wdma_end_addr_offset;
u64 mv_buf_dma[H264_MAX_MV_NUM];
struct vdec_h264_slice_info dec;
struct vdec_h264_slice_lat_dec_param h264_slice_params;
};
/**
* struct vdec_h264_slice_share_info - shared information used to exchange
* message between lat and core
*
* @sps: sequence header information from user space
* @dec_params: decoder params from user space
* @h264_slice_params: decoder params used for hardware
* @trans_start: trans start dma address
* @trans_end: trans end dma address
* @nal_info: nal info of current picture
*/
struct vdec_h264_slice_share_info {
struct v4l2_ctrl_h264_sps sps;
struct v4l2_ctrl_h264_decode_params dec_params;
struct vdec_h264_slice_lat_dec_param h264_slice_params;
u64 trans_start;
u64 trans_end;
u16 nal_info;
};
/**
* struct vdec_h264_slice_inst - h264 decoder instance
*
* @slice_dec_num: how many picture be decoded
* @ctx: point to mtk_vcodec_ctx
* @pred_buf: HW working predication buffer
* @mv_buf: HW working motion vector buffer
* @vpu: VPU instance
* @vsi: vsi used for lat
* @vsi_core: vsi used for core
*
* @vsi_ctx: Local VSI data for this decoding context
* @h264_slice_param: the parameters that hardware use to decode
*
* @resolution_changed:resolution changed
* @realloc_mv_buf: reallocate mv buffer
* @cap_num_planes: number of capture queue plane
*
* @dpb: decoded picture buffer used to store reference
* buffer information
*@is_field_bitstream: is field bitstream
*/
struct vdec_h264_slice_inst {
unsigned int slice_dec_num;
struct mtk_vcodec_ctx *ctx;
struct mtk_vcodec_mem pred_buf;
struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
struct vdec_vpu_inst vpu;
struct vdec_h264_slice_vsi *vsi;
struct vdec_h264_slice_vsi *vsi_core;
struct vdec_h264_slice_vsi vsi_ctx;
struct vdec_h264_slice_lat_dec_param h264_slice_param;
unsigned int resolution_changed;
unsigned int realloc_mv_buf;
unsigned int cap_num_planes;
struct v4l2_h264_dpb_entry dpb[16];
bool is_field_bitstream;
};
/*
 * Fetch the stateless H.264 controls for the current request, reject
 * field bitstreams (unsupported by the firmware), and populate the
 * LAT VSI slice parameters plus the lat/core share_info copies.
 *
 * Returns 0 on success or a negative errno (control missing, or
 * -EINVAL for field content).
 */
static int vdec_h264_slice_fill_decode_parameters(struct vdec_h264_slice_inst *inst,
						  struct vdec_h264_slice_share_info *share_info)
{
	struct vdec_h264_slice_lat_dec_param *slice_param = &inst->vsi->h264_slice_params;
	const struct v4l2_ctrl_h264_decode_params *dec_params;
	const struct v4l2_ctrl_h264_scaling_matrix *src_matrix;
	const struct v4l2_ctrl_h264_sps *sps;
	const struct v4l2_ctrl_h264_pps *pps;

	dec_params =
		mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
	if (IS_ERR(dec_params))
		return PTR_ERR(dec_params);

	src_matrix =
		mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
	if (IS_ERR(src_matrix))
		return PTR_ERR(src_matrix);

	sps = mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SPS);
	if (IS_ERR(sps))
		return PTR_ERR(sps);

	pps = mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_PPS);
	if (IS_ERR(pps))
		return PTR_ERR(pps);

	/* Field decoding is not supported; remember that for later calls. */
	if (dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) {
		mtk_vcodec_err(inst, "No support for H.264 field decoding.");
		inst->is_field_bitstream = true;
		return -EINVAL;
	}

	mtk_vdec_h264_copy_sps_params(&slice_param->sps, sps);
	mtk_vdec_h264_copy_pps_params(&slice_param->pps, pps);
	mtk_vdec_h264_copy_scaling_matrix(&slice_param->scaling_matrix, src_matrix);

	/* Keep copies for the core stage, which runs after this request. */
	memcpy(&share_info->sps, sps, sizeof(*sps));
	memcpy(&share_info->dec_params, dec_params, sizeof(*dec_params));

	return 0;
}
/*
 * Single-core decode path: fetch all stateless H.264 controls, refresh
 * the driver DPB, fill the firmware slice parameters and reference
 * lists, and mirror the result into inst->vsi_ctx.
 *
 * Returns 0 on success or a negative errno if a control is missing.
 */
static int get_vdec_sig_decode_parameters(struct vdec_h264_slice_inst *inst)
{
	const struct v4l2_ctrl_h264_decode_params *dec_params;
	const struct v4l2_ctrl_h264_sps *sps;
	const struct v4l2_ctrl_h264_pps *pps;
	const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix;
	struct vdec_h264_slice_lat_dec_param *slice_param = &inst->h264_slice_param;
	struct v4l2_h264_reflist_builder reflist_builder;
	struct v4l2_h264_reference v4l2_p0_reflist[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference v4l2_b0_reflist[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference v4l2_b1_reflist[V4L2_H264_REF_LIST_LEN];
	u8 *p0_reflist = slice_param->decode_params.ref_pic_list_p0;
	u8 *b0_reflist = slice_param->decode_params.ref_pic_list_b0;
	u8 *b1_reflist = slice_param->decode_params.ref_pic_list_b1;

	dec_params =
		mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
	if (IS_ERR(dec_params))
		return PTR_ERR(dec_params);

	sps = mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SPS);
	if (IS_ERR(sps))
		return PTR_ERR(sps);

	pps = mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_PPS);
	if (IS_ERR(pps))
		return PTR_ERR(pps);

	scaling_matrix =
		mtk_vdec_h264_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
	if (IS_ERR(scaling_matrix))
		return PTR_ERR(scaling_matrix);

	/* Merge the new frame's DPB entries into the driver-held DPB. */
	mtk_vdec_h264_update_dpb(dec_params, inst->dpb);

	mtk_vdec_h264_copy_sps_params(&slice_param->sps, sps);
	mtk_vdec_h264_copy_pps_params(&slice_param->pps, pps);
	mtk_vdec_h264_copy_scaling_matrix(&slice_param->scaling_matrix, scaling_matrix);

	mtk_vdec_h264_copy_decode_params(&slice_param->decode_params, dec_params, inst->dpb);
	mtk_vdec_h264_fill_dpb_info(inst->ctx, &slice_param->decode_params,
				    slice_param->h264_dpb_info);

	/* Build the reference lists */
	v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps, inst->dpb);
	v4l2_h264_build_p_ref_list(&reflist_builder, v4l2_p0_reflist);
	v4l2_h264_build_b_ref_lists(&reflist_builder, v4l2_b0_reflist, v4l2_b1_reflist);

	/* Adapt the built lists to the firmware's expectations */
	mtk_vdec_h264_get_ref_list(p0_reflist, v4l2_p0_reflist, reflist_builder.num_valid);
	mtk_vdec_h264_get_ref_list(b0_reflist, v4l2_b0_reflist, reflist_builder.num_valid);
	mtk_vdec_h264_get_ref_list(b1_reflist, v4l2_b1_reflist, reflist_builder.num_valid);

	/* Mirror into the local VSI context handed to the firmware. */
	memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
	       sizeof(inst->vsi_ctx.h264_slice_params));

	return 0;
}
/*
 * vdec_h264_slice_fill_decode_reflist - build CORE-stage reference lists
 *
 * Rebuilds the DPB and the P/B0/B1 reference picture lists from the
 * parameters the LAT stage stashed in @share_info, then converts them into
 * the firmware-facing layout inside @slice_param.  Unlike the LAT-stage
 * variant, nothing is copied back to a per-instance cache here.
 */
static void vdec_h264_slice_fill_decode_reflist(struct vdec_h264_slice_inst *inst,
						struct vdec_h264_slice_lat_dec_param *slice_param,
						struct vdec_h264_slice_share_info *share_info)
{
	struct v4l2_ctrl_h264_decode_params *dec_params = &share_info->dec_params;
	struct v4l2_ctrl_h264_sps *sps = &share_info->sps;
	struct v4l2_h264_reflist_builder reflist_builder;
	struct v4l2_h264_reference v4l2_p0_reflist[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference v4l2_b0_reflist[V4L2_H264_REF_LIST_LEN];
	struct v4l2_h264_reference v4l2_b1_reflist[V4L2_H264_REF_LIST_LEN];
	u8 *p0_reflist = slice_param->decode_params.ref_pic_list_p0;
	u8 *b0_reflist = slice_param->decode_params.ref_pic_list_b0;
	u8 *b1_reflist = slice_param->decode_params.ref_pic_list_b1;

	mtk_vdec_h264_update_dpb(dec_params, inst->dpb);
	mtk_vdec_h264_copy_decode_params(&slice_param->decode_params, dec_params,
					 inst->dpb);
	mtk_vdec_h264_fill_dpb_info(inst->ctx, &slice_param->decode_params,
				    slice_param->h264_dpb_info);

	mtk_v4l2_debug(3, "cur poc = %d\n", dec_params->bottom_field_order_cnt);

	/* Build the reference lists */
	v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps,
				       inst->dpb);
	v4l2_h264_build_p_ref_list(&reflist_builder, v4l2_p0_reflist);
	v4l2_h264_build_b_ref_lists(&reflist_builder, v4l2_b0_reflist, v4l2_b1_reflist);

	/* Adapt the built lists to the firmware's expectations */
	mtk_vdec_h264_get_ref_list(p0_reflist, v4l2_p0_reflist, reflist_builder.num_valid);
	mtk_vdec_h264_get_ref_list(b0_reflist, v4l2_b0_reflist, reflist_builder.num_valid);
	mtk_vdec_h264_get_ref_list(b1_reflist, v4l2_b1_reflist, reflist_builder.num_valid);
}
/*
 * vdec_h264_slice_alloc_mv_buf - (re)allocate the motion-vector work buffers
 *
 * Sizes every entry of inst->mv_buf[] for the picture dimensions in @pic,
 * releasing any previous allocation first.
 *
 * Return: 0 on success, negative errno from mtk_vcodec_mem_alloc() otherwise.
 */
static int vdec_h264_slice_alloc_mv_buf(struct vdec_h264_slice_inst *inst,
					struct vdec_pic_info *pic)
{
	unsigned int buf_sz = mtk_vdec_h264_get_mv_buf_size(pic->buf_w, pic->buf_h);
	int idx, ret;

	mtk_v4l2_debug(3, "size = 0x%x", buf_sz);

	for (idx = 0; idx < H264_MAX_MV_NUM; idx++) {
		struct mtk_vcodec_mem *mem = &inst->mv_buf[idx];

		/* Drop any stale allocation before resizing. */
		if (mem->va)
			mtk_vcodec_mem_free(inst->ctx, mem);

		mem->size = buf_sz;
		ret = mtk_vcodec_mem_alloc(inst->ctx, mem);
		if (ret) {
			mtk_vcodec_err(inst, "failed to allocate mv buf");
			return ret;
		}
	}

	return 0;
}
/* Release every motion-vector work buffer that is currently mapped. */
static void vdec_h264_slice_free_mv_buf(struct vdec_h264_slice_inst *inst)
{
	int idx;

	for (idx = 0; idx < H264_MAX_MV_NUM; idx++) {
		struct mtk_vcodec_mem *mem = &inst->mv_buf[idx];

		if (mem->va)
			mtk_vcodec_mem_free(inst->ctx, mem);
	}
}
/*
 * vdec_h264_slice_get_pic_info - refresh picture geometry from the firmware
 *
 * Queries the VPU for the current picture info, aligns the buffer dimensions
 * to the hardware requirement and flags a resolution change (and, when the
 * aligned buffer size also changed, an MV-buffer reallocation) for the next
 * decode call.
 */
static void vdec_h264_slice_get_pic_info(struct vdec_h264_slice_inst *inst)
{
	struct mtk_vcodec_ctx *ctx = inst->ctx;
	u32 data[3];

	/* Parameters sent to (and answered by) the firmware. */
	data[0] = ctx->picinfo.pic_w;
	data[1] = ctx->picinfo.pic_h;
	data[2] = ctx->capture_fourcc;
	vpu_dec_get_param(&inst->vpu, data, 3, GET_PARAM_PIC_INFO);

	/* HW requires 64-aligned luma/chroma buffer dimensions. */
	ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, VCODEC_DEC_ALIGNED_64);
	ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, VCODEC_DEC_ALIGNED_64);
	ctx->picinfo.fb_sz[0] = inst->vpu.fb_sz[0];
	ctx->picinfo.fb_sz[1] = inst->vpu.fb_sz[1];
	inst->cap_num_planes =
		ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;

	mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
			 ctx->picinfo.pic_w, ctx->picinfo.pic_h,
			 ctx->picinfo.buf_w, ctx->picinfo.buf_h);
	mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
			 ctx->picinfo.fb_sz[1]);

	if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
	    ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
		inst->resolution_changed = true;
		/* MV buffers only depend on the aligned buffer size. */
		if (ctx->last_decoded_picinfo.buf_w != ctx->picinfo.buf_w ||
		    ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
			inst->realloc_mv_buf = true;

		mtk_v4l2_debug(1, "resChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
			       inst->resolution_changed,
			       inst->realloc_mv_buf,
			       ctx->last_decoded_picinfo.pic_w,
			       ctx->last_decoded_picinfo.pic_h,
			       ctx->picinfo.pic_w, ctx->picinfo.pic_h);
	}
}
/* Report the visible (crop) rectangle: always the full coded picture. */
static void vdec_h264_slice_get_crop_info(struct vdec_h264_slice_inst *inst,
					  struct v4l2_rect *cr)
{
	cr->left = 0;
	cr->top = 0;
	cr->width = inst->ctx->picinfo.pic_w;
	cr->height = inst->ctx->picinfo.pic_h;

	mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
			 cr->left, cr->top, cr->width, cr->height);
}
/*
 * vdec_h264_slice_init - create a LAT+CORE H.264 decoder instance
 *
 * Allocates the instance, initializes the VPU/SCP side and splits the
 * firmware-shared memory into a LAT vsi and a CORE vsi (laid out
 * back-to-back, 64-byte aligned).  On success the instance is stored in
 * ctx->drv_handle.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
{
	struct vdec_h264_slice_inst *inst;
	int err, vsi_size;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->ctx = ctx;

	inst->vpu.id = SCP_IPI_VDEC_LAT;
	inst->vpu.core_id = SCP_IPI_VDEC_CORE;
	inst->vpu.ctx = ctx;
	inst->vpu.codec_type = ctx->current_codec;
	inst->vpu.capture_type = ctx->capture_fourcc;

	err = vpu_dec_init(&inst->vpu);
	if (err) {
		mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
		goto error_free_inst;
	}

	/* The CORE vsi immediately follows the (aligned) LAT vsi. */
	vsi_size = round_up(sizeof(struct vdec_h264_slice_vsi), VCODEC_DEC_ALIGNED_64);
	inst->vsi = inst->vpu.vsi;
	inst->vsi_core =
		(struct vdec_h264_slice_vsi *)(((char *)inst->vpu.vsi) + vsi_size);

	/* Force geometry/MV-buffer setup on the first decode call. */
	inst->resolution_changed = true;
	inst->realloc_mv_buf = true;

	mtk_vcodec_debug(inst, "lat struct size = %d,%d,%d,%d vsi: %d\n",
			 (int)sizeof(struct mtk_h264_sps_param),
			 (int)sizeof(struct mtk_h264_pps_param),
			 (int)sizeof(struct vdec_h264_slice_lat_dec_param),
			 (int)sizeof(struct mtk_h264_dpb_info),
			 vsi_size);
	mtk_vcodec_debug(inst, "lat H264 instance >> %p, codec_type = 0x%x",
			 inst, inst->vpu.codec_type);

	ctx->drv_handle = inst;
	return 0;

error_free_inst:
	kfree(inst);
	return err;
}
/*
 * vdec_h264_slice_deinit - tear down a decoder instance
 *
 * Order matters: stop the firmware side first, then free the HW work
 * buffers and the message queue, and only then the instance itself.
 */
static void vdec_h264_slice_deinit(void *h_vdec)
{
	struct vdec_h264_slice_inst *inst = h_vdec;

	mtk_vcodec_debug_enter(inst);

	vpu_dec_deinit(&inst->vpu);
	vdec_h264_slice_free_mv_buf(inst);
	vdec_msg_queue_deinit(&inst->ctx->msg_queue, inst->ctx);

	kfree(inst);
}
/*
 * vdec_h264_slice_core_decode - CORE-stage decode of one LAT-processed frame
 *
 * Runs as the message-queue core handler: takes the slice parameters and
 * transform-buffer window the LAT stage stored in @lat_buf's private data,
 * programs the CORE vsi, kicks the firmware and waits for the decode-done
 * interrupt.  The UBE read pointer is advanced and the capture buffer is
 * handed to the display path even on error.
 *
 * Return: always 0 (errors are reported through cap_to_disp()).
 */
static int vdec_h264_slice_core_decode(struct vdec_lat_buf *lat_buf)
{
	struct vdec_fb *fb;
	u64 vdec_fb_va;
	u64 y_fb_dma, c_fb_dma;
	int err, timeout, i;
	struct mtk_vcodec_ctx *ctx = lat_buf->ctx;
	struct vdec_h264_slice_inst *inst = ctx->drv_handle;
	struct vb2_v4l2_buffer *vb2_v4l2;
	struct vdec_h264_slice_share_info *share_info = lat_buf->private_data;
	struct mtk_vcodec_mem *mem;
	struct vdec_vpu_inst *vpu = &inst->vpu;

	mtk_vcodec_debug(inst, "[h264-core] vdec_h264 core decode");

	/* Slice parameters were prepared by the LAT stage. */
	memcpy(&inst->vsi_core->h264_slice_params, &share_info->h264_slice_params,
	       sizeof(share_info->h264_slice_params));

	fb = ctx->dev->vdec_pdata->get_cap_buffer(ctx);
	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
	vdec_fb_va = (unsigned long)fb;

	/* Single-plane capture: chroma follows luma in the same buffer. */
	if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
		c_fb_dma =
			y_fb_dma + inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
	else
		c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;

	mtk_vcodec_debug(inst, "[h264-core] y/c addr = 0x%llx 0x%llx", y_fb_dma,
			 c_fb_dma);

	inst->vsi_core->dec.y_fb_dma = y_fb_dma;
	inst->vsi_core->dec.c_fb_dma = c_fb_dma;
	inst->vsi_core->dec.vdec_fb_va = vdec_fb_va;
	inst->vsi_core->dec.nal_info = share_info->nal_info;

	/* Transform-data window produced by the LAT stage. */
	inst->vsi_core->wdma_start_addr =
		lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
	inst->vsi_core->wdma_end_addr =
		lat_buf->ctx->msg_queue.wdma_addr.dma_addr +
		lat_buf->ctx->msg_queue.wdma_addr.size;
	inst->vsi_core->wdma_err_addr = lat_buf->wdma_err_addr.dma_addr;
	inst->vsi_core->slice_bc_start_addr = lat_buf->slice_bc_addr.dma_addr;
	inst->vsi_core->slice_bc_end_addr = lat_buf->slice_bc_addr.dma_addr +
		lat_buf->slice_bc_addr.size;
	inst->vsi_core->trans_start = share_info->trans_start;
	inst->vsi_core->trans_end = share_info->trans_end;

	for (i = 0; i < H264_MAX_MV_NUM; i++) {
		mem = &inst->mv_buf[i];
		inst->vsi_core->mv_buf_dma[i] = mem->dma_addr;
	}

	/* Propagate the source timestamp to the capture buffer. */
	vb2_v4l2 = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	v4l2_m2m_buf_copy_metadata(&lat_buf->ts_info, vb2_v4l2, true);

	vdec_h264_slice_fill_decode_reflist(inst, &inst->vsi_core->h264_slice_params,
					    share_info);

	err = vpu_dec_core(vpu);
	if (err) {
		mtk_vcodec_err(inst, "core decode err=%d", err);
		goto vdec_dec_end;
	}

	/* wait decoder done interrupt */
	timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
					       WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
	if (timeout)
		mtk_vcodec_err(inst, "core decode timeout: pic_%d",
			       ctx->decoded_frame_cnt);
	inst->vsi_core->dec.timeout = !!timeout;

	vpu_dec_core_end(vpu);
	mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
			 ctx->decoded_frame_cnt,
			 inst->vsi_core->dec.crc[0], inst->vsi_core->dec.crc[1],
			 inst->vsi_core->dec.crc[2], inst->vsi_core->dec.crc[3],
			 inst->vsi_core->dec.crc[4], inst->vsi_core->dec.crc[5],
			 inst->vsi_core->dec.crc[6], inst->vsi_core->dec.crc[7]);

vdec_dec_end:
	/* Release the consumed transform-buffer window in all cases. */
	vdec_msg_queue_update_ube_rptr(&lat_buf->ctx->msg_queue, share_info->trans_end);
	ctx->dev->vdec_pdata->cap_to_disp(ctx, !!err, lat_buf->src_buf_req);
	mtk_vcodec_debug(inst, "core decode done err=%d", err);
	ctx->decoded_frame_cnt++;
	return 0;
}
/*
 * vdec_h264_slice_lat_decode - LAT (first) stage decode of one access unit
 *
 * Parses the bitstream on the LAT hardware, fills the shared transform
 * buffer and queues the lat buffer for the CORE stage
 * (vdec_h264_slice_core_decode).  A NULL @bs flushes the decoder.
 *
 * Error-path buffer ownership (bug fix): the original code fell through
 * from err_scp_decode into err_free_fb_out, queueing @lat_buf to the lat
 * context twice in the non-inner-racing case, while a vpu_dec_start()
 * failure in inner-racing mode requeued it nowhere (leak).  The paths below
 * guarantee the dequeued buffer is returned to exactly one queue:
 *  - errors before anything was queued  -> requeue to lat_ctx;
 *  - errors after the inner-racing qbuf -> the buffer already belongs to
 *    the core context, so only requeue when racing is off.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_h264_slice_lat_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
				      struct vdec_fb *fb, bool *res_chg)
{
	struct vdec_h264_slice_inst *inst = h_vdec;
	struct vdec_vpu_inst *vpu = &inst->vpu;
	struct mtk_video_dec_buf *src_buf_info;
	int nal_start_idx, err, timeout = 0, i;
	unsigned int data[2];
	struct vdec_lat_buf *lat_buf;
	struct vdec_h264_slice_share_info *share_info;
	unsigned char *buf;
	struct mtk_vcodec_mem *mem;

	if (vdec_msg_queue_init(&inst->ctx->msg_queue, inst->ctx,
				vdec_h264_slice_core_decode,
				sizeof(*share_info)))
		return -ENOMEM;

	/* bs NULL means flush decoder */
	if (!bs) {
		vdec_msg_queue_wait_lat_buf_full(&inst->ctx->msg_queue);
		return vpu_dec_reset(vpu);
	}

	/* Interlaced content is not supported by this decoder. */
	if (inst->is_field_bitstream)
		return -EINVAL;

	lat_buf = vdec_msg_queue_dqbuf(&inst->ctx->msg_queue.lat_ctx);
	if (!lat_buf) {
		mtk_vcodec_err(inst, "failed to get lat buffer");
		return -EINVAL;
	}

	share_info = lat_buf->private_data;
	src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);

	buf = (unsigned char *)bs->va;
	nal_start_idx = mtk_vdec_h264_find_start_code(buf, bs->size);
	if (nal_start_idx < 0) {
		err = -EINVAL;
		goto err_free_fb_out;
	}

	inst->vsi->dec.nal_info = buf[nal_start_idx];
	inst->vsi->dec.bs_buf_addr = (u64)bs->dma_addr;
	inst->vsi->dec.bs_buf_size = bs->size;

	lat_buf->src_buf_req = src_buf_info->m2m_buf.vb.vb2_buf.req_obj.req;
	v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb, &lat_buf->ts_info, true);

	err = vdec_h264_slice_fill_decode_parameters(inst, share_info);
	if (err)
		goto err_free_fb_out;

	*res_chg = inst->resolution_changed;
	if (inst->resolution_changed) {
		mtk_vcodec_debug(inst, "- resolution changed -");
		if (inst->realloc_mv_buf) {
			err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
			inst->realloc_mv_buf = false;
			if (err)
				goto err_free_fb_out;
		}
		inst->resolution_changed = false;
	}
	for (i = 0; i < H264_MAX_MV_NUM; i++) {
		mem = &inst->mv_buf[i];
		inst->vsi->mv_buf_dma[i] = mem->dma_addr;
	}

	/* Transform-buffer window the LAT hardware may write into. */
	inst->vsi->wdma_start_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr;
	inst->vsi->wdma_end_addr = lat_buf->ctx->msg_queue.wdma_addr.dma_addr +
		lat_buf->ctx->msg_queue.wdma_addr.size;
	inst->vsi->wdma_err_addr = lat_buf->wdma_err_addr.dma_addr;
	inst->vsi->slice_bc_start_addr = lat_buf->slice_bc_addr.dma_addr;
	inst->vsi->slice_bc_end_addr = lat_buf->slice_bc_addr.dma_addr +
		lat_buf->slice_bc_addr.size;

	inst->vsi->trans_end = inst->ctx->msg_queue.wdma_rptr_addr;
	inst->vsi->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
	mtk_vcodec_debug(inst, "lat:trans(0x%llx 0x%llx) err:0x%llx",
			 inst->vsi->wdma_start_addr,
			 inst->vsi->wdma_end_addr,
			 inst->vsi->wdma_err_addr);

	mtk_vcodec_debug(inst, "slice(0x%llx 0x%llx) rprt((0x%llx 0x%llx))",
			 inst->vsi->slice_bc_start_addr,
			 inst->vsi->slice_bc_end_addr,
			 inst->vsi->trans_start,
			 inst->vsi->trans_end);
	/* NOTE(review): data[] is passed uninitialized, matching the single
	 * core path — presumably unused by the firmware here; confirm.
	 */
	err = vpu_dec_start(vpu, data, 2);
	if (err) {
		mtk_vcodec_debug(inst, "lat decode err: %d", err);
		/* Nothing queued yet, so take the unconditional-requeue path. */
		goto err_free_fb_out;
	}

	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
		inst->vsi->wdma_end_addr_offset;
	share_info->trans_start = inst->ctx->msg_queue.wdma_wptr_addr;
	share_info->nal_info = inst->vsi->dec.nal_info;

	if (IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
		memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
		       sizeof(share_info->h264_slice_params));
		vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
	}

	/* wait decoder done interrupt */
	timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
					       WAIT_INTR_TIMEOUT_MS, MTK_VDEC_LAT0);
	inst->vsi->dec.timeout = !!timeout;

	err = vpu_dec_end(vpu);
	if (err == SLICE_HEADER_FULL || timeout || err == TRANS_BUFFER_FULL) {
		err = -EINVAL;
		goto err_scp_decode;
	}

	share_info->trans_end = inst->ctx->msg_queue.wdma_addr.dma_addr +
		inst->vsi->wdma_end_addr_offset;
	vdec_msg_queue_update_ube_wptr(&lat_buf->ctx->msg_queue, share_info->trans_end);

	if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability)) {
		memcpy(&share_info->h264_slice_params, &inst->vsi->h264_slice_params,
		       sizeof(share_info->h264_slice_params));
		vdec_msg_queue_qbuf(&inst->ctx->dev->msg_queue_core_ctx, lat_buf);
	}
	mtk_vcodec_debug(inst, "dec num: %d lat crc: 0x%x 0x%x 0x%x", inst->slice_dec_num,
			 inst->vsi->dec.crc[0], inst->vsi->dec.crc[1], inst->vsi->dec.crc[2]);

	inst->slice_dec_num++;
	return 0;

err_scp_decode:
	/*
	 * In inner-racing mode the buffer was already queued to the core
	 * context above; requeueing it here as well would double-queue it.
	 */
	if (!IS_VDEC_INNER_RACING(inst->ctx->dev->dec_capability))
		vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);
	goto err_out;

err_free_fb_out:
	/* The buffer was dequeued but never handed off: always return it. */
	vdec_msg_queue_qbuf(&inst->ctx->msg_queue.lat_ctx, lat_buf);

err_out:
	mtk_vcodec_err(inst, "slice dec number: %d err: %d", inst->slice_dec_num, err);
	return err;
}
/*
 * vdec_h264_slice_single_decode - one-shot decode on pure single-core HW
 *
 * Programs the per-instance vsi cache, copies it to the firmware-shared
 * memory, starts the decode and waits for completion, then copies the
 * firmware's results (CRCs, timeout flag) back.  A NULL @bs flushes the
 * decoder.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_h264_slice_single_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
					 struct vdec_fb *unused, bool *res_chg)
{
	struct vdec_h264_slice_inst *inst = h_vdec;
	struct vdec_vpu_inst *vpu = &inst->vpu;
	struct mtk_video_dec_buf *src_buf_info, *dst_buf_info;
	struct vdec_fb *fb;
	unsigned char *buf;
	unsigned int data[2], i;
	u64 y_fb_dma, c_fb_dma;
	struct mtk_vcodec_mem *mem;
	int err, nal_start_idx;

	/* bs NULL means flush decoder */
	if (!bs)
		return vpu_dec_reset(vpu);

	fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
	src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);
	dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);

	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
	c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
	mtk_vcodec_debug(inst, "[h264-dec] [%d] y_dma=%llx c_dma=%llx",
			 inst->ctx->decoded_frame_cnt, y_fb_dma, c_fb_dma);

	inst->vsi_ctx.dec.bs_buf_addr = (u64)bs->dma_addr;
	inst->vsi_ctx.dec.bs_buf_size = bs->size;
	inst->vsi_ctx.dec.y_fb_dma = y_fb_dma;
	inst->vsi_ctx.dec.c_fb_dma = c_fb_dma;
	inst->vsi_ctx.dec.vdec_fb_va = (u64)(uintptr_t)fb;

	v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb,
				   &dst_buf_info->m2m_buf.vb, true);
	err = get_vdec_sig_decode_parameters(inst);
	if (err)
		goto err_free_fb_out;

	buf = (unsigned char *)bs->va;
	nal_start_idx = mtk_vdec_h264_find_start_code(buf, bs->size);
	if (nal_start_idx < 0) {
		err = -EINVAL;
		goto err_free_fb_out;
	}
	/* First byte after the start code carries the NAL unit header. */
	inst->vsi_ctx.dec.nal_info = buf[nal_start_idx];

	*res_chg = inst->resolution_changed;
	if (inst->resolution_changed) {
		mtk_vcodec_debug(inst, "- resolution changed -");
		if (inst->realloc_mv_buf) {
			err = vdec_h264_slice_alloc_mv_buf(inst, &inst->ctx->picinfo);
			inst->realloc_mv_buf = false;
			if (err)
				goto err_free_fb_out;
		}
		inst->resolution_changed = false;

		for (i = 0; i < H264_MAX_MV_NUM; i++) {
			mem = &inst->mv_buf[i];
			inst->vsi_ctx.mv_buf_dma[i] = mem->dma_addr;
		}
	}

	/* Publish the cached vsi to the firmware-shared memory. */
	memcpy(inst->vpu.vsi, &inst->vsi_ctx, sizeof(inst->vsi_ctx));
	/* NOTE(review): data[] is passed uninitialized — presumably the
	 * firmware ignores it for H264 start; confirm against vpu_dec_start.
	 */
	err = vpu_dec_start(vpu, data, 2);
	if (err)
		goto err_free_fb_out;

	/* wait decoder done interrupt */
	err = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
					   WAIT_INTR_TIMEOUT_MS, MTK_VDEC_CORE);
	if (err)
		mtk_vcodec_err(inst, "decode timeout: pic_%d",
			       inst->ctx->decoded_frame_cnt);

	inst->vsi->dec.timeout = !!err;
	err = vpu_dec_end(vpu);
	if (err)
		goto err_free_fb_out;

	/* Read back firmware results (CRCs, status) into the local cache. */
	memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
	mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
			 inst->ctx->decoded_frame_cnt,
			 inst->vsi_ctx.dec.crc[0], inst->vsi_ctx.dec.crc[1],
			 inst->vsi_ctx.dec.crc[2], inst->vsi_ctx.dec.crc[3],
			 inst->vsi_ctx.dec.crc[4], inst->vsi_ctx.dec.crc[5],
			 inst->vsi_ctx.dec.crc[6], inst->vsi_ctx.dec.crc[7]);

	inst->ctx->decoded_frame_cnt++;
	return 0;

err_free_fb_out:
	mtk_vcodec_err(inst, "dec frame number: %d err: %d",
		       inst->ctx->decoded_frame_cnt, err);
	return err;
}
/*
 * vdec_h264_slice_decode - dispatch to the single-core or LAT+CORE path
 * depending on the platform's hardware architecture.
 */
static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
				  struct vdec_fb *unused, bool *res_chg)
{
	struct vdec_h264_slice_inst *inst = h_vdec;

	if (!h_vdec)
		return -EINVAL;

	if (inst->ctx->dev->vdec_pdata->hw_arch == MTK_VDEC_PURE_SINGLE_CORE)
		return vdec_h264_slice_single_decode(h_vdec, bs, unused, res_chg);

	return vdec_h264_slice_lat_decode(h_vdec, bs, unused, res_chg);
}
/*
 * vdec_h264_slice_get_param - answer generic decoder queries
 *
 * Return: 0 on success, -EINVAL for an unknown @type.
 */
static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type,
				     void *out)
{
	struct vdec_h264_slice_inst *inst = h_vdec;

	switch (type) {
	case GET_PARAM_PIC_INFO:
		vdec_h264_slice_get_pic_info(inst);
		return 0;
	case GET_PARAM_DPB_SIZE:
		/* Fixed DPB depth advertised to the V4L2 core. */
		*(unsigned int *)out = 6;
		return 0;
	case GET_PARAM_CROP_INFO:
		vdec_h264_slice_get_crop_info(inst, out);
		return 0;
	default:
		mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
		return -EINVAL;
	}
}
/* Decoder ops for the multi-core (LAT+CORE) H.264 stateless decoder. */
const struct vdec_common_if vdec_h264_slice_multi_if = {
	.init = vdec_h264_slice_init,
	.decode = vdec_h264_slice_decode,
	.get_param = vdec_h264_slice_get_param,
	.deinit = vdec_h264_slice_deinit,
};

View File

@ -0,0 +1,436 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2021 MediaTek Inc.
* Author: Yunfei Dong <yunfei.dong@mediatek.com>
*/
#include <linux/slab.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
#include <uapi/linux/v4l2-controls.h>
#include "../mtk_vcodec_util.h"
#include "../mtk_vcodec_dec.h"
#include "../mtk_vcodec_intr.h"
#include "../vdec_drv_base.h"
#include "../vdec_drv_if.h"
#include "../vdec_vpu_if.h"
/* Decoding picture buffer size (3 reference frames plus current frame) */
#define VP8_DPB_SIZE 4
/* HW working buffer size (bytes) */
#define VP8_SEG_ID_SZ SZ_256K
#define VP8_PP_WRAPY_SZ SZ_64K
#define VP8_PP_WRAPC_SZ SZ_64K
#define VP8_VLD_PRED_SZ SZ_64K
/**
 * struct vdec_vp8_slice_info - decode misc information
 *
 * @vld_wrapper_dma: vld wrapper dma address
 * @seg_id_buf_dma: seg id dma address
 * @wrap_y_dma: wrap y dma address
 * @wrap_c_dma: wrap c dma address
 * @cur_y_fb_dma: current plane Y frame buffer dma address
 * @cur_c_fb_dma: current plane C frame buffer dma address
 * @bs_dma: bitstream dma address
 * @bs_sz: bitstream size
 * @resolution_changed:resolution change flag 1 - changed, 0 - not change
 * @frame_header_type: current frame header type
 * @crc: used to check whether hardware's status is right
 * @reserved: reserved, currently unused
 */
struct vdec_vp8_slice_info {
	u64 vld_wrapper_dma;
	u64 seg_id_buf_dma;
	u64 wrap_y_dma;
	u64 wrap_c_dma;
	u64 cur_y_fb_dma;
	u64 cur_c_fb_dma;
	u64 bs_dma;
	u32 bs_sz;
	u32 resolution_changed;
	u32 frame_header_type;
	u32 crc[8];
	u32 reserved;
};
/**
 * struct vdec_vp8_slice_dpb_info - vp8 reference information
 *
 * @y_dma_addr: Y bitstream physical address
 * @c_dma_addr: CbCr bitstream physical address
 * @reference_flag: reference picture flag (1 - reference valid, 0 - not)
 * @reserved: padding to keep the struct 64-bit aligned
 */
struct vdec_vp8_slice_dpb_info {
	dma_addr_t y_dma_addr;
	dma_addr_t c_dma_addr;
	int reference_flag;
	int reserved;
};
/**
 * struct vdec_vp8_slice_vsi - VPU shared information
 *
 * Layout shared with the decoder firmware; do not reorder fields.
 *
 * @dec: decoding information
 * @pic: picture information
 * @vp8_dpb_info: reference buffer information (last/golden/alt frames)
 */
struct vdec_vp8_slice_vsi {
	struct vdec_vp8_slice_info dec;
	struct vdec_pic_info pic;
	struct vdec_vp8_slice_dpb_info vp8_dpb_info[3];
};
/**
 * struct vdec_vp8_slice_inst - VP8 decoder instance
 *
 * @seg_id_buf: seg buffer
 * @wrap_y_buf: wrapper y buffer
 * @wrap_c_buf: wrapper c buffer
 * @vld_wrapper_buf: vld wrapper buffer
 * @ctx: V4L2 context
 * @vpu: VPU instance for decoder
 * @vsi: VPU share information (points into VPU-shared memory)
 */
struct vdec_vp8_slice_inst {
	struct mtk_vcodec_mem seg_id_buf;
	struct mtk_vcodec_mem wrap_y_buf;
	struct mtk_vcodec_mem wrap_c_buf;
	struct mtk_vcodec_mem vld_wrapper_buf;
	struct mtk_vcodec_ctx *ctx;
	struct vdec_vpu_inst vpu;
	struct vdec_vp8_slice_vsi *vsi;
};
/*
 * Look up a V4L2 control by @id and return its current payload, or
 * ERR_PTR(-EINVAL) when the control is not registered on this context.
 */
static void *vdec_vp8_slice_get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
{
	struct v4l2_ctrl *ctrl;

	ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
	return ctrl ? ctrl->p_cur.p : ERR_PTR(-EINVAL);
}
/*
 * vdec_vp8_slice_get_pic_info - refresh picture geometry from the firmware
 *
 * Queries the VPU for the current picture info, 64-aligns the buffer
 * dimensions for the hardware and mirrors everything into the VPU-shared
 * vsi so the firmware sees the same geometry.
 */
static void vdec_vp8_slice_get_pic_info(struct vdec_vp8_slice_inst *inst)
{
	struct mtk_vcodec_ctx *ctx = inst->ctx;
	unsigned int data[3];

	/* Parameters sent to (and answered by) the firmware. */
	data[0] = ctx->picinfo.pic_w;
	data[1] = ctx->picinfo.pic_h;
	data[2] = ctx->capture_fourcc;
	vpu_dec_get_param(&inst->vpu, data, 3, GET_PARAM_PIC_INFO);

	/* HW requires 64-aligned buffer dimensions. */
	ctx->picinfo.buf_w = ALIGN(ctx->picinfo.pic_w, 64);
	ctx->picinfo.buf_h = ALIGN(ctx->picinfo.pic_h, 64);
	ctx->picinfo.fb_sz[0] = inst->vpu.fb_sz[0];
	ctx->picinfo.fb_sz[1] = inst->vpu.fb_sz[1];

	inst->vsi->pic.pic_w = ctx->picinfo.pic_w;
	inst->vsi->pic.pic_h = ctx->picinfo.pic_h;
	inst->vsi->pic.buf_w = ctx->picinfo.buf_w;
	inst->vsi->pic.buf_h = ctx->picinfo.buf_h;
	inst->vsi->pic.fb_sz[0] = ctx->picinfo.fb_sz[0];
	inst->vsi->pic.fb_sz[1] = ctx->picinfo.fb_sz[1];
	mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
			 ctx->picinfo.pic_w, ctx->picinfo.pic_h,
			 ctx->picinfo.buf_w, ctx->picinfo.buf_h);
	mtk_vcodec_debug(inst, "fb size: Y(%d), C(%d)",
			 ctx->picinfo.fb_sz[0], ctx->picinfo.fb_sz[1]);
}
/*
 * vdec_vp8_slice_alloc_working_buf - allocate the four HW working buffers
 *
 * Allocates the segmentation-ID, wrapper Y/C and VLD prediction buffers and
 * publishes their DMA addresses in the VPU-shared vsi.
 *
 * Bug fix: the original returned on a mid-sequence failure without
 * releasing the buffers already allocated; the init error path only does
 * vpu_dec_deinit() + kfree(), so those buffers leaked.  Unwind every
 * earlier allocation before returning an error.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_vp8_slice_alloc_working_buf(struct vdec_vp8_slice_inst *inst)
{
	int err;
	struct mtk_vcodec_mem *mem;

	mem = &inst->seg_id_buf;
	mem->size = VP8_SEG_ID_SZ;
	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
	if (err) {
		mtk_vcodec_err(inst, "Cannot allocate working buffer");
		return err;
	}
	inst->vsi->dec.seg_id_buf_dma = (u64)mem->dma_addr;

	mem = &inst->wrap_y_buf;
	mem->size = VP8_PP_WRAPY_SZ;
	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
	if (err) {
		mtk_vcodec_err(inst, "cannot allocate WRAP Y buffer");
		goto err_free_seg_id;
	}
	inst->vsi->dec.wrap_y_dma = (u64)mem->dma_addr;

	mem = &inst->wrap_c_buf;
	mem->size = VP8_PP_WRAPC_SZ;
	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
	if (err) {
		mtk_vcodec_err(inst, "cannot allocate WRAP C buffer");
		goto err_free_wrap_y;
	}
	inst->vsi->dec.wrap_c_dma = (u64)mem->dma_addr;

	mem = &inst->vld_wrapper_buf;
	mem->size = VP8_VLD_PRED_SZ;
	err = mtk_vcodec_mem_alloc(inst->ctx, mem);
	if (err) {
		mtk_vcodec_err(inst, "cannot allocate vld wrapper buffer");
		goto err_free_wrap_c;
	}
	inst->vsi->dec.vld_wrapper_dma = (u64)mem->dma_addr;
	return 0;

err_free_wrap_c:
	mtk_vcodec_mem_free(inst->ctx, &inst->wrap_c_buf);
	inst->vsi->dec.wrap_c_dma = 0;
err_free_wrap_y:
	mtk_vcodec_mem_free(inst->ctx, &inst->wrap_y_buf);
	inst->vsi->dec.wrap_y_dma = 0;
err_free_seg_id:
	mtk_vcodec_mem_free(inst->ctx, &inst->seg_id_buf);
	inst->vsi->dec.seg_id_buf_dma = 0;
	return err;
}
static void vdec_vp8_slice_free_working_buf(struct vdec_vp8_slice_inst *inst)
{
struct mtk_vcodec_mem *mem;
mem = &inst->seg_id_buf;
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
inst->vsi->dec.seg_id_buf_dma = 0;
mem = &inst->wrap_y_buf;
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
inst->vsi->dec.wrap_y_dma = 0;
mem = &inst->wrap_c_buf;
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
inst->vsi->dec.wrap_c_dma = 0;
mem = &inst->vld_wrapper_buf;
if (mem->va)
mtk_vcodec_mem_free(inst->ctx, mem);
inst->vsi->dec.vld_wrapper_dma = 0;
}
/*
 * Map a reference slot index (0 = last, 1 = golden, 2 = alt) to the
 * corresponding buffer timestamp from the frame header.  Any other index
 * yields an all-ones sentinel that matches no vb2 buffer.
 */
static u64 vdec_vp8_slice_get_ref_by_ts(const struct v4l2_ctrl_vp8_frame *frame_header,
					int index)
{
	if (index == 0)
		return frame_header->last_frame_ts;
	if (index == 1)
		return frame_header->golden_frame_ts;
	if (index == 2)
		return frame_header->alt_frame_ts;

	return -1;
}
static int vdec_vp8_slice_get_decode_parameters(struct vdec_vp8_slice_inst *inst)
{
const struct v4l2_ctrl_vp8_frame *frame_header;
struct mtk_vcodec_ctx *ctx = inst->ctx;
struct vb2_queue *vq;
struct vb2_buffer *vb;
u64 referenct_ts;
int index;
frame_header = vdec_vp8_slice_get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_VP8_FRAME);
if (IS_ERR(frame_header))
return PTR_ERR(frame_header);
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
for (index = 0; index < 3; index++) {
referenct_ts = vdec_vp8_slice_get_ref_by_ts(frame_header, index);
vb = vb2_find_buffer(vq, referenct_ts);
if (!vb) {
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(frame_header))
mtk_vcodec_err(inst, "reference invalid: index(%d) ts(%lld)",
index, referenct_ts);
inst->vsi->vp8_dpb_info[index].reference_flag = 0;
continue;
}
inst->vsi->vp8_dpb_info[index].reference_flag = 1;
inst->vsi->vp8_dpb_info[index].y_dma_addr =
vb2_dma_contig_plane_dma_addr(vb, 0);
if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
inst->vsi->vp8_dpb_info[index].c_dma_addr =
vb2_dma_contig_plane_dma_addr(vb, 1);
else
inst->vsi->vp8_dpb_info[index].c_dma_addr =
inst->vsi->vp8_dpb_info[index].y_dma_addr +
ctx->picinfo.fb_sz[0];
}
inst->vsi->dec.frame_header_type = frame_header->flags >> 1;
return 0;
}
/*
 * vdec_vp8_slice_init - create a VP8 stateless decoder instance
 *
 * Allocates the instance, brings up the VPU/SCP side, maps the shared vsi
 * and allocates the HW working buffers.  On success the instance is stored
 * in ctx->drv_handle.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_vp8_slice_init(struct mtk_vcodec_ctx *ctx)
{
	struct vdec_vp8_slice_inst *inst;
	int err;

	inst = kzalloc(sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->ctx = ctx;

	inst->vpu.id = SCP_IPI_VDEC_LAT;
	inst->vpu.core_id = SCP_IPI_VDEC_CORE;
	inst->vpu.ctx = ctx;
	inst->vpu.codec_type = ctx->current_codec;
	inst->vpu.capture_type = ctx->capture_fourcc;

	err = vpu_dec_init(&inst->vpu);
	if (err) {
		mtk_vcodec_err(inst, "vdec_vp8 init err=%d", err);
		goto error_free_inst;
	}

	/* vsi must be valid before the working buffers publish DMA addrs. */
	inst->vsi = inst->vpu.vsi;

	err = vdec_vp8_slice_alloc_working_buf(inst);
	if (err)
		goto error_deinit;

	mtk_vcodec_debug(inst, "vp8 struct size = %d vsi: %d\n",
			 (int)sizeof(struct v4l2_ctrl_vp8_frame),
			 (int)sizeof(struct vdec_vp8_slice_vsi));
	mtk_vcodec_debug(inst, "vp8:%p, codec_type = 0x%x vsi: 0x%p",
			 inst, inst->vpu.codec_type, inst->vpu.vsi);

	ctx->drv_handle = inst;
	return 0;

error_deinit:
	vpu_dec_deinit(&inst->vpu);
error_free_inst:
	kfree(inst);
	return err;
}
/*
 * vdec_vp8_slice_decode - decode one VP8 frame
 *
 * Programs the bitstream and frame-buffer DMA addresses into the shared
 * vsi, resolves the reference frames, starts the firmware and waits for the
 * decode-done interrupt.  A NULL @bs flushes the decoder; a firmware-
 * reported resolution change returns early with *@res_chg set.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int vdec_vp8_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
				 struct vdec_fb *fb, bool *res_chg)
{
	struct vdec_vp8_slice_inst *inst = h_vdec;
	struct vdec_vpu_inst *vpu = &inst->vpu;
	struct mtk_video_dec_buf *src_buf_info, *dst_buf_info;
	unsigned int data;
	u64 y_fb_dma, c_fb_dma;
	int err, timeout;

	/* Resolution changes are never initiated by us */
	*res_chg = false;

	/* bs NULL means flush decoder */
	if (!bs)
		return vpu_dec_reset(vpu);

	src_buf_info = container_of(bs, struct mtk_video_dec_buf, bs_buffer);

	fb = inst->ctx->dev->vdec_pdata->get_cap_buffer(inst->ctx);
	dst_buf_info = container_of(fb, struct mtk_video_dec_buf, frame_buffer);

	y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
	/* Single-plane capture: chroma follows luma in the same buffer. */
	if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1)
		c_fb_dma = y_fb_dma +
			inst->ctx->picinfo.buf_w * inst->ctx->picinfo.buf_h;
	else
		c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;

	inst->vsi->dec.bs_dma = (u64)bs->dma_addr;
	inst->vsi->dec.bs_sz = bs->size;
	inst->vsi->dec.cur_y_fb_dma = y_fb_dma;
	inst->vsi->dec.cur_c_fb_dma = c_fb_dma;

	mtk_vcodec_debug(inst, "frame[%d] bs(%zu 0x%llx) y/c(0x%llx 0x%llx)",
			 inst->ctx->decoded_frame_cnt,
			 bs->size, (u64)bs->dma_addr,
			 y_fb_dma, c_fb_dma);

	v4l2_m2m_buf_copy_metadata(&src_buf_info->m2m_buf.vb,
				   &dst_buf_info->m2m_buf.vb, true);

	err = vdec_vp8_slice_get_decode_parameters(inst);
	if (err)
		goto error;

	err = vpu_dec_start(vpu, &data, 1);
	if (err) {
		mtk_vcodec_debug(inst, "vp8 dec start err!");
		goto error;
	}

	/* Firmware detected a new resolution: let the caller reconfigure. */
	if (inst->vsi->dec.resolution_changed) {
		mtk_vcodec_debug(inst, "- resolution_changed -");
		*res_chg = true;
		return 0;
	}

	/* wait decode done interrupt */
	timeout = mtk_vcodec_wait_for_done_ctx(inst->ctx, MTK_INST_IRQ_RECEIVED,
					       50, MTK_VDEC_CORE);

	err = vpu_dec_end(vpu);
	if (err || timeout)
		mtk_vcodec_debug(inst, "vp8 dec error timeout:%d err: %d pic_%d",
				 timeout, err, inst->ctx->decoded_frame_cnt);

	mtk_vcodec_debug(inst, "pic[%d] crc: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
			 inst->ctx->decoded_frame_cnt,
			 inst->vsi->dec.crc[0], inst->vsi->dec.crc[1],
			 inst->vsi->dec.crc[2], inst->vsi->dec.crc[3],
			 inst->vsi->dec.crc[4], inst->vsi->dec.crc[5],
			 inst->vsi->dec.crc[6], inst->vsi->dec.crc[7]);

	inst->ctx->decoded_frame_cnt++;
error:
	return err;
}
/*
 * vdec_vp8_slice_get_param - answer generic decoder queries
 *
 * Return: 0 on success, -EINVAL for an unknown @type.
 */
static int vdec_vp8_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
{
	struct vdec_vp8_slice_inst *inst = h_vdec;

	switch (type) {
	case GET_PARAM_PIC_INFO:
		vdec_vp8_slice_get_pic_info(inst);
		return 0;
	case GET_PARAM_CROP_INFO:
		/* VP8 has no crop rectangle; the full picture is visible. */
		mtk_vcodec_debug(inst, "No need to get vp8 crop information.");
		return 0;
	case GET_PARAM_DPB_SIZE:
		*((unsigned int *)out) = VP8_DPB_SIZE;
		return 0;
	default:
		mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
		return -EINVAL;
	}
}
/*
 * vdec_vp8_slice_deinit - tear down a VP8 decoder instance
 *
 * Stop the firmware side first, then release the HW working buffers
 * (which also clears their vsi entries) before freeing the instance.
 */
static void vdec_vp8_slice_deinit(void *h_vdec)
{
	struct vdec_vp8_slice_inst *inst = h_vdec;

	mtk_vcodec_debug_enter(inst);

	vpu_dec_deinit(&inst->vpu);
	vdec_vp8_slice_free_working_buf(inst);
	kfree(inst);
}
/* Decoder ops for the VP8 stateless decoder. */
const struct vdec_common_if vdec_vp8_slice_if = {
	.init = vdec_vp8_slice_init,
	.decode = vdec_vp8_slice_decode,
	.get_param = vdec_vp8_slice_get_param,
	.deinit = vdec_vp8_slice_deinit,
};

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,536 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - CSI-2 Receiver
*
* Copyright (C) 2019 Collabora, Ltd.
* Copyright (C) 2022 Ideas on Board
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/lockdep.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-fwnode.h>
#include "rkisp1-common.h"
#include "rkisp1-csi.h"
#define RKISP1_CSI_DEV_NAME RKISP1_DRIVER_NAME "_csi"
#define RKISP1_CSI_DEF_FMT MEDIA_BUS_FMT_SRGGB10_1X10
/* Convert an embedded v4l2_subdev back to its containing rkisp1_csi. */
static inline struct rkisp1_csi *to_rkisp1_csi(struct v4l2_subdev *sd)
{
	return container_of(sd, struct rkisp1_csi, sd);
}
/*
 * rkisp1_csi_get_pad_fmt - get the TRY or ACTIVE format of a CSI pad
 *
 * For TRY the caller-provided subdev state is used.  For ACTIVE, a
 * temporary state wrapping the driver's own csi->pad_cfg is built so that
 * the same v4l2_subdev_get_try_format() accessor returns the active
 * format storage.  Caller must hold csi->lock.
 */
static struct v4l2_mbus_framefmt *
rkisp1_csi_get_pad_fmt(struct rkisp1_csi *csi,
		       struct v4l2_subdev_state *sd_state,
		       unsigned int pad, u32 which)
{
	struct v4l2_subdev_state state = {
		.pads = csi->pad_cfg
	};

	lockdep_assert_held(&csi->lock);

	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
	else
		return v4l2_subdev_get_try_format(&csi->sd, &state, pad);
}
/*
 * rkisp1_csi_link_sensor - connect a probed sensor to the CSI receiver
 *
 * Caches the sensor's V4L2_CID_PIXEL_RATE control (needed later to derive
 * the D-PHY configuration) and creates the media link from the sensor's
 * @source_pad to the CSI sink pad.  Only the first sensor's link starts
 * out enabled.
 *
 * Return: 0 on success, negative errno otherwise.
 */
int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
			   struct rkisp1_sensor_async *s_asd,
			   unsigned int source_pad)
{
	struct rkisp1_csi *csi = &rkisp1->csi;
	int ret;

	s_asd->pixel_rate_ctrl = v4l2_ctrl_find(sd->ctrl_handler,
						V4L2_CID_PIXEL_RATE);
	if (!s_asd->pixel_rate_ctrl) {
		dev_err(rkisp1->dev, "No pixel rate control in subdev %s\n",
			sd->name);
		return -EINVAL;
	}

	/* Create the link from the sensor to the CSI receiver. */
	ret = media_create_pad_link(&sd->entity, source_pad,
				    &csi->sd.entity, RKISP1_CSI_PAD_SINK,
				    !s_asd->index ? MEDIA_LNK_FL_ENABLED : 0);
	if (ret) {
		dev_err(csi->rkisp1->dev, "failed to link src pad of %s\n",
			sd->name);
		return ret;
	}

	return 0;
}
/*
 * rkisp1_csi_config - program the MIPI receiver for the given sensor
 *
 * Sets lane count and control flags, selects the data type / virtual
 * channel matching the negotiated sink format, and arms the MIPI
 * interrupts.
 *
 * Return: 0 on success, -EINVAL for an unsupported lane count.
 */
static int rkisp1_csi_config(struct rkisp1_csi *csi,
			     const struct rkisp1_sensor_async *sensor)
{
	struct rkisp1_device *rkisp1 = csi->rkisp1;
	unsigned int lanes = sensor->lanes;
	u32 mipi_ctrl;

	/* Hardware supports 1 to 4 data lanes. */
	if (lanes < 1 || lanes > 4)
		return -EINVAL;

	mipi_ctrl = RKISP1_CIF_MIPI_CTRL_NUM_LANES(lanes - 1) |
		    RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(0xf) |
		    RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP |
		    RKISP1_CIF_MIPI_CTRL_CLOCKLANE_ENA;

	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL, mipi_ctrl);

	/* V12 could also use a newer csi2-host, but we don't want that yet */
	if (rkisp1->info->isp_ver == RKISP1_V12)
		rkisp1_write(rkisp1, RKISP1_CIF_ISP_CSI0_CTRL0, 0);

	/* Configure Data Type and Virtual Channel */
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL,
		     RKISP1_CIF_MIPI_DATA_SEL_DT(csi->sink_fmt->mipi_dt) |
		     RKISP1_CIF_MIPI_DATA_SEL_VC(0));

	/* Clear MIPI interrupts */
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);
	/*
	 * Disable RKISP1_CIF_MIPI_ERR_DPHY interrupt here temporary for
	 * isp bus may be dead when switch isp.
	 */
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
		     RKISP1_CIF_MIPI_FRAME_END | RKISP1_CIF_MIPI_ERR_CSI |
		     RKISP1_CIF_MIPI_ERR_DPHY |
		     RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(0x03) |
		     RKISP1_CIF_MIPI_ADD_DATA_OVFLW);

	dev_dbg(rkisp1->dev, "\n MIPI_CTRL 0x%08x\n"
		" MIPI_IMG_DATA_SEL 0x%08x\n"
		" MIPI_STATUS 0x%08x\n"
		" MIPI_IMSC 0x%08x\n",
		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL),
		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL),
		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_STATUS),
		rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC));

	return 0;
}
/* Set the OUTPUT_ENA bit to start forwarding data from the receiver. */
static void rkisp1_csi_enable(struct rkisp1_csi *csi)
{
	struct rkisp1_device *rkisp1 = csi->rkisp1;
	u32 ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);

	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
		     ctrl | RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA);
}
/*
 * Stop the receiver: mask and acknowledge all MIPI interrupts, then clear
 * the OUTPUT_ENA bit.
 */
static void rkisp1_csi_disable(struct rkisp1_csi *csi)
{
	struct rkisp1_device *rkisp1 = csi->rkisp1;
	u32 ctrl;

	/* Mask and clear interrupts. */
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, 0);
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, ~0);

	ctrl = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_CTRL);
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_CTRL,
		     ctrl & (~RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA));
}
/*
 * Bring the CSI receiver up for streaming: program the receiver registers,
 * configure and power on the MIPI D-PHY from the sensor's pixel rate, then
 * enable the receiver output. Returns 0 on success or a negative errno.
 */
static int rkisp1_csi_start(struct rkisp1_csi *csi,
			    const struct rkisp1_sensor_async *sensor)
{
	struct rkisp1_device *rkisp1 = csi->rkisp1;
	union phy_configure_opts opts;
	struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
	s64 pixel_clock;
	int ret;

	ret = rkisp1_csi_config(csi, sensor);
	if (ret)
		return ret;

	/* The D-PHY timings are derived from the sensor's pixel rate. */
	pixel_clock = v4l2_ctrl_g_ctrl_int64(sensor->pixel_rate_ctrl);
	if (!pixel_clock) {
		dev_err(rkisp1->dev, "Invalid pixel rate value\n");
		return -EINVAL;
	}

	phy_mipi_dphy_get_default_config(pixel_clock, csi->sink_fmt->bus_width,
					 sensor->lanes, cfg);
	/*
	 * NOTE(review): the return values of phy_set_mode(), phy_configure()
	 * and phy_power_on() are ignored here — confirm failures are benign.
	 */
	phy_set_mode(csi->dphy, PHY_MODE_MIPI_DPHY);
	phy_configure(csi->dphy, &opts);
	phy_power_on(csi->dphy);

	rkisp1_csi_enable(csi);

	/*
	 * CIF spec says to wait for sufficient time after enabling
	 * the MIPI interface and before starting the sensor output.
	 */
	usleep_range(1000, 1200);

	return 0;
}
/* Stop streaming: disable the receiver first, then power the D-PHY down. */
static void rkisp1_csi_stop(struct rkisp1_csi *csi)
{
	rkisp1_csi_disable(csi);

	phy_power_off(csi->dphy);
}
/*
 * MIPI interrupt handler. Acknowledges the pending status, throttles the
 * noisy D-PHY errctrl interrupt while errors persist, and re-arms it once a
 * frame completes without errors. Returns IRQ_NONE if no status was pending.
 */
irqreturn_t rkisp1_csi_isr(int irq, void *ctx)
{
	struct device *dev = ctx;
	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
	u32 val, status;

	status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
	if (!status)
		return IRQ_NONE;

	/* Acknowledge everything we are about to handle. */
	rkisp1_write(rkisp1, RKISP1_CIF_MIPI_ICR, status);

	/*
	 * Disable DPHY errctrl interrupt, because this dphy
	 * erctrl signal is asserted until the next changes
	 * of line state. This time is may be too long and cpu
	 * is hold in this interrupt.
	 */
	if (status & RKISP1_CIF_MIPI_ERR_CTRL(0x0f)) {
		val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
		rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC,
			     val & ~RKISP1_CIF_MIPI_ERR_CTRL(0x0f));
		rkisp1->csi.is_dphy_errctrl_disabled = true;
	}

	/*
	 * Enable DPHY errctrl interrupt again, if mipi have receive
	 * the whole frame without any error.
	 */
	if (status == RKISP1_CIF_MIPI_FRAME_END) {
		/*
		 * Enable DPHY errctrl interrupt again, if mipi have receive
		 * the whole frame without any error.
		 */
		if (rkisp1->csi.is_dphy_errctrl_disabled) {
			val = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_IMSC);
			val |= RKISP1_CIF_MIPI_ERR_CTRL(0x0f);
			rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMSC, val);
			rkisp1->csi.is_dphy_errctrl_disabled = false;
		}
	} else {
		/* Any status other than a clean frame end counts as an error. */
		rkisp1->debug.mipi_error++;
	}

	return IRQ_HANDLED;
}
/* ----------------------------------------------------------------------------
* Subdev pad operations
*/
/*
 * Enumerate media bus codes. The source pad exposes exactly one code (the
 * current sink format); the sink pad exposes every sink-capable entry of
 * the global format table.
 */
static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
				     struct v4l2_subdev_state *sd_state,
				     struct v4l2_subdev_mbus_code_enum *code)
{
	struct rkisp1_csi *csi = to_rkisp1_csi(sd);
	const struct rkisp1_mbus_info *info;
	unsigned int idx;
	unsigned int matched = 0;

	if (code->pad == RKISP1_CSI_PAD_SRC) {
		const struct v4l2_mbus_framefmt *sink_fmt;

		/* Single entry: the source pad mirrors the sink format. */
		if (code->index)
			return -EINVAL;

		mutex_lock(&csi->lock);
		sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state,
						  RKISP1_CSI_PAD_SINK,
						  code->which);
		code->code = sink_fmt->code;
		mutex_unlock(&csi->lock);

		return 0;
	}

	/* Walk the table, counting only sink-capable formats. */
	for (idx = 0; (info = rkisp1_mbus_info_get_by_index(idx)); idx++) {
		if (!(info->direction & RKISP1_ISP_SD_SINK))
			continue;

		if (matched == code->index) {
			code->code = info->mbus_code;
			return 0;
		}

		matched++;
	}

	return -EINVAL;
}
/* Initialize the try formats of both pads to the driver defaults. */
static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
				  struct v4l2_subdev_state *sd_state)
{
	struct v4l2_mbus_framefmt *sink_fmt;
	struct v4l2_mbus_framefmt *src_fmt;

	src_fmt = v4l2_subdev_get_try_format(sd, sd_state, RKISP1_CSI_PAD_SRC);
	sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
					      RKISP1_CSI_PAD_SINK);

	/* Default sink format: default resolution, progressive, default code. */
	sink_fmt->code = RKISP1_CSI_DEF_FMT;
	sink_fmt->field = V4L2_FIELD_NONE;
	sink_fmt->width = RKISP1_DEFAULT_WIDTH;
	sink_fmt->height = RKISP1_DEFAULT_HEIGHT;

	/* The source pad always mirrors the sink pad. */
	*src_fmt = *sink_fmt;

	return 0;
}
static int rkisp1_csi_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
{
struct rkisp1_csi *csi = to_rkisp1_csi(sd);
mutex_lock(&csi->lock);
fmt->format = *rkisp1_csi_get_pad_fmt(csi, sd_state, fmt->pad,
fmt->which);
mutex_unlock(&csi->lock);
return 0;
}
/*
 * Set the sink pad format, falling back to the default code when the
 * requested one is unsupported, clamping the size to the ISP limits, and
 * propagating the result to the source pad. Source-pad requests are
 * read-only and simply return the current format.
 */
static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
			      struct v4l2_subdev_state *sd_state,
			      struct v4l2_subdev_format *fmt)
{
	struct rkisp1_csi *csi = to_rkisp1_csi(sd);
	const struct rkisp1_mbus_info *mbus_info;
	struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;

	/* The format on the source pad always matches the sink pad. */
	if (fmt->pad == RKISP1_CSI_PAD_SRC)
		return rkisp1_csi_get_fmt(sd, sd_state, fmt);

	mutex_lock(&csi->lock);

	sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SINK,
					  fmt->which);

	/* Fall back to the default code if the requested one is unsupported. */
	sink_fmt->code = fmt->format.code;
	mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
	if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
		sink_fmt->code = RKISP1_CSI_DEF_FMT;
		mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
	}

	sink_fmt->width = clamp_t(u32, fmt->format.width,
				  RKISP1_ISP_MIN_WIDTH,
				  RKISP1_ISP_MAX_WIDTH);
	sink_fmt->height = clamp_t(u32, fmt->format.height,
				   RKISP1_ISP_MIN_HEIGHT,
				   RKISP1_ISP_MAX_HEIGHT);

	fmt->format = *sink_fmt;

	/* Only the ACTIVE format updates the hardware configuration. */
	if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		csi->sink_fmt = mbus_info;

	/* Propagate the format to the source pad. */
	src_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SRC,
					 fmt->which);
	*src_fmt = *sink_fmt;

	mutex_unlock(&csi->lock);

	return 0;
}
/* ----------------------------------------------------------------------------
* Subdev video operations
*/
/*
 * Start or stop streaming. On start, resolve the connected source subdev
 * through the media graph, validate that it is a CSI-2 D-PHY source, bring
 * the receiver up and only then start the source. On stop, stop the source
 * first and then the receiver.
 */
static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct rkisp1_csi *csi = to_rkisp1_csi(sd);
	struct rkisp1_device *rkisp1 = csi->rkisp1;
	struct rkisp1_sensor_async *source_asd;
	struct media_pad *source_pad;
	struct v4l2_subdev *source;
	int ret;

	if (!enable) {
		v4l2_subdev_call(csi->source, video, s_stream, false);

		rkisp1_csi_stop(csi);

		return 0;
	}

	/* Find the remote source pad connected to our sink. */
	source_pad = media_entity_remote_source_pad_unique(&sd->entity);
	if (IS_ERR(source_pad)) {
		dev_dbg(rkisp1->dev, "Failed to get source for CSI: %ld\n",
			PTR_ERR(source_pad));
		return -EPIPE;
	}

	source = media_entity_to_v4l2_subdev(source_pad->entity);
	if (!source) {
		/* This should really not happen, so is not worth a message. */
		return -EPIPE;
	}

	/* Only CSI-2 D-PHY sources can feed this receiver. */
	source_asd = container_of(source->asd, struct rkisp1_sensor_async, asd);
	if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
		return -EINVAL;

	mutex_lock(&csi->lock);
	ret = rkisp1_csi_start(csi, source_asd);
	mutex_unlock(&csi->lock);
	if (ret)
		return ret;

	/* Start the source last; undo the receiver start on failure. */
	ret = v4l2_subdev_call(source, video, s_stream, true);
	if (ret) {
		rkisp1_csi_stop(csi);
		return ret;
	}

	/* Remember the source so the stop path can stop it. */
	csi->source = source;

	return 0;
}

/* ----------------------------------------------------------------------------
 * Registration
 */

static const struct media_entity_operations rkisp1_csi_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
	.s_stream = rkisp1_csi_s_stream,
};

static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
	.enum_mbus_code = rkisp1_csi_enum_mbus_code,
	.init_cfg = rkisp1_csi_init_config,
	.get_fmt = rkisp1_csi_get_fmt,
	.set_fmt = rkisp1_csi_set_fmt,
};

static const struct v4l2_subdev_ops rkisp1_csi_ops = {
	.video = &rkisp1_csi_video_ops,
	.pad = &rkisp1_csi_pad_ops,
};
/*
 * Register the CSI receiver as a V4L2 subdevice with sink and source pads.
 * On failure every partially initialized resource is released and
 * csi->rkisp1 is reset to NULL so rkisp1_csi_unregister() becomes a no-op.
 */
int rkisp1_csi_register(struct rkisp1_device *rkisp1)
{
	struct rkisp1_csi *csi = &rkisp1->csi;
	struct v4l2_subdev_state state = {};
	struct media_pad *pads;
	struct v4l2_subdev *sd;
	int ret;

	csi->rkisp1 = rkisp1;
	mutex_init(&csi->lock);

	sd = &csi->sd;
	v4l2_subdev_init(sd, &rkisp1_csi_ops);
	sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	sd->entity.ops = &rkisp1_csi_media_ops;
	sd->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	sd->owner = THIS_MODULE;
	strscpy(sd->name, RKISP1_CSI_DEV_NAME, sizeof(sd->name));

	pads = csi->pads;
	pads[RKISP1_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
					  MEDIA_PAD_FL_MUST_CONNECT;
	pads[RKISP1_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
					 MEDIA_PAD_FL_MUST_CONNECT;

	csi->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_CSI_DEF_FMT);

	ret = media_entity_pads_init(&sd->entity, RKISP1_CSI_PAD_NUM, pads);
	if (ret)
		goto error;

	/* Seed the active formats through a temporary subdev state. */
	state.pads = csi->pad_cfg;
	rkisp1_csi_init_config(sd, &state);

	ret = v4l2_device_register_subdev(&csi->rkisp1->v4l2_dev, sd);
	if (ret) {
		dev_err(sd->dev, "Failed to register csi receiver subdev\n");
		goto error;
	}

	return 0;

error:
	media_entity_cleanup(&sd->entity);
	mutex_destroy(&csi->lock);
	csi->rkisp1 = NULL;
	return ret;
}
/*
 * Undo rkisp1_csi_register(). Safe to call when registration failed or
 * never happened: csi->rkisp1 is only non-NULL after a successful register.
 */
void rkisp1_csi_unregister(struct rkisp1_device *rkisp1)
{
	struct rkisp1_csi *csi = &rkisp1->csi;

	if (!csi->rkisp1)
		return;

	v4l2_device_unregister_subdev(&csi->sd);
	media_entity_cleanup(&csi->sd.entity);
	mutex_destroy(&csi->lock);
}
/*
 * Acquire and initialize the MIPI D-PHY used by the CSI receiver.
 * Called at probe time, before rkisp1_csi_register().
 * Returns 0 on success or a negative errno.
 */
int rkisp1_csi_init(struct rkisp1_device *rkisp1)
{
	struct rkisp1_csi *csi = &rkisp1->csi;

	csi->rkisp1 = rkisp1;

	csi->dphy = devm_phy_get(rkisp1->dev, "dphy");
	if (IS_ERR(csi->dphy))
		return dev_err_probe(rkisp1->dev, PTR_ERR(csi->dphy),
				     "Couldn't get the MIPI D-PHY\n");

	/*
	 * phy_init() can fail; propagate its result instead of discarding
	 * it as the previous code did (it returned 0 unconditionally).
	 */
	return phy_init(csi->dphy);
}
/* Counterpart of rkisp1_csi_init(): release the MIPI D-PHY. */
void rkisp1_csi_cleanup(struct rkisp1_device *rkisp1)
{
	struct rkisp1_csi *csi = &rkisp1->csi;

	phy_exit(csi->dphy);
}

View File

@ -0,0 +1,28 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Rockchip ISP1 Driver - CSI-2 Receiver
*
* Copyright (C) 2019 Collabora, Ltd.
* Copyright (C) 2022 Ideas on Board
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#ifndef _RKISP1_CSI_H
#define _RKISP1_CSI_H

struct rkisp1_csi;
struct rkisp1_device;
struct rkisp1_sensor_async;

/* Acquire/release the MIPI D-PHY (probe/remove time). */
int rkisp1_csi_init(struct rkisp1_device *rkisp1);
void rkisp1_csi_cleanup(struct rkisp1_device *rkisp1);

/* Register/unregister the CSI receiver V4L2 subdevice and its pads. */
int rkisp1_csi_register(struct rkisp1_device *rkisp1);
void rkisp1_csi_unregister(struct rkisp1_device *rkisp1);

/* Link a bound sensor subdev's source pad to the CSI sink pad. */
int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
			   struct rkisp1_sensor_async *s_asd,
			   unsigned int source_pad);

#endif /* _RKISP1_CSI_H */

View File

@ -0,0 +1,243 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
* Rockchip ISP1 Driver - Base driver
*
* Copyright (C) 2019 Collabora, Ltd.
*
* Based on Rockchip ISP1 driver by Rockchip Electronics Co., Ltd.
* Copyright (C) 2017 Rockchip Electronics Co., Ltd.
*/
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/minmax.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include "rkisp1-common.h"
#include "rkisp1-regs.h"
/*
 * One register to dump: its offset, an optional shadow-register offset
 * (0 when the register has no shadow copy) and a printable name.
 */
struct rkisp1_debug_register {
	u32 reg;
	u32 shd;
	const char * const name;
};

/* Table entry for a plain register. */
#define RKISP1_DEBUG_REG(name)		{ RKISP1_CIF_##name, 0, #name }
/* Table entry for a register with a matching *_SHD shadow register. */
#define RKISP1_DEBUG_SHD_REG(name) { \
	RKISP1_CIF_##name, RKISP1_CIF_##name##_SHD, #name \
}

/* Keep this up-to-date when adding new registers. */
#define RKISP1_MAX_REG_LENGTH		21
/*
 * Print every register in the NULL-name-terminated @regs table (and its
 * shadow copy when one exists) to the seq file. Only takes a runtime PM
 * reference if the device is already in use, so a debugfs read never powers
 * the hardware up; returns -ENODATA when the device is suspended.
 */
static int rkisp1_debug_dump_regs(struct rkisp1_device *rkisp1,
				  struct seq_file *m, unsigned int offset,
				  const struct rkisp1_debug_register *regs)
{
	const int width = RKISP1_MAX_REG_LENGTH;
	u32 val, shd;
	int ret;

	ret = pm_runtime_get_if_in_use(rkisp1->dev);
	if (ret <= 0)
		return ret ? : -ENODATA;

	for (; regs->name; ++regs) {
		val = rkisp1_read(rkisp1, offset + regs->reg);

		if (regs->shd) {
			shd = rkisp1_read(rkisp1, offset + regs->shd);
			seq_printf(m, "%*s: 0x%08x/0x%08x\n", width, regs->name,
				   val, shd);
		} else {
			seq_printf(m, "%*s: 0x%08x\n", width, regs->name, val);
		}
	}

	pm_runtime_put(rkisp1->dev);

	return 0;
}
/* debugfs "regs/core": dump the clock, reset and memory-interface registers. */
static int rkisp1_debug_dump_core_regs_show(struct seq_file *m, void *p)
{
	static const struct rkisp1_debug_register registers[] = {
		RKISP1_DEBUG_REG(VI_CCL),
		RKISP1_DEBUG_REG(VI_ICCL),
		RKISP1_DEBUG_REG(VI_IRCL),
		RKISP1_DEBUG_REG(VI_DPCL),
		RKISP1_DEBUG_REG(MI_CTRL),
		RKISP1_DEBUG_REG(MI_BYTE_CNT),
		RKISP1_DEBUG_REG(MI_CTRL_SHD),
		RKISP1_DEBUG_REG(MI_RIS),
		RKISP1_DEBUG_REG(MI_STATUS),
		RKISP1_DEBUG_REG(MI_DMA_CTRL),
		RKISP1_DEBUG_REG(MI_DMA_STATUS),
		{ /* Sentinel */ },
	};
	struct rkisp1_device *rkisp1 = m->private;

	return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_core_regs);
/* debugfs "regs/isp": dump the main ISP control and status registers. */
static int rkisp1_debug_dump_isp_regs_show(struct seq_file *m, void *p)
{
	static const struct rkisp1_debug_register registers[] = {
		RKISP1_DEBUG_REG(ISP_CTRL),
		RKISP1_DEBUG_REG(ISP_ACQ_PROP),
		RKISP1_DEBUG_REG(ISP_FLAGS_SHD),
		RKISP1_DEBUG_REG(ISP_RIS),
		RKISP1_DEBUG_REG(ISP_ERR),
		{ /* Sentinel */ },
	};
	struct rkisp1_device *rkisp1 = m->private;

	return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_isp_regs);
/*
 * debugfs "regs/mrsz" and "regs/srsz": dump a resizer's registers together
 * with their shadow copies, using the resizer instance's register base.
 */
static int rkisp1_debug_dump_rsz_regs_show(struct seq_file *m, void *p)
{
	static const struct rkisp1_debug_register registers[] = {
		RKISP1_DEBUG_SHD_REG(RSZ_CTRL),
		RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HY),
		RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCB),
		RKISP1_DEBUG_SHD_REG(RSZ_SCALE_HCR),
		RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VY),
		RKISP1_DEBUG_SHD_REG(RSZ_SCALE_VC),
		RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HY),
		RKISP1_DEBUG_SHD_REG(RSZ_PHASE_HC),
		RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VY),
		RKISP1_DEBUG_SHD_REG(RSZ_PHASE_VC),
		{ /* Sentinel */ },
	};
	struct rkisp1_resizer *rsz = m->private;

	return rkisp1_debug_dump_regs(rsz->rkisp1, m, rsz->regs_base, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_rsz_regs);
/* debugfs "regs/mi_mp": dump the memory-interface main-path registers. */
static int rkisp1_debug_dump_mi_mp_show(struct seq_file *m, void *p)
{
	/*
	 * The table previously listed MI_MP_Y_SIZE_INIT twice — an obvious
	 * copy & paste duplicate that printed the same register twice; the
	 * duplicate entry has been removed.
	 */
	static const struct rkisp1_debug_register registers[] = {
		RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT),
		RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_INIT2),
		RKISP1_DEBUG_REG(MI_MP_Y_BASE_AD_SHD),
		RKISP1_DEBUG_REG(MI_MP_Y_SIZE_INIT),
		RKISP1_DEBUG_REG(MI_MP_Y_SIZE_SHD),
		RKISP1_DEBUG_REG(MI_MP_Y_OFFS_CNT_SHD),
		{ /* Sentinel */ },
	};
	struct rkisp1_device *rkisp1 = m->private;

	return rkisp1_debug_dump_regs(rkisp1, m, 0, registers);
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_dump_mi_mp);
/* Histogram of sampled 12-bit input data values: 32 bins of 128 values. */
#define RKISP1_DEBUG_DATA_COUNT_BINS	32
#define RKISP1_DEBUG_DATA_COUNT_STEP	(4096 / RKISP1_DEBUG_DATA_COUNT_BINS)

/*
 * debugfs "input_status": sample the ISP input port 10000 times at ~1µs
 * intervals and report sync-signal activity plus a histogram of the data
 * bus values. Returns -ENODATA when the device is not powered.
 */
static int rkisp1_debug_input_status_show(struct seq_file *m, void *p)
{
	struct rkisp1_device *rkisp1 = m->private;
	u16 data_count[RKISP1_DEBUG_DATA_COUNT_BINS] = { };
	unsigned int hsync_count = 0;
	unsigned int vsync_count = 0;
	unsigned int i;
	u32 data;
	u32 val;
	int ret;

	ret = pm_runtime_get_if_in_use(rkisp1->dev);
	if (ret <= 0)
		return ret ? : -ENODATA;

	/* Sample the ISP input port status 10000 times with a 1µs interval. */
	for (i = 0; i < 10000; ++i) {
		val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_FLAGS_SHD);

		data = (val & RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_MASK)
		     >> RKISP1_CIF_ISP_FLAGS_SHD_S_DATA_SHIFT;
		data_count[data / RKISP1_DEBUG_DATA_COUNT_STEP]++;

		if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_HSYNC)
			hsync_count++;
		if (val & RKISP1_CIF_ISP_FLAGS_SHD_S_VSYNC)
			vsync_count++;

		udelay(1);
	}

	pm_runtime_put(rkisp1->dev);

	seq_printf(m, "vsync: %u, hsync: %u\n", vsync_count, hsync_count);
	seq_puts(m, "data:\n");
	for (i = 0; i < ARRAY_SIZE(data_count); ++i)
		seq_printf(m, "- [%04u:%04u]: %u\n",
			   i * RKISP1_DEBUG_DATA_COUNT_STEP,
			   (i + 1) * RKISP1_DEBUG_DATA_COUNT_STEP - 1,
			   data_count[i]);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rkisp1_debug_input_status);
/*
 * Create the per-device debugfs hierarchy: error/statistics counters at the
 * top level, an "input_status" sampler, and a "regs" directory with the
 * register dump files.
 */
void rkisp1_debug_init(struct rkisp1_device *rkisp1)
{
	struct rkisp1_debug *debug = &rkisp1->debug;
	struct dentry *regs_dir;

	debug->debugfs_dir = debugfs_create_dir(dev_name(rkisp1->dev), NULL);

	debugfs_create_ulong("data_loss", 0444, debug->debugfs_dir,
			     &debug->data_loss);
	/*
	 * NOTE(review): "outform_size_err" is abbreviated unlike the other
	 * *_size_error entries; renaming it would change the debugfs ABI,
	 * so it is kept as-is.
	 */
	debugfs_create_ulong("outform_size_err", 0444, debug->debugfs_dir,
			     &debug->outform_size_error);
	debugfs_create_ulong("img_stabilization_size_error", 0444,
			     debug->debugfs_dir,
			     &debug->img_stabilization_size_error);
	debugfs_create_ulong("inform_size_error", 0444, debug->debugfs_dir,
			     &debug->inform_size_error);
	debugfs_create_ulong("irq_delay", 0444, debug->debugfs_dir,
			     &debug->irq_delay);
	debugfs_create_ulong("mipi_error", 0444, debug->debugfs_dir,
			     &debug->mipi_error);
	debugfs_create_ulong("stats_error", 0444, debug->debugfs_dir,
			     &debug->stats_error);
	debugfs_create_ulong("mp_stop_timeout", 0444, debug->debugfs_dir,
			     &debug->stop_timeout[RKISP1_MAINPATH]);
	debugfs_create_ulong("sp_stop_timeout", 0444, debug->debugfs_dir,
			     &debug->stop_timeout[RKISP1_SELFPATH]);
	debugfs_create_ulong("mp_frame_drop", 0444, debug->debugfs_dir,
			     &debug->frame_drop[RKISP1_MAINPATH]);
	debugfs_create_ulong("sp_frame_drop", 0444, debug->debugfs_dir,
			     &debug->frame_drop[RKISP1_SELFPATH]);
	debugfs_create_file("input_status", 0444, debug->debugfs_dir, rkisp1,
			    &rkisp1_debug_input_status_fops);

	/* Register dump files live in a "regs" subdirectory. */
	regs_dir = debugfs_create_dir("regs", debug->debugfs_dir);

	debugfs_create_file("core", 0444, regs_dir, rkisp1,
			    &rkisp1_debug_dump_core_regs_fops);
	debugfs_create_file("isp", 0444, regs_dir, rkisp1,
			    &rkisp1_debug_dump_isp_regs_fops);
	debugfs_create_file("mrsz", 0444, regs_dir,
			    &rkisp1->resizer_devs[RKISP1_MAINPATH],
			    &rkisp1_debug_dump_rsz_regs_fops);
	debugfs_create_file("srsz", 0444, regs_dir,
			    &rkisp1->resizer_devs[RKISP1_SELFPATH],
			    &rkisp1_debug_dump_rsz_regs_fops);
	debugfs_create_file("mi_mp", 0444, regs_dir, rkisp1,
			    &rkisp1_debug_dump_mi_mp_fops);
}
/* Remove the whole debugfs hierarchy created by rkisp1_debug_init(). */
void rkisp1_debug_cleanup(struct rkisp1_device *rkisp1)
{
	debugfs_remove_recursive(rkisp1->debug.debugfs_dir);
}

View File

@ -0,0 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_SUN6I_MIPI_CSI2
tristate "Allwinner A31 MIPI CSI-2 Controller Driver"
depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
depends on ARCH_SUNXI || COMPILE_TEST
depends on PM && COMMON_CLK
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select PHY_SUN6I_MIPI_DPHY
select GENERIC_PHY_MIPI_DPHY
select REGMAP_MMIO
help
Support for the Allwinner A31 MIPI CSI-2 controller, also found on
other platforms such as the V3/V3s.

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
sun6i-mipi-csi2-y += sun6i_mipi_csi2.o
obj-$(CONFIG_VIDEO_SUN6I_MIPI_CSI2) += sun6i-mipi-csi2.o

View File

@ -0,0 +1,750 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include "sun6i_mipi_csi2.h"
#include "sun6i_mipi_csi2_reg.h"
/* Format */
/*
 * Media bus codes supported by the controller, with their CSI-2 data type
 * and bits per pixel: the four raw Bayer orders at 8 and 10 bits.
 */
static const struct sun6i_mipi_csi2_format sun6i_mipi_csi2_formats[] = {
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
};
/*
 * Look up the format table entry matching @mbus_code; NULL when the code
 * is not supported by this controller.
 */
static const struct sun6i_mipi_csi2_format *
sun6i_mipi_csi2_format_find(u32 mbus_code)
{
	const struct sun6i_mipi_csi2_format *format = sun6i_mipi_csi2_formats;
	const struct sun6i_mipi_csi2_format *end =
		format + ARRAY_SIZE(sun6i_mipi_csi2_formats);

	for (; format < end; format++)
		if (format->mbus_code == mbus_code)
			return format;

	return NULL;
}
/* Controller */
/* Set the controller enable bit. */
static void sun6i_mipi_csi2_enable(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_EN, SUN6I_MIPI_CSI2_CTL_EN);
}

/* Clear the controller enable bit. */
static void sun6i_mipi_csi2_disable(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_EN, 0);
}
/*
 * Program the controller for the current mbus format and lane count:
 * deassert reset, configure lane count and virtual channel / data type
 * routing, and clear pending channel interrupts. Must be followed by
 * sun6i_mipi_csi2_enable() to actually start reception.
 */
static void sun6i_mipi_csi2_configure(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun6i_mipi_csi2_format *format;
	struct device *dev = csi2_dev->dev;
	u32 version = 0;

	format = sun6i_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format))
		return;

	/*
	 * The enable flow in the Allwinner BSP is a bit different: the enable
	 * and reset bits are set together before starting the CSI controller.
	 *
	 * In mainline we enable the CSI controller first (due to subdev logic).
	 * One reliable way to make this work is to deassert reset, configure
	 * registers and enable the controller when everything's ready.
	 *
	 * However, setting the version enable bit and removing it afterwards
	 * appears necessary for capture to work reliably, while replacing it
	 * with a delay doesn't do the trick.
	 */
	regmap_write(regmap, SUN6I_MIPI_CSI2_CTL_REG,
		     SUN6I_MIPI_CSI2_CTL_RESET_N |
		     SUN6I_MIPI_CSI2_CTL_VERSION_EN |
		     SUN6I_MIPI_CSI2_CTL_UNPK_EN);

	regmap_read(regmap, SUN6I_MIPI_CSI2_VERSION_REG, &version);

	regmap_update_bits(regmap, SUN6I_MIPI_CSI2_CTL_REG,
			   SUN6I_MIPI_CSI2_CTL_VERSION_EN, 0);

	dev_dbg(dev, "A31 MIPI CSI-2 version: %04x\n", version);

	regmap_write(regmap, SUN6I_MIPI_CSI2_CFG_REG,
		     SUN6I_MIPI_CSI2_CFG_CHANNEL_MODE(1) |
		     SUN6I_MIPI_CSI2_CFG_LANE_COUNT(lanes_count));

	/*
	 * Only a single virtual channel (index 0) is currently supported.
	 * While the registers do mention multiple physical channels being
	 * available (which can be configured to match a specific virtual
	 * channel or data type), it's unclear whether channels > 0 are actually
	 * connected and available and the reference source code only makes use
	 * of channel 0.
	 *
	 * Using extra channels would also require matching channels to be
	 * available on the CSI (and ISP) side, which is also unsure although
	 * some CSI implementations are said to support multiple channels for
	 * BT656 time-sharing.
	 *
	 * We still configure virtual channel numbers to ensure that virtual
	 * channel 0 only goes to channel 0.
	 */
	regmap_write(regmap, SUN6I_MIPI_CSI2_VCDT_RX_REG,
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(3, 3) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(2, 2) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(1, 1) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(0, 0) |
		     SUN6I_MIPI_CSI2_VCDT_RX_CH_DT(0, format->data_type));

	/* Acknowledge any stale channel interrupts. */
	regmap_write(regmap, SUN6I_MIPI_CSI2_CH_INT_PD_REG,
		     SUN6I_MIPI_CSI2_CH_INT_PD_CLEAR);
}
/* V4L2 Subdev */
/*
 * Start or stop streaming. On start: take a runtime PM reference, derive the
 * D-PHY timings from the sensor pixel rate, configure and enable the
 * controller and D-PHY, then start the source subdev. On stop (or on start
 * failure) the shared "disable" tail powers everything back down.
 */
static int sun6i_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
	union phy_configure_opts dphy_opts = { 0 };
	struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun6i_mipi_csi2_format *format;
	struct phy *dphy = csi2_dev->dphy;
	struct device *dev = csi2_dev->dev;
	struct v4l2_ctrl *ctrl;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	unsigned long pixel_rate;
	int ret;

	if (!source_subdev)
		return -ENODEV;

	if (!on) {
		/* Stop the source first, then fall into the disable tail. */
		ret = v4l2_subdev_call(source_subdev, video, s_stream, 0);
		goto disable;
	}

	/* Runtime PM */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Sensor Pixel Rate */
	ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_err(dev, "missing sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
	if (!pixel_rate) {
		dev_err(dev, "missing (zero) sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	/* D-PHY */
	if (!lanes_count) {
		dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
		ret = -ENODEV;
		goto error_pm;
	}

	format = sun6i_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format)) {
		ret = -ENODEV;
		goto error_pm;
	}

	phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
					 dphy_cfg);

	/*
	 * Note that our hardware is using DDR, which is not taken in account by
	 * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
	 * the pixel rate, lanes count and bpp.
	 *
	 * The resulting clock rate is basically the symbol rate over the whole
	 * link. The actual clock rate is calculated with division by two since
	 * DDR samples both on rising and falling edges.
	 */
	dev_dbg(dev, "A31 MIPI CSI-2 config:\n");
	dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
		pixel_rate, format->bpp, lanes_count,
		dphy_cfg->hs_clk_rate / 2);

	ret = phy_reset(dphy);
	if (ret) {
		dev_err(dev, "failed to reset MIPI D-PHY\n");
		goto error_pm;
	}

	ret = phy_configure(dphy, &dphy_opts);
	if (ret) {
		dev_err(dev, "failed to configure MIPI D-PHY\n");
		goto error_pm;
	}

	/* Controller */
	sun6i_mipi_csi2_configure(csi2_dev);
	sun6i_mipi_csi2_enable(csi2_dev);

	/* D-PHY */
	ret = phy_power_on(dphy);
	if (ret) {
		dev_err(dev, "failed to power on MIPI D-PHY\n");
		goto error_pm;
	}

	/* Source */
	ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto disable;

	return 0;

disable:
	/* On the stop path the s_stream(0) result is intentionally ignored. */
	if (!on)
		ret = 0;
	phy_power_off(dphy);
	sun6i_mipi_csi2_disable(csi2_dev);

error_pm:
	pm_runtime_put(dev);

	return ret;
}

static const struct v4l2_subdev_video_ops sun6i_mipi_csi2_video_ops = {
	.s_stream	= sun6i_mipi_csi2_s_stream,
};
/*
 * Sanitize a media bus format: fall back to the first table entry for
 * unsupported codes and force the fixed raw-Bayer colorimetry.
 */
static void
sun6i_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
	if (!sun6i_mipi_csi2_format_find(mbus_format->code))
		mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;

	mbus_format->field = V4L2_FIELD_NONE;
	mbus_format->colorspace = V4L2_COLORSPACE_RAW;
	mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
	mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
/* Initialize the sink-pad try format to a sane 640x480 default. */
static int sun6i_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *state)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	unsigned int pad = SUN6I_MIPI_CSI2_PAD_SINK;
	struct v4l2_mbus_framefmt *mbus_format =
		v4l2_subdev_get_try_format(subdev, state, pad);
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	mbus_format->code = sun6i_mipi_csi2_formats[0].mbus_code;
	mbus_format->width = 640;
	mbus_format->height = 480;

	/* Fill in the remaining (fixed) colorimetry fields. */
	sun6i_mipi_csi2_mbus_format_prepare(mbus_format);

	mutex_unlock(lock);

	return 0;
}
/* Enumerate the supported media bus codes straight from the format table. */
static int
sun6i_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *state,
			       struct v4l2_subdev_mbus_code_enum *code_enum)
{
	const unsigned int count = ARRAY_SIZE(sun6i_mipi_csi2_formats);
	unsigned int index = code_enum->index;

	if (index >= count)
		return -EINVAL;

	code_enum->code = sun6i_mipi_csi2_formats[index].mbus_code;

	return 0;
}
static int sun6i_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
struct v4l2_mbus_framefmt *mbus_format = &format->format;
struct mutex *lock = &csi2_dev->bridge.lock;
mutex_lock(lock);
if (format->which == V4L2_SUBDEV_FORMAT_TRY)
*mbus_format = *v4l2_subdev_get_try_format(subdev, state,
format->pad);
else
*mbus_format = csi2_dev->bridge.mbus_format;
mutex_unlock(lock);
return 0;
}
/* Sanitize the requested format and store it as the TRY or ACTIVE format. */
static int sun6i_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_state *state,
				   struct v4l2_subdev_format *format)
{
	struct sun6i_mipi_csi2_device *csi2_dev = v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	/* Clamp to a supported code and force the fixed colorimetry. */
	sun6i_mipi_csi2_mbus_format_prepare(mbus_format);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
		struct v4l2_mbus_framefmt *try_fmt =
			v4l2_subdev_get_try_format(subdev, state, format->pad);

		*try_fmt = *mbus_format;
	} else {
		csi2_dev->bridge.mbus_format = *mbus_format;
	}

	mutex_unlock(lock);

	return 0;
}
static const struct v4l2_subdev_pad_ops sun6i_mipi_csi2_pad_ops = {
	.init_cfg	= sun6i_mipi_csi2_init_cfg,
	.enum_mbus_code	= sun6i_mipi_csi2_enum_mbus_code,
	.get_fmt	= sun6i_mipi_csi2_get_fmt,
	.set_fmt	= sun6i_mipi_csi2_set_fmt,
};

static const struct v4l2_subdev_ops sun6i_mipi_csi2_subdev_ops = {
	.video	= &sun6i_mipi_csi2_video_ops,
	.pad	= &sun6i_mipi_csi2_pad_ops,
};

/* Media Entity */

static const struct media_entity_operations sun6i_mipi_csi2_entity_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};
/* V4L2 Async */
/*
 * Async notifier "bound" callback: when the remote (sensor) subdev appears,
 * create an immutable, enabled media link from its source pad to our sink
 * pad and remember it as the bridge's source.
 */
static int
sun6i_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
			       struct v4l2_subdev *remote_subdev,
			       struct v4l2_async_subdev *async_subdev)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct sun6i_mipi_csi2_device *csi2_dev =
		container_of(notifier, struct sun6i_mipi_csi2_device,
			     bridge.notifier);
	struct media_entity *sink_entity = &subdev->entity;
	struct media_entity *source_entity = &remote_subdev->entity;
	struct device *dev = csi2_dev->dev;
	int sink_pad_index = 0;
	int source_pad_index;
	int ret;

	/* Locate the remote entity's source pad from its fwnode. */
	ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "missing source pad in external entity %s\n",
			source_entity->name);
		return -EINVAL;
	}

	source_pad_index = ret;

	dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
		source_pad_index, sink_entity->name, sink_pad_index);

	ret = media_create_pad_link(source_entity, source_pad_index,
				    sink_entity, sink_pad_index,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
			source_entity->name, source_pad_index,
			sink_entity->name, sink_pad_index);
		return ret;
	}

	csi2_dev->bridge.source_subdev = remote_subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations
sun6i_mipi_csi2_notifier_ops = {
	.bound	= sun6i_mipi_csi2_notifier_bound,
};
/* Bridge */
/*
 * Parse the first fwnode endpoint as a CSI-2 D-PHY bus description and add
 * its remote (the sensor) to the async notifier. Returns -ENODEV when no
 * endpoint is described in the firmware.
 */
static int
sun6i_mipi_csi2_bridge_source_setup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
	struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
	struct v4l2_async_subdev *subdev_async;
	struct fwnode_handle *handle;
	struct device *dev = csi2_dev->dev;
	int ret;

	handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
						 FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!handle)
		return -ENODEV;

	/* Force D-PHY parsing regardless of the firmware's bus-type field. */
	endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;

	ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
	if (ret)
		goto complete;

	subdev_async =
		v4l2_async_nf_add_fwnode_remote(notifier, handle,
						struct v4l2_async_subdev);
	if (IS_ERR(subdev_async))
		ret = PTR_ERR(subdev_async);

complete:
	/* Drop the endpoint reference on both success and failure. */
	fwnode_handle_put(handle);

	return ret;
}
/*
 * Set up the bridge: initialize the V4L2 subdev and media entity, parse the
 * firmware source endpoint, and register both the async notifier and the
 * subdev. All partial state is unwound on failure.
 */
static int sun6i_mipi_csi2_bridge_setup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct sun6i_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
	struct v4l2_subdev *subdev = &bridge->subdev;
	struct v4l2_async_notifier *notifier = &bridge->notifier;
	struct media_pad *pads = bridge->pads;
	struct device *dev = csi2_dev->dev;
	int ret;

	mutex_init(&bridge->lock);

	/* V4L2 Subdev */
	v4l2_subdev_init(subdev, &sun6i_mipi_csi2_subdev_ops);
	strscpy(subdev->name, SUN6I_MIPI_CSI2_NAME, sizeof(subdev->name));
	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->owner = THIS_MODULE;
	subdev->dev = dev;

	v4l2_set_subdevdata(subdev, csi2_dev);

	/* Media Entity */
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	subdev->entity.ops = &sun6i_mipi_csi2_entity_ops;

	/* Media Pads */
	pads[SUN6I_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[SUN6I_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&subdev->entity, SUN6I_MIPI_CSI2_PAD_COUNT,
				     pads);
	if (ret)
		return ret;

	/* V4L2 Async */
	v4l2_async_nf_init(notifier);
	notifier->ops = &sun6i_mipi_csi2_notifier_ops;

	ret = sun6i_mipi_csi2_bridge_source_setup(csi2_dev);
	if (ret)
		goto error_v4l2_notifier_cleanup;

	ret = v4l2_async_subdev_nf_register(subdev, notifier);
	if (ret < 0)
		goto error_v4l2_notifier_cleanup;

	/* V4L2 Subdev */
	ret = v4l2_async_register_subdev(subdev);
	if (ret < 0)
		goto error_v4l2_notifier_unregister;

	return 0;

error_v4l2_notifier_unregister:
	v4l2_async_nf_unregister(notifier);

error_v4l2_notifier_cleanup:
	v4l2_async_nf_cleanup(notifier);

	media_entity_cleanup(&subdev->entity);

	return ret;
}
/* Undo sun6i_mipi_csi2_bridge_setup() in reverse order. */
static void
sun6i_mipi_csi2_bridge_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;

	v4l2_async_unregister_subdev(subdev);
	v4l2_async_nf_unregister(notifier);
	v4l2_async_nf_cleanup(notifier);
	media_entity_cleanup(&subdev->entity);
}
/* Platform */
/*
 * Runtime PM suspend: gate the module clock and assert the (shared) reset.
 * Registered only as a runtime callback, not for system sleep.
 */
static int sun6i_mipi_csi2_suspend(struct device *dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(csi2_dev->clock_mod);
	reset_control_assert(csi2_dev->reset);

	return 0;
}

/*
 * Runtime PM resume: deassert reset first, then enable the module clock.
 * On clock failure the reset is re-asserted so suspend/resume stay balanced.
 */
static int sun6i_mipi_csi2_resume(struct device *dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(csi2_dev->reset);
	if (ret) {
		dev_err(dev, "failed to deassert reset\n");
		return ret;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mod);
	if (ret) {
		dev_err(dev, "failed to enable module clock\n");
		goto error_reset;
	}

	return 0;

error_reset:
	reset_control_assert(csi2_dev->reset);

	return ret;
}

static const struct dev_pm_ops sun6i_mipi_csi2_pm_ops = {
	.runtime_suspend	= sun6i_mipi_csi2_suspend,
	.runtime_resume		= sun6i_mipi_csi2_resume,
};

/* 32-bit MMIO registers, 4-byte stride; 0x400 covers the whole block. */
static const struct regmap_config sun6i_mipi_csi2_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x400,
};
/*
 * Acquire all device resources: MMIO regmap, module clock (pinned to
 * 297 MHz with an exclusive rate claim), shared reset line and MIPI D-PHY,
 * then enable runtime PM.
 *
 * Fix: the original returned directly on reset/D-PHY errors, leaking the
 * exclusive rate claim taken by clk_set_rate_exclusive(); every failure
 * after that call must go through clk_rate_exclusive_put().
 */
static int
sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev,
				struct platform_device *platform_dev)
{
	struct device *dev = csi2_dev->dev;
	void __iomem *io_base;
	int ret;

	/* Registers */

	io_base = devm_platform_ioremap_resource(platform_dev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	csi2_dev->regmap =
		devm_regmap_init_mmio_clk(dev, "bus", io_base,
					  &sun6i_mipi_csi2_regmap_config);
	if (IS_ERR(csi2_dev->regmap)) {
		dev_err(dev, "failed to init register map\n");
		return PTR_ERR(csi2_dev->regmap);
	}

	/* Clock */

	csi2_dev->clock_mod = devm_clk_get(dev, "mod");
	if (IS_ERR(csi2_dev->clock_mod)) {
		dev_err(dev, "failed to acquire mod clock\n");
		return PTR_ERR(csi2_dev->clock_mod);
	}

	ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
	if (ret) {
		dev_err(dev, "failed to set mod clock rate\n");
		return ret;
	}

	/* Reset */

	csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(csi2_dev->reset)) {
		dev_err(dev, "failed to get reset controller\n");
		ret = PTR_ERR(csi2_dev->reset);
		goto error_clock_rate_exclusive;
	}

	/* D-PHY */

	csi2_dev->dphy = devm_phy_get(dev, "dphy");
	if (IS_ERR(csi2_dev->dphy)) {
		dev_err(dev, "failed to get MIPI D-PHY\n");
		ret = PTR_ERR(csi2_dev->dphy);
		goto error_clock_rate_exclusive;
	}

	ret = phy_init(csi2_dev->dphy);
	if (ret) {
		dev_err(dev, "failed to initialize MIPI D-PHY\n");
		goto error_clock_rate_exclusive;
	}

	/* Runtime PM */

	pm_runtime_enable(dev);

	return 0;

error_clock_rate_exclusive:
	clk_rate_exclusive_put(csi2_dev->clock_mod);

	return ret;
}
/*
 * Undo sun6i_mipi_csi2_resources_setup(): disable runtime PM, tear down
 * the D-PHY and drop the exclusive mod clock rate claim. Devres frees the
 * remaining resources.
 */
static void
sun6i_mipi_csi2_resources_cleanup(struct sun6i_mipi_csi2_device *csi2_dev)
{
	pm_runtime_disable(csi2_dev->dev);
	phy_exit(csi2_dev->dphy);
	clk_rate_exclusive_put(csi2_dev->clock_mod);
}
/*
 * Probe: allocate driver state, acquire resources, then set up the V4L2
 * bridge.
 *
 * Fix: the original returned directly when bridge setup failed, leaving
 * runtime PM enabled, the D-PHY initialized and the exclusive clock rate
 * claim held; resources must be cleaned up on that path.
 */
static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev;
	struct device *dev = &platform_dev->dev;
	int ret;

	csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
	if (!csi2_dev)
		return -ENOMEM;

	csi2_dev->dev = dev;
	platform_set_drvdata(platform_dev, csi2_dev);

	ret = sun6i_mipi_csi2_resources_setup(csi2_dev, platform_dev);
	if (ret)
		return ret;

	ret = sun6i_mipi_csi2_bridge_setup(csi2_dev);
	if (ret)
		goto error_resources;

	return 0;

error_resources:
	sun6i_mipi_csi2_resources_cleanup(csi2_dev);

	return ret;
}
/* Remove: tear down the bridge first, then release device resources. */
static int sun6i_mipi_csi2_remove(struct platform_device *platform_dev)
{
	struct sun6i_mipi_csi2_device *csi2_dev =
		platform_get_drvdata(platform_dev);

	sun6i_mipi_csi2_bridge_cleanup(csi2_dev);
	sun6i_mipi_csi2_resources_cleanup(csi2_dev);

	return 0;
}

static const struct of_device_id sun6i_mipi_csi2_of_match[] = {
	{ .compatible = "allwinner,sun6i-a31-mipi-csi2" },
	{},
};
MODULE_DEVICE_TABLE(of, sun6i_mipi_csi2_of_match);

static struct platform_driver sun6i_mipi_csi2_platform_driver = {
	.probe	= sun6i_mipi_csi2_probe,
	.remove	= sun6i_mipi_csi2_remove,
	.driver	= {
		.name		= SUN6I_MIPI_CSI2_NAME,
		.of_match_table	= of_match_ptr(sun6i_mipi_csi2_of_match),
		.pm		= &sun6i_mipi_csi2_pm_ops,
	},
};
module_platform_driver(sun6i_mipi_csi2_platform_driver);

MODULE_DESCRIPTION("Allwinner A31 MIPI CSI-2 Controller Driver");
MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2020-2022 Bootlin
 * Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 */

#ifndef _SUN6I_MIPI_CSI2_H_
#define _SUN6I_MIPI_CSI2_H_

#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>

#define SUN6I_MIPI_CSI2_NAME	"sun6i-mipi-csi2"

/* Media pad indices of the bridge subdev. */
enum sun6i_mipi_csi2_pad {
	SUN6I_MIPI_CSI2_PAD_SINK	= 0,
	SUN6I_MIPI_CSI2_PAD_SOURCE	= 1,
	SUN6I_MIPI_CSI2_PAD_COUNT	= 2,
};

/* Maps a media-bus code to its MIPI CSI-2 data type and bits per pixel. */
struct sun6i_mipi_csi2_format {
	u32	mbus_code;
	u8	data_type;
	u32	bpp;
};

/* V4L2 bridge state: subdev, pads, async notifier and negotiated format. */
struct sun6i_mipi_csi2_bridge {
	struct v4l2_subdev		subdev;
	struct media_pad		pads[SUN6I_MIPI_CSI2_PAD_COUNT];
	struct v4l2_fwnode_endpoint	endpoint;
	struct v4l2_async_notifier	notifier;
	struct v4l2_mbus_framefmt	mbus_format;
	struct mutex			lock; /* Mbus format lock. */
	/* Remote sensor subdev, set once the async notifier binds it. */
	struct v4l2_subdev		*source_subdev;
};

/* Per-device driver state. */
struct sun6i_mipi_csi2_device {
	struct device			*dev;

	struct regmap			*regmap;
	struct clk			*clock_mod;
	struct reset_control		*reset;
	struct phy			*dphy;

	struct sun6i_mipi_csi2_bridge	bridge;
};

#endif

View File

@ -0,0 +1,76 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _SUN6I_MIPI_CSI2_REG_H_
#define _SUN6I_MIPI_CSI2_REG_H_
#define SUN6I_MIPI_CSI2_CTL_REG 0x0
#define SUN6I_MIPI_CSI2_CTL_RESET_N BIT(31)
#define SUN6I_MIPI_CSI2_CTL_VERSION_EN BIT(30)
#define SUN6I_MIPI_CSI2_CTL_UNPK_EN BIT(1)
#define SUN6I_MIPI_CSI2_CTL_EN BIT(0)
#define SUN6I_MIPI_CSI2_CFG_REG 0x4
#define SUN6I_MIPI_CSI2_CFG_CHANNEL_MODE(v) ((((v) - 1) << 8) & \
GENMASK(9, 8))
#define SUN6I_MIPI_CSI2_CFG_LANE_COUNT(v) (((v) - 1) & GENMASK(1, 0))
#define SUN6I_MIPI_CSI2_VCDT_RX_REG 0x8
#define SUN6I_MIPI_CSI2_VCDT_RX_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
((ch) * 8 + 6))
#define SUN6I_MIPI_CSI2_VCDT_RX_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
((ch) * 8))
#define SUN6I_MIPI_CSI2_RX_PKT_NUM_REG 0xc
#define SUN6I_MIPI_CSI2_VERSION_REG 0x3c
#define SUN6I_MIPI_CSI2_CH_CFG_REG 0x40
#define SUN6I_MIPI_CSI2_CH_INT_EN_REG 0x50
#define SUN6I_MIPI_CSI2_CH_INT_EN_EOT_ERR BIT(29)
#define SUN6I_MIPI_CSI2_CH_INT_EN_CHKSUM_ERR BIT(28)
#define SUN6I_MIPI_CSI2_CH_INT_EN_ECC_WRN BIT(27)
#define SUN6I_MIPI_CSI2_CH_INT_EN_ECC_ERR BIT(26)
#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_SYNC_ERR BIT(25)
#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_SYNC_ERR BIT(24)
#define SUN6I_MIPI_CSI2_CH_INT_EN_EMB_DATA BIT(18)
#define SUN6I_MIPI_CSI2_CH_INT_EN_PF BIT(17)
#define SUN6I_MIPI_CSI2_CH_INT_EN_PH_UPDATE BIT(16)
#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_START_SYNC BIT(11)
#define SUN6I_MIPI_CSI2_CH_INT_EN_LINE_END_SYNC BIT(10)
#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_START_SYNC BIT(9)
#define SUN6I_MIPI_CSI2_CH_INT_EN_FRAME_END_SYNC BIT(8)
#define SUN6I_MIPI_CSI2_CH_INT_EN_FIFO_OVER BIT(0)
#define SUN6I_MIPI_CSI2_CH_INT_PD_REG 0x58
#define SUN6I_MIPI_CSI2_CH_INT_PD_CLEAR 0xff
#define SUN6I_MIPI_CSI2_CH_INT_PD_EOT_ERR BIT(29)
#define SUN6I_MIPI_CSI2_CH_INT_PD_CHKSUM_ERR BIT(28)
#define SUN6I_MIPI_CSI2_CH_INT_PD_ECC_WRN BIT(27)
#define SUN6I_MIPI_CSI2_CH_INT_PD_ECC_ERR BIT(26)
#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_SYNC_ERR BIT(25)
#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_SYNC_ERR BIT(24)
#define SUN6I_MIPI_CSI2_CH_INT_PD_EMB_DATA BIT(18)
#define SUN6I_MIPI_CSI2_CH_INT_PD_PF BIT(17)
#define SUN6I_MIPI_CSI2_CH_INT_PD_PH_UPDATE BIT(16)
#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_START_SYNC BIT(11)
#define SUN6I_MIPI_CSI2_CH_INT_PD_LINE_END_SYNC BIT(10)
#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_START_SYNC BIT(9)
#define SUN6I_MIPI_CSI2_CH_INT_PD_FRAME_END_SYNC BIT(8)
#define SUN6I_MIPI_CSI2_CH_INT_PD_FIFO_OVER BIT(0)
#define SUN6I_MIPI_CSI2_CH_DT_TRIGGER_REG 0x60
#define SUN6I_MIPI_CSI2_CH_CUR_PH_REG 0x70
#define SUN6I_MIPI_CSI2_CH_ECC_REG 0x74
#define SUN6I_MIPI_CSI2_CH_CKS_REG 0x78
#define SUN6I_MIPI_CSI2_CH_FRAME_NUM_REG 0x7c
#define SUN6I_MIPI_CSI2_CH_LINE_NUM_REG 0x80
#define SUN6I_MIPI_CSI2_CH_OFFSET 0x100
#define SUN6I_MIPI_CSI2_CH_REG(reg, ch) \
(SUN6I_MIPI_CSI2_CH_OFFSET * (ch) + (reg))
#endif

View File

@ -0,0 +1,13 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_SUN8I_A83T_MIPI_CSI2
tristate "Allwinner A83T MIPI CSI-2 Controller and D-PHY Driver"
depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
depends on ARCH_SUNXI || COMPILE_TEST
depends on PM && COMMON_CLK
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select V4L2_FWNODE
select REGMAP_MMIO
select GENERIC_PHY_MIPI_DPHY
help
Support for the Allwinner A83T MIPI CSI-2 controller and D-PHY.

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
sun8i-a83t-mipi-csi2-y += sun8i_a83t_mipi_csi2.o sun8i_a83t_dphy.o
obj-$(CONFIG_VIDEO_SUN8I_A83T_MIPI_CSI2) += sun8i-a83t-mipi-csi2.o

View File

@ -0,0 +1,72 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include "sun8i_a83t_dphy.h"
#include "sun8i_a83t_mipi_csi2.h"
/*
 * Validate the requested MIPI D-PHY timings; this callback touches no
 * hardware, actual programming happens on power-on.
 */
static int sun8i_a83t_dphy_configure(struct phy *dphy,
				     union phy_configure_opts *opts)
{
	struct phy_configure_opts_mipi_dphy *config = &opts->mipi_dphy;

	return phy_mipi_dphy_config_validate(config);
}
/*
 * Power on the D-PHY: release reset and shutdown, then program the analog
 * block.
 *
 * NOTE(review): the RINT/SNK values of 2 look like vendor-provided analog
 * tuning — not documented here, confirm against the BSP if changed.
 *
 * Fix: dropped the stray ';' that followed both function bodies (a
 * file-scope empty declaration, rejected by strict ISO C and flagged by
 * -Wpedantic).
 */
static int sun8i_a83t_dphy_power_on(struct phy *dphy)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy);
	struct regmap *regmap = csi2_dev->regmap;

	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
		     SUN8I_A83T_DPHY_CTRL_RESET_N |
		     SUN8I_A83T_DPHY_CTRL_SHUTDOWN_N);

	regmap_write(regmap, SUN8I_A83T_DPHY_ANA0_REG,
		     SUN8I_A83T_DPHY_ANA0_REXT_EN |
		     SUN8I_A83T_DPHY_ANA0_RINT(2) |
		     SUN8I_A83T_DPHY_ANA0_SNK(2));

	return 0;
}

/* Power off the D-PHY by clearing the control register (reset + shutdown). */
static int sun8i_a83t_dphy_power_off(struct phy *dphy)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = phy_get_drvdata(dphy);
	struct regmap *regmap = csi2_dev->regmap;

	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);

	return 0;
}
static const struct phy_ops sun8i_a83t_dphy_ops = {
	.configure	= sun8i_a83t_dphy_configure,
	.power_on	= sun8i_a83t_dphy_power_on,
	.power_off	= sun8i_a83t_dphy_power_off,
};

/*
 * Create the D-PHY (managed, no separate device node: the PHY shares the
 * CSI-2 controller's register space) and register a PHY provider for it.
 */
int sun8i_a83t_dphy_register(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct device *dev = csi2_dev->dev;
	struct phy_provider *phy_provider;

	csi2_dev->dphy = devm_phy_create(dev, NULL, &sun8i_a83t_dphy_ops);
	if (IS_ERR(csi2_dev->dphy)) {
		dev_err(dev, "failed to create D-PHY\n");
		return PTR_ERR(csi2_dev->dphy);
	}

	phy_set_drvdata(csi2_dev->dphy, csi2_dev);

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		dev_err(dev, "failed to register D-PHY provider\n");
		return PTR_ERR(phy_provider);
	}

	return 0;
}

View File

@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _SUN8I_A83T_DPHY_H_
#define _SUN8I_A83T_DPHY_H_
#include "sun8i_a83t_mipi_csi2.h"
#define SUN8I_A83T_DPHY_CTRL_REG 0x10
#define SUN8I_A83T_DPHY_CTRL_INIT_VALUE 0xb8df698e
#define SUN8I_A83T_DPHY_CTRL_RESET_N BIT(31)
#define SUN8I_A83T_DPHY_CTRL_SHUTDOWN_N BIT(15)
#define SUN8I_A83T_DPHY_CTRL_DEBUG BIT(8)
#define SUN8I_A83T_DPHY_STATUS_REG 0x14
#define SUN8I_A83T_DPHY_STATUS_CLK_STOP BIT(10)
#define SUN8I_A83T_DPHY_STATUS_CLK_ULPS BIT(9)
#define SUN8I_A83T_DPHY_STATUS_HSCLK BIT(8)
#define SUN8I_A83T_DPHY_STATUS_D3_STOP BIT(7)
#define SUN8I_A83T_DPHY_STATUS_D2_STOP BIT(6)
#define SUN8I_A83T_DPHY_STATUS_D1_STOP BIT(5)
#define SUN8I_A83T_DPHY_STATUS_D0_STOP BIT(4)
#define SUN8I_A83T_DPHY_STATUS_D3_ULPS BIT(3)
#define SUN8I_A83T_DPHY_STATUS_D2_ULPS BIT(2)
#define SUN8I_A83T_DPHY_STATUS_D1_ULPS BIT(1)
#define SUN8I_A83T_DPHY_STATUS_D0_ULPS BIT(0)
#define SUN8I_A83T_DPHY_ANA0_REG 0x30
#define SUN8I_A83T_DPHY_ANA0_REXT_EN BIT(31)
#define SUN8I_A83T_DPHY_ANA0_REXT BIT(30)
#define SUN8I_A83T_DPHY_ANA0_RINT(v) (((v) << 28) & GENMASK(29, 28))
#define SUN8I_A83T_DPHY_ANA0_SNK(v) (((v) << 20) & GENMASK(22, 20))
int sun8i_a83t_dphy_register(struct sun8i_a83t_mipi_csi2_device *csi2_dev);
#endif

View File

@ -0,0 +1,816 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/mipi-csi2.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>
#include "sun8i_a83t_dphy.h"
#include "sun8i_a83t_mipi_csi2.h"
#include "sun8i_a83t_mipi_csi2_reg.h"
/* Format */
/*
 * Supported formats: the four 8-bit and four 10-bit raw Bayer orders,
 * each mapped to its MIPI CSI-2 RAW data type and bits-per-pixel.
 */
static const struct sun8i_a83t_mipi_csi2_format
sun8i_a83t_mipi_csi2_formats[] = {
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB8_1X8,
		.data_type	= MIPI_CSI2_DT_RAW8,
		.bpp		= 8,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SBGGR10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGBRG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SGRBG10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
	{
		.mbus_code	= MEDIA_BUS_FMT_SRGGB10_1X10,
		.data_type	= MIPI_CSI2_DT_RAW10,
		.bpp		= 10,
	},
};
/* Look up a supported format by media-bus code; NULL when unsupported. */
static const struct sun8i_a83t_mipi_csi2_format *
sun8i_a83t_mipi_csi2_format_find(u32 mbus_code)
{
	const struct sun8i_a83t_mipi_csi2_format *format;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats); i++) {
		format = &sun8i_a83t_mipi_csi2_formats[i];
		if (format->mbus_code == mbus_code)
			return format;
	}

	return NULL;
}
/* Controller */
/*
 * One-time controller init performed on runtime resume. The exact
 * zero-then-magic write sequence below is order-sensitive: do not reorder.
 */
static void
sun8i_a83t_mipi_csi2_init(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;

	/*
	 * The Allwinner BSP sets various magic values on a bunch of registers.
	 * This is apparently a necessary initialization process that will cause
	 * the capture to fail with unsolicited interrupts hitting if skipped.
	 *
	 * Most of the registers are set to proper values later, except for the
	 * two reserved registers. They are said to hold a "hardware lock"
	 * value, without more information available.
	 */

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
		     SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG,
		     SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG, 0);
	regmap_write(regmap, SUN8I_A83T_DPHY_CTRL_REG,
		     SUN8I_A83T_DPHY_CTRL_INIT_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD1_REG,
		     SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_RSVD2_REG,
		     SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG, 0);
	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
		     SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE);
}
/* Start capture by raising the sync enable bit in the config register. */
static void
sun8i_a83t_mipi_csi2_enable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	regmap_update_bits(csi2_dev->regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN);
}

/* Stop capture: clear sync enable, then zero the whole control register. */
static void
sun8i_a83t_mipi_csi2_disable(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	regmap_update_bits(csi2_dev->regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
			   SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN, 0);

	regmap_write(csi2_dev->regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG, 0);
}
/*
 * Program the controller for the currently negotiated mbus format and the
 * lane count taken from the DT endpoint: release reset, set lane/channel
 * config and route virtual channel 0 with the format's data type.
 */
static void
sun8i_a83t_mipi_csi2_configure(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct regmap *regmap = csi2_dev->regmap;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun8i_a83t_mipi_csi2_format *format;
	struct device *dev = csi2_dev->dev;
	u32 version = 0;

	/* The mbus format was sanitized by set_fmt, so this cannot fail. */
	format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format))
		return;

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CTRL_REG,
		     SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N);

	regmap_read(regmap, SUN8I_A83T_MIPI_CSI2_VERSION_REG, &version);

	dev_dbg(dev, "A83T MIPI CSI-2 version: %04x\n", version);

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_CFG_REG,
		     SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN |
		     SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(8) |
		     SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(1) |
		     SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(lanes_count));

	/*
	 * Only a single virtual channel (index 0) is currently supported.
	 * While the registers do mention multiple physical channels being
	 * available (which can be configured to match a specific virtual
	 * channel or data type), it's unclear whether channels > 0 are actually
	 * connected and available and the reference source code only makes use
	 * of channel 0.
	 *
	 * Using extra channels would also require matching channels to be
	 * available on the CSI (and ISP) side, which is also unsure although
	 * some CSI implementations are said to support multiple channels for
	 * BT656 time-sharing.
	 *
	 * We still configure virtual channel numbers to ensure that virtual
	 * channel 0 only goes to channel 0.
	 */

	regmap_write(regmap, SUN8I_A83T_MIPI_CSI2_VCDT0_REG,
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(3, 3) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(2, 2) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(1, 1) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(0, 0) |
		     SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(0, format->data_type));
}
/* V4L2 Subdev */
/*
 * Start/stop streaming. On start: resume the device, derive D-PHY timings
 * from the sensor's pixel rate, the negotiated format's bpp and the lane
 * count, configure and enable the controller and PHY, then start the
 * source. On stop: stop the source, power off the PHY, disable the
 * controller and drop the runtime PM reference.
 */
static int sun8i_a83t_mipi_csi2_s_stream(struct v4l2_subdev *subdev, int on)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_subdev *source_subdev = csi2_dev->bridge.source_subdev;
	union phy_configure_opts dphy_opts = { 0 };
	struct phy_configure_opts_mipi_dphy *dphy_cfg = &dphy_opts.mipi_dphy;
	struct v4l2_mbus_framefmt *mbus_format = &csi2_dev->bridge.mbus_format;
	const struct sun8i_a83t_mipi_csi2_format *format;
	struct phy *dphy = csi2_dev->dphy;
	struct device *dev = csi2_dev->dev;
	struct v4l2_ctrl *ctrl;
	unsigned int lanes_count =
		csi2_dev->bridge.endpoint.bus.mipi_csi2.num_data_lanes;
	unsigned long pixel_rate;
	int ret;

	/* No sensor bound yet: nothing to stream from. */
	if (!source_subdev)
		return -ENODEV;

	if (!on) {
		/* Stop the source first, then fall into the teardown path. */
		ret = v4l2_subdev_call(source_subdev, video, s_stream, 0);
		goto disable;
	}

	/* Runtime PM */

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* Sensor pixel rate, required to size the D-PHY link clock. */

	ctrl = v4l2_ctrl_find(source_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (!ctrl) {
		dev_err(dev, "missing sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	pixel_rate = (unsigned long)v4l2_ctrl_g_ctrl_int64(ctrl);
	if (!pixel_rate) {
		dev_err(dev, "missing (zero) sensor pixel rate\n");
		ret = -ENODEV;
		goto error_pm;
	}

	/* D-PHY */

	if (!lanes_count) {
		dev_err(dev, "missing (zero) MIPI CSI-2 lanes count\n");
		ret = -ENODEV;
		goto error_pm;
	}

	/* The mbus format was sanitized by set_fmt, so this cannot fail. */
	format = sun8i_a83t_mipi_csi2_format_find(mbus_format->code);
	if (WARN_ON(!format)) {
		ret = -ENODEV;
		goto error_pm;
	}

	phy_mipi_dphy_get_default_config(pixel_rate, format->bpp, lanes_count,
					 dphy_cfg);

	/*
	 * Note that our hardware is using DDR, which is not taken in account by
	 * phy_mipi_dphy_get_default_config when calculating hs_clk_rate from
	 * the pixel rate, lanes count and bpp.
	 *
	 * The resulting clock rate is basically the symbol rate over the whole
	 * link. The actual clock rate is calculated with division by two since
	 * DDR samples both on rising and falling edges.
	 */

	dev_dbg(dev, "A83T MIPI CSI-2 config:\n");
	dev_dbg(dev, "%ld pixels/s, %u bits/pixel, %u lanes, %lu Hz clock\n",
		pixel_rate, format->bpp, lanes_count,
		dphy_cfg->hs_clk_rate / 2);

	ret = phy_reset(dphy);
	if (ret) {
		dev_err(dev, "failed to reset MIPI D-PHY\n");
		goto error_pm;
	}

	ret = phy_configure(dphy, &dphy_opts);
	if (ret) {
		dev_err(dev, "failed to configure MIPI D-PHY\n");
		goto error_pm;
	}

	/* Controller */

	sun8i_a83t_mipi_csi2_configure(csi2_dev);
	sun8i_a83t_mipi_csi2_enable(csi2_dev);

	/* D-PHY */

	ret = phy_power_on(dphy);
	if (ret) {
		dev_err(dev, "failed to power on MIPI D-PHY\n");
		goto error_pm;
	}

	/* Source */

	ret = v4l2_subdev_call(source_subdev, video, s_stream, 1);
	if (ret && ret != -ENOIOCTLCMD)
		goto disable;

	return 0;

disable:
	/* Shared with the stop path: a stop request is not an error. */
	if (!on)
		ret = 0;
	phy_power_off(dphy);
	sun8i_a83t_mipi_csi2_disable(csi2_dev);

error_pm:
	pm_runtime_put(dev);

	return ret;
}

static const struct v4l2_subdev_video_ops
sun8i_a83t_mipi_csi2_video_ops = {
	.s_stream	= sun8i_a83t_mipi_csi2_s_stream,
};
static void
sun8i_a83t_mipi_csi2_mbus_format_prepare(struct v4l2_mbus_framefmt *mbus_format)
{
if (!sun8i_a83t_mipi_csi2_format_find(mbus_format->code))
mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
mbus_format->field = V4L2_FIELD_NONE;
mbus_format->colorspace = V4L2_COLORSPACE_RAW;
mbus_format->quantization = V4L2_QUANTIZATION_DEFAULT;
mbus_format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
}
/* Initialize the try format on the sink pad to a sane 640x480 default. */
static int sun8i_a83t_mipi_csi2_init_cfg(struct v4l2_subdev *subdev,
					 struct v4l2_subdev_state *state)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	unsigned int pad = SUN8I_A83T_MIPI_CSI2_PAD_SINK;
	struct v4l2_mbus_framefmt *mbus_format =
		v4l2_subdev_get_try_format(subdev, state, pad);
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	mbus_format->code = sun8i_a83t_mipi_csi2_formats[0].mbus_code;
	mbus_format->width = 640;
	mbus_format->height = 480;

	sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);

	mutex_unlock(lock);

	return 0;
}

/* Enumerate the supported media-bus codes from the formats table. */
static int
sun8i_a83t_mipi_csi2_enum_mbus_code(struct v4l2_subdev *subdev,
				    struct v4l2_subdev_state *state,
				    struct v4l2_subdev_mbus_code_enum *code_enum)
{
	if (code_enum->index >= ARRAY_SIZE(sun8i_a83t_mipi_csi2_formats))
		return -EINVAL;

	code_enum->code =
		sun8i_a83t_mipi_csi2_formats[code_enum->index].mbus_code;

	return 0;
}

/* Return the try or active format, guarded by the bridge format lock. */
static int sun8i_a83t_mipi_csi2_get_fmt(struct v4l2_subdev *subdev,
					struct v4l2_subdev_state *state,
					struct v4l2_subdev_format *format)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*mbus_format = *v4l2_subdev_get_try_format(subdev, state,
							   format->pad);
	else
		*mbus_format = csi2_dev->bridge.mbus_format;

	mutex_unlock(lock);

	return 0;
}

/* Sanitize and store the try or active format. */
static int sun8i_a83t_mipi_csi2_set_fmt(struct v4l2_subdev *subdev,
					struct v4l2_subdev_state *state,
					struct v4l2_subdev_format *format)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		v4l2_get_subdevdata(subdev);
	struct v4l2_mbus_framefmt *mbus_format = &format->format;
	struct mutex *lock = &csi2_dev->bridge.lock;

	mutex_lock(lock);

	sun8i_a83t_mipi_csi2_mbus_format_prepare(mbus_format);

	if (format->which == V4L2_SUBDEV_FORMAT_TRY)
		*v4l2_subdev_get_try_format(subdev, state, format->pad) =
			*mbus_format;
	else
		csi2_dev->bridge.mbus_format = *mbus_format;

	mutex_unlock(lock);

	return 0;
}

static const struct v4l2_subdev_pad_ops sun8i_a83t_mipi_csi2_pad_ops = {
	.init_cfg	= sun8i_a83t_mipi_csi2_init_cfg,
	.enum_mbus_code	= sun8i_a83t_mipi_csi2_enum_mbus_code,
	.get_fmt	= sun8i_a83t_mipi_csi2_get_fmt,
	.set_fmt	= sun8i_a83t_mipi_csi2_set_fmt,
};

static const struct v4l2_subdev_ops sun8i_a83t_mipi_csi2_subdev_ops = {
	.video	= &sun8i_a83t_mipi_csi2_video_ops,
	.pad	= &sun8i_a83t_mipi_csi2_pad_ops,
};

/* Media Entity */

static const struct media_entity_operations sun8i_a83t_mipi_csi2_entity_ops = {
	.link_validate	= v4l2_subdev_link_validate,
};
/* V4L2 Async */
/*
 * Async notifier "bound" callback: when the sensor subdev shows up, create
 * an immutable, enabled link from its source pad to our sink pad and
 * remember it as the stream source.
 */
static int
sun8i_a83t_mipi_csi2_notifier_bound(struct v4l2_async_notifier *notifier,
				    struct v4l2_subdev *remote_subdev,
				    struct v4l2_async_subdev *async_subdev)
{
	struct v4l2_subdev *subdev = notifier->sd;
	struct sun8i_a83t_mipi_csi2_device *csi2_dev =
		container_of(notifier, struct sun8i_a83t_mipi_csi2_device,
			     bridge.notifier);
	struct media_entity *sink_entity = &subdev->entity;
	struct media_entity *source_entity = &remote_subdev->entity;
	struct device *dev = csi2_dev->dev;
	int sink_pad_index = 0;
	int source_pad_index;
	int ret;

	ret = media_entity_get_fwnode_pad(source_entity, remote_subdev->fwnode,
					  MEDIA_PAD_FL_SOURCE);
	if (ret < 0) {
		dev_err(dev, "missing source pad in external entity %s\n",
			source_entity->name);
		return -EINVAL;
	}

	source_pad_index = ret;

	dev_dbg(dev, "creating %s:%u -> %s:%u link\n", source_entity->name,
		source_pad_index, sink_entity->name, sink_pad_index);

	ret = media_create_pad_link(source_entity, source_pad_index,
				    sink_entity, sink_pad_index,
				    MEDIA_LNK_FL_ENABLED |
				    MEDIA_LNK_FL_IMMUTABLE);
	if (ret) {
		dev_err(dev, "failed to create %s:%u -> %s:%u link\n",
			source_entity->name, source_pad_index,
			sink_entity->name, sink_pad_index);
		return ret;
	}

	csi2_dev->bridge.source_subdev = remote_subdev;

	return 0;
}

static const struct v4l2_async_notifier_operations
sun8i_a83t_mipi_csi2_notifier_ops = {
	.bound	= sun8i_a83t_mipi_csi2_notifier_bound,
};

/* Bridge */

/*
 * Parse our first fwnode endpoint as a CSI-2 D-PHY bus description and add
 * its remote (the sensor) to the async notifier.
 */
static int
sun8i_a83t_mipi_csi2_bridge_source_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;
	struct v4l2_fwnode_endpoint *endpoint = &csi2_dev->bridge.endpoint;
	struct v4l2_async_subdev *subdev_async;
	struct fwnode_handle *handle;
	struct device *dev = csi2_dev->dev;
	int ret;

	handle = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), 0, 0,
						 FWNODE_GRAPH_ENDPOINT_NEXT);
	if (!handle)
		return -ENODEV;

	/* Endpoint bus type must be set before parsing. */
	endpoint->bus_type = V4L2_MBUS_CSI2_DPHY;

	ret = v4l2_fwnode_endpoint_parse(handle, endpoint);
	if (ret)
		goto complete;

	subdev_async =
		v4l2_async_nf_add_fwnode_remote(notifier, handle,
						struct v4l2_async_subdev);
	if (IS_ERR(subdev_async))
		ret = PTR_ERR(subdev_async);

complete:
	fwnode_handle_put(handle);

	return ret;
}
/*
 * Set up the V4L2 side of the bridge: subdev, media entity and pads, async
 * notifier for the upstream sensor, then register everything. Matches the
 * sun6i variant structurally.
 */
static int
sun8i_a83t_mipi_csi2_bridge_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct sun8i_a83t_mipi_csi2_bridge *bridge = &csi2_dev->bridge;
	struct v4l2_subdev *subdev = &bridge->subdev;
	struct v4l2_async_notifier *notifier = &bridge->notifier;
	struct media_pad *pads = bridge->pads;
	struct device *dev = csi2_dev->dev;
	int ret;

	/* Protects bridge->mbus_format (see struct definition). */
	mutex_init(&bridge->lock);

	/* V4L2 Subdev */

	v4l2_subdev_init(subdev, &sun8i_a83t_mipi_csi2_subdev_ops);
	strscpy(subdev->name, SUN8I_A83T_MIPI_CSI2_NAME, sizeof(subdev->name));
	subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	subdev->owner = THIS_MODULE;
	subdev->dev = dev;

	v4l2_set_subdevdata(subdev, csi2_dev);

	/* Media Entity */

	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
	subdev->entity.ops = &sun8i_a83t_mipi_csi2_entity_ops;

	/* Media Pads: one sink (from the sensor), one source (to the CSI). */

	pads[SUN8I_A83T_MIPI_CSI2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
	pads[SUN8I_A83T_MIPI_CSI2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;

	ret = media_entity_pads_init(&subdev->entity,
				     SUN8I_A83T_MIPI_CSI2_PAD_COUNT, pads);
	if (ret)
		return ret;

	/* V4L2 Async */

	v4l2_async_nf_init(notifier);
	notifier->ops = &sun8i_a83t_mipi_csi2_notifier_ops;

	ret = sun8i_a83t_mipi_csi2_bridge_source_setup(csi2_dev);
	if (ret)
		goto error_v4l2_notifier_cleanup;

	ret = v4l2_async_subdev_nf_register(subdev, notifier);
	if (ret < 0)
		goto error_v4l2_notifier_cleanup;

	/* V4L2 Subdev */

	ret = v4l2_async_register_subdev(subdev);
	if (ret < 0)
		goto error_v4l2_notifier_unregister;

	return 0;

error_v4l2_notifier_unregister:
	v4l2_async_nf_unregister(notifier);

error_v4l2_notifier_cleanup:
	v4l2_async_nf_cleanup(notifier);

	media_entity_cleanup(&subdev->entity);

	return ret;
}

/*
 * Tear down the bridge: unregister the subdev and notifier, then release
 * the media entity. Reverse order of bridge_setup().
 */
static void
sun8i_a83t_mipi_csi2_bridge_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	struct v4l2_subdev *subdev = &csi2_dev->bridge.subdev;
	struct v4l2_async_notifier *notifier = &csi2_dev->bridge.notifier;

	v4l2_async_unregister_subdev(subdev);
	v4l2_async_nf_unregister(notifier);
	v4l2_async_nf_cleanup(notifier);
	media_entity_cleanup(&subdev->entity);
}
/* Platform */
/*
 * Runtime PM suspend: gate the three clocks in reverse enable order, then
 * assert the (shared) reset. Registered only as a runtime callback.
 */
static int sun8i_a83t_mipi_csi2_suspend(struct device *dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);

	clk_disable_unprepare(csi2_dev->clock_misc);
	clk_disable_unprepare(csi2_dev->clock_mipi);
	clk_disable_unprepare(csi2_dev->clock_mod);
	reset_control_assert(csi2_dev->reset);

	return 0;
}

/*
 * Runtime PM resume: deassert reset, enable mod/mipi/misc clocks in order,
 * then run the mandatory controller init sequence. Each failure unwinds
 * exactly what was brought up before it.
 */
static int sun8i_a83t_mipi_csi2_resume(struct device *dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev = dev_get_drvdata(dev);
	int ret;

	ret = reset_control_deassert(csi2_dev->reset);
	if (ret) {
		dev_err(dev, "failed to deassert reset\n");
		return ret;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mod);
	if (ret) {
		dev_err(dev, "failed to enable module clock\n");
		goto error_reset;
	}

	ret = clk_prepare_enable(csi2_dev->clock_mipi);
	if (ret) {
		dev_err(dev, "failed to enable MIPI clock\n");
		goto error_clock_mod;
	}

	ret = clk_prepare_enable(csi2_dev->clock_misc);
	if (ret) {
		dev_err(dev, "failed to enable CSI misc clock\n");
		goto error_clock_mipi;
	}

	sun8i_a83t_mipi_csi2_init(csi2_dev);

	return 0;

error_clock_mipi:
	clk_disable_unprepare(csi2_dev->clock_mipi);

error_clock_mod:
	clk_disable_unprepare(csi2_dev->clock_mod);

error_reset:
	reset_control_assert(csi2_dev->reset);

	return ret;
}

static const struct dev_pm_ops sun8i_a83t_mipi_csi2_pm_ops = {
	.runtime_suspend	= sun8i_a83t_mipi_csi2_suspend,
	.runtime_resume		= sun8i_a83t_mipi_csi2_resume,
};

/* 32-bit MMIO registers, 4-byte stride; 0x120 covers the whole block. */
static const struct regmap_config sun8i_a83t_mipi_csi2_regmap_config = {
	.reg_bits	= 32,
	.reg_stride	= 4,
	.val_bits	= 32,
	.max_register	= 0x120,
};
/*
 * Acquire all device resources: MMIO regmap, mod/mipi/misc clocks (mod
 * pinned to 297 MHz with an exclusive rate claim), shared reset line and
 * the integrated D-PHY, then enable runtime PM.
 *
 * Fix: the original returned directly on every failure after
 * clk_set_rate_exclusive() succeeded, leaking the exclusive rate claim;
 * those paths must go through clk_rate_exclusive_put().
 */
static int
sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_dev,
				     struct platform_device *platform_dev)
{
	struct device *dev = csi2_dev->dev;
	void __iomem *io_base;
	int ret;

	/* Registers */

	io_base = devm_platform_ioremap_resource(platform_dev, 0);
	if (IS_ERR(io_base))
		return PTR_ERR(io_base);

	csi2_dev->regmap =
		devm_regmap_init_mmio_clk(dev, "bus", io_base,
					  &sun8i_a83t_mipi_csi2_regmap_config);
	if (IS_ERR(csi2_dev->regmap)) {
		dev_err(dev, "failed to init register map\n");
		return PTR_ERR(csi2_dev->regmap);
	}

	/* Clocks */

	csi2_dev->clock_mod = devm_clk_get(dev, "mod");
	if (IS_ERR(csi2_dev->clock_mod)) {
		dev_err(dev, "failed to acquire mod clock\n");
		return PTR_ERR(csi2_dev->clock_mod);
	}

	ret = clk_set_rate_exclusive(csi2_dev->clock_mod, 297000000);
	if (ret) {
		dev_err(dev, "failed to set mod clock rate\n");
		return ret;
	}

	csi2_dev->clock_mipi = devm_clk_get(dev, "mipi");
	if (IS_ERR(csi2_dev->clock_mipi)) {
		dev_err(dev, "failed to acquire mipi clock\n");
		ret = PTR_ERR(csi2_dev->clock_mipi);
		goto error_clock_rate_exclusive;
	}

	csi2_dev->clock_misc = devm_clk_get(dev, "misc");
	if (IS_ERR(csi2_dev->clock_misc)) {
		dev_err(dev, "failed to acquire misc clock\n");
		ret = PTR_ERR(csi2_dev->clock_misc);
		goto error_clock_rate_exclusive;
	}

	/* Reset */

	csi2_dev->reset = devm_reset_control_get_shared(dev, NULL);
	if (IS_ERR(csi2_dev->reset)) {
		dev_err(dev, "failed to get reset controller\n");
		ret = PTR_ERR(csi2_dev->reset);
		goto error_clock_rate_exclusive;
	}

	/* D-PHY */

	ret = sun8i_a83t_dphy_register(csi2_dev);
	if (ret) {
		dev_err(dev, "failed to initialize MIPI D-PHY\n");
		goto error_clock_rate_exclusive;
	}

	/* Runtime PM */

	pm_runtime_enable(dev);

	return 0;

error_clock_rate_exclusive:
	clk_rate_exclusive_put(csi2_dev->clock_mod);

	return ret;
}
/*
 * Undo sun8i_a83t_mipi_csi2_resources_setup(): disable runtime PM, tear
 * down the D-PHY and drop the exclusive mod clock rate claim. Devres frees
 * the remaining resources.
 */
static void
sun8i_a83t_mipi_csi2_resources_cleanup(struct sun8i_a83t_mipi_csi2_device *csi2_dev)
{
	pm_runtime_disable(csi2_dev->dev);
	phy_exit(csi2_dev->dphy);
	clk_rate_exclusive_put(csi2_dev->clock_mod);
}
/*
 * Probe: allocate driver state, acquire resources, then set up the V4L2
 * bridge.
 *
 * Fix: the original returned directly when bridge setup failed, leaving
 * runtime PM enabled, the D-PHY created and the exclusive clock rate claim
 * held; resources must be cleaned up on that path.
 */
static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev)
{
	struct sun8i_a83t_mipi_csi2_device *csi2_dev;
	struct device *dev = &platform_dev->dev;
	int ret;

	csi2_dev = devm_kzalloc(dev, sizeof(*csi2_dev), GFP_KERNEL);
	if (!csi2_dev)
		return -ENOMEM;

	csi2_dev->dev = dev;
	platform_set_drvdata(platform_dev, csi2_dev);

	ret = sun8i_a83t_mipi_csi2_resources_setup(csi2_dev, platform_dev);
	if (ret)
		return ret;

	ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev);
	if (ret)
		goto error_resources;

	return 0;

error_resources:
	sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);

	return ret;
}
static int sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev)
{
struct sun8i_a83t_mipi_csi2_device *csi2_dev =
platform_get_drvdata(platform_dev);
sun8i_a83t_mipi_csi2_bridge_cleanup(csi2_dev);
sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev);
return 0;
}
/* Device-tree match table; MODULE_DEVICE_TABLE exports it for module autoload. */
static const struct of_device_id sun8i_a83t_mipi_csi2_of_match[] = {
	{ .compatible = "allwinner,sun8i-a83t-mipi-csi2" },
	{},
};
MODULE_DEVICE_TABLE(of, sun8i_a83t_mipi_csi2_of_match);
static struct platform_driver sun8i_a83t_mipi_csi2_platform_driver = {
	.probe	= sun8i_a83t_mipi_csi2_probe,
	.remove	= sun8i_a83t_mipi_csi2_remove,
	.driver	= {
		.name		= SUN8I_A83T_MIPI_CSI2_NAME,
		/*
		 * Reference the match table directly instead of through
		 * of_match_ptr(): the table is defined unconditionally (and
		 * used by MODULE_DEVICE_TABLE above), so of_match_ptr()
		 * would only produce a defined-but-unused warning on
		 * !CONFIG_OF builds while saving nothing.
		 */
		.of_match_table	= sun8i_a83t_mipi_csi2_of_match,
		.pm		= &sun8i_a83t_mipi_csi2_pm_ops,
	},
};
module_platform_driver(sun8i_a83t_mipi_csi2_platform_driver);

MODULE_DESCRIPTION("Allwinner A83T MIPI CSI-2 and D-PHY Controller Driver");
MODULE_AUTHOR("Paul Kocialkowski <paul.kocialkowski@bootlin.com>");
MODULE_LICENSE("GPL");

View File

@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _SUN8I_A83T_MIPI_CSI2_H_
#define _SUN8I_A83T_MIPI_CSI2_H_

#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <media/v4l2-device.h>
#include <media/v4l2-fwnode.h>

#define SUN8I_A83T_MIPI_CSI2_NAME "sun8i-a83t-mipi-csi2"

/* Media pads of the bridge subdev: sensor-facing sink, CSI-facing source. */
enum sun8i_a83t_mipi_csi2_pad {
	SUN8I_A83T_MIPI_CSI2_PAD_SINK = 0,
	SUN8I_A83T_MIPI_CSI2_PAD_SOURCE = 1,
	SUN8I_A83T_MIPI_CSI2_PAD_COUNT = 2,
};

/* Format description: media bus code with its CSI-2 data type and depth. */
struct sun8i_a83t_mipi_csi2_format {
	u32 mbus_code;
	u8 data_type;	/* MIPI CSI-2 data type byte */
	u32 bpp;	/* bits per pixel */
};

/* V4L2 bridge state: subdev, pads, async notifier and the active format. */
struct sun8i_a83t_mipi_csi2_bridge {
	struct v4l2_subdev subdev;
	struct media_pad pads[SUN8I_A83T_MIPI_CSI2_PAD_COUNT];
	struct v4l2_fwnode_endpoint endpoint;
	struct v4l2_async_notifier notifier;
	struct v4l2_mbus_framefmt mbus_format;
	struct mutex lock; /* Mbus format lock. */
	struct v4l2_subdev *source_subdev;	/* connected sensor subdev */
};

/* Top-level device state: clocks, reset, D-PHY and the embedded bridge. */
struct sun8i_a83t_mipi_csi2_device {
	struct device *dev;
	struct regmap *regmap;
	struct clk *clock_mod;	/* module clock, held at an exclusive rate */
	struct clk *clock_mipi;
	struct clk *clock_misc;
	struct reset_control *reset;
	struct phy *dphy;
	struct sun8i_a83t_mipi_csi2_bridge bridge;
};

#endif

View File

@ -0,0 +1,151 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2020 Kévin L'hôpital <kevin.lhopital@bootlin.com>
* Copyright 2020-2022 Bootlin
* Author: Paul Kocialkowski <paul.kocialkowski@bootlin.com>
*/
#ifndef _SUN8I_A83T_MIPI_CSI2_REG_H_
#define _SUN8I_A83T_MIPI_CSI2_REG_H_

/* Core control and RX packet-counter registers. */
#define SUN8I_A83T_MIPI_CSI2_VERSION_REG 0x0
#define SUN8I_A83T_MIPI_CSI2_CTRL_REG 0x4
#define SUN8I_A83T_MIPI_CSI2_CTRL_INIT_VALUE 0xb8c39bec
#define SUN8I_A83T_MIPI_CSI2_CTRL_RESET_N BIT(31)
#define SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_REG 0x8
#define SUN8I_A83T_MIPI_CSI2_RX_PKT_NUM_INIT_VALUE 0xb8d257f8

/*
 * Undocumented ("reserved") registers. The INIT/HW_LOCK magic values here
 * and above come from vendor code — presumably unlock keys for the block;
 * verify against the Allwinner BSP sources.
 */
#define SUN8I_A83T_MIPI_CSI2_RSVD0_REG 0xc
#define SUN8I_A83T_MIPI_CSI2_RSVD1_REG 0x18
#define SUN8I_A83T_MIPI_CSI2_RSVD1_HW_LOCK_VALUE 0xb8c8a30c
#define SUN8I_A83T_MIPI_CSI2_RSVD2_REG 0x1c
#define SUN8I_A83T_MIPI_CSI2_RSVD2_HW_LOCK_VALUE 0xb8df8ad7

/* Interrupt status bank 0: per-VC/per-DT protocol and checksum errors. */
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_REG 0x20
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_ECC_ERR_DBL BIT(28)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC3 BIT(27)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC2 BIT(26)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC1 BIT(25)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_CKSM_ERR_VC0 BIT(24)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT3 BIT(23)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT2 BIT(22)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT1 BIT(21)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LINE_SEQ_ERR_DT0 BIT(20)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT3 BIT(19)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT2 BIT(18)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT1 BIT(17)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_LS_LE_ERR_DT0 BIT(16)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC3 BIT(15)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC2 BIT(14)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC1 BIT(13)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_CRC_ERR_VC0 BIT(12)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC3 BIT(11)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC2 BIT(10)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC1 BIT(9)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FRM_SEQ_ERR_VC0 BIT(8)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC3 BIT(7)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC2 BIT(6)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC1 BIT(5)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_FS_FE_ERR_VC0 BIT(4)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_3 BIT(3)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_2 BIT(2)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_1 BIT(1)
#define SUN8I_A83T_MIPI_CSI2_INT_STA0_SOT_SYNC_ERR_0 BIT(0)

/* Interrupt status bank 1: per-lane SOT/escape errors and ECC/DT errors. */
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_REG 0x24
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT7 BIT(23)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT6 BIT(22)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT5 BIT(21)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LINE_SEQ_ERR_DT4 BIT(20)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT7 BIT(19)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT6 BIT(18)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT5 BIT(17)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_LS_LE_ERR_DT4 BIT(16)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC3 BIT(15)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC2 BIT(14)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC1 BIT(13)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_DT_ERR_VC0 BIT(12)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC3 BIT(11)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC2 BIT(10)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC1 BIT(9)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ECC_ERR1_VC0 BIT(8)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_3 BIT(7)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_2 BIT(6)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_1 BIT(5)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_SOT_ERR_0 BIT(4)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_3 BIT(3)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_2 BIT(2)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_1 BIT(1)
#define SUN8I_A83T_MIPI_CSI2_INT_STA1_ESC_ENTRY_ERR_0 BIT(0)

/* Interrupt mask bank 0 — bit layout mirrors INT_STA0 above. */
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_REG 0x28
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_ECC_ERR_DBL BIT(28)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC3 BIT(27)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC2 BIT(26)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC1 BIT(25)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CKSM_ERR_VC0 BIT(24)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT3 BIT(23)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT2 BIT(22)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT1 BIT(21)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LINE_SEQ_ERR_DT0 BIT(20)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT3 BIT(19)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT2 BIT(18)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT1 BIT(17)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_LS_LE_ERR_DT0 BIT(16)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC3 BIT(15)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC2 BIT(14)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC1 BIT(13)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_CRC_ERR_VC0 BIT(12)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC3 BIT(11)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC2 BIT(10)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC1 BIT(9)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FRM_SEQ_ERR_VC0 BIT(8)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC3 BIT(7)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC2 BIT(6)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC1 BIT(5)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_FS_FE_ERR_VC0 BIT(4)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_3 BIT(3)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_2 BIT(2)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_1 BIT(1)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK0_SOT_SYNC_ERR_0 BIT(0)

/* Interrupt mask bank 1 — bit layout mirrors INT_STA1 (bits 16-23 unused). */
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_REG 0x2c
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC3 BIT(15)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC2 BIT(14)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC1 BIT(13)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_DT_ERR_VC0 BIT(12)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC3 BIT(11)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC2 BIT(10)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC1 BIT(9)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ECC_ERR1_VC0 BIT(8)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_3 BIT(7)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_2 BIT(6)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_1 BIT(5)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_SOT_ERR_0 BIT(4)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_3 BIT(3)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_2 BIT(2)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_1 BIT(1)
#define SUN8I_A83T_MIPI_CSI2_INT_MSK1_ESC_ENTRY_ERR_0 BIT(0)

/* Receiver configuration: sync, unpacking, lane and channel count. */
#define SUN8I_A83T_MIPI_CSI2_CFG_REG 0x100
#define SUN8I_A83T_MIPI_CSI2_CFG_INIT_VALUE 0xb8c64f24
#define SUN8I_A83T_MIPI_CSI2_CFG_SYNC_EN BIT(31)
#define SUN8I_A83T_MIPI_CSI2_CFG_BYPASS_ECC_EN BIT(29)
#define SUN8I_A83T_MIPI_CSI2_CFG_UNPKT_EN BIT(28)
#define SUN8I_A83T_MIPI_CSI2_CFG_NONE_UNPKT_RX_MODE BIT(27)
#define SUN8I_A83T_MIPI_CSI2_CFG_YC_SWAB BIT(26)
#define SUN8I_A83T_MIPI_CSI2_CFG_N_BYTE BIT(24)
#define SUN8I_A83T_MIPI_CSI2_CFG_SYNC_DLY_CYCLE(v) (((v) << 18) & \
						    GENMASK(22, 18))
#define SUN8I_A83T_MIPI_CSI2_CFG_N_CHANNEL(v) ((((v) - 1) << 16) & \
					       GENMASK(17, 16))
#define SUN8I_A83T_MIPI_CSI2_CFG_N_LANE(v) ((((v) - 1) << 4) & \
					    GENMASK(5, 4))

/* VC/DT routing: channels 0-3 in VCDT0, channels 4-7 in VCDT1 (8 bits each). */
#define SUN8I_A83T_MIPI_CSI2_VCDT0_REG 0x104
#define SUN8I_A83T_MIPI_CSI2_VCDT0_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
						  ((ch) * 8 + 6))
#define SUN8I_A83T_MIPI_CSI2_VCDT0_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
						 ((ch) * 8))
#define SUN8I_A83T_MIPI_CSI2_VCDT1_REG 0x108
#define SUN8I_A83T_MIPI_CSI2_VCDT1_CH_VC(ch, vc) (((vc) & GENMASK(1, 0)) << \
						  (((ch) - 4) * 8 + 6))
#define SUN8I_A83T_MIPI_CSI2_VCDT1_CH_DT(ch, t) (((t) & GENMASK(5, 0)) << \
						 (((ch) - 4) * 8))

#endif

View File

@ -0,0 +1,102 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* vimc-lens.c Virtual Media Controller Driver
* Copyright (C) 2022 Google, Inc
* Author: yunkec@google.com (Yunke Cao)
*/
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
#include <media/v4l2-subdev.h>
#include "vimc-common.h"
/* Focus control range exposed by the virtual lens: 0..1023, step 1. */
#define VIMC_LENS_MAX_FOCUS_POS	1023
#define VIMC_LENS_MAX_FOCUS_STEP	1

/*
 * Per-lens state: the common vimc entity, the V4L2 subdev with its control
 * handler, and a cached copy of the current absolute focus position.
 */
struct vimc_lens_device {
	struct vimc_ent_device ved;
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	u32 focus_absolute;	/* last value set via V4L2_CID_FOCUS_ABSOLUTE */
};

/* Core ops only forward status/event plumbing to the control framework. */
static const struct v4l2_subdev_core_ops vimc_lens_core_ops = {
	.log_status = v4l2_ctrl_subdev_log_status,
	.subscribe_event = v4l2_ctrl_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_ops vimc_lens_ops = {
	.core = &vimc_lens_core_ops
};
/*
 * Control-set callback: record the new focus position. Only
 * V4L2_CID_FOCUS_ABSOLUTE is registered on this handler, so any other id
 * is rejected.
 */
static int vimc_lens_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vimc_lens_device *vlens =
		container_of(ctrl->handler, struct vimc_lens_device, hdl);

	switch (ctrl->id) {
	case V4L2_CID_FOCUS_ABSOLUTE:
		vlens->focus_absolute = ctrl->val;
		return 0;
	default:
		return -EINVAL;
	}
}
/* Control ops: only ->s_ctrl is needed for this write-only control. */
static const struct v4l2_ctrl_ops vimc_lens_ctrl_ops = {
	.s_ctrl = vimc_lens_s_ctrl,
};
/*
 * Instantiate a lens entity: allocate state, set up the focus control
 * handler and register the subdev with the media framework.
 *
 * Fix: the original jumped to err_free_vlens when hdl.error was set,
 * skipping v4l2_ctrl_handler_free(). v4l2_ctrl_handler_init() allocates
 * internal bucket storage even when a later v4l2_ctrl_new_std() fails, so
 * the handler must be freed on every error path after init.
 */
static struct vimc_ent_device *vimc_lens_add(struct vimc_device *vimc,
					     const char *vcfg_name)
{
	struct v4l2_device *v4l2_dev = &vimc->v4l2_dev;
	struct vimc_lens_device *vlens;
	int ret;

	/* Allocate the vlens struct */
	vlens = kzalloc(sizeof(*vlens), GFP_KERNEL);
	if (!vlens)
		return ERR_PTR(-ENOMEM);

	v4l2_ctrl_handler_init(&vlens->hdl, 1);

	v4l2_ctrl_new_std(&vlens->hdl, &vimc_lens_ctrl_ops,
			  V4L2_CID_FOCUS_ABSOLUTE, 0,
			  VIMC_LENS_MAX_FOCUS_POS, VIMC_LENS_MAX_FOCUS_STEP, 0);
	vlens->sd.ctrl_handler = &vlens->hdl;
	if (vlens->hdl.error) {
		ret = vlens->hdl.error;
		goto err_free_hdl;
	}
	vlens->ved.dev = vimc->mdev.dev;

	ret = vimc_ent_sd_register(&vlens->ved, &vlens->sd, v4l2_dev,
				   vcfg_name, MEDIA_ENT_F_LENS, 0,
				   NULL, &vimc_lens_ops);
	if (ret)
		goto err_free_hdl;

	return &vlens->ved;

err_free_hdl:
	v4l2_ctrl_handler_free(&vlens->hdl);
	kfree(vlens);

	return ERR_PTR(ret);
}
/* Release callback: free the control handler, media entity and state. */
static void vimc_lens_release(struct vimc_ent_device *ved)
{
	struct vimc_lens_device *vlens =
		container_of(ved, struct vimc_lens_device, ved);

	v4l2_ctrl_handler_free(&vlens->hdl);
	media_entity_cleanup(vlens->ved.ent);
	kfree(vlens);
}
/* Entity-type descriptor consumed by the vimc core to create/destroy lenses. */
struct vimc_ent_type vimc_lens_type = {
	.add = vimc_lens_add,
	.release = vimc_lens_release
};

View File

@ -0,0 +1,190 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021-2022, NVIDIA CORPORATION. All rights reserved.
*/
#include <soc/tegra/mc.h>
#include <dt-bindings/memory/tegra234-mc.h>
#include "mc.h"
/*
 * Memory clients of the Tegra234 memory controller. Each entry ties a
 * client ID to the SMMU stream ID (SID) it uses and the register offsets
 * of its SID override/security programming registers. Groups below:
 * MGBE (multi-gigabit ethernet, one SID per VF), SDMMC4, BPMP and APE DMA.
 */
static const struct tegra_mc_client tegra234_mc_clients[] = {
	{
		/* MGBE read clients */
		.id = TEGRA234_MEMORY_CLIENT_MGBEARD,
		.name = "mgbeard",
		.sid = TEGRA234_SID_MGBE,
		.regs = {
			.sid = {
				.override = 0x2c0,
				.security = 0x2c4,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBEBRD,
		.name = "mgbebrd",
		.sid = TEGRA234_SID_MGBE_VF1,
		.regs = {
			.sid = {
				.override = 0x2c8,
				.security = 0x2cc,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBECRD,
		.name = "mgbecrd",
		.sid = TEGRA234_SID_MGBE_VF2,
		.regs = {
			.sid = {
				.override = 0x2d0,
				.security = 0x2d4,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBEDRD,
		.name = "mgbedrd",
		.sid = TEGRA234_SID_MGBE_VF3,
		.regs = {
			.sid = {
				.override = 0x2d8,
				.security = 0x2dc,
			},
		},
	}, {
		/* MGBE write clients */
		.id = TEGRA234_MEMORY_CLIENT_MGBEAWR,
		.name = "mgbeawr",
		.sid = TEGRA234_SID_MGBE,
		.regs = {
			.sid = {
				.override = 0x2e0,
				.security = 0x2e4,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBEBWR,
		.name = "mgbebwr",
		.sid = TEGRA234_SID_MGBE_VF1,
		.regs = {
			.sid = {
				.override = 0x2f8,
				.security = 0x2fc,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBECWR,
		.name = "mgbecwr",
		.sid = TEGRA234_SID_MGBE_VF2,
		.regs = {
			.sid = {
				.override = 0x308,
				.security = 0x30c,
			},
		},
	}, {
		/* SDMMC4 read/write */
		.id = TEGRA234_MEMORY_CLIENT_SDMMCRAB,
		.name = "sdmmcrab",
		.sid = TEGRA234_SID_SDMMC4,
		.regs = {
			.sid = {
				.override = 0x318,
				.security = 0x31c,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_MGBEDWR,
		.name = "mgbedwr",
		.sid = TEGRA234_SID_MGBE_VF3,
		.regs = {
			.sid = {
				.override = 0x328,
				.security = 0x32c,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_SDMMCWAB,
		.name = "sdmmcwab",
		.sid = TEGRA234_SID_SDMMC4,
		.regs = {
			.sid = {
				.override = 0x338,
				.security = 0x33c,
			},
		},
	}, {
		/* BPMP firmware processor and its DMA engine */
		.id = TEGRA234_MEMORY_CLIENT_BPMPR,
		.name = "bpmpr",
		.sid = TEGRA234_SID_BPMP,
		.regs = {
			.sid = {
				.override = 0x498,
				.security = 0x49c,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_BPMPW,
		.name = "bpmpw",
		.sid = TEGRA234_SID_BPMP,
		.regs = {
			.sid = {
				.override = 0x4a0,
				.security = 0x4a4,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_BPMPDMAR,
		.name = "bpmpdmar",
		.sid = TEGRA234_SID_BPMP,
		.regs = {
			.sid = {
				.override = 0x4a8,
				.security = 0x4ac,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_BPMPDMAW,
		.name = "bpmpdmaw",
		.sid = TEGRA234_SID_BPMP,
		.regs = {
			.sid = {
				.override = 0x4b0,
				.security = 0x4b4,
			},
		},
	}, {
		/* APE (audio processing engine) DMA */
		.id = TEGRA234_MEMORY_CLIENT_APEDMAR,
		.name = "apedmar",
		.sid = TEGRA234_SID_APE,
		.regs = {
			.sid = {
				.override = 0x4f8,
				.security = 0x4fc,
			},
		},
	}, {
		.id = TEGRA234_MEMORY_CLIENT_APEDMAW,
		.name = "apedmaw",
		.sid = TEGRA234_SID_APE,
		.regs = {
			.sid = {
				.override = 0x500,
				.security = 0x504,
			},
		},
	},
};
/*
 * Tegra234 memory-controller SoC description: client table above, 40-bit
 * addressing across 16 channels, and the interrupt sources the driver
 * handles. Reuses the Tegra186 ops since the programming model matches.
 */
const struct tegra_mc_soc tegra234_mc_soc = {
	.num_clients = ARRAY_SIZE(tegra234_mc_clients),
	.clients = tegra234_mc_clients,
	.num_address_bits = 40,
	.num_channels = 16,
	.client_id_mask = 0x1ff,
	.intmask = MC_INT_DECERR_ROUTE_SANITY |
		   MC_INT_DECERR_GENERALIZED_CARVEOUT | MC_INT_DECERR_MTS |
		   MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
		   MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
	.has_addr_hi_reg = true,
	.ops = &tegra186_mc_ops,
	.ch_intmask = 0x0000ff00,
	.global_intstatus_channel_shift = 8,
};

View File

@ -0,0 +1,133 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
/*
 * VCMD (video command) block registers, relative to a decoder core's base
 * address. Offsets correspond to the hardware SWREG indexes noted inline.
 */
#define VCMD_CONTROL_OFFSET	0x40	/* SWREG16 */
#define VCMD_IRQ_STATUS_OFFSET	0x44	/* SWREG17 */

/* Interrupt-source bits of the VCMD IRQ status register. */
#define VCMD_IRQ_STATUS_ENDCMD_MASK	0x1
#define VCMD_IRQ_STATUS_BUSERR_MASK	0x2
#define VCMD_IRQ_STATUS_TIMEOUT_MASK	0x4
#define VCMD_IRQ_STATUS_CMDERR_MASK	0x8
#define VCMD_IRQ_STATUS_ABORT_MASK	0x10
#define VCMD_IRQ_STATUS_RESET_MASK	0x20
/*
 * Log which abnormal-interrupt sources are set in @irq_status. Up to six
 * source names are printed; if @irq_status is non-zero but matches no
 * known bit, "Unknown" is printed instead.
 */
static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
{
	static const u32 src_mask[] = {
		VCMD_IRQ_STATUS_ENDCMD_MASK, VCMD_IRQ_STATUS_BUSERR_MASK,
		VCMD_IRQ_STATUS_TIMEOUT_MASK, VCMD_IRQ_STATUS_CMDERR_MASK,
		VCMD_IRQ_STATUS_ABORT_MASK, VCMD_IRQ_STATUS_RESET_MASK,
	};
	static const char * const src_name[] = {
		" ENDCMD", " BUSERR", " TIMEOUT", " CMDERR", " ABORT", " RESET",
	};
	/* Slot 0 defaults to "Unknown" and is overwritten by the first match */
	const char *slot[6] = {"Unknown", "", "", "", "", ""};
	unsigned int i, used = 0;

	if (!irq_status)
		return;

	for (i = 0; i < ARRAY_SIZE(src_mask); i++)
		if (irq_status & src_mask[i])
			slot[used++] = src_name[i];

	dev_err(hdev->dev, "abnormal interrupt source:%s%s%s%s%s%s\n",
		slot[0], slot[1], slot[2], slot[3], slot[4], slot[5]);
}
/*
 * Handle an abnormal decoder interrupt: log and decode the source bits,
 * acknowledge the interrupt in the VCMD block and, for a command timeout,
 * escalate to a hard device reset.
 */
static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
{
	bool reset_required = false;
	u32 irq_status;

	irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);

	dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, core_id);

	dec_print_abnrm_intr_source(hdev, irq_status);

	/* A command timeout is treated as unrecoverable at this level */
	if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
		reset_required = true;

	/* Clear the interrupt */
	WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);

	/* Flush the interrupt clear */
	RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);

	if (reset_required)
		hl_device_reset(hdev, HL_DRV_RESET_HARD);
}
/* Workqueue entry point: recover the hl_dec owning this work item and
 * run the abnormal-interrupt handler for its core.
 */
static void dec_completion_abnrm(struct work_struct *work)
{
	struct hl_dec *dec = container_of(work, struct hl_dec, completion_abnrm_work);
	struct hl_device *hdev = dec->hdev;

	dec_error_intr_work(hdev, dec->base_addr, dec->core_id);
}
/* Free the per-device decoder array allocated by hl_dec_init(). */
void hl_dec_fini(struct hl_device *hdev)
{
	kfree(hdev->dec);
}
/*
 * Allocate and initialize one hl_dec descriptor per decoder core:
 * work item for abnormal interrupts, core id and MMIO base address.
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL if the
 * ASIC reports a zero base address for any core.
 */
int hl_dec_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_dec *dec;
	int rc, j;

	/* Nothing to do when the ASIC has no decoder cores */
	if (!prop->max_dec)
		return 0;

	hdev->dec = kcalloc(prop->max_dec, sizeof(struct hl_dec), GFP_KERNEL);
	if (!hdev->dec)
		return -ENOMEM;

	for (j = 0 ; j < prop->max_dec ; j++) {
		dec = hdev->dec + j;

		dec->hdev = hdev;
		INIT_WORK(&dec->completion_abnrm_work, dec_completion_abnrm);
		dec->core_id = j;
		dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
		if (!dec->base_addr) {
			dev_err(hdev->dev, "Invalid base address of decoder %d\n", j);
			rc = -EINVAL;
			goto err_dec_fini;
		}
	}

	return 0;

err_dec_fini:
	hl_dec_fini(hdev);

	return rc;
}
/*
 * Stop every enabled decoder core when a context is torn down, by zeroing
 * its VCMD control register. Disabled cores (bit clear in
 * decoder_enabled_mask) are skipped.
 *
 * Cleanup: the original tested `!!(mask & BIT(j))` — the double negation
 * is redundant in a boolean context; use a guard-clause `continue` and
 * drop one nesting level.
 */
void hl_dec_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_dec *dec;
	int j;

	for (j = 0 ; j < prop->max_dec ; j++) {
		if (!(prop->decoder_enabled_mask & BIT(j)))
			continue;

		dec = hdev->dec + j;
		/* Stop the decoder */
		WREG32(dec->base_addr + VCMD_CONTROL_OFFSET, 0);
	}
}

View File

@ -0,0 +1,349 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
/**
 * hl_mmap_mem_buf_get - increase the buffer refcount and return a pointer to
 * the buffer descriptor.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Find the buffer in the store and return a pointer to its descriptor.
 * Increase buffer refcount. If not found - return NULL.
 */
struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	/* Handles are (idr id | mem_id) << PAGE_SHIFT (see the handle
	 * construction in hl_mmap_mem_buf_alloc()), so shift down to
	 * recover the idr key.
	 */
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_warn(mmg->dev,
			 "Buff get failed, no match to handle %#llx\n", handle);
		return NULL;
	}
	/* Take the reference under the lock so the buffer cannot be
	 * released between the lookup and the kref_get().
	 */
	kref_get(&buf->refcount);
	spin_unlock(&mmg->lock);
	return buf;
}
/**
 * hl_mmap_mem_buf_destroy - destroy the unused buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Internal function, used as a final step of buffer release. Shall be invoked
 * only when the buffer is no longer in use (removed from idr). Will call the
 * release callback (if applicable), and free the memory.
 */
static void hl_mmap_mem_buf_destroy(struct hl_mmap_mem_buf *buf)
{
	if (buf->behavior->release)
		buf->behavior->release(buf);

	kfree(buf);
}
/**
 * hl_mmap_mem_buf_release - release buffer
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used as a kref release callback, when the last user of
 * the buffer is released. Removes the buffer from the handle store and
 * destroys it.
 *
 * NOTE(review): the original comment claimed this "shall be called from an
 * interrupt context", which conflicts with hl_mmap_mem_buf_put_handle()'s
 * note — confirm the intended calling context before relying on either.
 */
static void hl_mmap_mem_buf_release(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	/* Remove the handle first so no new hl_mmap_mem_buf_get() can
	 * find a buffer that is being destroyed.
	 */
	spin_lock(&buf->mmg->lock);
	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&buf->mmg->lock);

	hl_mmap_mem_buf_destroy(buf);
}
/**
 * hl_mmap_mem_buf_remove_idr_locked - remove handle from idr
 *
 * @kref: kref that reached 0.
 *
 * Internal function, used for kref put by handle. Assumes mmg lock is taken.
 * Will remove the buffer from idr, without destroying it (the caller
 * destroys it after dropping the lock).
 */
static void hl_mmap_mem_buf_remove_idr_locked(struct kref *kref)
{
	struct hl_mmap_mem_buf *buf =
		container_of(kref, struct hl_mmap_mem_buf, refcount);

	idr_remove(&buf->mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
}
/**
 * hl_mmap_mem_buf_put - decrease the reference to the buffer
 *
 * @buf: memory manager buffer descriptor
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Returns the kref_put() result (1 if the buffer was released, 0 otherwise).
 *
 * NOTE(review): the original "shall be called from an interrupt context"
 * remark contradicts hl_mmap_mem_buf_put_handle() — confirm intent.
 */
int hl_mmap_mem_buf_put(struct hl_mmap_mem_buf *buf)
{
	return kref_put(&buf->refcount, hl_mmap_mem_buf_release);
}
/**
 * hl_mmap_mem_buf_put_handle - decrease the reference to the buffer with the
 * given handle.
 *
 * @mmg: parent unified memory manager
 * @handle: requested buffer handle
 *
 * Decrease the reference to the buffer, and release it if it was the last one.
 * Shall not be called from an interrupt context. Return -EINVAL if handle was
 * not found, else return the put outcome (0 or 1).
 */
int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
{
	struct hl_mmap_mem_buf *buf;

	spin_lock(&mmg->lock);
	buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
	if (!buf) {
		spin_unlock(&mmg->lock);
		dev_dbg(mmg->dev,
			"Buff put failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Drop the last reference under the lock so lookup and idr removal
	 * are atomic, but destroy outside the lock (release callback may
	 * sleep).
	 */
	if (kref_put(&buf->refcount, hl_mmap_mem_buf_remove_idr_locked)) {
		spin_unlock(&mmg->lock);
		hl_mmap_mem_buf_destroy(buf);
		return 1;
	}

	spin_unlock(&mmg->lock);
	return 0;
}
/**
 * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
 *
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer polymorphic behavior
 * @gfp: gfp flags to use for the memory allocations
 * @args: additional args passed to behavior->alloc
 *
 * Allocate and register a new memory buffer inside the give memory manager.
 * Return the pointer to the new buffer on success or NULL on failure.
 */
struct hl_mmap_mem_buf *
hl_mmap_mem_buf_alloc(struct hl_mem_mgr *mmg,
		      struct hl_mmap_mem_buf_behavior *behavior, gfp_t gfp,
		      void *args)
{
	struct hl_mmap_mem_buf *buf;
	int rc;

	buf = kzalloc(sizeof(*buf), gfp);
	if (!buf)
		return NULL;

	/* GFP_ATOMIC because the idr is manipulated under a spinlock */
	spin_lock(&mmg->lock);
	rc = idr_alloc(&mmg->handles, buf, 1, 0, GFP_ATOMIC);
	spin_unlock(&mmg->lock);
	if (rc < 0) {
		dev_err(mmg->dev,
			"%s: Failed to allocate IDR for a new buffer, rc=%d\n",
			behavior->topic, rc);
		goto free_buf;
	}

	buf->mmg = mmg;
	buf->behavior = behavior;
	/* Handle = (idr id | mem_id) << PAGE_SHIFT, so it doubles as a
	 * unique mmap offset for this buffer.
	 */
	buf->handle = (((u64)rc | buf->behavior->mem_id) << PAGE_SHIFT);
	kref_init(&buf->refcount);

	rc = buf->behavior->alloc(buf, gfp, args);
	if (rc) {
		dev_err(mmg->dev, "%s: Failure in buffer alloc callback %d\n",
			behavior->topic, rc);
		goto remove_idr;
	}

	return buf;

remove_idr:
	spin_lock(&mmg->lock);
	idr_remove(&mmg->handles, lower_32_bits(buf->handle >> PAGE_SHIFT));
	spin_unlock(&mmg->lock);
free_buf:
	kfree(buf);
	return NULL;
}
/**
 * hl_mmap_mem_buf_vm_close - handle mmap close
 *
 * @vma: the vma object for which mmap was closed.
 *
 * Put the memory buffer if it is no longer mapped. Partial unmaps (VMA
 * splits) only shrink the accounted mapped size; the reference is dropped
 * when the last part goes away.
 */
static void hl_mmap_mem_buf_vm_close(struct vm_area_struct *vma)
{
	struct hl_mmap_mem_buf *buf =
		(struct hl_mmap_mem_buf *)vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = buf->real_mapped_size - (vma->vm_end - vma->vm_start);

	/* Still partially mapped — just remember the remaining size */
	if (new_mmap_size > 0) {
		buf->real_mapped_size = new_mmap_size;
		return;
	}

	atomic_set(&buf->mmap, 0);
	hl_mmap_mem_buf_put(buf);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct hl_mmap_mem_buf_vm_ops = {
	.close = hl_mmap_mem_buf_vm_close
};
/**
 * hl_mem_mgr_mmap - map the given buffer to the user
 *
 * @mmg: unified memory manager
 * @vma: the vma object for which mmap was requested.
 * @args: additional args passed to behavior->mmap
 *
 * Map the buffer specified by the vma->vm_pgoff to the given vma. On
 * success the buffer holds one extra reference, owned by the VMA and
 * dropped in hl_mmap_mem_buf_vm_close().
 */
int hl_mem_mgr_mmap(struct hl_mem_mgr *mmg, struct vm_area_struct *vma,
		    void *args)
{
	struct hl_mmap_mem_buf *buf;
	u64 user_mem_size;
	u64 handle;
	int rc;

	/* We use the page offset to hold the idr and thus we need to clear
	 * it before doing the mmap itself
	 */
	handle = vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = 0;

	/* Reference was taken here */
	buf = hl_mmap_mem_buf_get(mmg, handle);
	if (!buf) {
		dev_err(mmg->dev,
			"Memory mmap failed, no match to handle %#llx\n", handle);
		return -EINVAL;
	}

	/* Validation check: the VMA must cover exactly the mappable size */
	user_mem_size = vma->vm_end - vma->vm_start;
	if (user_mem_size != ALIGN(buf->mappable_size, PAGE_SIZE)) {
		dev_err(mmg->dev,
			"%s: Memory mmap failed, mmap VM size 0x%llx != 0x%llx allocated physical mem size\n",
			buf->behavior->topic, user_mem_size, buf->mappable_size);
		rc = -EINVAL;
		goto put_mem;
	}

	/* Older kernels took a type argument in access_ok() */
#ifdef _HAS_TYPE_ARG_IN_ACCESS_OK
	if (!access_ok(VERIFY_WRITE, (void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#else
	if (!access_ok((void __user *)(uintptr_t)vma->vm_start,
		       user_mem_size)) {
#endif
		dev_err(mmg->dev, "%s: User pointer is invalid - 0x%lx\n",
			buf->behavior->topic, vma->vm_start);

		rc = -EINVAL;
		goto put_mem;
	}

	/* Allow only a single userspace mapping per buffer */
	if (atomic_cmpxchg(&buf->mmap, 0, 1)) {
		dev_err(mmg->dev,
			"%s, Memory mmap failed, already mmaped to user\n",
			buf->behavior->topic);
		rc = -EINVAL;
		goto put_mem;
	}

	vma->vm_ops = &hl_mmap_mem_buf_vm_ops;

	/* Note: We're transferring the memory reference to vma->vm_private_data here. */

	vma->vm_private_data = buf;

	rc = buf->behavior->mmap(buf, vma, args);
	if (rc) {
		atomic_set(&buf->mmap, 0);
		goto put_mem;
	}

	buf->real_mapped_size = buf->mappable_size;
	/* Restore the offset so later lookups via the VMA still work */
	vma->vm_pgoff = handle >> PAGE_SHIFT;

	return 0;

put_mem:
	hl_mmap_mem_buf_put(buf);
	return rc;
}
/**
 * hl_mem_mgr_init - initialize unified memory manager
 *
 * @dev: owner device pointer
 * @mmg: structure to initialize
 *
 * Initialize an instance of unified memory manager
 */
void hl_mem_mgr_init(struct device *dev, struct hl_mem_mgr *mmg)
{
	mmg->dev = dev;
	spin_lock_init(&mmg->lock);
	idr_init(&mmg->handles);
}
/**
 * hl_mem_mgr_fini - release unified memory manager
 *
 * @mmg: parent unified memory manager
 *
 * Release the unified memory manager: drop the last reference of every
 * remaining buffer (warning if anything else still held one) and destroy
 * the handle store.
 *
 * NOTE(review): the original "shall be called from an interrupt context"
 * remark looks copy-pasted — confirm the intended calling context.
 */
void hl_mem_mgr_fini(struct hl_mem_mgr *mmg)
{
	struct hl_mmap_mem_buf *buf;
	struct idr *idp;
	const char *topic;
	u32 id;

	idp = &mmg->handles;

	idr_for_each_entry(idp, buf, id) {
		topic = buf->behavior->topic;
		/* A put that does not free means someone else still holds
		 * a reference — flag the leak.
		 */
		if (hl_mmap_mem_buf_put(buf) != 1)
			dev_err(mmg->dev,
				"%s: Buff handle %u for CTX is still alive\n",
				topic, id);
	}

	/* TODO: can it happen that some buffer is still in use at this point? */

	idr_destroy(&mmg->handles);
}

View File

@ -0,0 +1,399 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "../habanalabs.h"
#include "../../include/hw_ip/mmu/mmu_general.h"
#include <linux/slab.h>
/*
 * Look up the pgt_info tracking the hop page table whose physical address
 * is @phys_hop_addr, or NULL when no such entry exists in the context's
 * hash.
 */
static struct pgt_info *hl_mmu_v2_hr_get_pgt_info(struct hl_ctx *ctx, u64 phys_hop_addr)
{
	struct pgt_info *entry;

	hash_for_each_possible(ctx->hr_mmu_phys_hash, entry, node,
			       (unsigned long) phys_hop_addr)
		if (entry->phys_addr == phys_hop_addr)
			return entry;

	return NULL;
}
/* Track a newly created hop page table in the context's physical-address hash. */
static void hl_mmu_v2_hr_add_pgt_info(struct hl_ctx *ctx, struct pgt_info *pgt_info,
				      dma_addr_t phys_addr)
{
	hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
}
/* HOP0 tables are pre-allocated per ASID, so fetch by index instead of hash. */
static struct pgt_info *hl_mmu_v2_hr_get_hop0_pgt_info(struct hl_ctx *ctx)
{
	return &ctx->hdev->mmu_priv.hr.mmu_asid_hop0[ctx->asid];
}
/**
 * hl_mmu_v2_hr_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 *
 * Return: 0 for success, non-zero for failure.
 */
static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
				prop->mmu_pgt_size);
}
/**
 * hl_mmu_v2_hr_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
}
/**
 * hl_mmu_v2_hr_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize the hash that tracks this context's page-table hops.
 * Return: 0 on success, non-zero otherwise (currently always 0).
 */
static int hl_mmu_v2_hr_ctx_init(struct hl_ctx *ctx)
{
	hash_init(ctx->hr_mmu_phys_hash);
	return 0;
}
/*
 * hl_mmu_v2_hr_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet (logging each leak, since a
 *   well-behaved context unmaps everything before teardown)
 */
static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	struct hlist_node *tmp;
	int i;

	if (!hash_empty(ctx->hr_mmu_phys_hash))
		dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
			ctx->asid);

	/* _safe iteration: each leaked hop is removed from the hash as it
	 * is freed.
	 */
	hash_for_each_safe(ctx->hr_mmu_phys_hash, i, tmp, pgt_info, node) {
		dev_err_ratelimited(hdev->dev,
			"pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
			pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
		hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
						ctx->hdev->asic_prop.mmu_hop_table_size);
	}
}
/*
 * Unmap a single page at @virt_addr: walk the hop tables down to the last
 * (or huge-page) PTE, clear it, then release intermediate hops bottom-up
 * as long as their PTE refcount drops to zero.
 * Returns 0 on success, -EINVAL if the address is not mapped, -EFAULT on
 * an invalid walk or a non-huge DRAM mapping.
 */
static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
				u64 virt_addr, bool is_dram_addr)
{
	u64 curr_pte, scrambled_virt_addr, hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 };
	struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop;
	struct hl_mmu_properties *mmu_prop;
	bool is_huge = false;
	int i, hop_last;

	prop = &hdev->asic_prop;

	/* shifts and masks are the same in PMMU and HMMU, use one of them */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
	hop_last = mmu_prop->num_hops - 1;

	scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
	curr_pte = 0;

	/* Top-down walk: collect each hop's pgt_info and PTE address */
	for (i = 0 ; i < mmu_prop->num_hops ; i++) {
		/* we get HOP0 differently, it doesn't need curr_pte */
		if (i == 0)
			hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
		else
			hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx,
					&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs, curr_pte);
		if (!hops_pgt_info[i])
			goto not_mapped;

		hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
									hops_pgt_info[i]->phys_addr,
									scrambled_virt_addr);
		if (hop_pte_phys_addr[i] == U64_MAX)
			return -EFAULT;

		curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
									hop_pte_phys_addr[i],
									ctx->hdev->asic_prop.mmu_hop_table_size);

		/* last_mask set before the final hop marks a huge-page PTE */
		if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
			hop_last = i;
			is_huge = true;
			break;
		}
	}

	if (is_dram_addr && !is_huge) {
		dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
		return -EFAULT;
	}

	if (!(curr_pte & PAGE_PRESENT_MASK))
		goto not_mapped;

	/* Bottom-up release: stop as soon as a hop is still referenced */
	for (i = hop_last ; i > 0 ; i--) {
		hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
						ctx->hdev->asic_prop.mmu_hop_table_size);

		if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
						ctx->hdev->asic_prop.mmu_hop_table_size))
			goto mapped;
	}
	hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
						ctx->hdev->asic_prop.mmu_hop_table_size);

mapped:
	return 0;

not_mapped:
	dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", virt_addr);

	return -EINVAL;
}
/*
 * hl_mmu_v2_get_last_hop - find the translation hop that covers a page size
 *
 * @mmu_prop: MMU properties holding the per-hop shift table
 * @page_size: size of the page being mapped
 *
 * Scans from the deepest hop upwards, skipping unused hops (shift == 0),
 * and returns the first hop whose coverage (1 << shift) is at least
 * @page_size. Returns 0 if no hop matches.
 */
static int hl_mmu_v2_get_last_hop(struct hl_mmu_properties *mmu_prop, u32 page_size)
{
	int hop = mmu_prop->num_hops - 1;

	while (hop > 0) {
		u32 shift = mmu_prop->hop_shifts[hop];

		if (shift != 0 && page_size <= (1 << shift))
			break;

		hop--;
	}

	return hop;
}
/*
 * _hl_mmu_v2_hr_map - map a single (regular or huge) page in the
 *                     host-resident page tables
 *
 * @ctx: pointer to the context structure
 * @virt_addr: device virtual address to map
 * @phys_addr: physical address to map it to
 * @page_size: size of the page being mapped
 * @is_dram_addr: true if @virt_addr belongs to the DRAM (HMMU) range
 *
 * Walks the hop tables down to the last hop for @page_size, allocating
 * missing intermediate hops on the way, writes the leaf PTE and then links
 * every newly allocated hop into its parent. On any failure all hops
 * allocated by this call are freed again.
 *
 * Returns 0 on success, -EINVAL if the address is already mapped,
 * -EFAULT on an invalid last hop, -ENOMEM on allocation failure.
 */
static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
				u64 virt_addr, u64 phys_addr,
				u32 page_size, bool is_dram_addr)
{
	u64 hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 },
		curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
	struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
	bool hop_new[MMU_ARCH_6_HOPS] = { false };
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_mmu_properties *mmu_prop;
	int i, hop_last, rc = -ENOMEM;

	/*
	 * This mapping function can map a page or a huge page. For huge page
	 * there are only 4 hops rather than 5. Currently the DRAM allocation
	 * uses huge pages only but user memory could have been allocated with
	 * one of the two page sizes. Since this is a common code for all the
	 * three cases, we need this huge page check.
	 */
	if (is_dram_addr)
		mmu_prop = &prop->dmmu;
	else if (page_size == prop->pmmu_huge.page_size)
		mmu_prop = &prop->pmmu_huge;
	else
		mmu_prop = &prop->pmmu;

	hop_last = hl_mmu_v2_get_last_hop(mmu_prop, page_size);
	if (hop_last <= 0) {
		dev_err(ctx->hdev->dev, "Invalid last HOP %d\n", hop_last);
		return -EFAULT;
	}

	scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
	scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);

	/* Walk the hops top-down, allocating any table that does not exist yet;
	 * hop_new[] records which hops were created by this call (for rollback
	 * and for linking them into their parents below).
	 */
	for (i = 0 ; i <= hop_last ; i++) {

		if (i == 0)
			hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
		else
			hops_pgt_info[i] = hl_mmu_hr_get_alloc_next_hop(ctx,
							&ctx->hdev->mmu_priv.hr,
							&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
							mmu_prop, curr_pte, &hop_new[i]);
		if (!hops_pgt_info[i])
			goto err;

		hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
									hops_pgt_info[i]->phys_addr,
									scrambled_virt_addr);
		curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
									hop_pte_phys_addr[i],
									ctx->hdev->asic_prop.mmu_hop_table_size);
	}

	/* A present leaf PTE means this VA is already mapped - dump the hop
	 * chain for debug and fail.
	 */
	if (curr_pte & PAGE_PRESENT_MASK) {
		dev_err(hdev->dev, "mapping already exists for virt_addr 0x%llx\n",
									scrambled_virt_addr);

		for (i = 0 ; i <= hop_last ; i++)
			dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
					i,
					*(u64 *) (uintptr_t)
					hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
								hop_pte_phys_addr[i],
								ctx->hdev->asic_prop.mmu_hop_table_size),
					hop_pte_phys_addr[i]);

		rc = -EINVAL;
		goto err;
	}

	curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
							| PAGE_PRESENT_MASK;

	/* Write the PTEs */
	hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
							ctx->hdev->asic_prop.mmu_hop_table_size);

	/* for each new hop, add its address to the table of previous-hop */
	for (i = 1 ; i <= hop_last ; i++) {
		if (hop_new[i]) {
			curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
									PAGE_PRESENT_MASK;
			hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
						curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
			/* hop0 comes from get_hop0_pgt_info (not allocated
			 * here), so no extra reference is taken on it
			 */
			if (i - 1)
				hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
							hops_pgt_info[i - 1]->phys_addr);
		}
	}

	hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
				hops_pgt_info[hop_last]->phys_addr);

	return 0;

err:
	/* Roll back: free only the hop tables that this call allocated */
	for (i = 1 ; i <= hop_last ; i++)
		if (hop_new[i] && hops_pgt_info[i])
			hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
							ctx->hdev->asic_prop.mmu_hop_table_size);

	return rc;
}
/*
 * hl_mmu_v2_hr_swap_out - marks all mapping of the given ctx as swapped out
 *
 * @ctx: pointer to the context structure
 *
 * Intentionally empty: swap-out is not implemented for host-resident MMU v2.
 */
static void hl_mmu_v2_hr_swap_out(struct hl_ctx *ctx)
{

}
/*
 * hl_mmu_v2_hr_swap_in - marks all mapping of the given ctx as swapped in
 *
 * @ctx: pointer to the context structure
 *
 * Intentionally empty: swap-in is not implemented for host-resident MMU v2.
 */
static void hl_mmu_v2_hr_swap_in(struct hl_ctx *ctx)
{

}
/*
 * hl_mmu_v2_hr_get_tlb_mapping_params - classify a virtual address and pick
 *                                       the MMU properties that govern it
 *
 * @hdev: pointer to the device structure
 * @mmu_prop: out - properties of the MMU (DRAM/host/host-huge) covering the VA
 * @hops: out - range_type field is filled with the matching VA range type
 * @virt_addr: device virtual address to classify
 * @is_huge: out - true if the range uses huge pages (DRAM and host-huge)
 *
 * Returns 0 on success, -EINVAL if the address is in none of the three ranges.
 */
static int hl_mmu_v2_hr_get_tlb_mapping_params(struct hl_device *hdev,
							struct hl_mmu_properties **mmu_prop,
							struct hl_mmu_hop_info *hops,
							u64 virt_addr, bool *is_huge)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr;

	/* Test the address against each of the three MMU virtual ranges */
	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);
	is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
						prop->pmmu.start_addr,
						prop->pmmu.end_addr);
	is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
						prop->pmmu_huge.page_size,
						prop->pmmu_huge.start_addr,
						prop->pmmu_huge.end_addr);

	if (is_dram_addr) {
		*mmu_prop = &prop->dmmu;
		*is_huge = true;
		hops->range_type = HL_VA_RANGE_TYPE_DRAM;
	} else if (is_pmmu_addr) {
		*mmu_prop = &prop->pmmu;
		*is_huge = false;
		hops->range_type = HL_VA_RANGE_TYPE_HOST;
	} else if (is_pmmu_h_addr) {
		*mmu_prop = &prop->pmmu_huge;
		*is_huge = true;
		hops->range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
	} else {
		return -EINVAL;
	}

	return 0;
}
/*
 * hl_mmu_v2_hr_get_tlb_info - fetch the hop chain of a VA; thin wrapper that
 * delegates to the common host-resident walker with this ASIC's hr_funcs.
 */
static int hl_mmu_v2_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
					struct hl_mmu_hop_info *hops)
{
	return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,
					&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs);
}
/*
 * hl_mmu_v2_hr_set_funcs - populate the MMU function table for host-resident
 *                          MMU v2
 *
 * @hdev: pointer to the device structure
 * @mmu: pointer to the mmu functions structure to fill
 */
void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
{
	mmu->init = hl_mmu_v2_hr_init;
	mmu->fini = hl_mmu_v2_hr_fini;
	mmu->ctx_init = hl_mmu_v2_hr_ctx_init;
	mmu->ctx_fini = hl_mmu_v2_hr_ctx_fini;
	mmu->map = _hl_mmu_v2_hr_map;
	mmu->unmap = _hl_mmu_v2_hr_unmap;
	mmu->flush = hl_mmu_hr_flush;
	mmu->swap_out = hl_mmu_v2_hr_swap_out;
	mmu->swap_in = hl_mmu_v2_hr_swap_in;
	mmu->get_tlb_info = hl_mmu_v2_hr_get_tlb_info;
	/* host-resident specific callbacks used by the common hr walker */
	mmu->hr_funcs.get_hop0_pgt_info = hl_mmu_v2_hr_get_hop0_pgt_info;
	mmu->hr_funcs.get_pgt_info = hl_mmu_v2_hr_get_pgt_info;
	mmu->hr_funcs.add_pgt_info = hl_mmu_v2_hr_add_pgt_info;
	mmu->hr_funcs.get_tlb_mapping_params = hl_mmu_v2_hr_get_tlb_mapping_params;
}

View File

@ -0,0 +1,600 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2020 HabanaLabs, Ltd.
* All Rights Reserved.
*/
#include "habanalabs.h"
/**
 * hl_get_pb_block - return the relevant block within the block array
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address in the desired block
 * @pb_blocks: blocks array
 * @array_size: blocks array size
 *
 * Returns the index of the protection block whose address range contains
 * @mm_reg_addr, or -EDOM if no block covers it.
 */
static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
		const u32 pb_blocks[], int array_size)
{
	int blk;
	u32 base;

	for (blk = 0 ; blk < array_size ; blk++) {
		base = pb_blocks[blk];

		if (mm_reg_addr >= base && mm_reg_addr < base + HL_BLOCK_SIZE)
			return blk;
	}

	dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
			mm_reg_addr);
	return -EDOM;
}
/**
 * hl_unset_pb_in_block - clear a specific protection bit in a block
 *
 * @hdev: pointer to hl_device structure
 * @reg_offset: register offset will be converted to bit offset in pb block
 * @sgs_entry: pb array
 *
 * Returns 0 on success, -EINVAL if @reg_offset is outside the block or not
 * 4-byte aligned.
 */
static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
				struct hl_block_glbl_sec *sgs_entry)
{
	bool in_range = reg_offset < HL_BLOCK_SIZE;
	bool aligned = !(reg_offset & 0x3);

	if (!in_range || !aligned) {
		dev_err(hdev->dev,
			"Register offset(%d) is out of range(%d) or invalid\n",
			reg_offset, HL_BLOCK_SIZE);
		return -EINVAL;
	}

	/* each protection bit guards one 32-bit register slot */
	UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
			(reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);

	return 0;
}
/**
 * hl_unsecure_register - locate the relevant block for this register and
 * remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_addr: register address to unsecure
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @array_size: blocks array size
 *
 * Returns 0 on success or a negative errno from the block lookup / bit clear.
 */
int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
		const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
		int array_size)
{
	u32 addr = mm_reg_addr + offset;
	int blk;

	blk = hl_get_pb_block(hdev, addr, pb_blocks, array_size);
	if (blk < 0)
		return blk;

	return hl_unset_pb_in_block(hdev, addr - pb_blocks[blk],
					&sgs_array[blk]);
}
/**
* hl_unsecure_register_range - locate the relevant block for this register
* range and remove corresponding protection bit
*
* @hdev: pointer to hl_device structure
* @mm_reg_range: register address range to unsecure
* @offset: additional offset to the register address
* @pb_blocks: blocks array
* @sgs_array: pb array
* @array_size: blocks array size
*
*/
static int hl_unsecure_register_range(struct hl_device *hdev,
struct range mm_reg_range, int offset, const u32 pb_blocks[],
struct hl_block_glbl_sec sgs_array[],
int array_size)
{
u32 reg_offset;
int i, block_num, rc = 0;
block_num = hl_get_pb_block(hdev,
mm_reg_range.start + offset, pb_blocks,
array_size);
if (block_num < 0)
return block_num;
for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
reg_offset = (i + offset) - pb_blocks[block_num];
rc |= hl_unset_pb_in_block(hdev, reg_offset,
&sgs_array[block_num]);
}
return rc;
}
/**
 * hl_unsecure_registers - locate the relevant block for all registers and
 * remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_array: register address array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Returns 0 on success, or the first failing register's errno.
 */
int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
		int mm_array_size, int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int idx, rc;

	/* stop at the first register that cannot be unsecured */
	for (idx = 0 ; idx < mm_array_size ; idx++) {
		rc = hl_unsecure_register(hdev, mm_reg_array[idx], offset,
				pb_blocks, sgs_array, blocks_array_size);
		if (rc)
			return rc;
	}

	return 0;
}
/**
 * hl_unsecure_registers_range - locate the relevant block for all register
 * ranges and remove corresponding protection bit
 *
 * @hdev: pointer to hl_device structure
 * @mm_reg_range_array: register address range array to unsecure
 * @mm_array_size: register array size
 * @offset: additional offset to the register address
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @blocks_array_size: blocks array size
 *
 * Returns 0 on success, or the first failing range's errno.
 */
static int hl_unsecure_registers_range(struct hl_device *hdev,
		const struct range mm_reg_range_array[], int mm_array_size,
		int offset, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
{
	int idx, rc;

	/* stop at the first range that cannot be unsecured */
	for (idx = 0 ; idx < mm_array_size ; idx++) {
		rc = hl_unsecure_register_range(hdev, mm_reg_range_array[idx],
				offset, pb_blocks, sgs_array, blocks_array_size);
		if (rc)
			return rc;
	}

	return 0;
}
/**
 * hl_ack_pb_security_violations - Ack security violation
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 * For every block, reads the error-cause register; a non-zero value means a
 * violation was recorded, so the offending address is printed and the cause
 * value is written back to acknowledge it.
 */
static void hl_ack_pb_security_violations(struct hl_device *hdev,
		const u32 pb_blocks[], u32 block_offset, int array_size)
{
	int i;
	u32 cause, addr, block_base;

	for (i = 0 ; i < array_size ; i++) {
		block_base = pb_blocks[i] + block_offset;
		cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
		if (cause) {
			addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
			hdev->asic_funcs->pb_print_security_errors(hdev,
					block_base, cause, addr);
			/* writing the cause value back acknowledges the
			 * violation (NOTE(review): presumably write-1-to-clear
			 * semantics - confirm against HW spec)
			 */
			WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
		}
	}
}
/**
 * hl_config_glbl_sec - set pb in HW according to given pb array
 *
 * @hdev: pointer to hl_device structure
 * @pb_blocks: blocks array
 * @sgs_array: pb array
 * @block_offset: additional offset to the block
 * @array_size: blocks array size
 *
 * Writes each block's protection-bit words into its GLBL_SEC registers.
 */
void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
		struct hl_block_glbl_sec sgs_array[], u32 block_offset,
		int array_size)
{
	int i, j;
	u32 sgs_base;

	/* extra settle time on the pldm (emulation) platform -
	 * NOTE(review): presumably to let prior accesses complete; confirm
	 */
	if (hdev->pldm)
		usleep_range(100, 1000);

	for (i = 0 ; i < array_size ; i++) {
		sgs_base = block_offset + pb_blocks[i] +
				HL_BLOCK_GLBL_SEC_OFFS;

		for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
			WREG32(sgs_base + j * sizeof(u32),
					sgs_array[i].sec_array[j]);
	}
}
/**
 * hl_secure_block - locally memsets a block to 0
 *
 * @hdev: pointer to hl_device structure
 * @sgs_array: pb array to clear
 * @array_size: blocks array size
 *
 * Zeroes every entry's security bit array (all bits cleared = fully secured).
 */
void hl_secure_block(struct hl_device *hdev,
		struct hl_block_glbl_sec sgs_array[], int array_size)
{
	int i;

	/* memset takes void *, no cast needed */
	for (i = 0 ; i < array_size ; i++)
		memset(sgs_array[i].sec_array, 0, HL_BLOCK_GLBL_SEC_SIZE);
}
/**
 * hl_init_pb_with_mask - set selected pb instances with mask in HW according
 * to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * unsecuring the register list.
 */
int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size, u64 mask)
{
	int i, j, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	/* propagate unsecure failures instead of silently programming a
	 * partially-built configuration (consistent with
	 * hl_init_pb_ranges_with_mask)
	 */
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				i * dcore_offset + j * instance_offset,
				blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}
/**
 * hl_init_pb - set pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 * Convenience wrapper: same as hl_init_pb_with_mask() with every instance
 * enabled (mask = ULLONG_MAX).
 */
int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_array, regs_array_size,
			ULLONG_MAX);
}
/**
 * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
 * given configuration unsecurring registers
 * ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * unsecuring the register ranges.
 */
int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size,
		u64 mask)
{
	int i, j, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	/* start fully secured, then clear bits for the unsecured ranges */
	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_dcores ; i++) {
		for (j = 0 ; j < num_instances ; j++) {
			int seq = i * num_instances + j;

			if (!(mask & BIT_ULL(seq)))
				continue;

			hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				i * dcore_offset + j * instance_offset,
				blocks_array_size);
		}
	}

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}
/**
 * hl_init_pb_ranges - set pb in HW according to given configuration unsecurring
 * registers ranges instead of specific registers
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 * Convenience wrapper: same as hl_init_pb_ranges_with_mask() with every
 * instance enabled (mask = ULLONG_MAX).
 */
int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
			num_instances, instance_offset, pb_blocks,
			blocks_array_size, regs_range_array,
			regs_range_array_size, ULLONG_MAX);
}
/**
 * hl_init_pb_single_dcore - set pb for a single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_array: register array
 * @regs_array_size: register array size
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * unsecuring the register list.
 */
int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const u32 *regs_array, u32 regs_array_size)
{
	int i, rc = 0;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	/* start fully secured, then clear bits for the unsecured registers */
	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
			pb_blocks, glbl_sec, blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}
/**
 * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
 * to given configuration unsecurring
 * registers ranges instead of specific
 * registers
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from the dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @regs_range_array: register range array
 * @regs_range_array_size: register range array size
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error from
 * unsecuring the register ranges.
 */
int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size,
		const struct range *regs_range_array, u32 regs_range_array_size)
{
	int i, rc;
	struct hl_block_glbl_sec *glbl_sec;

	glbl_sec = kcalloc(blocks_array_size,
			sizeof(struct hl_block_glbl_sec),
			GFP_KERNEL);
	if (!glbl_sec)
		return -ENOMEM;

	hl_secure_block(hdev, glbl_sec, blocks_array_size);
	/* previously the return value was silently dropped; propagate it so
	 * callers see unsecure failures (consistent with
	 * hl_init_pb_single_dcore)
	 */
	rc = hl_unsecure_registers_range(hdev, regs_range_array,
			regs_range_array_size, 0, pb_blocks, glbl_sec,
			blocks_array_size);
	if (rc)
		goto free_glbl_sec;

	/* Fill all blocks with the same configuration */
	for (i = 0 ; i < num_instances ; i++)
		hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
				dcore_offset + i * instance_offset,
				blocks_array_size);

free_glbl_sec:
	kfree(glbl_sec);

	return rc;
}
/**
 * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 * @mask: enabled instances mask: 1- enabled, 0- disabled
 *
 * Acknowledges outstanding security violations in every enabled instance.
 */
void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
		u32 dcore_offset, u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
{
	int dcore, inst;

	/* ack all blocks */
	for (dcore = 0 ; dcore < num_dcores ; dcore++) {
		for (inst = 0 ; inst < num_instances ; inst++) {
			if (!(mask & BIT_ULL(dcore * num_instances + inst)))
				continue;

			hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore * dcore_offset + inst * instance_offset,
				blocks_array_size);
		}
	}
}
/**
 * hl_ack_pb - ack pb in HW according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @num_dcores: number of decores to apply configuration to
 * set to HL_PB_SHARED if need to apply only once
 * @dcore_offset: offset between dcores
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 * Convenience wrapper: same as hl_ack_pb_with_mask() with every instance
 * enabled (mask = ULLONG_MAX).
 */
void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
			instance_offset, pb_blocks, blocks_array_size,
			ULLONG_MAX);
}
/**
 * hl_ack_pb_single_dcore - ack pb for single dcore in HW
 * according to given configuration
 *
 * @hdev: pointer to hl_device structure
 * @dcore_offset: offset from dcore0
 * @num_instances: number of instances to apply configuration to
 * @instance_offset: offset between instances
 * @pb_blocks: blocks array
 * @blocks_array_size: blocks array size
 *
 * Acknowledges outstanding security violations in each instance of one dcore.
 */
void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
		u32 num_instances, u32 instance_offset,
		const u32 pb_blocks[], u32 blocks_array_size)
{
	u32 inst;

	/* ack all blocks */
	for (inst = 0 ; inst < num_instances ; inst++)
		hl_ack_pb_security_violations(hdev, pb_blocks,
				dcore_offset + inst * instance_offset,
				blocks_array_size);
}

View File

@ -0,0 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
HL_GAUDI2_FILES := gaudi2/gaudi2.o gaudi2/gaudi2_security.o \
gaudi2/gaudi2_coresight.o

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,566 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
#ifndef GAUDI2P_H_
#define GAUDI2P_H_
#include <uapi/misc/habanalabs.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi2/gaudi2.h"
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_fw_if.h"
#include "../include/gaudi2/gaudi2_async_events.h"
#include "../include/gaudi2/gaudi2_async_virt_events.h"
#define GAUDI2_LINUX_FW_FILE "habanalabs/gaudi2/gaudi2-fit.itb"
#define GAUDI2_BOOT_FIT_FILE "habanalabs/gaudi2/gaudi2-boot-fit.itb"
#define MMU_PAGE_TABLES_INITIAL_SIZE 0x10000000 /* 256MB */
#define GAUDI2_CPU_TIMEOUT_USEC 30000000 /* 30s */
#define GAUDI2_FPGA_CPU_TIMEOUT 100000000 /* 100s */
#define NUMBER_OF_PDMA_QUEUES 2
#define NUMBER_OF_EDMA_QUEUES 8
#define NUMBER_OF_MME_QUEUES 4
#define NUMBER_OF_TPC_QUEUES 25
#define NUMBER_OF_NIC_QUEUES 24
#define NUMBER_OF_ROT_QUEUES 2
#define NUMBER_OF_CPU_QUEUES 1
#define NUMBER_OF_HW_QUEUES ((NUMBER_OF_PDMA_QUEUES + \
NUMBER_OF_EDMA_QUEUES + \
NUMBER_OF_MME_QUEUES + \
NUMBER_OF_TPC_QUEUES + \
NUMBER_OF_NIC_QUEUES + \
NUMBER_OF_ROT_QUEUES + \
NUMBER_OF_CPU_QUEUES) * \
NUM_OF_PQ_PER_QMAN)
#define NUMBER_OF_QUEUES (NUMBER_OF_CPU_QUEUES + NUMBER_OF_HW_QUEUES)
#define DCORE_NUM_OF_SOB \
(((mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_8191 - \
mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
#define DCORE_NUM_OF_MONITORS \
(((mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_2047 - \
mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0) + 4) >> 2)
#define NUMBER_OF_DEC ((NUM_OF_DEC_PER_DCORE * NUM_OF_DCORES) + NUMBER_OF_PCIE_DEC)
/* Map all arcs dccm + arc schedulers acp blocks */
#define NUM_OF_USER_ACP_BLOCKS (NUM_OF_SCHEDULER_ARC + 2)
#define NUM_OF_USER_NIC_UMR_BLOCKS 15
#define NUM_OF_EXPOSED_SM_BLOCKS ((NUM_OF_DCORES - 1) * 2)
#define NUM_USER_MAPPED_BLOCKS \
(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
NUM_OF_EXPOSED_SM_BLOCKS + \
(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
/* Within the user mapped array, decoder entries start post all the ARC related
* entries
*/
#define USR_MAPPED_BLK_DEC_START_IDX \
(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + \
(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
#define USR_MAPPED_BLK_SM_START_IDX \
(NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS + NUMBER_OF_DEC + \
(NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS))
#define SM_OBJS_BLOCK_SIZE (mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - \
mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0)
#define GAUDI2_MAX_PENDING_CS 64
#if !IS_MAX_PENDING_CS_VALID(GAUDI2_MAX_PENDING_CS)
#error "GAUDI2_MAX_PENDING_CS must be power of 2 and greater than 1"
#endif
#define CORESIGHT_TIMEOUT_USEC 100000 /* 100 ms */
#define GAUDI2_PREBOOT_REQ_TIMEOUT_USEC 25000000 /* 25s */
#define GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC 10000000 /* 10s */
#define GAUDI2_NIC_CLK_FREQ 450000000ull /* 450 MHz */
#define DC_POWER_DEFAULT 60000 /* 60W */
#define GAUDI2_HBM_NUM 6
#define DMA_MAX_TRANSFER_SIZE U32_MAX
#define GAUDI2_DEFAULT_CARD_NAME "HL225"
#define QMAN_STREAMS 4
#define PQ_FETCHER_CACHE_SIZE 8
#define NUM_OF_MME_SBTE_PORTS 5
#define NUM_OF_MME_WB_PORTS 2
#define GAUDI2_ENGINE_ID_DCORE_OFFSET \
(GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)
/* DRAM Memory Map */
#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */
/* This define should be used only when working in a debug mode without dram.
* When working with dram, the driver size will be calculated dynamically.
*/
#define NIC_DEFAULT_DRV_SIZE 0x20000000 /* 512MB */
#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE
#define NIC_NUMBER_OF_PORTS NIC_NUMBER_OF_ENGINES
#define NUMBER_OF_PCIE_DEC 2
#define PCIE_DEC_SHIFT 8
#define SRAM_USER_BASE_OFFSET 0
/* cluster binning */
#define MAX_FAULTY_HBMS 1
#define GAUDI2_XBAR_EDGE_FULL_MASK 0xF
#define GAUDI2_EDMA_FULL_MASK 0xFF
#define GAUDI2_DRAM_FULL_MASK 0x3F
/* Host virtual address space. */
#define VA_HOST_SPACE_PAGE_START 0xFFF0000000000000ull
#define VA_HOST_SPACE_PAGE_END 0xFFF0800000000000ull /* 140TB */
#define VA_HOST_SPACE_HPAGE_START 0xFFF0800000000000ull
#define VA_HOST_SPACE_HPAGE_END 0xFFF1000000000000ull /* 140TB */
#define VA_HOST_SPACE_USER_MAPPED_CB_START 0xFFF1000000000000ull
#define VA_HOST_SPACE_USER_MAPPED_CB_END 0xFFF1000100000000ull /* 4GB */
/* 140TB */
#define VA_HOST_SPACE_PAGE_SIZE (VA_HOST_SPACE_PAGE_END - VA_HOST_SPACE_PAGE_START)
/* 140TB */
#define VA_HOST_SPACE_HPAGE_SIZE (VA_HOST_SPACE_HPAGE_END - VA_HOST_SPACE_HPAGE_START)
#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_PAGE_SIZE + VA_HOST_SPACE_HPAGE_SIZE)
#define HOST_SPACE_INTERNAL_CB_SZ SZ_2M
/*
* HBM virtual address space
* Gaudi2 has 6 HBM devices, each supporting 16GB total of 96GB at most.
* No core separation is supported so we can have one chunk of virtual address
* space just above the physical ones.
* The virtual address space starts immediately after the end of the physical
* address space which is determined at run-time.
*/
#define VA_HBM_SPACE_END 0x1002000000000000ull
#define HW_CAP_PLL BIT_ULL(0)
#define HW_CAP_DRAM BIT_ULL(1)
#define HW_CAP_PMMU BIT_ULL(2)
#define HW_CAP_CPU BIT_ULL(3)
#define HW_CAP_MSIX BIT_ULL(4)
#define HW_CAP_CPU_Q BIT_ULL(5)
#define HW_CAP_CPU_Q_SHIFT 5
#define HW_CAP_CLK_GATE BIT_ULL(6)
#define HW_CAP_KDMA BIT_ULL(7)
#define HW_CAP_SRAM_SCRAMBLER BIT_ULL(8)
#define HW_CAP_DCORE0_DMMU0 BIT_ULL(9)
#define HW_CAP_DCORE0_DMMU1 BIT_ULL(10)
#define HW_CAP_DCORE0_DMMU2 BIT_ULL(11)
#define HW_CAP_DCORE0_DMMU3 BIT_ULL(12)
#define HW_CAP_DCORE1_DMMU0 BIT_ULL(13)
#define HW_CAP_DCORE1_DMMU1 BIT_ULL(14)
#define HW_CAP_DCORE1_DMMU2 BIT_ULL(15)
#define HW_CAP_DCORE1_DMMU3 BIT_ULL(16)
#define HW_CAP_DCORE2_DMMU0 BIT_ULL(17)
#define HW_CAP_DCORE2_DMMU1 BIT_ULL(18)
#define HW_CAP_DCORE2_DMMU2 BIT_ULL(19)
#define HW_CAP_DCORE2_DMMU3 BIT_ULL(20)
#define HW_CAP_DCORE3_DMMU0 BIT_ULL(21)
#define HW_CAP_DCORE3_DMMU1 BIT_ULL(22)
#define HW_CAP_DCORE3_DMMU2 BIT_ULL(23)
#define HW_CAP_DCORE3_DMMU3 BIT_ULL(24)
#define HW_CAP_DMMU_MASK GENMASK_ULL(24, 9)
#define HW_CAP_DMMU_SHIFT 9
#define HW_CAP_PDMA_MASK BIT_ULL(26)
#define HW_CAP_EDMA_MASK GENMASK_ULL(34, 27)
#define HW_CAP_EDMA_SHIFT 27
#define HW_CAP_MME_MASK GENMASK_ULL(38, 35)
#define HW_CAP_MME_SHIFT 35
#define HW_CAP_ROT_MASK GENMASK_ULL(40, 39)
#define HW_CAP_ROT_SHIFT 39
#define HW_CAP_HBM_SCRAMBLER_HW_RESET BIT_ULL(41)
#define HW_CAP_HBM_SCRAMBLER_SW_RESET BIT_ULL(42)
#define HW_CAP_HBM_SCRAMBLER_MASK (HW_CAP_HBM_SCRAMBLER_HW_RESET | \
HW_CAP_HBM_SCRAMBLER_SW_RESET)
#define HW_CAP_HBM_SCRAMBLER_SHIFT 41
#define HW_CAP_RESERVED BIT(43)
#define HW_CAP_MMU_MASK (HW_CAP_PMMU | HW_CAP_DMMU_MASK)
/* Range Registers */
#define RR_TYPE_SHORT 0
#define RR_TYPE_LONG 1
#define RR_TYPE_SHORT_PRIV 2
#define RR_TYPE_LONG_PRIV 3
#define NUM_SHORT_LBW_RR 14
#define NUM_LONG_LBW_RR 4
#define NUM_SHORT_HBW_RR 6
#define NUM_LONG_HBW_RR 4
/* RAZWI initiator coordinates- X- 5 bits, Y- 4 bits */
#define RAZWI_INITIATOR_X_SHIFT 0
#define RAZWI_INITIATOR_X_MASK 0x1F
#define RAZWI_INITIATOR_Y_SHIFT 5
#define RAZWI_INITIATOR_Y_MASK 0xF
#define RTR_ID_X_Y(x, y) \
((((y) & RAZWI_INITIATOR_Y_MASK) << RAZWI_INITIATOR_Y_SHIFT) | \
(((x) & RAZWI_INITIATOR_X_MASK) << RAZWI_INITIATOR_X_SHIFT))
/* decoders have separate mask */
#define HW_CAP_DEC_SHIFT 0
#define HW_CAP_DEC_MASK GENMASK_ULL(9, 0)
/* TPCs have separate mask */
#define HW_CAP_TPC_SHIFT 0
#define HW_CAP_TPC_MASK GENMASK_ULL(24, 0)
/* nics have separate mask */
#define HW_CAP_NIC_SHIFT 0
#define HW_CAP_NIC_MASK GENMASK_ULL(NIC_NUMBER_OF_ENGINES - 1, 0)
#define GAUDI2_ARC_PCI_MSB_ADDR(addr) (((addr) & GENMASK_ULL(49, 28)) >> 28)
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
/* Sync-object IDs reserved by the driver: CS completion (one per pending CS),
 * KDMA completion, and decoder normal/abnormal completion ranges.
 */
enum gaudi2_reserved_sob_id {
	GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
	GAUDI2_RESERVED_SOB_DEC_NRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_NRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_SOB_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_SOB_NUMBER
};
/* Monitor IDs reserved by the driver; the decoder ranges use 3 monitors per
 * decoder (hence the 3 * NUMBER_OF_DEC spans).
 */
enum gaudi2_reserved_mon_id {
	GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST,
	GAUDI2_RESERVED_MON_CS_COMPLETION_LAST =
			GAUDI2_RESERVED_MON_CS_COMPLETION_FIRST + GAUDI2_MAX_PENDING_CS - 1,
	GAUDI2_RESERVED_MON_KDMA_COMPLETION,
	GAUDI2_RESERVED_MON_DEC_NRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_NRM_LAST =
			GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST,
	GAUDI2_RESERVED_MON_DEC_ABNRM_LAST =
			GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * NUMBER_OF_DEC - 1,
	GAUDI2_RESERVED_MON_NUMBER
};
/* Completion-queue IDs reserved by the driver (CS and KDMA completions) */
enum gaudi2_reserved_cq_id {
	GAUDI2_RESERVED_CQ_CS_COMPLETION,
	GAUDI2_RESERVED_CQ_KDMA_COMPLETION,
	GAUDI2_RESERVED_CQ_NUMBER
};
/*
 * Gaudi2 substitute TPCs numbering
 * At most two faulty TPCs are allowed.
 * First replacement for a faulty TPC will be TPC24, second - TPC23.
 * (enum identifiers keep the historical "SUBTS"/"substitude" spelling as they
 * are referenced elsewhere)
 */
enum substitude_tpc {
	FAULTY_TPC_SUBTS_1_TPC_24,
	FAULTY_TPC_SUBTS_2_TPC_23,
	MAX_FAULTY_TPCS
};
/* DMA core enumeration; the trailing comment on each entry notes the dcore
 * the engine physically resides in.
 */
enum gaudi2_dma_core_id {
	DMA_CORE_ID_PDMA0, /* Dcore 0 */
	DMA_CORE_ID_PDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA0, /* Dcore 0 */
	DMA_CORE_ID_EDMA1, /* Dcore 0 */
	DMA_CORE_ID_EDMA2, /* Dcore 1 */
	DMA_CORE_ID_EDMA3, /* Dcore 1 */
	DMA_CORE_ID_EDMA4, /* Dcore 2 */
	DMA_CORE_ID_EDMA5, /* Dcore 2 */
	DMA_CORE_ID_EDMA6, /* Dcore 3 */
	DMA_CORE_ID_EDMA7, /* Dcore 3 */
	DMA_CORE_ID_KDMA, /* Dcore 0 */
	DMA_CORE_ID_SIZE
};
/* Rotator engine enumeration (two instances) */
enum gaudi2_rotator_id {
ROTATOR_ID_0,
ROTATOR_ID_1,
ROTATOR_ID_SIZE,
};
/* MME engine enumeration - one MME per Dcore */
enum gaudi2_mme_id {
MME_ID_DCORE0,
MME_ID_DCORE1,
MME_ID_DCORE2,
MME_ID_DCORE3,
MME_ID_SIZE,
};
/*
 * TPC engine enumeration - six TPCs per Dcore, plus the extra Dcore0 TPC6
 * ("PCI TPC") which is placed last to match the H/W mapping.
 */
enum gaudi2_tpc_id {
TPC_ID_DCORE0_TPC0,
TPC_ID_DCORE0_TPC1,
TPC_ID_DCORE0_TPC2,
TPC_ID_DCORE0_TPC3,
TPC_ID_DCORE0_TPC4,
TPC_ID_DCORE0_TPC5,
TPC_ID_DCORE1_TPC0,
TPC_ID_DCORE1_TPC1,
TPC_ID_DCORE1_TPC2,
TPC_ID_DCORE1_TPC3,
TPC_ID_DCORE1_TPC4,
TPC_ID_DCORE1_TPC5,
TPC_ID_DCORE2_TPC0,
TPC_ID_DCORE2_TPC1,
TPC_ID_DCORE2_TPC2,
TPC_ID_DCORE2_TPC3,
TPC_ID_DCORE2_TPC4,
TPC_ID_DCORE2_TPC5,
TPC_ID_DCORE3_TPC0,
TPC_ID_DCORE3_TPC1,
TPC_ID_DCORE3_TPC2,
TPC_ID_DCORE3_TPC3,
TPC_ID_DCORE3_TPC4,
TPC_ID_DCORE3_TPC5,
/* the PCI TPC is placed last (mapped liked HW) */
TPC_ID_DCORE0_TPC6,
TPC_ID_SIZE,
};
/*
 * Decoder engine enumeration - two decoders per Dcore plus two shared
 * PCIe decoders.
 */
enum gaudi2_dec_id {
DEC_ID_DCORE0_DEC0,
DEC_ID_DCORE0_DEC1,
DEC_ID_DCORE1_DEC0,
DEC_ID_DCORE1_DEC1,
DEC_ID_DCORE2_DEC0,
DEC_ID_DCORE2_DEC1,
DEC_ID_DCORE3_DEC0,
DEC_ID_DCORE3_DEC1,
DEC_ID_PCIE_VDEC0,
DEC_ID_PCIE_VDEC1,
DEC_ID_SIZE,
};
/* HBM device enumeration (six stacks) */
enum gaudi2_hbm_id {
HBM_ID0,
HBM_ID1,
HBM_ID2,
HBM_ID3,
HBM_ID4,
HBM_ID5,
HBM_ID_SIZE,
};
/* Specific EDMA enumeration - two EDMA instances per Dcore */
enum gaudi2_edma_id {
EDMA_ID_DCORE0_INSTANCE0,
EDMA_ID_DCORE0_INSTANCE1,
EDMA_ID_DCORE1_INSTANCE0,
EDMA_ID_DCORE1_INSTANCE1,
EDMA_ID_DCORE2_INSTANCE0,
EDMA_ID_DCORE2_INSTANCE1,
EDMA_ID_DCORE3_INSTANCE0,
EDMA_ID_DCORE3_INSTANCE1,
EDMA_ID_SIZE,
};
/* User interrupt count is aligned with HW CQ count.
 * We have 64 CQ's per dcore, CQ0 in dcore 0 is reserved for legacy mode
 */
#define GAUDI2_NUM_USER_INTERRUPTS 255
/*
 * MSI-X vector assignment: the event queue vector comes first, followed by
 * the fixed per-decoder normal/abnormal vectors, the completion vector, the
 * NIC port vectors, a reserved gap, and finally the user-interrupt range
 * which occupies the tail of the MSI-X table.
 */
enum gaudi2_irq_num {
GAUDI2_IRQ_NUM_EVENT_QUEUE = GAUDI2_EVENT_QUEUE_MSIX_IDX,
GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM,
GAUDI2_IRQ_NUM_DCORE0_DEC0_ABNRM,
GAUDI2_IRQ_NUM_DCORE0_DEC1_NRM,
GAUDI2_IRQ_NUM_DCORE0_DEC1_ABNRM,
GAUDI2_IRQ_NUM_DCORE1_DEC0_NRM,
GAUDI2_IRQ_NUM_DCORE1_DEC0_ABNRM,
GAUDI2_IRQ_NUM_DCORE1_DEC1_NRM,
GAUDI2_IRQ_NUM_DCORE1_DEC1_ABNRM,
GAUDI2_IRQ_NUM_DCORE2_DEC0_NRM,
GAUDI2_IRQ_NUM_DCORE2_DEC0_ABNRM,
GAUDI2_IRQ_NUM_DCORE2_DEC1_NRM,
GAUDI2_IRQ_NUM_DCORE2_DEC1_ABNRM,
GAUDI2_IRQ_NUM_DCORE3_DEC0_NRM,
GAUDI2_IRQ_NUM_DCORE3_DEC0_ABNRM,
GAUDI2_IRQ_NUM_DCORE3_DEC1_NRM,
GAUDI2_IRQ_NUM_DCORE3_DEC1_ABNRM,
GAUDI2_IRQ_NUM_SHARED_DEC0_NRM,
GAUDI2_IRQ_NUM_SHARED_DEC0_ABNRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_NRM,
GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM,
GAUDI2_IRQ_NUM_COMPLETION,
GAUDI2_IRQ_NUM_NIC_PORT_FIRST,
GAUDI2_IRQ_NUM_NIC_PORT_LAST = (GAUDI2_IRQ_NUM_NIC_PORT_FIRST + NIC_NUMBER_OF_PORTS - 1),
GAUDI2_IRQ_NUM_RESERVED_FIRST,
GAUDI2_IRQ_NUM_RESERVED_LAST = (GAUDI2_MSIX_ENTRIES - GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_USER_FIRST,
GAUDI2_IRQ_NUM_USER_LAST = (GAUDI2_IRQ_NUM_USER_FIRST + GAUDI2_NUM_USER_INTERRUPTS - 1),
GAUDI2_IRQ_NUM_LAST = (GAUDI2_MSIX_ENTRIES - 1)
};
/* The user-interrupt range must start after the last fixed (decoder) vector */
static_assert(GAUDI2_IRQ_NUM_USER_FIRST > GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM);
/**
 * struct dup_block_ctx - context to initialize unit instances across multiple
 *                        blocks where a block can be either a dcore or a
 *                        duplicated common module. This code relies on
 *                        constant offsets of blocks and unit instances in a
 *                        block.
 * @instance_cfg_fn: instance specific configuration function.
 * @data: private configuration data.
 * @base: base address of the first instance in the first block.
 * @block_off: subsequent blocks address spacing.
 * @instance_off: subsequent block's instances address spacing.
 * @enabled_mask: mask of enabled instances (1- enabled, 0- disabled).
 * @blocks: number of blocks.
 * @instances: unit instances per block.
 */
struct dup_block_ctx {
void (*instance_cfg_fn)(struct hl_device *hdev, u64 base, void *data);
void *data;
u64 base;
u64 block_off;
u64 instance_off;
u64 enabled_mask;
unsigned int blocks;
unsigned int instances;
};
/**
 * struct gaudi2_device - ASIC specific manage structure.
 * @cpucp_info_get: get information on device from CPU-CP
 * @mapped_blocks: array that holds the base address and size of all blocks
 *                 the user can map.
 * @lfsr_rand_seeds: array of MME ACC random seeds to set.
 * @hw_queues_lock: protects the H/W queues from concurrent access.
 * @kdma_lock: protects the KDMA engine from concurrent access.
 * @scratchpad_kernel_address: general purpose PAGE_SIZE contiguous memory,
 *                             this memory region should be write-only.
 *                             currently used for HBW QMAN writes which is
 *                             redundant.
 * @scratchpad_bus_address: scratchpad bus address
 * @virt_msix_db_cpu_addr: host memory page for the virtual MSI-X doorbell.
 * @virt_msix_db_dma_addr: bus address of the page for the virtual MSI-X doorbell.
 * @dram_bar_cur_addr: current address of DRAM PCI bar.
 * @hw_cap_initialized: This field contains a bit per H/W engine. When that
 *                      engine is initialized, that bit is set by the driver to
 *                      signal we can use this engine in later code paths.
 *                      Each bit is cleared upon reset of its corresponding H/W
 *                      engine.
 * @active_hw_arc: This field contains a bit per ARC of an H/W engine with
 *                 exception of TPC and NIC engines. Once an engine arc is
 *                 initialized, its respective bit is set. Driver can uniquely
 *                 identify each initialized ARC and use this information in
 *                 later code paths. Each respective bit is cleared upon reset
 *                 of its corresponding ARC of the H/W engine.
 * @dec_hw_cap_initialized: This field contains a bit per decoder H/W engine.
 *                          When that engine is initialized, that bit is set by
 *                          the driver to signal we can use this engine in later
 *                          code paths.
 *                          Each bit is cleared upon reset of its corresponding H/W
 *                          engine.
 * @tpc_hw_cap_initialized: This field contains a bit per TPC H/W engine.
 *                          When that engine is initialized, that bit is set by
 *                          the driver to signal we can use this engine in later
 *                          code paths.
 *                          Each bit is cleared upon reset of its corresponding H/W
 *                          engine.
 * @active_tpc_arc: This field contains a bit per ARC of the TPC engines.
 *                  Once an engine arc is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the TPC engine.
 * @nic_hw_cap_initialized: This field contains a bit per nic H/W engine.
 * @active_nic_arc: This field contains a bit per ARC of the NIC engines.
 *                  Once an engine arc is initialized, its respective bit is
 *                  set. Each respective bit is cleared upon reset of its
 *                  corresponding ARC of the NIC engine.
 * @hw_events: array that holds all H/W events that are defined valid.
 * @events_stat: array that holds histogram of all received events.
 * @events_stat_aggregate: same as events_stat but doesn't get cleared on reset.
 * @num_of_valid_hw_events: used to hold the number of valid H/W events.
 */
struct gaudi2_device {
int (*cpucp_info_get)(struct hl_device *hdev);
struct user_mapped_block mapped_blocks[NUM_USER_MAPPED_BLOCKS];
int lfsr_rand_seeds[MME_NUM_OF_LFSR_SEEDS];
spinlock_t hw_queues_lock;
spinlock_t kdma_lock;
void *scratchpad_kernel_address;
dma_addr_t scratchpad_bus_address;
void *virt_msix_db_cpu_addr;
dma_addr_t virt_msix_db_dma_addr;
u64 dram_bar_cur_addr;
u64 hw_cap_initialized;
u64 active_hw_arc;
u64 dec_hw_cap_initialized;
u64 tpc_hw_cap_initialized;
u64 active_tpc_arc;
u64 nic_hw_cap_initialized;
u64 active_nic_arc;
u32 hw_events[GAUDI2_EVENT_SIZE];
u32 events_stat[GAUDI2_EVENT_SIZE];
u32 events_stat_aggregate[GAUDI2_EVENT_SIZE];
u32 num_of_valid_hw_events;
};
/* Per-engine register block base address tables (defined in gaudi2 main code) */
extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE];
extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE];
extern const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE];
extern const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES];
extern const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE];
/* Iterate over all TPC instances with the given iteration context */
void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx);
/* Coresight (debug/trace) support */
int gaudi2_coresight_init(struct hl_device *hdev);
int gaudi2_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
void gaudi2_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
/* Configure all enabled instances described by @cfg_ctx (see struct dup_block_ctx) */
void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx);
bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id);
/* Program a LBW range register entry on all LBW routers */
void gaudi2_write_rr_to_all_lbw_rtrs(struct hl_device *hdev, u8 rr_type, u32 rr_index, u64 min_val,
u64 max_val);
/* Security (protection bits) helpers */
void gaudi2_pb_print_security_errors(struct hl_device *hdev, u32 block_addr, u32 cause,
u32 offended_addr);
int gaudi2_init_security(struct hl_device *hdev);
void gaudi2_ack_protection_bits_errors(struct hl_device *hdev);
#endif /* GAUDI2P_H_ */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,141 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2020-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
#ifndef GAUDI2_MASKS_H_
#define GAUDI2_MASKS_H_
#include "../include/gaudi2/asic_reg/gaudi2_regs.h"
/* Useful masks for bits in various registers */
/* Enable error-message reporting on the 4 PQF, 5 CQF and 5 CP QMAN pipes */
#define QMAN_GLBL_ERR_CFG_MSG_EN_MASK \
((0xF << PDMA0_QM_GLBL_ERR_CFG_PQF_ERR_MSG_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_ERR_CFG_CQF_ERR_MSG_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_ERR_CFG_CP_ERR_MSG_EN_SHIFT))
/* Stop QMAN pipes (and the arbiter) on error */
#define QMAN_GLBL_ERR_CFG_STOP_ON_ERR_EN_MASK \
((0xF << PDMA0_QM_GLBL_ERR_CFG_PQF_STOP_ON_ERR_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_ERR_CFG_CQF_STOP_ON_ERR_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_ERR_CFG_CP_STOP_ON_ERR_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_ERR_CFG_ARB_STOP_ON_ERR_SHIFT))
#define QMAN_GLBL_ERR_CFG1_MSG_EN_MASK \
(0x1 << PDMA0_QM_GLBL_ERR_CFG1_CQF_ERR_MSG_EN_SHIFT)
#define QMAN_GLBL_ERR_CFG1_STOP_ON_ERR_EN_MASK \
((0x1 << PDMA0_QM_GLBL_ERR_CFG1_CQF_STOP_ON_ERR_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_ERR_CFG1_ARC_STOP_ON_ERR_SHIFT))
/* LBW write data used by the PQC - increments a sync object by one */
#define QM_PQC_LBW_WDATA \
((1 << DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_SHIFT) | \
(1 << DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_SHIFT))
/* QMAN protection (trusted/privileged) configurations */
#define QMAN_MAKE_TRUSTED \
((0xF << PDMA0_QM_GLBL_PROT_PQF_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_PROT_ERR_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_PROT_PQC_SHIFT))
#define QMAN_MAKE_TRUSTED_TEST_MODE \
((0xF << PDMA0_QM_GLBL_PROT_PQF_SHIFT) | \
(0xF << PDMA0_QM_GLBL_PROT_CQF_SHIFT) | \
(0xF << PDMA0_QM_GLBL_PROT_CP_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_PROT_ERR_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_PROT_PQC_SHIFT))
/* QMAN enable values - PDMA1 exposes only 2 PQFs, hence the separate macro */
#define QMAN_ENABLE \
((0xF << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
#define PDMA1_QMAN_ENABLE \
((0x3 << PDMA0_QM_GLBL_CFG0_PQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CQF_EN_SHIFT) | \
(0x1F << PDMA0_QM_GLBL_CFG0_CP_EN_SHIFT) | \
(0x1 << PDMA0_QM_GLBL_CFG0_ARC_CQF_EN_SHIFT))
/* QM_IDLE_MASK is valid for all engines QM idle check */
#define QM_IDLE_MASK (DCORE0_EDMA0_QM_GLBL_STS0_PQF_IDLE_MASK | \
DCORE0_EDMA0_QM_GLBL_STS0_CQF_IDLE_MASK | \
DCORE0_EDMA0_QM_GLBL_STS0_CP_IDLE_MASK)
#define QM_ARC_IDLE_MASK DCORE0_EDMA0_QM_GLBL_STS1_ARC_CQF_IDLE_MASK
#define MME_ARCH_IDLE_MASK \
(DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_EMPTY_MASK | \
DCORE0_MME_CTRL_LO_ARCH_STATUS_AGU_COUT_SM_IDLE_MASK | \
DCORE0_MME_CTRL_LO_ARCH_STATUS_WBC_AXI_IDLE_MASK | \
DCORE0_MME_CTRL_LO_ARCH_STATUS_SB_IN_AXI_IDLE_MASK | \
DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_IDLE_MASK | \
DCORE0_MME_CTRL_LO_ARCH_STATUS_QM_RDY_MASK)
#define TPC_IDLE_MASK (DCORE0_TPC0_CFG_STATUS_SCALAR_PIPE_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_VECTOR_PIPE_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_IQ_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_SB_EMPTY_MASK | \
DCORE0_TPC0_CFG_STATUS_QM_IDLE_MASK | \
DCORE0_TPC0_CFG_STATUS_QM_RDY_MASK)
#define DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK 0x100
/* CGM_IDLE_MASK is valid for all engines CGM idle check */
#define CGM_IDLE_MASK DCORE0_TPC0_QM_CGM_STS_AGENT_IDLE_MASK
/* QMAN stop/flush control bits (shared layout across all QMANs) */
#define QM_GLBL_CFG1_PQF_STOP PDMA0_QM_GLBL_CFG1_PQF_STOP_MASK
#define QM_GLBL_CFG1_CQF_STOP PDMA0_QM_GLBL_CFG1_CQF_STOP_MASK
#define QM_GLBL_CFG1_CP_STOP PDMA0_QM_GLBL_CFG1_CP_STOP_MASK
#define QM_GLBL_CFG1_PQF_FLUSH PDMA0_QM_GLBL_CFG1_PQF_FLUSH_MASK
#define QM_GLBL_CFG1_CQF_FLUSH PDMA0_QM_GLBL_CFG1_CQF_FLUSH_MASK
#define QM_GLBL_CFG1_CP_FLUSH PDMA0_QM_GLBL_CFG1_CP_FLUSH_MASK
#define QM_GLBL_CFG2_ARC_CQF_STOP PDMA0_QM_GLBL_CFG2_ARC_CQF_STOP_MASK
#define QM_GLBL_CFG2_ARC_CQF_FLUSH PDMA0_QM_GLBL_CFG2_ARC_CQF_FLUSH_MASK
/* QMAN arbiter error-message enable bits */
#define QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK 0x1
#define QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK 0x2
#define QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK 0x4
#define QM_ARB_ERR_MSG_EN_MASK (\
QM_ARB_ERR_MSG_EN_CHOISE_OVF_MASK |\
QM_ARB_ERR_MSG_EN_CHOISE_WDT_MASK |\
QM_ARB_ERR_MSG_EN_AXI_LBW_ERR_MASK)
#define PCIE_AUX_FLR_CTRL_HW_CTRL_MASK 0x1
#define PCIE_AUX_FLR_CTRL_INT_MASK_MASK 0x2
/* MME accumulator interrupt mask bits */
#define MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK GENMASK(1, 0)
#define MME_ACC_INTR_MASK_AP_SRC_POS_INF_MASK BIT(2)
#define MME_ACC_INTR_MASK_AP_SRC_NEG_INF_MASK BIT(3)
#define MME_ACC_INTR_MASK_AP_SRC_NAN_MASK BIT(4)
#define MME_ACC_INTR_MASK_AP_RESULT_POS_INF_MASK BIT(5)
#define MME_ACC_INTR_MASK_AP_RESULT_NEG_INF_MASK BIT(6)
/* Sync manager CQ LBW-to-HBW (L2H) address match values */
#define SM_CQ_L2H_MASK_VAL 0xFFFFFFFFFC000000ull
#define SM_CQ_L2H_CMPR_VAL 0x1000007FFC000000ull
#define SM_CQ_L2H_LOW_MASK GENMASK(31, 20)
#define SM_CQ_L2H_LOW_SHIFT 20
#define MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK \
REG_FIELD_MASK(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE, HOP4_PAGE_SIZE)
#define STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK \
REG_FIELD_MASK(DCORE0_HMMU0_STLB_HOP_CONFIGURATION, ONLY_LARGE_PAGE)
/* AXI user (AXUSER) security fields: 10-bit ASID plus MMU-bypass bit */
#define AXUSER_HB_SEC_ASID_MASK 0x3FF
#define AXUSER_HB_SEC_MMBP_MASK 0x400
#define MMUBP_ASID_MASK (AXUSER_HB_SEC_ASID_MASK | AXUSER_HB_SEC_MMBP_MASK)
#define ROT_MSS_HALT_WBC_MASK BIT(0)
#define ROT_MSS_HALT_RSB_MASK BIT(1)
#define ROT_MSS_HALT_MRSB_MASK BIT(2)
#define PCIE_DBI_MSIX_ADDRESS_MATCH_LOW_OFF_MSIX_ADDRESS_MATCH_EN_SHIFT 0
#define PCIE_DBI_MSIX_ADDRESS_MATCH_LOW_OFF_MSIX_ADDRESS_MATCH_EN_MASK 0x1
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_SHIFT 15
#define DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK 0x8000
#endif /* GAUDI2_MASKS_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,213 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2020 HabanaLabs Ltd.
* All Rights Reserved.
*/
#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
#define __GAUDI2_ARC_COMMON_PACKETS_H__
/*
 * CPU IDs for each ARC CPUs
 *
 * Six ARCs (four farm ARCs plus the Dcore1/Dcore3 MME ARCs) serve as
 * schedulers; the rest are per-engine queue-manager ARCs (TPC, MME, EDMA,
 * PDMA, rotator and NIC).
 */
#define CPU_ID_SCHED_ARC0 0 /* FARM_ARC0 */
#define CPU_ID_SCHED_ARC1 1 /* FARM_ARC1 */
#define CPU_ID_SCHED_ARC2 2 /* FARM_ARC2 */
#define CPU_ID_SCHED_ARC3 3 /* FARM_ARC3 */
/* Dcore1 MME Engine ARC instance used as scheduler */
#define CPU_ID_SCHED_ARC4 4 /* DCORE1_MME0 */
/* Dcore3 MME Engine ARC instance used as scheduler */
#define CPU_ID_SCHED_ARC5 5 /* DCORE3_MME0 */
#define CPU_ID_TPC_QMAN_ARC0 6 /* DCORE0_TPC0 */
#define CPU_ID_TPC_QMAN_ARC1 7 /* DCORE0_TPC1 */
#define CPU_ID_TPC_QMAN_ARC2 8 /* DCORE0_TPC2 */
#define CPU_ID_TPC_QMAN_ARC3 9 /* DCORE0_TPC3 */
#define CPU_ID_TPC_QMAN_ARC4 10 /* DCORE0_TPC4 */
#define CPU_ID_TPC_QMAN_ARC5 11 /* DCORE0_TPC5 */
#define CPU_ID_TPC_QMAN_ARC6 12 /* DCORE1_TPC0 */
#define CPU_ID_TPC_QMAN_ARC7 13 /* DCORE1_TPC1 */
#define CPU_ID_TPC_QMAN_ARC8 14 /* DCORE1_TPC2 */
#define CPU_ID_TPC_QMAN_ARC9 15 /* DCORE1_TPC3 */
#define CPU_ID_TPC_QMAN_ARC10 16 /* DCORE1_TPC4 */
#define CPU_ID_TPC_QMAN_ARC11 17 /* DCORE1_TPC5 */
#define CPU_ID_TPC_QMAN_ARC12 18 /* DCORE2_TPC0 */
#define CPU_ID_TPC_QMAN_ARC13 19 /* DCORE2_TPC1 */
#define CPU_ID_TPC_QMAN_ARC14 20 /* DCORE2_TPC2 */
#define CPU_ID_TPC_QMAN_ARC15 21 /* DCORE2_TPC3 */
#define CPU_ID_TPC_QMAN_ARC16 22 /* DCORE2_TPC4 */
#define CPU_ID_TPC_QMAN_ARC17 23 /* DCORE2_TPC5 */
#define CPU_ID_TPC_QMAN_ARC18 24 /* DCORE3_TPC0 */
#define CPU_ID_TPC_QMAN_ARC19 25 /* DCORE3_TPC1 */
#define CPU_ID_TPC_QMAN_ARC20 26 /* DCORE3_TPC2 */
#define CPU_ID_TPC_QMAN_ARC21 27 /* DCORE3_TPC3 */
#define CPU_ID_TPC_QMAN_ARC22 28 /* DCORE3_TPC4 */
#define CPU_ID_TPC_QMAN_ARC23 29 /* DCORE3_TPC5 */
#define CPU_ID_TPC_QMAN_ARC24 30 /* DCORE0_TPC6 - Never present */
#define CPU_ID_MME_QMAN_ARC0 31 /* DCORE0_MME0 */
#define CPU_ID_MME_QMAN_ARC1 32 /* DCORE2_MME0 */
#define CPU_ID_EDMA_QMAN_ARC0 33 /* DCORE0_EDMA0 */
#define CPU_ID_EDMA_QMAN_ARC1 34 /* DCORE0_EDMA1 */
#define CPU_ID_EDMA_QMAN_ARC2 35 /* DCORE1_EDMA0 */
#define CPU_ID_EDMA_QMAN_ARC3 36 /* DCORE1_EDMA1 */
#define CPU_ID_EDMA_QMAN_ARC4 37 /* DCORE2_EDMA0 */
#define CPU_ID_EDMA_QMAN_ARC5 38 /* DCORE2_EDMA1 */
#define CPU_ID_EDMA_QMAN_ARC6 39 /* DCORE3_EDMA0 */
#define CPU_ID_EDMA_QMAN_ARC7 40 /* DCORE3_EDMA1 */
#define CPU_ID_PDMA_QMAN_ARC0 41 /* DCORE0_PDMA0 */
#define CPU_ID_PDMA_QMAN_ARC1 42 /* DCORE0_PDMA1 */
#define CPU_ID_ROT_QMAN_ARC0 43 /* ROT0 */
#define CPU_ID_ROT_QMAN_ARC1 44 /* ROT1 */
#define CPU_ID_NIC_QMAN_ARC0 45 /* NIC0_0 */
#define CPU_ID_NIC_QMAN_ARC1 46 /* NIC0_1 */
#define CPU_ID_NIC_QMAN_ARC2 47 /* NIC1_0 */
#define CPU_ID_NIC_QMAN_ARC3 48 /* NIC1_1 */
#define CPU_ID_NIC_QMAN_ARC4 49 /* NIC2_0 */
#define CPU_ID_NIC_QMAN_ARC5 50 /* NIC2_1 */
#define CPU_ID_NIC_QMAN_ARC6 51 /* NIC3_0 */
#define CPU_ID_NIC_QMAN_ARC7 52 /* NIC3_1 */
#define CPU_ID_NIC_QMAN_ARC8 53 /* NIC4_0 */
#define CPU_ID_NIC_QMAN_ARC9 54 /* NIC4_1 */
#define CPU_ID_NIC_QMAN_ARC10 55 /* NIC5_0 */
#define CPU_ID_NIC_QMAN_ARC11 56 /* NIC5_1 */
#define CPU_ID_NIC_QMAN_ARC12 57 /* NIC6_0 */
#define CPU_ID_NIC_QMAN_ARC13 58 /* NIC6_1 */
#define CPU_ID_NIC_QMAN_ARC14 59 /* NIC7_0 */
#define CPU_ID_NIC_QMAN_ARC15 60 /* NIC7_1 */
#define CPU_ID_NIC_QMAN_ARC16 61 /* NIC8_0 */
#define CPU_ID_NIC_QMAN_ARC17 62 /* NIC8_1 */
#define CPU_ID_NIC_QMAN_ARC18 63 /* NIC9_0 */
#define CPU_ID_NIC_QMAN_ARC19 64 /* NIC9_1 */
#define CPU_ID_NIC_QMAN_ARC20 65 /* NIC10_0 */
#define CPU_ID_NIC_QMAN_ARC21 66 /* NIC10_1 */
#define CPU_ID_NIC_QMAN_ARC22 67 /* NIC11_0 */
#define CPU_ID_NIC_QMAN_ARC23 68 /* NIC11_1 */
#define CPU_ID_MAX 69
#define CPU_ID_SCHED_MAX 6
/* Special CPU ID values - NOTE(review): 0xFE appears to address all ARCs and
 * 0xFF marks an invalid/unassigned ID; confirm against firmware spec.
 */
#define CPU_ID_ALL 0xFE
#define CPU_ID_INVALID 0xFF
/*
 * ARC address-space regions. Each region is a 256MB window in the ARC's
 * 32-bit address space (region N covers 0xN000_0000), remapped via the
 * listed AUX extension registers where applicable.
 */
enum arc_regions_t {
ARC_REGION0_UNSED = 0,
/*
 * Extension registers
 * None
 */
ARC_REGION1_SRAM = 1,
/*
 * Extension registers
 * AUX_SRAM_LSB_ADDR
 * AUX_SRAM_MSB_ADDR
 * ARC Address: 0x1000_0000
 */
ARC_REGION2_CFG = 2,
/*
 * Extension registers
 * AUX_CFG_LSB_ADDR
 * AUX_CFG_MSB_ADDR
 * ARC Address: 0x2000_0000
 */
ARC_REGION3_GENERAL = 3,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_0
 * AUX_GENERAL_PURPOSE_MSB_ADDR_0
 * ARC Address: 0x3000_0000
 */
ARC_REGION4_HBM0_FW = 4,
/*
 * Extension registers
 * AUX_HBM0_LSB_ADDR
 * AUX_HBM0_MSB_ADDR
 * AUX_HBM0_OFFSET
 * ARC Address: 0x4000_0000
 */
ARC_REGION5_HBM1_GC_DATA = 5,
/*
 * Extension registers
 * AUX_HBM1_LSB_ADDR
 * AUX_HBM1_MSB_ADDR
 * AUX_HBM1_OFFSET
 * ARC Address: 0x5000_0000
 */
ARC_REGION6_HBM2_GC_DATA = 6,
/*
 * Extension registers
 * AUX_HBM2_LSB_ADDR
 * AUX_HBM2_MSB_ADDR
 * AUX_HBM2_OFFSET
 * ARC Address: 0x6000_0000
 */
ARC_REGION7_HBM3_GC_DATA = 7,
/*
 * Extension registers
 * AUX_HBM3_LSB_ADDR
 * AUX_HBM3_MSB_ADDR
 * AUX_HBM3_OFFSET
 * ARC Address: 0x7000_0000
 */
ARC_REGION8_DCCM = 8,
/*
 * Extension registers
 * None
 * ARC Address: 0x8000_0000
 */
ARC_REGION9_PCIE = 9,
/*
 * Extension registers
 * AUX_PCIE_LSB_ADDR
 * AUX_PCIE_MSB_ADDR
 * ARC Address: 0x9000_0000
 */
ARC_REGION10_GENERAL = 10,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_1
 * AUX_GENERAL_PURPOSE_MSB_ADDR_1
 * ARC Address: 0xA000_0000
 */
ARC_REGION11_GENERAL = 11,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_2
 * AUX_GENERAL_PURPOSE_MSB_ADDR_2
 * ARC Address: 0xB000_0000
 */
ARC_REGION12_GENERAL = 12,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_3
 * AUX_GENERAL_PURPOSE_MSB_ADDR_3
 * ARC Address: 0xC000_0000
 */
ARC_REGION13_GENERAL = 13,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_4
 * AUX_GENERAL_PURPOSE_MSB_ADDR_4
 * ARC Address: 0xD000_0000
 */
ARC_REGION14_GENERAL = 14,
/*
 * Extension registers
 * AUX_GENERAL_PURPOSE_LSB_ADDR_5
 * AUX_GENERAL_PURPOSE_MSB_ADDR_5
 * ARC Address: 0xE000_0000
 */
ARC_REGION15_LBU = 15
/*
 * Extension registers
 * None
 * ARC Address: 0xF000_0000
 */
};
#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */

View File

@ -0,0 +1,567 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_
#define ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_
/*
*****************************************
* ARC_FARM_ARC0_ACP_ENG
* (Prototype: ARC_ACP_ENG)
*****************************************
*/
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_0 0x4E8F000
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_1 0x4E8F004
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_2 0x4E8F008
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_3 0x4E8F00C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_4 0x4E8F010
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_5 0x4E8F014
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_6 0x4E8F018
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_7 0x4E8F01C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_8 0x4E8F020
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_9 0x4E8F024
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_10 0x4E8F028
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_11 0x4E8F02C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_12 0x4E8F030
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_13 0x4E8F034
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_14 0x4E8F038
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_15 0x4E8F03C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_16 0x4E8F040
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_17 0x4E8F044
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_18 0x4E8F048
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_19 0x4E8F04C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_20 0x4E8F050
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_21 0x4E8F054
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_22 0x4E8F058
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_23 0x4E8F05C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_24 0x4E8F060
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_25 0x4E8F064
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_26 0x4E8F068
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_27 0x4E8F06C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_28 0x4E8F070
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_29 0x4E8F074
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_30 0x4E8F078
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_31 0x4E8F07C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_32 0x4E8F080
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_33 0x4E8F084
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_34 0x4E8F088
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_35 0x4E8F08C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_36 0x4E8F090
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_37 0x4E8F094
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_38 0x4E8F098
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_39 0x4E8F09C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_40 0x4E8F0A0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_41 0x4E8F0A4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_42 0x4E8F0A8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_43 0x4E8F0AC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_44 0x4E8F0B0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_45 0x4E8F0B4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_46 0x4E8F0B8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_47 0x4E8F0BC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_48 0x4E8F0C0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_49 0x4E8F0C4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_50 0x4E8F0C8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_51 0x4E8F0CC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_52 0x4E8F0D0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_53 0x4E8F0D4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_54 0x4E8F0D8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_55 0x4E8F0DC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_56 0x4E8F0E0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_57 0x4E8F0E4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_58 0x4E8F0E8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_59 0x4E8F0EC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_60 0x4E8F0F0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_61 0x4E8F0F4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_62 0x4E8F0F8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PI_REG_63 0x4E8F0FC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_0 0x4E8F100
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_1 0x4E8F104
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_2 0x4E8F108
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_3 0x4E8F10C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_4 0x4E8F110
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_5 0x4E8F114
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_6 0x4E8F118
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_7 0x4E8F11C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_8 0x4E8F120
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_9 0x4E8F124
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_10 0x4E8F128
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_11 0x4E8F12C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_12 0x4E8F130
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_13 0x4E8F134
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_14 0x4E8F138
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_15 0x4E8F13C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_16 0x4E8F140
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_17 0x4E8F144
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_18 0x4E8F148
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_19 0x4E8F14C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_20 0x4E8F150
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_21 0x4E8F154
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_22 0x4E8F158
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_23 0x4E8F15C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_24 0x4E8F160
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_25 0x4E8F164
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_26 0x4E8F168
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_27 0x4E8F16C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_28 0x4E8F170
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_29 0x4E8F174
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_30 0x4E8F178
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_31 0x4E8F17C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_32 0x4E8F180
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_33 0x4E8F184
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_34 0x4E8F188
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_35 0x4E8F18C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_36 0x4E8F190
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_37 0x4E8F194
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_38 0x4E8F198
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_39 0x4E8F19C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_40 0x4E8F1A0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_41 0x4E8F1A4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_42 0x4E8F1A8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_43 0x4E8F1AC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_44 0x4E8F1B0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_45 0x4E8F1B4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_46 0x4E8F1B8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_47 0x4E8F1BC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_48 0x4E8F1C0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_49 0x4E8F1C4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_50 0x4E8F1C8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_51 0x4E8F1CC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_52 0x4E8F1D0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_53 0x4E8F1D4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_54 0x4E8F1D8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_55 0x4E8F1DC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_56 0x4E8F1E0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_57 0x4E8F1E4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_58 0x4E8F1E8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_59 0x4E8F1EC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_60 0x4E8F1F0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_61 0x4E8F1F4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_62 0x4E8F1F8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_CI_REG_63 0x4E8F1FC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_0 0x4E8F200
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_1 0x4E8F204
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_2 0x4E8F208
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_3 0x4E8F20C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_4 0x4E8F210
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_5 0x4E8F214
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_6 0x4E8F218
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_7 0x4E8F21C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_8 0x4E8F220
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_9 0x4E8F224
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_10 0x4E8F228
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_11 0x4E8F22C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_12 0x4E8F230
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_13 0x4E8F234
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_14 0x4E8F238
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_15 0x4E8F23C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_16 0x4E8F240
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_17 0x4E8F244
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_18 0x4E8F248
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_19 0x4E8F24C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_20 0x4E8F250
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_21 0x4E8F254
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_22 0x4E8F258
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_23 0x4E8F25C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_24 0x4E8F260
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_25 0x4E8F264
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_26 0x4E8F268
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_27 0x4E8F26C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_28 0x4E8F270
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_29 0x4E8F274
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_30 0x4E8F278
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_31 0x4E8F27C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_32 0x4E8F280
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_33 0x4E8F284
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_34 0x4E8F288
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_35 0x4E8F28C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_36 0x4E8F290
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_37 0x4E8F294
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_38 0x4E8F298
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_39 0x4E8F29C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_40 0x4E8F2A0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_41 0x4E8F2A4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_42 0x4E8F2A8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_43 0x4E8F2AC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_44 0x4E8F2B0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_45 0x4E8F2B4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_46 0x4E8F2B8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_47 0x4E8F2BC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_48 0x4E8F2C0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_49 0x4E8F2C4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_50 0x4E8F2C8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_51 0x4E8F2CC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_52 0x4E8F2D0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_53 0x4E8F2D4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_54 0x4E8F2D8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_55 0x4E8F2DC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_56 0x4E8F2E0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_57 0x4E8F2E4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_58 0x4E8F2E8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_59 0x4E8F2EC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_60 0x4E8F2F0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_61 0x4E8F2F4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_62 0x4E8F2F8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_PR_REG_63 0x4E8F2FC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_0 0x4E8F300
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_1 0x4E8F304
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_2 0x4E8F308
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_3 0x4E8F30C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_4 0x4E8F310
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_5 0x4E8F314
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_6 0x4E8F318
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_7 0x4E8F31C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_8 0x4E8F320
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_9 0x4E8F324
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_10 0x4E8F328
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_11 0x4E8F32C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_12 0x4E8F330
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_13 0x4E8F334
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_14 0x4E8F338
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_15 0x4E8F33C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_16 0x4E8F340
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_17 0x4E8F344
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_18 0x4E8F348
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_19 0x4E8F34C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_20 0x4E8F350
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_21 0x4E8F354
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_22 0x4E8F358
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_23 0x4E8F35C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_24 0x4E8F360
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_25 0x4E8F364
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_26 0x4E8F368
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_27 0x4E8F36C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_28 0x4E8F370
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_29 0x4E8F374
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_30 0x4E8F378
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_31 0x4E8F37C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_32 0x4E8F380
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_33 0x4E8F384
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_34 0x4E8F388
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_35 0x4E8F38C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_36 0x4E8F390
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_37 0x4E8F394
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_38 0x4E8F398
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_39 0x4E8F39C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_40 0x4E8F3A0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_41 0x4E8F3A4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_42 0x4E8F3A8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_43 0x4E8F3AC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_44 0x4E8F3B0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_45 0x4E8F3B4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_46 0x4E8F3B8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_47 0x4E8F3BC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_48 0x4E8F3C0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_49 0x4E8F3C4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_50 0x4E8F3C8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_51 0x4E8F3CC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_52 0x4E8F3D0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_53 0x4E8F3D4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_54 0x4E8F3D8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_55 0x4E8F3DC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_56 0x4E8F3E0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_57 0x4E8F3E4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_58 0x4E8F3E8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_59 0x4E8F3EC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_60 0x4E8F3F0
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_61 0x4E8F3F4
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_62 0x4E8F3F8
#define mmARC_FARM_ARC0_ACP_ENG_ACP_MK_REG_63 0x4E8F3FC
#define mmARC_FARM_ARC0_ACP_ENG_ACP_SELECTED_QUEUE_ID 0x4E8F400
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_0 0x4E8F404
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_1 0x4E8F408
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_WEIGHT_PRIO_2 0x4E8F40C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_0 0x4E8F410
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_1 0x4E8F414
#define mmARC_FARM_ARC0_ACP_ENG_ACP_GRANTS_COUNTER_PRIO_2 0x4E8F418
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_0 0x4E8F41C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_1 0x4E8F420
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_2 0x4E8F424
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_OUT_CNT_3 0x4E8F428
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_0 0x4E8F42C
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_1 0x4E8F430
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_2 0x4E8F434
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_PRIO_RD_CNT_3 0x4E8F438
#define mmARC_FARM_ARC0_ACP_ENG_ACP_DBG_REG 0x4E8F43C
#endif /* ASIC_REG_ARC_FARM_ARC0_ACP_ENG_REGS_H_ */

/*
 * Repository-export artifact: "View File" and the diff hunk header
 * "@ -0,0 +1,819 @@" below were emitted by a web diff viewer between two
 * concatenated auto-generated headers. Preserved here inside a comment so
 * the surrounding C preprocessor text remains parseable.
 */
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_
#define ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_
/*
*****************************************
* ARC_FARM_ARC0_AUX
* (Prototype: QMAN_ARC_AUX)
*****************************************
*/
/* ARC_FARM_ARC0_AUX_RUN_HALT_REQ */
#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_SHIFT 0
#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK 0x1
#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_SHIFT 1
#define ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK 0x2
/* ARC_FARM_ARC0_AUX_RUN_HALT_ACK */
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_SHIFT 0
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK 0x1
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_SHIFT 4
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK 0x10
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_HALT_R_SHIFT 8
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_HALT_R_MASK 0x100
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_TF_HALT_R_SHIFT 12
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_TF_HALT_R_MASK 0x1000
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_R_SHIFT 16
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_R_MASK 0x10000
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_MODE_R_SHIFT 17
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_SYS_SLEEP_MODE_R_MASK 0xE0000
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_WATCHDOG_RESET_SHIFT 20
#define ARC_FARM_ARC0_AUX_RUN_HALT_ACK_WATCHDOG_RESET_MASK 0x100000
/* ARC_FARM_ARC0_AUX_RST_VEC_ADDR */
#define ARC_FARM_ARC0_AUX_RST_VEC_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_RST_VEC_ADDR_VAL_MASK 0x3FFFFF
/* ARC_FARM_ARC0_AUX_DBG_MODE */
#define ARC_FARM_ARC0_AUX_DBG_MODE_DBG_PROT_SEL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DBG_MODE_DBG_PROT_SEL_MASK 0x1
#define ARC_FARM_ARC0_AUX_DBG_MODE_DBGEN_SHIFT 4
#define ARC_FARM_ARC0_AUX_DBG_MODE_DBGEN_MASK 0x10
#define ARC_FARM_ARC0_AUX_DBG_MODE_NIDEN_SHIFT 8
#define ARC_FARM_ARC0_AUX_DBG_MODE_NIDEN_MASK 0x100
#define ARC_FARM_ARC0_AUX_DBG_MODE_CASHE_RST_DISABLE_SHIFT 12
#define ARC_FARM_ARC0_AUX_DBG_MODE_CASHE_RST_DISABLE_MASK 0x1000
#define ARC_FARM_ARC0_AUX_DBG_MODE_DDCM_DMI_PRIORITY_SHIFT 16
#define ARC_FARM_ARC0_AUX_DBG_MODE_DDCM_DMI_PRIORITY_MASK 0x10000
/* ARC_FARM_ARC0_AUX_CLUSTER_NUM */
#define ARC_FARM_ARC0_AUX_CLUSTER_NUM_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CLUSTER_NUM_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_ARC_NUM */
#define ARC_FARM_ARC0_AUX_ARC_NUM_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_NUM_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_WAKE_UP_EVENT */
#define ARC_FARM_ARC0_AUX_WAKE_UP_EVENT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_WAKE_UP_EVENT_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE */
#define ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CTI_AP_STS */
#define ARC_FARM_ARC0_AUX_CTI_AP_STS_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CTI_AP_STS_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL */
#define ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL_RUN_HALT_SHIFT 0
#define ARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL_RUN_HALT_MASK 0x1
/* ARC_FARM_ARC0_AUX_ARC_RST */
#define ARC_FARM_ARC0_AUX_ARC_RST_CORE_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_RST_CORE_MASK 0x1
#define ARC_FARM_ARC0_AUX_ARC_RST_PRESETDBGN_SHIFT 4
#define ARC_FARM_ARC0_AUX_ARC_RST_PRESETDBGN_MASK 0x10
/* ARC_FARM_ARC0_AUX_ARC_RST_REQ */
#define ARC_FARM_ARC0_AUX_ARC_RST_REQ_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_RST_REQ_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_SRAM_LSB_ADDR_VAL_MASK 0x3F
/* ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_SRAM_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_PCIE_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_PCIE_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CFG_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_CFG_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_CFG_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_CFG_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM0_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM0_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM1_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM1_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM2_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM2_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM3_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM3_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM0_OFFSET */
#define ARC_FARM_ARC0_AUX_HBM0_OFFSET_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM0_OFFSET_VAL_MASK 0xFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM1_OFFSET */
#define ARC_FARM_ARC0_AUX_HBM1_OFFSET_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM1_OFFSET_VAL_MASK 0xFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM2_OFFSET */
#define ARC_FARM_ARC0_AUX_HBM2_OFFSET_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM2_OFFSET_VAL_MASK 0xFFFFFFF
/* ARC_FARM_ARC0_AUX_HBM3_OFFSET */
#define ARC_FARM_ARC0_AUX_HBM3_OFFSET_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_HBM3_OFFSET_VAL_MASK 0xFFFFFFF
/* ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR */
#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR */
#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR */
#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_MASK 0xF
#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_EN_SHIFT 4
#define ARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR_AXI_WRITE_EN_MASK 0xF0
/* ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR */
#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_MASK 0xF
#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_EN_SHIFT 4
#define ARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR_AXI_WRITE_EN_MASK 0xF0
/* ARC_FARM_ARC0_AUX_CONTEXT_ID */
#define ARC_FARM_ARC0_AUX_CONTEXT_ID_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CONTEXT_ID_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CID_OFFSET */
#define ARC_FARM_ARC0_AUX_CID_OFFSET_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CID_OFFSET_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_SW_INTR */
#define ARC_FARM_ARC0_AUX_SW_INTR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_SW_INTR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_IRQ_INTR_MASK */
#define ARC_FARM_ARC0_AUX_IRQ_INTR_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_IRQ_INTR_MASK_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS */
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS_VAL_MASK 0x3FFF
/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR */
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR_VAL_MASK 0x3FFF
/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK */
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK_VAL_MASK 0x3FFF
/* ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE */
#define ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN */
#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_INTR_EN_SHIFT 0
#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_INTR_EN_MASK 0x1
#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_HALT_EN_SHIFT 1
#define ARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN_HALT_EN_MASK 0x2
/* ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK */
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK_VAL_MASK 0x3FFF
/* ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK */
#define ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK_VAL_MASK 0x3FFF
/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS */
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_SERR_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_SERR_MASK 0x1
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_DERR_SHIFT 1
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_STS_DERR_MASK 0x2
/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR */
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK */
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR */
#define ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME */
#define ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR */
#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME */
#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR */
#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME */
#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR */
#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR */
#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP */
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP */
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN */
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE */
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE_VAL_MASK 0x7
/* ARC_FARM_ARC0_AUX_SCRATCHPAD */
#define ARC_FARM_ARC0_AUX_SCRATCHPAD_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_SCRATCHPAD_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT */
#define ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT */
#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT */
#define ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT */
#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT */
#define ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT */
#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT */
#define ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT */
#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR */
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN */
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR */
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN */
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR */
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_VAL_MASK 0x3FF
/* ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN */
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN_VAL_MASK 0x3FF
/* ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR */
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_VAL_MASK 0x3FF
/* ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN */
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN_VAL_MASK 0x3FF
/* ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR */
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_READ_MASK 0xF
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WRITE_MASK 0xF0
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_RD_EN_MASK 0xF00
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR_CBU_WR_EN_MASK 0xF000
/* ARC_FARM_ARC0_AUX_CBU_LOCK_OVR */
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_READ_MASK 0x3
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WRITE_MASK 0x30
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_RD_EN_MASK 0x300
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_CBU_LOCK_OVR_CBU_WR_EN_MASK 0x3000
/* ARC_FARM_ARC0_AUX_CBU_PROT_OVR */
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_READ_MASK 0x7
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WRITE_MASK 0x70
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_RD_EN_MASK 0x700
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_CBU_PROT_OVR_CBU_WR_EN_MASK 0x7000
/* ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING */
#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_READ_MASK 0xFF
#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_WRITE_SHIFT 8
#define ARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING_CBU_WRITE_MASK 0xFF00
/* ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN */
#define ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN_CBU_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN_CBU_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK */
#define ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK_CBU_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK_CBU_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT */
#define ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID */
#define ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID_VAL_MASK 0x7F
/* ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR */
#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN */
#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR */
#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN */
#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR */
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_READ_MASK 0xF
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WRITE_MASK 0xF0
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_RD_EN_MASK 0xF00
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR_LBU_WR_EN_MASK 0xF000
/* ARC_FARM_ARC0_AUX_LBU_LOCK_OVR */
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_READ_MASK 0x3
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WRITE_MASK 0x30
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_RD_EN_MASK 0x300
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_LBU_LOCK_OVR_LBU_WR_EN_MASK 0x3000
/* ARC_FARM_ARC0_AUX_LBU_PROT_OVR */
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_READ_MASK 0x7
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WRITE_SHIFT 4
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WRITE_MASK 0x70
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_RD_EN_SHIFT 8
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_RD_EN_MASK 0x700
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WR_EN_SHIFT 12
#define ARC_FARM_ARC0_AUX_LBU_PROT_OVR_LBU_WR_EN_MASK 0x7000
/* ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING */
#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_READ_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_READ_MASK 0xFF
#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_WRITE_SHIFT 8
#define ARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING_LBU_WRITE_MASK 0xFF00
/* ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN */
#define ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK */
#define ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT */
#define ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID */
#define ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID_VAL_MASK 0x3FF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK */
#define ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK */
#define ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK_VAL_MASK 0xFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG_VAL_MASK 0xFFFF
/* ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG */
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG_VAL_MASK 0xFFFF
/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT */
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT_VAL_MASK 0x7
/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER */
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST */
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK */
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE */
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT */
#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_LBW_SLV_AXI_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_LBW_SLV_AXI_MASK 0xF
#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_GEN_AXI_SHIFT 4
#define ARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT_GEN_AXI_MASK 0xF0
/* ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG */
#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG_VAL_MASK 0x1F
/* ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT */
#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT_VAL_MASK 0x1F
/* ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI */
#define ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI */
#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI */
#define ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI */
#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_AUX2APB_PROT */
#define ARC_FARM_ARC0_AUX_AUX2APB_PROT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_AUX2APB_PROT_VAL_MASK 0x7
/* ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN */
#define ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0 */
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0 */
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1 */
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1 */
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0 */
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0 */
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1 */
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1 */
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0 */
#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1 */
#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK */
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR */
#define ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR */
#define ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR */
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN */
#define ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN_VAL_MASK 0xF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB */
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB */
#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB */
#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP */
#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP */
#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_ARC_REGION_CFG */
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_1_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_1_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_2_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_2_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_3_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_3_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_4_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_4_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_5_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_5_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_6_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_6_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_7_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_7_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_8_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_8_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_9_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_9_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_10_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_10_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_11_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_11_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_12_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_12_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_13_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_13_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_14_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_14_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_15_ASID_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_15_ASID_MASK 0x3FF
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_SHIFT 12
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK 0x1000
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_SHIFT 16
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_MASK 0x70000
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_EN_SHIFT 20
#define ARC_FARM_ARC0_AUX_ARC_REGION_CFG_PROT_VAL_EN_MASK 0x700000
/* ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR */
#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR */
#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP */
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP */
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN */
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN_VAL_MASK 0x1
/* ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION */
#define ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION_VAL_MASK 0xFFFFFF
/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT */
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL */
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_ENABLE_BP_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_ENABLE_BP_MASK 0x1
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_RD_DELAY_CC_SHIFT 1
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL_RD_DELAY_CC_MASK 0x3E
/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK */
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR */
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_VAL_MASK 0x7FFFFFF
/* ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER */
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER_VAL_MASK 0x3
/* ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN */
#define ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_SHIFT 0
#define ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK 0x1
#endif /* ASIC_REG_ARC_FARM_ARC0_AUX_MASKS_H_ */

View File

@ -0,0 +1,591 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_
#define ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_
/*
*****************************************
* ARC_FARM_ARC0_AUX
* (Prototype: QMAN_ARC_AUX)
*****************************************
*/
#define mmARC_FARM_ARC0_AUX_RUN_HALT_REQ 0x4E88100
#define mmARC_FARM_ARC0_AUX_RUN_HALT_ACK 0x4E88104
#define mmARC_FARM_ARC0_AUX_RST_VEC_ADDR 0x4E88108
#define mmARC_FARM_ARC0_AUX_DBG_MODE 0x4E8810C
#define mmARC_FARM_ARC0_AUX_CLUSTER_NUM 0x4E88110
#define mmARC_FARM_ARC0_AUX_ARC_NUM 0x4E88114
#define mmARC_FARM_ARC0_AUX_WAKE_UP_EVENT 0x4E88118
#define mmARC_FARM_ARC0_AUX_DCCM_SYS_ADDR_BASE 0x4E8811C
#define mmARC_FARM_ARC0_AUX_CTI_AP_STS 0x4E88120
#define mmARC_FARM_ARC0_AUX_CTI_CFG_MUX_SEL 0x4E88124
#define mmARC_FARM_ARC0_AUX_ARC_RST 0x4E88128
#define mmARC_FARM_ARC0_AUX_ARC_RST_REQ 0x4E8812C
#define mmARC_FARM_ARC0_AUX_SRAM_LSB_ADDR 0x4E88130
#define mmARC_FARM_ARC0_AUX_SRAM_MSB_ADDR 0x4E88134
#define mmARC_FARM_ARC0_AUX_PCIE_LSB_ADDR 0x4E88138
#define mmARC_FARM_ARC0_AUX_PCIE_MSB_ADDR 0x4E8813C
#define mmARC_FARM_ARC0_AUX_CFG_LSB_ADDR 0x4E88140
#define mmARC_FARM_ARC0_AUX_CFG_MSB_ADDR 0x4E88144
#define mmARC_FARM_ARC0_AUX_HBM0_LSB_ADDR 0x4E88150
#define mmARC_FARM_ARC0_AUX_HBM0_MSB_ADDR 0x4E88154
#define mmARC_FARM_ARC0_AUX_HBM1_LSB_ADDR 0x4E88158
#define mmARC_FARM_ARC0_AUX_HBM1_MSB_ADDR 0x4E8815C
#define mmARC_FARM_ARC0_AUX_HBM2_LSB_ADDR 0x4E88160
#define mmARC_FARM_ARC0_AUX_HBM2_MSB_ADDR 0x4E88164
#define mmARC_FARM_ARC0_AUX_HBM3_LSB_ADDR 0x4E88168
#define mmARC_FARM_ARC0_AUX_HBM3_MSB_ADDR 0x4E8816C
#define mmARC_FARM_ARC0_AUX_HBM0_OFFSET 0x4E88170
#define mmARC_FARM_ARC0_AUX_HBM1_OFFSET 0x4E88174
#define mmARC_FARM_ARC0_AUX_HBM2_OFFSET 0x4E88178
#define mmARC_FARM_ARC0_AUX_HBM3_OFFSET 0x4E8817C
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_0 0x4E88180
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_1 0x4E88184
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_2 0x4E88188
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_3 0x4E8818C
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_4 0x4E88190
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_5 0x4E88194
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_LSB_ADDR_6 0x4E88198
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_0 0x4E8819C
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_1 0x4E881A0
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_2 0x4E881A4
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_3 0x4E881A8
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_4 0x4E881AC
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_5 0x4E881B0
#define mmARC_FARM_ARC0_AUX_GENERAL_PURPOSE_MSB_ADDR_6 0x4E881B4
#define mmARC_FARM_ARC0_AUX_ARC_CBU_AWCACHE_OVR 0x4E881B8
#define mmARC_FARM_ARC0_AUX_ARC_LBU_AWCACHE_OVR 0x4E881BC
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_0 0x4E881C0
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_1 0x4E881C4
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_2 0x4E881C8
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_3 0x4E881CC
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_4 0x4E881D0
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_5 0x4E881D4
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_6 0x4E881D8
#define mmARC_FARM_ARC0_AUX_CONTEXT_ID_7 0x4E881DC
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_0 0x4E881E0
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_1 0x4E881E4
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_2 0x4E881E8
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_3 0x4E881EC
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_4 0x4E881F0
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_5 0x4E881F4
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_6 0x4E881F8
#define mmARC_FARM_ARC0_AUX_CID_OFFSET_7 0x4E881FC
#define mmARC_FARM_ARC0_AUX_SW_INTR_0 0x4E88200
#define mmARC_FARM_ARC0_AUX_SW_INTR_1 0x4E88204
#define mmARC_FARM_ARC0_AUX_SW_INTR_2 0x4E88208
#define mmARC_FARM_ARC0_AUX_SW_INTR_3 0x4E8820C
#define mmARC_FARM_ARC0_AUX_SW_INTR_4 0x4E88210
#define mmARC_FARM_ARC0_AUX_SW_INTR_5 0x4E88214
#define mmARC_FARM_ARC0_AUX_SW_INTR_6 0x4E88218
#define mmARC_FARM_ARC0_AUX_SW_INTR_7 0x4E8821C
#define mmARC_FARM_ARC0_AUX_SW_INTR_8 0x4E88220
#define mmARC_FARM_ARC0_AUX_SW_INTR_9 0x4E88224
#define mmARC_FARM_ARC0_AUX_SW_INTR_10 0x4E88228
#define mmARC_FARM_ARC0_AUX_SW_INTR_11 0x4E8822C
#define mmARC_FARM_ARC0_AUX_SW_INTR_12 0x4E88230
#define mmARC_FARM_ARC0_AUX_SW_INTR_13 0x4E88234
#define mmARC_FARM_ARC0_AUX_SW_INTR_14 0x4E88238
#define mmARC_FARM_ARC0_AUX_SW_INTR_15 0x4E8823C
#define mmARC_FARM_ARC0_AUX_IRQ_INTR_MASK_0 0x4E88280
#define mmARC_FARM_ARC0_AUX_IRQ_INTR_MASK_1 0x4E88284
#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS 0x4E88290
#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR 0x4E88294
#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_MASK 0x4E88298
#define mmARC_FARM_ARC0_AUX_ARC_EXCPTN_CAUSE 0x4E8829C
#define mmARC_FARM_ARC0_AUX_SEI_INTR_HALT_EN 0x4E882A0
#define mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_HALT_MASK 0x4E882A4
#define mmARC_FARM_ARC0_AUX_QMAN_SEI_INTR_HALT_MASK 0x4E882A8
#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_STS 0x4E882B0
#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_CLR 0x4E882B4
#define mmARC_FARM_ARC0_AUX_ARC_REI_INTR_MASK 0x4E882B8
#define mmARC_FARM_ARC0_AUX_DCCM_ECC_ERR_ADDR 0x4E882BC
#define mmARC_FARM_ARC0_AUX_DCCM_ECC_SYNDROME 0x4E882C0
#define mmARC_FARM_ARC0_AUX_I_CACHE_ECC_ERR_ADDR 0x4E882C4
#define mmARC_FARM_ARC0_AUX_I_CACHE_ECC_SYNDROME 0x4E882C8
#define mmARC_FARM_ARC0_AUX_D_CACHE_ECC_ERR_ADDR 0x4E882CC
#define mmARC_FARM_ARC0_AUX_D_CACHE_ECC_SYNDROME 0x4E882D0
#define mmARC_FARM_ARC0_AUX_LBW_TRMINATE_AWADDR_ERR 0x4E882E0
#define mmARC_FARM_ARC0_AUX_LBW_TRMINATE_ARADDR_ERR 0x4E882E4
#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_BRESP 0x4E882E8
#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_RRESP 0x4E882EC
#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXLEN 0x4E882F0
#define mmARC_FARM_ARC0_AUX_CFG_LBW_TERMINATE_AXSIZE 0x4E882F4
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_0 0x4E88300
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_1 0x4E88304
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_2 0x4E88308
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_3 0x4E8830C
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_4 0x4E88310
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_5 0x4E88314
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_6 0x4E88318
#define mmARC_FARM_ARC0_AUX_SCRATCHPAD_7 0x4E8831C
#define mmARC_FARM_ARC0_AUX_TOTAL_CBU_WR_CNT 0x4E88320
#define mmARC_FARM_ARC0_AUX_INFLIGHT_CBU_WR_CNT 0x4E88324
#define mmARC_FARM_ARC0_AUX_TOTAL_CBU_RD_CNT 0x4E88328
#define mmARC_FARM_ARC0_AUX_INFLIGHT_CBU_RD_CNT 0x4E8832C
#define mmARC_FARM_ARC0_AUX_TOTAL_LBU_WR_CNT 0x4E88330
#define mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_WR_CNT 0x4E88334
#define mmARC_FARM_ARC0_AUX_TOTAL_LBU_RD_CNT 0x4E88338
#define mmARC_FARM_ARC0_AUX_INFLIGHT_LBU_RD_CNT 0x4E8833C
#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_OVR 0x4E88350
#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_OVR_EN 0x4E88354
#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_OVR 0x4E88358
#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_OVR_EN 0x4E8835C
#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR 0x4E88360
#define mmARC_FARM_ARC0_AUX_CBU_ARUSER_MSB_OVR_EN 0x4E88364
#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR 0x4E88368
#define mmARC_FARM_ARC0_AUX_CBU_AWUSER_MSB_OVR_EN 0x4E8836C
#define mmARC_FARM_ARC0_AUX_CBU_AXCACHE_OVR 0x4E88370
#define mmARC_FARM_ARC0_AUX_CBU_LOCK_OVR 0x4E88374
#define mmARC_FARM_ARC0_AUX_CBU_PROT_OVR 0x4E88378
#define mmARC_FARM_ARC0_AUX_CBU_MAX_OUTSTANDING 0x4E8837C
#define mmARC_FARM_ARC0_AUX_CBU_EARLY_BRESP_EN 0x4E88380
#define mmARC_FARM_ARC0_AUX_CBU_FORCE_RSP_OK 0x4E88384
#define mmARC_FARM_ARC0_AUX_CBU_NO_WR_INFLIGHT 0x4E8838C
#define mmARC_FARM_ARC0_AUX_CBU_SEI_INTR_ID 0x4E88390
#define mmARC_FARM_ARC0_AUX_LBU_ARUSER_OVR 0x4E88400
#define mmARC_FARM_ARC0_AUX_LBU_ARUSER_OVR_EN 0x4E88404
#define mmARC_FARM_ARC0_AUX_LBU_AWUSER_OVR 0x4E88408
#define mmARC_FARM_ARC0_AUX_LBU_AWUSER_OVR_EN 0x4E8840C
#define mmARC_FARM_ARC0_AUX_LBU_AXCACHE_OVR 0x4E88420
#define mmARC_FARM_ARC0_AUX_LBU_LOCK_OVR 0x4E88424
#define mmARC_FARM_ARC0_AUX_LBU_PROT_OVR 0x4E88428
#define mmARC_FARM_ARC0_AUX_LBU_MAX_OUTSTANDING 0x4E8842C
#define mmARC_FARM_ARC0_AUX_LBU_EARLY_BRESP_EN 0x4E88430
#define mmARC_FARM_ARC0_AUX_LBU_FORCE_RSP_OK 0x4E88434
#define mmARC_FARM_ARC0_AUX_LBU_NO_WR_INFLIGHT 0x4E8843C
#define mmARC_FARM_ARC0_AUX_LBU_SEI_INTR_ID 0x4E88440
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_0 0x4E88500
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_1 0x4E88504
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_2 0x4E88508
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_3 0x4E8850C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_4 0x4E88510
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_5 0x4E88514
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_6 0x4E88518
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_BASE_ADDR_7 0x4E8851C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_0 0x4E88520
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_1 0x4E88524
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_2 0x4E88528
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_3 0x4E8852C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_4 0x4E88530
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_5 0x4E88534
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_6 0x4E88538
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_SIZE_7 0x4E8853C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_0 0x4E88540
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_1 0x4E88544
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_2 0x4E88548
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_3 0x4E8854C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_4 0x4E88550
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_5 0x4E88554
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_6 0x4E88558
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PI_7 0x4E8855C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_0 0x4E88560
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_1 0x4E88564
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_2 0x4E88568
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_3 0x4E8856C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_4 0x4E88570
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_5 0x4E88574
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_6 0x4E88578
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_CI_7 0x4E8857C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_0 0x4E88580
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_1 0x4E88584
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_2 0x4E88588
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_3 0x4E8858C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_4 0x4E88590
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_5 0x4E88594
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_6 0x4E88598
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_PUSH_REG_7 0x4E8859C
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_0 0x4E885A0
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_1 0x4E885A4
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_2 0x4E885A8
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_3 0x4E885AC
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_4 0x4E885B0
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_5 0x4E885B4
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_6 0x4E885B8
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_MAX_OCCUPANCY_7 0x4E885BC
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_0 0x4E885C0
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_1 0x4E885C4
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_2 0x4E885C8
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_3 0x4E885CC
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_4 0x4E885D0
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_5 0x4E885D4
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_6 0x4E885D8
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_VALID_ENTRIES_7 0x4E885DC
#define mmARC_FARM_ARC0_AUX_GENERAL_Q_VLD_ENTRY_MASK 0x4E885E0
#define mmARC_FARM_ARC0_AUX_NIC_Q_VLD_ENTRY_MASK 0x4E885E4
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_DROP_EN 0x4E88620
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_WARN_MSG 0x4E88624
#define mmARC_FARM_ARC0_AUX_DCCM_QUEUE_ALERT_MSG 0x4E88628
#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWPROT 0x4E88630
#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWUSER 0x4E88634
#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWBURST 0x4E88638
#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWLOCK 0x4E8863C
#define mmARC_FARM_ARC0_AUX_DCCM_GEN_AXI_AWCACHE 0x4E88640
#define mmARC_FARM_ARC0_AUX_DCCM_WRR_ARB_WEIGHT 0x4E88644
#define mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_FULL_CFG 0x4E88648
#define mmARC_FARM_ARC0_AUX_DCCM_Q_PUSH_FIFO_CNT 0x4E8864C
#define mmARC_FARM_ARC0_AUX_QMAN_CQ_IFIFO_SHADOW_CI 0x4E88650
#define mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_IFIFO_SHADOW_CI 0x4E88654
#define mmARC_FARM_ARC0_AUX_QMAN_CQ_SHADOW_CI 0x4E88658
#define mmARC_FARM_ARC0_AUX_QMAN_ARC_CQ_SHADOW_CI 0x4E8865C
#define mmARC_FARM_ARC0_AUX_AUX2APB_PROT 0x4E88700
#define mmARC_FARM_ARC0_AUX_LBW_FORK_WIN_EN 0x4E88704
#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR0 0x4E88708
#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK0 0x4E8870C
#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_BASE_ADDR1 0x4E88710
#define mmARC_FARM_ARC0_AUX_QMAN_LBW_FORK_ADDR_MASK1 0x4E88714
#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR0 0x4E88718
#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK0 0x4E8871C
#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_BASE_ADDR1 0x4E88720
#define mmARC_FARM_ARC0_AUX_FARM_LBW_FORK_ADDR_MASK1 0x4E88724
#define mmARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR0 0x4E88728
#define mmARC_FARM_ARC0_AUX_LBW_APB_FORK_MAX_ADDR1 0x4E8872C
#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_LBW_FORK_MASK 0x4E88730
#define mmARC_FARM_ARC0_AUX_ARC_DUP_ENG_LBW_FORK_ADDR 0x4E88734
#define mmARC_FARM_ARC0_AUX_ARC_ACP_ENG_LBW_FORK_ADDR 0x4E88738
#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_VIRTUAL_ADDR 0x4E8873C
#define mmARC_FARM_ARC0_AUX_CBU_FORK_WIN_EN 0x4E88740
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_LSB 0x4E88750
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR0_MSB 0x4E88754
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_LSB 0x4E88758
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK0_MSB 0x4E8875C
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_LSB 0x4E88760
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR1_MSB 0x4E88764
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_LSB 0x4E88768
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK1_MSB 0x4E8876C
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_LSB 0x4E88770
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR2_MSB 0x4E88774
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_LSB 0x4E88778
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK2_MSB 0x4E8877C
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_LSB 0x4E88780
#define mmARC_FARM_ARC0_AUX_CBU_FORK_BASE_ADDR3_MSB 0x4E88784
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_LSB 0x4E88788
#define mmARC_FARM_ARC0_AUX_CBU_FORK_ADDR_MASK3_MSB 0x4E8878C
#define mmARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_LSB 0x4E88790
#define mmARC_FARM_ARC0_AUX_CBU_TRMINATE_ARADDR_MSB 0x4E88794
#define mmARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_BRESP 0x4E88798
#define mmARC_FARM_ARC0_AUX_CFG_CBU_TERMINATE_RRESP 0x4E8879C
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_0 0x4E88800
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_1 0x4E88804
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_2 0x4E88808
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_3 0x4E8880C
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_4 0x4E88810
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_5 0x4E88814
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_6 0x4E88818
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_7 0x4E8881C
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_8 0x4E88820
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_9 0x4E88824
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_10 0x4E88828
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_11 0x4E8882C
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_12 0x4E88830
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_13 0x4E88834
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_14 0x4E88838
#define mmARC_FARM_ARC0_AUX_ARC_REGION_CFG_15 0x4E8883C
#define mmARC_FARM_ARC0_AUX_DCCM_TRMINATE_AWADDR_ERR 0x4E88840
#define mmARC_FARM_ARC0_AUX_DCCM_TRMINATE_ARADDR_ERR 0x4E88844
#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_BRESP 0x4E88848
#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_RRESP 0x4E8884C
#define mmARC_FARM_ARC0_AUX_CFG_DCCM_TERMINATE_EN 0x4E88850
#define mmARC_FARM_ARC0_AUX_CFG_DCCM_SECURE_REGION 0x4E88854
#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_WR_IF_CNT 0x4E88900
#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_CTL 0x4E88904
#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR_MSK 0x4E88908
#define mmARC_FARM_ARC0_AUX_ARC_AXI_ORDERING_ADDR 0x4E8890C
#define mmARC_FARM_ARC0_AUX_ARC_ACC_ENGS_BUSER 0x4E88910
#define mmARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN 0x4E88920
#endif /* ASIC_REG_ARC_FARM_ARC0_AUX_REGS_H_ */

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
#define ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_
/*
*****************************************
* ARC_FARM_ARC0_DUP_ENG_AXUSER
* (Prototype: AXUSER)
*****************************************
*/
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID 0x4E89900
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP 0x4E89904
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_STRONG_ORDER 0x4E89908
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_NO_SNOOP 0x4E8990C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_REDUCTION 0x4E89910
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_ATOMIC 0x4E89914
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_QOS 0x4E89918
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RSVD 0x4E8991C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_EMEM_CPAGE 0x4E89920
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_CORE 0x4E89924
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_E2E_COORD 0x4E89928
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_LO 0x4E89930
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_WR_OVRD_HI 0x4E89934
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_LO 0x4E89938
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_RD_OVRD_HI 0x4E8993C
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_COORD 0x4E89940
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_LOCK 0x4E89944
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_RSVD 0x4E89948
#define mmARC_FARM_ARC0_DUP_ENG_AXUSER_LB_OVRD 0x4E8994C
#endif /* ASIC_REG_ARC_FARM_ARC0_DUP_ENG_AXUSER_REGS_H_ */

View File

@ -0,0 +1,575 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_
#define ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_
/*
*****************************************
* ARC_FARM_ARC0_DUP_ENG
* (Prototype: ARC_DUP_ENG)
*****************************************
*/
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_0 0x4E89000
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_1 0x4E89004
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_2 0x4E89008
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_3 0x4E8900C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_4 0x4E89010
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_5 0x4E89014
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_6 0x4E89018
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_7 0x4E8901C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_8 0x4E89020
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_9 0x4E89024
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_10 0x4E89028
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_11 0x4E8902C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_12 0x4E89030
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_13 0x4E89034
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_14 0x4E89038
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_15 0x4E8903C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_16 0x4E89040
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_17 0x4E89044
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_18 0x4E89048
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_19 0x4E8904C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_20 0x4E89050
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_21 0x4E89054
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_22 0x4E89058
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_23 0x4E8905C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_ADDR_24 0x4E89060
#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_0 0x4E89064
#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_1 0x4E89068
#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_2 0x4E8906C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_ADDR_3 0x4E89070
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_0 0x4E89074
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_1 0x4E89078
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_2 0x4E8907C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_3 0x4E89080
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_4 0x4E89084
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_5 0x4E89088
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_6 0x4E8908C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_7 0x4E89090
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_8 0x4E89094
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_9 0x4E89098
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_10 0x4E8909C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_11 0x4E890A0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_12 0x4E890A4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_13 0x4E890A8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_14 0x4E890AC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_15 0x4E890B0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_16 0x4E890B4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_17 0x4E890B8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_18 0x4E890BC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_19 0x4E890C0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_20 0x4E890C4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_21 0x4E890C8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_22 0x4E890CC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_ADDR_23 0x4E890D0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_0 0x4E890D4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_1 0x4E890D8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_2 0x4E890DC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_3 0x4E890E0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_4 0x4E890E4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_5 0x4E890E8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_6 0x4E890EC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_ADDR_7 0x4E890F0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_ADDR_0 0x4E890F4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_ADDR_1 0x4E890F8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_ADDR_0 0x4E890FC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_ADDR_1 0x4E89100
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_0 0x4E89104
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_1 0x4E89108
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_2 0x4E8910C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_3 0x4E89110
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_4 0x4E89114
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_5 0x4E89118
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_6 0x4E8911C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_7 0x4E89120
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_8 0x4E89124
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_9 0x4E89128
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_10 0x4E8912C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_11 0x4E89130
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_12 0x4E89134
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_13 0x4E89138
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_14 0x4E8913C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_ADDR_15 0x4E89140
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TPC_ENG_MASK 0x4E89200
#define mmARC_FARM_ARC0_DUP_ENG_DUP_MME_ENG_MASK 0x4E89204
#define mmARC_FARM_ARC0_DUP_ENG_DUP_EDMA_ENG_MASK 0x4E89208
#define mmARC_FARM_ARC0_DUP_ENG_DUP_PDMA_ENG_MASK 0x4E8920C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_ROT_ENG_MASK 0x4E89210
#define mmARC_FARM_ARC0_DUP_ENG_DUP_RSVD_ENG_MASK 0x4E89214
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_0 0x4E89218
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_1 0x4E8921C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_2 0x4E89220
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_3 0x4E89224
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_4 0x4E89228
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_5 0x4E8922C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_6 0x4E89230
#define mmARC_FARM_ARC0_DUP_ENG_DUP_NIC_ENG_MASK_7 0x4E89234
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_0 0x4E89238
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_1 0x4E8923C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_2 0x4E89240
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_3 0x4E89244
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_4 0x4E89248
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_5 0x4E8924C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_6 0x4E89250
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_7 0x4E89254
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_8 0x4E89258
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_9 0x4E8925C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_10 0x4E89260
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_11 0x4E89264
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_12 0x4E89268
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_0_13 0x4E8926C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_0 0x4E89288
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_1 0x4E8928C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_2 0x4E89290
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_3 0x4E89294
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_4 0x4E89298
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_1_5 0x4E8929C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_0 0x4E892A0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_1 0x4E892A4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_2 0x4E892A8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_3 0x4E892AC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_4 0x4E892B0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_2_5 0x4E892B4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_0 0x4E892B8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_1 0x4E892BC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_2 0x4E892C0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_3 0x4E892C4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_4 0x4E892C8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_TRANS_DATA_Q_3_5 0x4E892CC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GENERAL_CFG 0x4E892D0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_BP_CFG 0x4E892D4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_0 0x4E892D8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_1 0x4E892DC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_2 0x4E892E0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_3 0x4E892E4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_4 0x4E892E8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_5 0x4E892EC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_6 0x4E892F0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_7 0x4E892F4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_8 0x4E892F8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_9 0x4E892FC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_10 0x4E89300
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_11 0x4E89304
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_12 0x4E89308
#define mmARC_FARM_ARC0_DUP_ENG_DUP_GRP_ENG_ADDR_OFFSET_13 0x4E8930C
#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_0 0x4E894A0
#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_1 0x4E894A4
#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_IN_GRP_TRANS_2 0x4E894A8
#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_STS 0x4E894AC
#define mmARC_FARM_ARC0_DUP_ENG_DUP_DBG_OUT_RQ_CNT 0x4E894B0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_0 0x4E894B4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_1 0x4E894B8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_2 0x4E894BC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_3 0x4E894C0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_4 0x4E894C4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_5 0x4E894C8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_6 0x4E894CC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_7 0x4E894D0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_8 0x4E894D4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_9 0x4E894D8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_10 0x4E894DC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_11 0x4E894E0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_12 0x4E894E4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_13 0x4E894E8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_14 0x4E894EC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_15 0x4E894F0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_16 0x4E894F4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_17 0x4E894F8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_18 0x4E894FC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_19 0x4E89500
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_20 0x4E89504
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_21 0x4E89508
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_22 0x4E8950C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_23 0x4E89510
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_24 0x4E89514
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_25 0x4E89518
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_26 0x4E8951C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_27 0x4E89520
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_28 0x4E89524
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_29 0x4E89528
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_30 0x4E8952C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_31 0x4E89530
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_32 0x4E89534
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_33 0x4E89538
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_34 0x4E8953C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_35 0x4E89540
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_36 0x4E89544
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_37 0x4E89548
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_38 0x4E8954C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_39 0x4E89550
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_40 0x4E89554
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_41 0x4E89558
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_42 0x4E8955C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_43 0x4E89560
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_44 0x4E89564
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_45 0x4E89568
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_46 0x4E8956C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_47 0x4E89570
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_48 0x4E89574
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_49 0x4E89578
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_50 0x4E8957C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_51 0x4E89580
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_52 0x4E89584
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_53 0x4E89588
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_54 0x4E8958C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_55 0x4E89590
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_56 0x4E89594
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_57 0x4E89598
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_58 0x4E8959C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_59 0x4E895A0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_60 0x4E895A4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_61 0x4E895A8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_62 0x4E895AC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CONTEXT_ID_63 0x4E895B0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_0 0x4E895B4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_1 0x4E895B8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_2 0x4E895BC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_3 0x4E895C0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_4 0x4E895C4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_5 0x4E895C8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_6 0x4E895CC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_7 0x4E895D0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_8 0x4E895D4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_9 0x4E895D8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_10 0x4E895DC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_11 0x4E895E0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_12 0x4E895E4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_13 0x4E895E8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_14 0x4E895EC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_15 0x4E895F0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_16 0x4E895F4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_17 0x4E895F8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_18 0x4E895FC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_19 0x4E89600
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_20 0x4E89604
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_21 0x4E89608
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_22 0x4E8960C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_23 0x4E89610
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_24 0x4E89614
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_25 0x4E89618
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_26 0x4E8961C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_27 0x4E89620
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_28 0x4E89624
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_29 0x4E89628
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_30 0x4E8962C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_31 0x4E89630
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_32 0x4E89634
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_33 0x4E89638
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_34 0x4E8963C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_35 0x4E89640
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_36 0x4E89644
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_37 0x4E89648
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_38 0x4E8964C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_39 0x4E89650
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_40 0x4E89654
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_41 0x4E89658
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_42 0x4E8965C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_43 0x4E89660
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_44 0x4E89664
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_45 0x4E89668
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_46 0x4E8966C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_47 0x4E89670
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_48 0x4E89674
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_49 0x4E89678
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_50 0x4E8967C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_51 0x4E89680
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_52 0x4E89684
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_53 0x4E89688
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_54 0x4E8968C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_55 0x4E89690
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_56 0x4E89694
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_57 0x4E89698
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_58 0x4E8969C
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_59 0x4E896A0
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_60 0x4E896A4
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_61 0x4E896A8
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_62 0x4E896AC
#define mmARC_FARM_ARC0_DUP_ENG_ARC_CID_OFFSET_63 0x4E896B0
#endif /* ASIC_REG_ARC_FARM_ARC0_DUP_ENG_REGS_H_ */

View File

@ -0,0 +1,135 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_
#define ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_
/*
*****************************************
* ARC_FARM_KDMA_CTX_AXUSER
* (Prototype: AXUSER)
*****************************************
*/
/* ARC_FARM_KDMA_CTX_AXUSER_HB_ASID */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_MASK 0x3FF
#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT 16
#define ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_MASK 0x3FF0000
/* ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK 0x10
/* ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_WR_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER_RD_MASK 0x10
/* ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_WR_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP_RD_MASK 0x10
/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_IND_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_IND_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_DTYPE_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_DTYPE_MASK 0xF0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_OP_SHIFT 8
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_OP_MASK 0x300
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_ROUND_SHIFT 12
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_ROUND_MASK 0x3000
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_MAX_SHIFT 16
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION_MAX_MASK 0x10000
/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_IND_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_IND_MASK 0x3
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_ADDITION_SIZE_MASK 0xFF0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_MSB_MASK_SHIFT 12
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC_MSB_MASK_MASK 0x1F000
/* ARC_FARM_KDMA_CTX_AXUSER_HB_QOS */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_WR_MASK 0xF
#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_QOS_RD_MASK 0x70
/* ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_27_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_27_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_28_SHIFT 1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_28_MASK 0x2
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_30_SHIFT 2
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_30_MASK 0x4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_31_SHIFT 3
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RSVD_WR_BIT_31_MASK 0x8
/* ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_WR_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE_RD_MASK 0x10
/* ARC_FARM_KDMA_CTX_AXUSER_HB_CORE */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_WR_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_WR_MASK 0x1
#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_RD_SHIFT 4
#define ARC_FARM_KDMA_CTX_AXUSER_HB_CORE_RD_MASK 0x10
/* ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD */
#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_X_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_X_MASK 0x1F
#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_Y_SHIFT 8
#define ARC_FARM_KDMA_CTX_AXUSER_E2E_COORD_Y_MASK 0xF00
/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI_VAL_MASK 0x3FF
/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI */
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI_VAL_MASK 0x3FF
/* ARC_FARM_KDMA_CTX_AXUSER_LB_COORD */
#define ARC_FARM_KDMA_CTX_AXUSER_LB_COORD_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_LB_COORD_VAL_MASK 0x3FF
/* ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK */
#define ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_LB_LOCK_VAL_MASK 0x1
/* ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD */
#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_21_11_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_21_11_MASK 0x7FF
#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_22_SHIFT 12
#define ARC_FARM_KDMA_CTX_AXUSER_LB_RSVD_BIT_22_MASK 0x1000
/* ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD */
#define ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_AXUSER_LB_OVRD_VAL_MASK 0xFFFFFFFF
#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_MASKS_H_ */

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_
#define ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_
/*
*****************************************
* ARC_FARM_KDMA_CTX_AXUSER
* (Prototype: AXUSER)
*****************************************
*/
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_ASID 0x4E8B800
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP 0x4E8B804
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_STRONG_ORDER 0x4E8B808
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_NO_SNOOP 0x4E8B80C
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_REDUCTION 0x4E8B810
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_ATOMIC 0x4E8B814
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_QOS 0x4E8B818
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RSVD 0x4E8B81C
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_EMEM_CPAGE 0x4E8B820
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_CORE 0x4E8B824
#define mmARC_FARM_KDMA_CTX_AXUSER_E2E_COORD 0x4E8B828
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_LO 0x4E8B830
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_WR_OVRD_HI 0x4E8B834
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_LO 0x4E8B838
#define mmARC_FARM_KDMA_CTX_AXUSER_HB_RD_OVRD_HI 0x4E8B83C
#define mmARC_FARM_KDMA_CTX_AXUSER_LB_COORD 0x4E8B840
#define mmARC_FARM_KDMA_CTX_AXUSER_LB_LOCK 0x4E8B844
#define mmARC_FARM_KDMA_CTX_AXUSER_LB_RSVD 0x4E8B848
#define mmARC_FARM_KDMA_CTX_AXUSER_LB_OVRD 0x4E8B84C
#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_AXUSER_REGS_H_ */

View File

@ -0,0 +1,221 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_
#define ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_
/*
*****************************************
* ARC_FARM_KDMA_CTX
* (Prototype: DMA_CORE_CTX)
*****************************************
*/
/* ARC_FARM_KDMA_CTX_RATE_LIM_TKN */
#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_RD_SHIFT 0
#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_RD_MASK 0xFF
#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_WR_SHIFT 16
#define ARC_FARM_KDMA_CTX_RATE_LIM_TKN_WR_MASK 0xFF0000
/* ARC_FARM_KDMA_CTX_PWRLP */
#define ARC_FARM_KDMA_CTX_PWRLP_DATA_SHIFT 0
#define ARC_FARM_KDMA_CTX_PWRLP_DATA_MASK 0xFF
#define ARC_FARM_KDMA_CTX_PWRLP_EN_SHIFT 8
#define ARC_FARM_KDMA_CTX_PWRLP_EN_MASK 0x100
/* ARC_FARM_KDMA_CTX_TE_NUMROWS */
#define ARC_FARM_KDMA_CTX_TE_NUMROWS_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_TE_NUMROWS_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_IDX */
#define ARC_FARM_KDMA_CTX_IDX_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_IDX_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_CTX_IDX_INC */
#define ARC_FARM_KDMA_CTX_IDX_INC_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_IDX_INC_VAL_MASK 0xFF
/* ARC_FARM_KDMA_CTX_CTRL */
#define ARC_FARM_KDMA_CTX_CTRL_TRANSPOSE_SHIFT 0
#define ARC_FARM_KDMA_CTX_CTRL_TRANSPOSE_MASK 0x1
#define ARC_FARM_KDMA_CTX_CTRL_DTYPE_SHIFT 4
#define ARC_FARM_KDMA_CTX_CTRL_DTYPE_MASK 0x30
#define ARC_FARM_KDMA_CTX_CTRL_COMPRESS_SHIFT 8
#define ARC_FARM_KDMA_CTX_CTRL_COMPRESS_MASK 0x100
#define ARC_FARM_KDMA_CTX_CTRL_DECOMPRESS_SHIFT 9
#define ARC_FARM_KDMA_CTX_CTRL_DECOMPRESS_MASK 0x200
#define ARC_FARM_KDMA_CTX_CTRL_RD_UNCACHEABLE_SHIFT 12
#define ARC_FARM_KDMA_CTX_CTRL_RD_UNCACHEABLE_MASK 0x1000
/* ARC_FARM_KDMA_CTX_SRC_TSIZE_0 */
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_0_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_0_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_TSIZE_1 */
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_1_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_1_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_STRIDE_1 */
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_1_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_1_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_TSIZE_2 */
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_2_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_2_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_STRIDE_2 */
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_2_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_2_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_TSIZE_3 */
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_3_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_3_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_STRIDE_3 */
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_3_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_3_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_TSIZE_4 */
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_4_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_TSIZE_4_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_STRIDE_4 */
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_4_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_STRIDE_4_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_TSIZE_1 */
#define ARC_FARM_KDMA_CTX_DST_TSIZE_1_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_TSIZE_1_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_STRIDE_1 */
#define ARC_FARM_KDMA_CTX_DST_STRIDE_1_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_STRIDE_1_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_TSIZE_2 */
#define ARC_FARM_KDMA_CTX_DST_TSIZE_2_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_TSIZE_2_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_STRIDE_2 */
#define ARC_FARM_KDMA_CTX_DST_STRIDE_2_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_STRIDE_2_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_TSIZE_3 */
#define ARC_FARM_KDMA_CTX_DST_TSIZE_3_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_TSIZE_3_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_STRIDE_3 */
#define ARC_FARM_KDMA_CTX_DST_STRIDE_3_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_STRIDE_3_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_TSIZE_4 */
#define ARC_FARM_KDMA_CTX_DST_TSIZE_4_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_TSIZE_4_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_STRIDE_4 */
#define ARC_FARM_KDMA_CTX_DST_STRIDE_4_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_STRIDE_4_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI */
#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO */
#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_WR_COMP_WDATA */
#define ARC_FARM_KDMA_CTX_WR_COMP_WDATA_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_WR_COMP_WDATA_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_OFFSET_LO */
#define ARC_FARM_KDMA_CTX_SRC_OFFSET_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_OFFSET_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_OFFSET_HI */
#define ARC_FARM_KDMA_CTX_SRC_OFFSET_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_OFFSET_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_OFFSET_LO */
#define ARC_FARM_KDMA_CTX_DST_OFFSET_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_OFFSET_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_OFFSET_HI */
#define ARC_FARM_KDMA_CTX_DST_OFFSET_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_OFFSET_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_BASE_LO */
#define ARC_FARM_KDMA_CTX_SRC_BASE_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_BASE_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_SRC_BASE_HI */
#define ARC_FARM_KDMA_CTX_SRC_BASE_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_SRC_BASE_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_BASE_LO */
#define ARC_FARM_KDMA_CTX_DST_BASE_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_BASE_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_BASE_HI */
#define ARC_FARM_KDMA_CTX_DST_BASE_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_BASE_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_DST_TSIZE_0 */
#define ARC_FARM_KDMA_CTX_DST_TSIZE_0_VAL_SHIFT 0
#define ARC_FARM_KDMA_CTX_DST_TSIZE_0_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_CTX_COMMIT */
#define ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_SHIFT 0
#define ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK 0x1
#define ARC_FARM_KDMA_CTX_COMMIT_ENDIAN_SWAP_SHIFT 1
#define ARC_FARM_KDMA_CTX_COMMIT_ENDIAN_SWAP_MASK 0x6
#define ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_SHIFT 4
#define ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK 0x10
#define ARC_FARM_KDMA_CTX_COMMIT_BF16_SHIFT 6
#define ARC_FARM_KDMA_CTX_COMMIT_BF16_MASK 0x40
#define ARC_FARM_KDMA_CTX_COMMIT_FP16_SHIFT 7
#define ARC_FARM_KDMA_CTX_COMMIT_FP16_MASK 0x80
#define ARC_FARM_KDMA_CTX_COMMIT_CTX_ID_INC_SHIFT 8
#define ARC_FARM_KDMA_CTX_COMMIT_CTX_ID_INC_MASK 0x100
#define ARC_FARM_KDMA_CTX_COMMIT_ADD_OFFSET_0_SHIFT 9
#define ARC_FARM_KDMA_CTX_COMMIT_ADD_OFFSET_0_MASK 0x200
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE0_FROM_DST_SIZE0_SHIFT 10
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE0_FROM_DST_SIZE0_MASK 0x400
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_OFST_FROM_DST_OFST_SHIFT 11
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_OFST_FROM_DST_OFST_MASK 0x800
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM1_SHIFT 12
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM1_MASK 0x1000
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM2_SHIFT 13
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM2_MASK 0x2000
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM3_SHIFT 14
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM3_MASK 0x4000
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM4_SHIFT 15
#define ARC_FARM_KDMA_CTX_COMMIT_DISABLE_DIM4_MASK 0x8000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE1_FROM_DST_SIZE1_SHIFT 16
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE1_FROM_DST_SIZE1_MASK 0x10000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE2_FROM_DST_SIZE2_SHIFT 17
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE2_FROM_DST_SIZE2_MASK 0x20000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE3_FROM_DST_SIZE3_SHIFT 18
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE3_FROM_DST_SIZE3_MASK 0x40000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE4_FROM_DST_SIZE4_SHIFT 19
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_SIZE4_FROM_DST_SIZE4_MASK 0x80000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD1_FROM_DST_STRD1_SHIFT 20
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD1_FROM_DST_STRD1_MASK 0x100000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD2_FROM_DST_STRD2_SHIFT 21
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD2_FROM_DST_STRD2_MASK 0x200000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD3_FROM_DST_STRD3_SHIFT 22
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD3_FROM_DST_STRD3_MASK 0x400000
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD4_FROM_DST_STRD4_SHIFT 23
#define ARC_FARM_KDMA_CTX_COMMIT_SRC_STRD4_FROM_DST_STRD4_MASK 0x800000
#define ARC_FARM_KDMA_CTX_COMMIT_LIN_SHIFT 31
#define ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK 0x80000000
#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_MASKS_H_ */

View File

@ -0,0 +1,95 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_
#define ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_
/*
*****************************************
* ARC_FARM_KDMA_CTX
* (Prototype: DMA_CORE_CTX)
*****************************************
*/
#define mmARC_FARM_KDMA_CTX_RATE_LIM_TKN 0x4E8B860
#define mmARC_FARM_KDMA_CTX_PWRLP 0x4E8B864
#define mmARC_FARM_KDMA_CTX_TE_NUMROWS 0x4E8B868
#define mmARC_FARM_KDMA_CTX_IDX 0x4E8B86C
#define mmARC_FARM_KDMA_CTX_IDX_INC 0x4E8B870
#define mmARC_FARM_KDMA_CTX_CTRL 0x4E8B874
#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_0 0x4E8B878
#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_1 0x4E8B87C
#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_1 0x4E8B880
#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_2 0x4E8B884
#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_2 0x4E8B888
#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_3 0x4E8B88C
#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_3 0x4E8B890
#define mmARC_FARM_KDMA_CTX_SRC_TSIZE_4 0x4E8B894
#define mmARC_FARM_KDMA_CTX_SRC_STRIDE_4 0x4E8B898
#define mmARC_FARM_KDMA_CTX_DST_TSIZE_1 0x4E8B89C
#define mmARC_FARM_KDMA_CTX_DST_STRIDE_1 0x4E8B8A0
#define mmARC_FARM_KDMA_CTX_DST_TSIZE_2 0x4E8B8A4
#define mmARC_FARM_KDMA_CTX_DST_STRIDE_2 0x4E8B8A8
#define mmARC_FARM_KDMA_CTX_DST_TSIZE_3 0x4E8B8AC
#define mmARC_FARM_KDMA_CTX_DST_STRIDE_3 0x4E8B8B0
#define mmARC_FARM_KDMA_CTX_DST_TSIZE_4 0x4E8B8B4
#define mmARC_FARM_KDMA_CTX_DST_STRIDE_4 0x4E8B8B8
#define mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI 0x4E8B8BC
#define mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO 0x4E8B8C0
#define mmARC_FARM_KDMA_CTX_WR_COMP_WDATA 0x4E8B8C4
#define mmARC_FARM_KDMA_CTX_SRC_OFFSET_LO 0x4E8B8C8
#define mmARC_FARM_KDMA_CTX_SRC_OFFSET_HI 0x4E8B8CC
#define mmARC_FARM_KDMA_CTX_DST_OFFSET_LO 0x4E8B8D0
#define mmARC_FARM_KDMA_CTX_DST_OFFSET_HI 0x4E8B8D4
#define mmARC_FARM_KDMA_CTX_SRC_BASE_LO 0x4E8B8D8
#define mmARC_FARM_KDMA_CTX_SRC_BASE_HI 0x4E8B8DC
#define mmARC_FARM_KDMA_CTX_DST_BASE_LO 0x4E8B8E0
#define mmARC_FARM_KDMA_CTX_DST_BASE_HI 0x4E8B8E4
#define mmARC_FARM_KDMA_CTX_DST_TSIZE_0 0x4E8B8E8
#define mmARC_FARM_KDMA_CTX_COMMIT 0x4E8B8EC
#endif /* ASIC_REG_ARC_FARM_KDMA_CTX_REGS_H_ */

View File

@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_
#define ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_
/*
*****************************************
* ARC_FARM_KDMA_KDMA_CGM
* (Prototype: QMAN_CGM)
*****************************************
*/
#define mmARC_FARM_KDMA_KDMA_CGM_CFG 0x4E8BE00
#define mmARC_FARM_KDMA_KDMA_CGM_STS 0x4E8BE04
#define mmARC_FARM_KDMA_KDMA_CGM_CFG1 0x4E8BE08
#endif /* ASIC_REG_ARC_FARM_KDMA_KDMA_CGM_REGS_H_ */

View File

@ -0,0 +1,415 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_MASKS_H_
#define ASIC_REG_ARC_FARM_KDMA_MASKS_H_
/*
*****************************************
* ARC_FARM_KDMA
* (Prototype: DMA_CORE)
*****************************************
*/
/* ARC_FARM_KDMA_CFG_0 */
#define ARC_FARM_KDMA_CFG_0_EN_SHIFT 0
#define ARC_FARM_KDMA_CFG_0_EN_MASK 0x1
/* ARC_FARM_KDMA_CFG_1 */
#define ARC_FARM_KDMA_CFG_1_HALT_SHIFT 0
#define ARC_FARM_KDMA_CFG_1_HALT_MASK 0x1
#define ARC_FARM_KDMA_CFG_1_FLUSH_SHIFT 1
#define ARC_FARM_KDMA_CFG_1_FLUSH_MASK 0x2
/* ARC_FARM_KDMA_PROT */
#define ARC_FARM_KDMA_PROT_VAL_SHIFT 0
#define ARC_FARM_KDMA_PROT_VAL_MASK 0x1
#define ARC_FARM_KDMA_PROT_ERR_VAL_SHIFT 1
#define ARC_FARM_KDMA_PROT_ERR_VAL_MASK 0x2
/* ARC_FARM_KDMA_CKG */
#define ARC_FARM_KDMA_CKG_HBW_RBUF_SHIFT 0
#define ARC_FARM_KDMA_CKG_HBW_RBUF_MASK 0x1
#define ARC_FARM_KDMA_CKG_LBW_RBUF_KDMA_SHIFT 1
#define ARC_FARM_KDMA_CKG_LBW_RBUF_KDMA_MASK 0x2
#define ARC_FARM_KDMA_CKG_TE_SHIFT 2
#define ARC_FARM_KDMA_CKG_TE_MASK 0x4
/* ARC_FARM_KDMA_RD_GLBL */
#define ARC_FARM_KDMA_RD_GLBL_LBW_VIA_HBW_SHIFT 0
#define ARC_FARM_KDMA_RD_GLBL_LBW_VIA_HBW_MASK 0x1
#define ARC_FARM_KDMA_RD_GLBL_HBW_FORCE_MISS_SHIFT 4
#define ARC_FARM_KDMA_RD_GLBL_HBW_FORCE_MISS_MASK 0x10
#define ARC_FARM_KDMA_RD_GLBL_LBW_FORCE_MISS_SHIFT 5
#define ARC_FARM_KDMA_RD_GLBL_LBW_FORCE_MISS_MASK 0x20
/* ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND */
#define ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND_VAL_MASK 0xFFF
/* ARC_FARM_KDMA_RD_HBW_MAX_SIZE */
#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_DATA_SHIFT 0
#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_DATA_MASK 0xFFF
#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_MD_SHIFT 16
#define ARC_FARM_KDMA_RD_HBW_MAX_SIZE_MD_MASK 0xFFF0000
/* ARC_FARM_KDMA_RD_HBW_ARCACHE */
#define ARC_FARM_KDMA_RD_HBW_ARCACHE_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_HBW_ARCACHE_VAL_MASK 0xF
/* ARC_FARM_KDMA_RD_HBW_INFLIGHTS */
#define ARC_FARM_KDMA_RD_HBW_INFLIGHTS_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG */
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_SAT_SHIFT 16
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_EN_SHIFT 31
#define ARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
/* ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND */
#define ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND_VAL_MASK 0xFFF
/* ARC_FARM_KDMA_RD_LBW_MAX_SIZE */
#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_DATA_SHIFT 0
#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_DATA_MASK 0xFFF
#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_MD_SHIFT 16
#define ARC_FARM_KDMA_RD_LBW_MAX_SIZE_MD_MASK 0xFFF0000
/* ARC_FARM_KDMA_RD_LBW_ARCACHE */
#define ARC_FARM_KDMA_RD_LBW_ARCACHE_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_LBW_ARCACHE_VAL_MASK 0xF
/* ARC_FARM_KDMA_RD_LBW_INFLIGHTS */
#define ARC_FARM_KDMA_RD_LBW_INFLIGHTS_VAL_SHIFT 0
#define ARC_FARM_KDMA_RD_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG */
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_SAT_SHIFT 16
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_EN_SHIFT 31
#define ARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
/* ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND */
#define ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_WR_HBW_MAX_AWID */
#define ARC_FARM_KDMA_WR_HBW_MAX_AWID_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_HBW_MAX_AWID_VAL_MASK 0x3FFF
/* ARC_FARM_KDMA_WR_HBW_AWCACHE */
#define ARC_FARM_KDMA_WR_HBW_AWCACHE_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_HBW_AWCACHE_VAL_MASK 0xF
/* ARC_FARM_KDMA_WR_HBW_INFLIGHTS */
#define ARC_FARM_KDMA_WR_HBW_INFLIGHTS_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_HBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG */
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_TOUT_SHIFT 0
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_TOUT_MASK 0xFF
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_SAT_SHIFT 16
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_EN_SHIFT 31
#define ARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG_EN_MASK 0x80000000
/* ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND */
#define ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_WR_LBW_MAX_AWID */
#define ARC_FARM_KDMA_WR_LBW_MAX_AWID_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_LBW_MAX_AWID_VAL_MASK 0x7F
/* ARC_FARM_KDMA_WR_LBW_AWCACHE */
#define ARC_FARM_KDMA_WR_LBW_AWCACHE_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_LBW_AWCACHE_VAL_MASK 0xF
/* ARC_FARM_KDMA_WR_LBW_INFLIGHTS */
#define ARC_FARM_KDMA_WR_LBW_INFLIGHTS_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_LBW_INFLIGHTS_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG */
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_TOUT_SHIFT 0
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_TOUT_MASK 0xFF
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_SAT_SHIFT 16
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_SAT_MASK 0xFF0000
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_EN_SHIFT 31
#define ARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG_EN_MASK 0x80000000
/* ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND */
#define ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND_VAL_MASK 0x1F
/* ARC_FARM_KDMA_WR_COMP_AWUSER */
#define ARC_FARM_KDMA_WR_COMP_AWUSER_VAL_SHIFT 0
#define ARC_FARM_KDMA_WR_COMP_AWUSER_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_ERR_CFG */
#define ARC_FARM_KDMA_ERR_CFG_ERR_MSG_EN_SHIFT 0
#define ARC_FARM_KDMA_ERR_CFG_ERR_MSG_EN_MASK 0x1
#define ARC_FARM_KDMA_ERR_CFG_STOP_ON_ERR_SHIFT 1
#define ARC_FARM_KDMA_ERR_CFG_STOP_ON_ERR_MASK 0x2
/* ARC_FARM_KDMA_ERR_CAUSE */
#define ARC_FARM_KDMA_ERR_CAUSE_HBW_RD_ERR_SHIFT 0
#define ARC_FARM_KDMA_ERR_CAUSE_HBW_RD_ERR_MASK 0x1
#define ARC_FARM_KDMA_ERR_CAUSE_HBW_WR_ERR_SHIFT 1
#define ARC_FARM_KDMA_ERR_CAUSE_HBW_WR_ERR_MASK 0x2
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_MSG_WR_ERR_SHIFT 2
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_MSG_WR_ERR_MASK 0x4
#define ARC_FARM_KDMA_ERR_CAUSE_DESC_OVF_SHIFT 3
#define ARC_FARM_KDMA_ERR_CAUSE_DESC_OVF_MASK 0x8
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_RD_ERR_SHIFT 4
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_RD_ERR_MASK 0x10
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_WR_ERR_SHIFT 5
#define ARC_FARM_KDMA_ERR_CAUSE_LBW_WR_ERR_MASK 0x20
#define ARC_FARM_KDMA_ERR_CAUSE_TE_DESC_FIFO_OVFL_SHIFT 6
#define ARC_FARM_KDMA_ERR_CAUSE_TE_DESC_FIFO_OVFL_MASK 0x40
#define ARC_FARM_KDMA_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_SHIFT 7
#define ARC_FARM_KDMA_ERR_CAUSE_LIN_DMA_COMMIT_CFG_ERR_MASK 0x80
/* ARC_FARM_KDMA_ERRMSG_ADDR_LO */
#define ARC_FARM_KDMA_ERRMSG_ADDR_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_ERRMSG_ADDR_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_ERRMSG_ADDR_HI */
#define ARC_FARM_KDMA_ERRMSG_ADDR_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_ERRMSG_ADDR_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_ERRMSG_WDATA */
#define ARC_FARM_KDMA_ERRMSG_WDATA_VAL_SHIFT 0
#define ARC_FARM_KDMA_ERRMSG_WDATA_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS0 */
#define ARC_FARM_KDMA_STS0_RD_REQ_CNT_SHIFT 0
#define ARC_FARM_KDMA_STS0_RD_REQ_CNT_MASK 0x7FFF
#define ARC_FARM_KDMA_STS0_WR_REQ_CNT_SHIFT 16
#define ARC_FARM_KDMA_STS0_WR_REQ_CNT_MASK 0x7FFF0000
#define ARC_FARM_KDMA_STS0_BUSY_SHIFT 31
#define ARC_FARM_KDMA_STS0_BUSY_MASK 0x80000000
/* ARC_FARM_KDMA_STS1 */
#define ARC_FARM_KDMA_STS1_IS_HALT_SHIFT 0
#define ARC_FARM_KDMA_STS1_IS_HALT_MASK 0x1
/* ARC_FARM_KDMA_STS_RD_CTX_SEL */
#define ARC_FARM_KDMA_STS_RD_CTX_SEL_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_CTX_SEL_VAL_MASK 0x7
#define ARC_FARM_KDMA_STS_RD_CTX_SEL_STRIDE_SHIFT 8
#define ARC_FARM_KDMA_STS_RD_CTX_SEL_STRIDE_MASK 0x100
/* ARC_FARM_KDMA_STS_RD_CTX_SIZE */
#define ARC_FARM_KDMA_STS_RD_CTX_SIZE_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_CTX_SIZE_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_RD_CTX_BASE_LO */
#define ARC_FARM_KDMA_STS_RD_CTX_BASE_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_RD_CTX_BASE_HI */
#define ARC_FARM_KDMA_STS_RD_CTX_BASE_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_RD_CTX_ID */
#define ARC_FARM_KDMA_STS_RD_CTX_ID_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_CTX_ID_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO */
#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI */
#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR */
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_RDY_SHIFT 30
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_RDY_MASK 0x40000000
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VLD_SHIFT 31
#define ARC_FARM_KDMA_STS_RD_LB_AXI_ADDR_VLD_MASK 0x80000000
/* ARC_FARM_KDMA_STS_WR_CTX_SEL */
#define ARC_FARM_KDMA_STS_WR_CTX_SEL_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_CTX_SEL_VAL_MASK 0x7
#define ARC_FARM_KDMA_STS_WR_CTX_SEL_STRIDE_SHIFT 8
#define ARC_FARM_KDMA_STS_WR_CTX_SEL_STRIDE_MASK 0x100
/* ARC_FARM_KDMA_STS_WR_CTX_SIZE */
#define ARC_FARM_KDMA_STS_WR_CTX_SIZE_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_CTX_SIZE_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_WR_CTX_BASE_LO */
#define ARC_FARM_KDMA_STS_WR_CTX_BASE_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_CTX_BASE_LO_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_WR_CTX_BASE_HI */
#define ARC_FARM_KDMA_STS_WR_CTX_BASE_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_CTX_BASE_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_WR_CTX_ID */
#define ARC_FARM_KDMA_STS_WR_CTX_ID_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_CTX_ID_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO */
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VAL_MASK 0x3FFFF
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_RDY_SHIFT 30
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_RDY_MASK 0x40000000
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VLD_SHIFT 31
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO_VLD_MASK 0x80000000
/* ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI */
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VAL_MASK 0x3FFFF
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_RDY_SHIFT 30
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_RDY_MASK 0x40000000
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VLD_SHIFT 31
#define ARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI_VLD_MASK 0x80000000
/* ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR */
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VAL_SHIFT 0
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VAL_MASK 0x3FFFFFF
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_RDY_SHIFT 30
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_RDY_MASK 0x40000000
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VLD_SHIFT 31
#define ARC_FARM_KDMA_STS_WR_LB_AXI_ADDR_VLD_MASK 0x80000000
/* ARC_FARM_KDMA_PWRLP_CFG */
#define ARC_FARM_KDMA_PWRLP_CFG_GLBL_EN_SHIFT 0
#define ARC_FARM_KDMA_PWRLP_CFG_GLBL_EN_MASK 0x1
#define ARC_FARM_KDMA_PWRLP_CFG_CLR_SHIFT 4
#define ARC_FARM_KDMA_PWRLP_CFG_CLR_MASK 0x10
/* ARC_FARM_KDMA_PWRLP_STS */
#define ARC_FARM_KDMA_PWRLP_STS_RLVL_SHIFT 0
#define ARC_FARM_KDMA_PWRLP_STS_RLVL_MASK 0x7F
#define ARC_FARM_KDMA_PWRLP_STS_WLVL_SHIFT 8
#define ARC_FARM_KDMA_PWRLP_STS_WLVL_MASK 0x7F00
#define ARC_FARM_KDMA_PWRLP_STS_RCNT_SHIFT 16
#define ARC_FARM_KDMA_PWRLP_STS_RCNT_MASK 0x7F0000
#define ARC_FARM_KDMA_PWRLP_STS_WCNT_SHIFT 23
#define ARC_FARM_KDMA_PWRLP_STS_WCNT_MASK 0x3F800000
#define ARC_FARM_KDMA_PWRLP_STS_RFULL_SHIFT 30
#define ARC_FARM_KDMA_PWRLP_STS_RFULL_MASK 0x40000000
#define ARC_FARM_KDMA_PWRLP_STS_WFULL_SHIFT 31
#define ARC_FARM_KDMA_PWRLP_STS_WFULL_MASK 0x80000000
/* ARC_FARM_KDMA_DBG_DESC_CNT */
#define ARC_FARM_KDMA_DBG_DESC_CNT_VAL_SHIFT 0
#define ARC_FARM_KDMA_DBG_DESC_CNT_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_DBG_STS */
#define ARC_FARM_KDMA_DBG_STS_RD_CTX_FULL_SHIFT 0
#define ARC_FARM_KDMA_DBG_STS_RD_CTX_FULL_MASK 0x1
#define ARC_FARM_KDMA_DBG_STS_WR_CTX_FULL_SHIFT 1
#define ARC_FARM_KDMA_DBG_STS_WR_CTX_FULL_MASK 0x2
#define ARC_FARM_KDMA_DBG_STS_WR_COMP_FULL_SHIFT 2
#define ARC_FARM_KDMA_DBG_STS_WR_COMP_FULL_MASK 0x4
#define ARC_FARM_KDMA_DBG_STS_RD_CTX_EMPTY_SHIFT 3
#define ARC_FARM_KDMA_DBG_STS_RD_CTX_EMPTY_MASK 0x8
#define ARC_FARM_KDMA_DBG_STS_WR_CTX_EMPTY_SHIFT 4
#define ARC_FARM_KDMA_DBG_STS_WR_CTX_EMPTY_MASK 0x10
#define ARC_FARM_KDMA_DBG_STS_WR_COMP_EMPTY_SHIFT 5
#define ARC_FARM_KDMA_DBG_STS_WR_COMP_EMPTY_MASK 0x20
#define ARC_FARM_KDMA_DBG_STS_TE_EMPTY_SHIFT 6
#define ARC_FARM_KDMA_DBG_STS_TE_EMPTY_MASK 0x40
#define ARC_FARM_KDMA_DBG_STS_TE_BUSY_SHIFT 7
#define ARC_FARM_KDMA_DBG_STS_TE_BUSY_MASK 0x80
#define ARC_FARM_KDMA_DBG_STS_GSKT_EMPTY_SHIFT 8
#define ARC_FARM_KDMA_DBG_STS_GSKT_EMPTY_MASK 0x100
#define ARC_FARM_KDMA_DBG_STS_GSKT_FULL_SHIFT 9
#define ARC_FARM_KDMA_DBG_STS_GSKT_FULL_MASK 0x200
#define ARC_FARM_KDMA_DBG_STS_RD_AGU_CS_SHIFT 10
#define ARC_FARM_KDMA_DBG_STS_RD_AGU_CS_MASK 0x400
#define ARC_FARM_KDMA_DBG_STS_WR_AGU_CS_SHIFT 11
#define ARC_FARM_KDMA_DBG_STS_WR_AGU_CS_MASK 0x800
/* ARC_FARM_KDMA_DBG_BUF_STS */
#define ARC_FARM_KDMA_DBG_BUF_STS_HBW_FULLNESS_SHIFT 0
#define ARC_FARM_KDMA_DBG_BUF_STS_HBW_FULLNESS_MASK 0xFFF
#define ARC_FARM_KDMA_DBG_BUF_STS_LBW_FULLNESS_SHIFT 16
#define ARC_FARM_KDMA_DBG_BUF_STS_LBW_FULLNESS_MASK 0xFFF0000
/* ARC_FARM_KDMA_DBG_RD_DESC_ID */
#define ARC_FARM_KDMA_DBG_RD_DESC_ID_VAL_SHIFT 0
#define ARC_FARM_KDMA_DBG_RD_DESC_ID_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_DBG_WR_DESC_ID */
#define ARC_FARM_KDMA_DBG_WR_DESC_ID_VAL_SHIFT 0
#define ARC_FARM_KDMA_DBG_WR_DESC_ID_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_APB_DMA_LBW_BASE */
#define ARC_FARM_KDMA_APB_DMA_LBW_BASE_VAL_SHIFT 0
#define ARC_FARM_KDMA_APB_DMA_LBW_BASE_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE */
#define ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE_VAL_SHIFT 0
#define ARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE_VAL_MASK 0xFFFF
/* ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG */
#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_Y_X_FORCE_SHIFT 0
#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_Y_X_FORCE_MASK 0x1FF
#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_FORCE_EN_SHIFT 9
#define ARC_FARM_KDMA_E2E_CRED_ASYNC_CFG_FORCE_EN_MASK 0x200
/* ARC_FARM_KDMA_DBG_APB_ENABLER */
#define ARC_FARM_KDMA_DBG_APB_ENABLER_DIS_SHIFT 0
#define ARC_FARM_KDMA_DBG_APB_ENABLER_DIS_MASK 0x1
/* ARC_FARM_KDMA_L2H_CMPR_LO */
#define ARC_FARM_KDMA_L2H_CMPR_LO_VAL_SHIFT 20
#define ARC_FARM_KDMA_L2H_CMPR_LO_VAL_MASK 0xFFF00000
/* ARC_FARM_KDMA_L2H_CMPR_HI */
#define ARC_FARM_KDMA_L2H_CMPR_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_L2H_CMPR_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_L2H_MASK_LO */
#define ARC_FARM_KDMA_L2H_MASK_LO_VAL_SHIFT 20
#define ARC_FARM_KDMA_L2H_MASK_LO_VAL_MASK 0xFFF00000
/* ARC_FARM_KDMA_L2H_MASK_HI */
#define ARC_FARM_KDMA_L2H_MASK_HI_VAL_SHIFT 0
#define ARC_FARM_KDMA_L2H_MASK_HI_VAL_MASK 0xFFFFFFFF
/* ARC_FARM_KDMA_IDLE_IND_MASK */
#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_SHIFT 0
#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_MASK 0x1
#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_SHIFT 1
#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_MASK 0x2
#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_SHIFT 2
#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_MASK 0x4
#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_SHIFT 3
#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_MASK 0x8
#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_CNT_STS_SHIFT 8
#define ARC_FARM_KDMA_IDLE_IND_MASK_DESC_CNT_STS_MASK 0x1F00
#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_CNT_STS_SHIFT 16
#define ARC_FARM_KDMA_IDLE_IND_MASK_COMP_CNT_STS_MASK 0x1F0000
#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_EMPTY_SHIFT 24
#define ARC_FARM_KDMA_IDLE_IND_MASK_INSTAGE_EMPTY_MASK 0x1000000
#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_IDLE_STS_SHIFT 25
#define ARC_FARM_KDMA_IDLE_IND_MASK_CORE_IDLE_STS_MASK 0x2000000
/* ARC_FARM_KDMA_APB_ENABLER */
#define ARC_FARM_KDMA_APB_ENABLER_DIS_SHIFT 0
#define ARC_FARM_KDMA_APB_ENABLER_DIS_MASK 0x1
#endif /* ASIC_REG_ARC_FARM_KDMA_MASKS_H_ */

View File

@ -0,0 +1,157 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_ARC_FARM_KDMA_REGS_H_
#define ASIC_REG_ARC_FARM_KDMA_REGS_H_
/*
*****************************************
* ARC_FARM_KDMA
* (Prototype: DMA_CORE)
*****************************************
*/
#define mmARC_FARM_KDMA_CFG_0 0x4E8B000
#define mmARC_FARM_KDMA_CFG_1 0x4E8B004
#define mmARC_FARM_KDMA_PROT 0x4E8B008
#define mmARC_FARM_KDMA_CKG 0x4E8B00C
#define mmARC_FARM_KDMA_RD_GLBL 0x4E8B07C
#define mmARC_FARM_KDMA_RD_HBW_MAX_OUTSTAND 0x4E8B080
#define mmARC_FARM_KDMA_RD_HBW_MAX_SIZE 0x4E8B084
#define mmARC_FARM_KDMA_RD_HBW_ARCACHE 0x4E8B088
#define mmARC_FARM_KDMA_RD_HBW_INFLIGHTS 0x4E8B090
#define mmARC_FARM_KDMA_RD_HBW_RATE_LIM_CFG 0x4E8B094
#define mmARC_FARM_KDMA_RD_LBW_MAX_OUTSTAND 0x4E8B0C0
#define mmARC_FARM_KDMA_RD_LBW_MAX_SIZE 0x4E8B0C4
#define mmARC_FARM_KDMA_RD_LBW_ARCACHE 0x4E8B0C8
#define mmARC_FARM_KDMA_RD_LBW_INFLIGHTS 0x4E8B0D0
#define mmARC_FARM_KDMA_RD_LBW_RATE_LIM_CFG 0x4E8B0D4
#define mmARC_FARM_KDMA_WR_HBW_MAX_OUTSTAND 0x4E8B100
#define mmARC_FARM_KDMA_WR_HBW_MAX_AWID 0x4E8B104
#define mmARC_FARM_KDMA_WR_HBW_AWCACHE 0x4E8B108
#define mmARC_FARM_KDMA_WR_HBW_INFLIGHTS 0x4E8B10C
#define mmARC_FARM_KDMA_WR_HBW_RATE_LIM_CFG 0x4E8B110
#define mmARC_FARM_KDMA_WR_LBW_MAX_OUTSTAND 0x4E8B140
#define mmARC_FARM_KDMA_WR_LBW_MAX_AWID 0x4E8B144
#define mmARC_FARM_KDMA_WR_LBW_AWCACHE 0x4E8B148
#define mmARC_FARM_KDMA_WR_LBW_INFLIGHTS 0x4E8B14C
#define mmARC_FARM_KDMA_WR_LBW_RATE_LIM_CFG 0x4E8B150
#define mmARC_FARM_KDMA_WR_COMP_MAX_OUTSTAND 0x4E8B180
#define mmARC_FARM_KDMA_WR_COMP_AWUSER 0x4E8B184
#define mmARC_FARM_KDMA_ERR_CFG 0x4E8B300
#define mmARC_FARM_KDMA_ERR_CAUSE 0x4E8B304
#define mmARC_FARM_KDMA_ERRMSG_ADDR_LO 0x4E8B308
#define mmARC_FARM_KDMA_ERRMSG_ADDR_HI 0x4E8B30C
#define mmARC_FARM_KDMA_ERRMSG_WDATA 0x4E8B310
#define mmARC_FARM_KDMA_STS0 0x4E8B380
#define mmARC_FARM_KDMA_STS1 0x4E8B384
#define mmARC_FARM_KDMA_STS_RD_CTX_SEL 0x4E8B400
#define mmARC_FARM_KDMA_STS_RD_CTX_SIZE 0x4E8B404
#define mmARC_FARM_KDMA_STS_RD_CTX_BASE_LO 0x4E8B408
#define mmARC_FARM_KDMA_STS_RD_CTX_BASE_HI 0x4E8B40C
#define mmARC_FARM_KDMA_STS_RD_CTX_ID 0x4E8B410
#define mmARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_LO 0x4E8B414
#define mmARC_FARM_KDMA_STS_RD_HB_AXI_ADDR_HI 0x4E8B418
#define mmARC_FARM_KDMA_STS_RD_LB_AXI_ADDR 0x4E8B41C
#define mmARC_FARM_KDMA_STS_WR_CTX_SEL 0x4E8B420
#define mmARC_FARM_KDMA_STS_WR_CTX_SIZE 0x4E8B424
#define mmARC_FARM_KDMA_STS_WR_CTX_BASE_LO 0x4E8B428
#define mmARC_FARM_KDMA_STS_WR_CTX_BASE_HI 0x4E8B42C
#define mmARC_FARM_KDMA_STS_WR_CTX_ID 0x4E8B430
#define mmARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_LO 0x4E8B434
#define mmARC_FARM_KDMA_STS_WR_HB_AXI_ADDR_HI 0x4E8B438
#define mmARC_FARM_KDMA_STS_WR_LB_AXI_ADDR 0x4E8B43C
#define mmARC_FARM_KDMA_PWRLP_CFG 0x4E8B700
#define mmARC_FARM_KDMA_PWRLP_STS 0x4E8B704
#define mmARC_FARM_KDMA_DBG_DESC_CNT 0x4E8B710
#define mmARC_FARM_KDMA_DBG_STS 0x4E8B714
#define mmARC_FARM_KDMA_DBG_BUF_STS 0x4E8B718
#define mmARC_FARM_KDMA_DBG_RD_DESC_ID 0x4E8B720
#define mmARC_FARM_KDMA_DBG_WR_DESC_ID 0x4E8B724
#define mmARC_FARM_KDMA_APB_DMA_LBW_BASE 0x4E8B728
#define mmARC_FARM_KDMA_APB_MSTR_IF_LBW_BASE 0x4E8B72C
#define mmARC_FARM_KDMA_E2E_CRED_ASYNC_CFG 0x4E8B730
#define mmARC_FARM_KDMA_DBG_APB_ENABLER 0x4E8BE1C
#define mmARC_FARM_KDMA_L2H_CMPR_LO 0x4E8BE20
#define mmARC_FARM_KDMA_L2H_CMPR_HI 0x4E8BE24
#define mmARC_FARM_KDMA_L2H_MASK_LO 0x4E8BE28
#define mmARC_FARM_KDMA_L2H_MASK_HI 0x4E8BE2C
#define mmARC_FARM_KDMA_IDLE_IND_MASK 0x4E8BE30
#define mmARC_FARM_KDMA_APB_ENABLER 0x4E8BE34
#endif /* ASIC_REG_ARC_FARM_KDMA_REGS_H_ */

View File

@ -0,0 +1,777 @@
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright 2016-2020 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_CPU_IF_REGS_H_
#define ASIC_REG_CPU_IF_REGS_H_
/*
*****************************************
* CPU_IF
* (Prototype: CPU_IF)
*****************************************
*/
#define mmCPU_IF_ARUSER_OVR 0x4CC1104
#define mmCPU_IF_ARUSER_OVR_EN 0x4CC1108
#define mmCPU_IF_AWUSER_OVR 0x4CC110C
#define mmCPU_IF_AWUSER_OVR_EN 0x4CC1110
#define mmCPU_IF_ARUSER_MSB_OVR 0x4CC1114
#define mmCPU_IF_AWUSER_MSB_OVR 0x4CC1120
#define mmCPU_IF_AXCACHE_OVR 0x4CC1128
#define mmCPU_IF_LOCK_OVR 0x4CC112C
#define mmCPU_IF_PROT_OVR 0x4CC1130
#define mmCPU_IF_MAX_OUTSTANDING 0x4CC1134
#define mmCPU_IF_EARLY_BRESP_EN 0x4CC1138
#define mmCPU_IF_FORCE_RSP_OK 0x4CC113C
#define mmCPU_IF_CPU_SEI_INTR_STS 0x4CC1140
#define mmCPU_IF_CPU_SEI_INTR_CLR 0x4CC1144
#define mmCPU_IF_CPU_SEI_INTR_MASK 0x4CC1148
#define mmCPU_IF_AXI_SPLIT_NO_WR_INFLIGHT 0x4CC114C
#define mmCPU_IF_AXI_SPLIT_SEI_INTR_ID 0x4CC1150
#define mmCPU_IF_TOTAL_WR_CNT 0x4CC1154
#define mmCPU_IF_INFLIGHT_WR_CNT 0x4CC1158
#define mmCPU_IF_TOTAL_RD_CNT 0x4CC115C
#define mmCPU_IF_INFLIGHT_RD_CNT 0x4CC1160
#define mmCPU_IF_SRAM_MSB_ADDR 0x4CC1164
#define mmCPU_IF_CFG_MSB_ADDR 0x4CC1168
#define mmCPU_IF_HBM_MSB_ADDR 0x4CC116C
#define mmCPU_IF_PCIE_MSB_ADDR 0x4CC1170
#define mmCPU_IF_KMD_HW_DIRTY_STATUS 0x4CC1174
#define mmCPU_IF_MSTR_IF_E2E_FORCE_BP 0x4CC1188
#define mmCPU_IF_MSTR_IF_E2E_GRCFL_CLR 0x4CC118C
#define mmCPU_IF_LBW_TERMINATE_AWADDR_ERR 0x4CC11A0
#define mmCPU_IF_LBW_TERMINATE_ARADDR_ERR 0x4CC11A4
#define mmCPU_IF_CFG_LBW_TERMINATE_BRESP 0x4CC11A8
#define mmCPU_IF_CFG_LBW_TERMINATE_RRESP 0x4CC11AC
#define mmCPU_IF_PF_PQ_PI 0x4CC1200
#define mmCPU_IF_PQ_BASE_ADDR_LOW 0x4CC1204
#define mmCPU_IF_PQ_BASE_ADDR_HIGH 0x4CC1208
#define mmCPU_IF_PQ_LENGTH 0x4CC120C
#define mmCPU_IF_CQ_BASE_ADDR_LOW 0x4CC1210
#define mmCPU_IF_CQ_BASE_ADDR_HIGH 0x4CC1214
#define mmCPU_IF_CQ_LENGTH 0x4CC1218
#define mmCPU_IF_EQ_BASE_ADDR_LOW 0x4CC1220
#define mmCPU_IF_EQ_BASE_ADDR_HIGH 0x4CC1224
#define mmCPU_IF_EQ_LENGTH 0x4CC1228
#define mmCPU_IF_EQ_RD_OFFS 0x4CC122C
#define mmCPU_IF_QUEUE_INIT 0x4CC1230
#define mmCPU_IF_TPC_SERR_INTR_STS 0x4CC1300
#define mmCPU_IF_TPC_SERR_INTR_CLR 0x4CC1304
#define mmCPU_IF_TPC_SERR_INTR_MASK 0x4CC1308
#define mmCPU_IF_TPC_DERR_INTR_STS 0x4CC1310
#define mmCPU_IF_TPC_DERR_INTR_CLR 0x4CC1314
#define mmCPU_IF_TPC_DERR_INTR_MASK 0x4CC1318
#define mmCPU_IF_MME_SERR_INTR_STS_0 0x4CC1320
#define mmCPU_IF_MME_SERR_INTR_STS_1 0x4CC1324
#define mmCPU_IF_MME_SERR_INTR_STS_2 0x4CC1328
#define mmCPU_IF_MME_SERR_INTR_STS_3 0x4CC132C
#define mmCPU_IF_MME_SERR_INTR_CLR_0 0x4CC1330
#define mmCPU_IF_MME_SERR_INTR_CLR_1 0x4CC1334
#define mmCPU_IF_MME_SERR_INTR_CLR_2 0x4CC1338
#define mmCPU_IF_MME_SERR_INTR_CLR_3 0x4CC133C
#define mmCPU_IF_MME_SERR_INTR_MASK_0 0x4CC1340
#define mmCPU_IF_MME_SERR_INTR_MASK_1 0x4CC1344
#define mmCPU_IF_MME_SERR_INTR_MASK_2 0x4CC1348
#define mmCPU_IF_MME_SERR_INTR_MASK_3 0x4CC134C
#define mmCPU_IF_MME_DERR_INTR_STS_0 0x4CC1350
#define mmCPU_IF_MME_DERR_INTR_STS_1 0x4CC1354
#define mmCPU_IF_MME_DERR_INTR_STS_2 0x4CC1358
#define mmCPU_IF_MME_DERR_INTR_STS_3 0x4CC135C
#define mmCPU_IF_MME_DERR_INTR_CLR_0 0x4CC1360
#define mmCPU_IF_MME_DERR_INTR_CLR_1 0x4CC1364
#define mmCPU_IF_MME_DERR_INTR_CLR_2 0x4CC1368
#define mmCPU_IF_MME_DERR_INTR_CLR_3 0x4CC136C
#define mmCPU_IF_MME_DERR_INTR_MASK_0 0x4CC1370
#define mmCPU_IF_MME_DERR_INTR_MASK_1 0x4CC1374
#define mmCPU_IF_MME_DERR_INTR_MASK_2 0x4CC1378
#define mmCPU_IF_MME_DERR_INTR_MASK_3 0x4CC137C
#define mmCPU_IF_HDMA_SERR_INTR_STS 0x4CC1380
#define mmCPU_IF_HDMA_SERR_INTR_CLR 0x4CC1384
#define mmCPU_IF_HDMA_SERR_INTR_MASK 0x4CC1388
#define mmCPU_IF_HDMA_DERR_INTR_STS 0x4CC1390
#define mmCPU_IF_HDMA_DERR_INTR_CLR 0x4CC1394
#define mmCPU_IF_HDMA_DERR_INTR_MASK 0x4CC1398
#define mmCPU_IF_PDMA_SERR_INTR_STS 0x4CC13A0
#define mmCPU_IF_PDMA_SERR_INTR_CLR 0x4CC13A4
#define mmCPU_IF_PDMA_SERR_INTR_MASK 0x4CC13A8
#define mmCPU_IF_PDMA_DERR_INTR_STS 0x4CC13B0
#define mmCPU_IF_PDMA_DERR_INTR_CLR 0x4CC13B4
#define mmCPU_IF_PDMA_DERR_INTR_MASK 0x4CC13B8
#define mmCPU_IF_SRAM_SERR_INTR_STS 0x4CC13C0
#define mmCPU_IF_SRAM_SERR_INTR_CLR 0x4CC13C4
#define mmCPU_IF_SRAM_SERR_INTR_MASK 0x4CC13C8
#define mmCPU_IF_SRAM_DERR_INTR_STS 0x4CC13D0
#define mmCPU_IF_SRAM_DERR_INTR_CLR 0x4CC13D4
#define mmCPU_IF_SRAM_DERR_INTR_MASK 0x4CC13D8
#define mmCPU_IF_HBM_SERR_INTR_STS 0x4CC13E0
#define mmCPU_IF_HBM_SERR_INTR_CLR 0x4CC13E4
#define mmCPU_IF_HBM_SERR_INTR_MASK 0x4CC13E8
#define mmCPU_IF_HBM_DERR_INTR_STS 0x4CC13F0
#define mmCPU_IF_HBM_DERR_INTR_CLR 0x4CC13F4
#define mmCPU_IF_HBM_DERR_INTR_MASK 0x4CC13F8
#define mmCPU_IF_HMMU_SERR_INTR_STS 0x4CC1400
#define mmCPU_IF_HMMU_SERR_INTR_CLR 0x4CC1404
#define mmCPU_IF_HMMU_SERR_INTR_MASK 0x4CC1408
#define mmCPU_IF_HMMU_DERR_INTR_STS 0x4CC1410
#define mmCPU_IF_HMMU_DERR_INTR_CLR 0x4CC1414
#define mmCPU_IF_HMMU_DERR_INTR_MASK 0x4CC1418
#define mmCPU_IF_DEC_SERR_INTR_STS 0x4CC1420
#define mmCPU_IF_DEC_SERR_INTR_CLR 0x4CC1424
#define mmCPU_IF_DEC_SERR_INTR_MASK 0x4CC1428
#define mmCPU_IF_DEC_DERR_INTR_STS 0x4CC1430
#define mmCPU_IF_DEC_DERR_INTR_CLR 0x4CC1434
#define mmCPU_IF_DEC_DERR_INTR_MASK 0x4CC1438
#define mmCPU_IF_NIC_SERR_INTR_STS 0x4CC1440
#define mmCPU_IF_NIC_SERR_INTR_CLR 0x4CC1444
#define mmCPU_IF_NIC_SERR_INTR_MASK 0x4CC1448
#define mmCPU_IF_NIC_DERR_INTR_STS 0x4CC1450
#define mmCPU_IF_NIC_DERR_INTR_CLR 0x4CC1454
#define mmCPU_IF_NIC_DERR_INTR_MASK 0x4CC1458
#define mmCPU_IF_SYNC_MNGR_SERR_INTR_STS 0x4CC1460
#define mmCPU_IF_SYNC_MNGR_SERR_INTR_CLR 0x4CC1464
#define mmCPU_IF_SYNC_MNGR_SERR_INTR_MASK 0x4CC1468
#define mmCPU_IF_SYNC_MNGR_DERR_INTR_STS 0x4CC1470
#define mmCPU_IF_SYNC_MNGR_DERR_INTR_CLR 0x4CC1474
#define mmCPU_IF_SYNC_MNGR_DERR_INTR_MASK 0x4CC1478
#define mmCPU_IF_HIF_SERR_INTR_STS 0x4CC1480
#define mmCPU_IF_HIF_SERR_INTR_CLR 0x4CC1484
#define mmCPU_IF_HIF_SERR_INTR_MASK 0x4CC1488
#define mmCPU_IF_HIF_DERR_INTR_STS 0x4CC1490
#define mmCPU_IF_HIF_DERR_INTR_CLR 0x4CC1494
#define mmCPU_IF_HIF_DERR_INTR_MASK 0x4CC1498
#define mmCPU_IF_XBAR_SERR_INTR_STS 0x4CC14A0
#define mmCPU_IF_XBAR_SERR_INTR_CLR 0x4CC14A4
#define mmCPU_IF_XBAR_SERR_INTR_MASK 0x4CC14A8
#define mmCPU_IF_XBAR_DERR_INTR_STS 0x4CC14B0
#define mmCPU_IF_XBAR_DERR_INTR_CLR 0x4CC14B4
#define mmCPU_IF_XBAR_DERR_INTR_MASK 0x4CC14B8
#define mmCPU_IF_TPC_SEI_INTR_STS 0x4CC14C0
#define mmCPU_IF_TPC_SEI_INTR_CLR 0x4CC14C4
#define mmCPU_IF_TPC_SEI_INTR_MASK 0x4CC14C8
#define mmCPU_IF_MME_SEI_INTR_STS_0 0x4CC14D0
#define mmCPU_IF_MME_SEI_INTR_STS_1 0x4CC14D4
#define mmCPU_IF_MME_SEI_INTR_STS_2 0x4CC14D8
#define mmCPU_IF_MME_SEI_INTR_STS_3 0x4CC14DC
#define mmCPU_IF_MME_SEI_INTR_CLR_0 0x4CC14E0
#define mmCPU_IF_MME_SEI_INTR_CLR_1 0x4CC14E4
#define mmCPU_IF_MME_SEI_INTR_CLR_2 0x4CC14E8
#define mmCPU_IF_MME_SEI_INTR_CLR_3 0x4CC14EC
#define mmCPU_IF_MME_SEI_INTR_MASK_0 0x4CC14F0
#define mmCPU_IF_MME_SEI_INTR_MASK_1 0x4CC14F4
#define mmCPU_IF_MME_SEI_INTR_MASK_2 0x4CC14F8
#define mmCPU_IF_MME_SEI_INTR_MASK_3 0x4CC14FC
#define mmCPU_IF_PLL_LSB_SEI_INTR_STS 0x4CC1500
#define mmCPU_IF_PLL_LSB_SEI_INTR_CLR 0x4CC1504
#define mmCPU_IF_PLL_LSB_SEI_INTR_MASK 0x4CC1508
#define mmCPU_IF_PLL_MSB_SEI_INTR_STS 0x4CC1510
#define mmCPU_IF_PLL_MSB_SEI_INTR_CLR 0x4CC1514
#define mmCPU_IF_PLL_MSB_SEI_INTR_MASK 0x4CC1518
#define mmCPU_IF_HMMU_SEI_INTR_STS 0x4CC1520
#define mmCPU_IF_HMMU_SEI_INTR_CLR 0x4CC1524
#define mmCPU_IF_HMMU_SEI_INTR_MASK 0x4CC1528
#define mmCPU_IF_HDMA_SEI_INTR_STS 0x4CC1530
#define mmCPU_IF_HDMA_SEI_INTR_CLR 0x4CC1534
#define mmCPU_IF_HDMA_SEI_INTR_MASK 0x4CC1538
#define mmCPU_IF_PDMA_SEI_INTR_STS 0x4CC1540
#define mmCPU_IF_PDMA_SEI_INTR_CLR 0x4CC1544
#define mmCPU_IF_PDMA_SEI_INTR_MASK 0x4CC1548
#define mmCPU_IF_HBM_SEI_INTR_STS 0x4CC1550
#define mmCPU_IF_HBM_SEI_INTR_CLR 0x4CC1554
#define mmCPU_IF_HBM_SEI_INTR_MASK 0x4CC1558
#define mmCPU_IF_DEC_SEI_INTR_STS 0x4CC1560
#define mmCPU_IF_DEC_SEI_INTR_CLR 0x4CC1564
#define mmCPU_IF_DEC_SEI_INTR_MASK 0x4CC1568
#define mmCPU_IF_HIF_SEI_INTR_STS 0x4CC1570
#define mmCPU_IF_HIF_SEI_INTR_CLR 0x4CC1574
#define mmCPU_IF_HIF_SEI_INTR_MASK 0x4CC1578
#define mmCPU_IF_SYNC_MNGR_SEI_INTR_STS 0x4CC1580
#define mmCPU_IF_SYNC_MNGR_SEI_INTR_CLR 0x4CC1584
#define mmCPU_IF_SYNC_MNGR_SEI_INTR_MASK 0x4CC1588
#define mmCPU_IF_NIC_SEI_INTR_STS 0x4CC1590
#define mmCPU_IF_NIC_SEI_INTR_CLR 0x4CC1594
#define mmCPU_IF_NIC_SEI_INTR_MASK 0x4CC1598
#define mmCPU_IF_PCIE_SPI_INTR_STS 0x4CC1600
#define mmCPU_IF_PCIE_SPI_INTR_CLR 0x4CC1604
#define mmCPU_IF_PCIE_SPI_INTR_MASK 0x4CC1608
#define mmCPU_IF_MME_SPI_INTR_STS_0 0x4CC1610
#define mmCPU_IF_MME_SPI_INTR_STS_1 0x4CC1614
#define mmCPU_IF_MME_SPI_INTR_STS_2 0x4CC1618
#define mmCPU_IF_MME_SPI_INTR_STS_3 0x4CC161C
#define mmCPU_IF_MME_SPI_INTR_CLR_0 0x4CC1620
#define mmCPU_IF_MME_SPI_INTR_CLR_1 0x4CC1624
#define mmCPU_IF_MME_SPI_INTR_CLR_2 0x4CC1628
#define mmCPU_IF_MME_SPI_INTR_CLR_3 0x4CC162C
#define mmCPU_IF_MME_SPI_INTR_MASK_0 0x4CC1630
#define mmCPU_IF_MME_SPI_INTR_MASK_1 0x4CC1634
#define mmCPU_IF_MME_SPI_INTR_MASK_2 0x4CC1638
#define mmCPU_IF_MME_SPI_INTR_MASK_3 0x4CC163C
#define mmCPU_IF_HMMU_SPI_INTR_STS_0 0x4CC1640
#define mmCPU_IF_HMMU_SPI_INTR_STS_1 0x4CC1644
#define mmCPU_IF_HMMU_SPI_INTR_STS_2 0x4CC1648
#define mmCPU_IF_HMMU_SPI_INTR_STS_3 0x4CC164C
#define mmCPU_IF_HMMU_SPI_INTR_STS_4 0x4CC1650
#define mmCPU_IF_HMMU_SPI_INTR_STS_5 0x4CC1654
#define mmCPU_IF_HMMU_SPI_INTR_STS_6 0x4CC1658
#define mmCPU_IF_HMMU_SPI_INTR_STS_7 0x4CC165C
#define mmCPU_IF_HMMU_SPI_INTR_STS_8 0x4CC1660
#define mmCPU_IF_HMMU_SPI_INTR_STS_9 0x4CC1664
#define mmCPU_IF_HMMU_SPI_INTR_STS_10 0x4CC1668
#define mmCPU_IF_HMMU_SPI_INTR_STS_11 0x4CC166C
#define mmCPU_IF_HMMU_SPI_INTR_STS_12 0x4CC1670
#define mmCPU_IF_HMMU_SPI_INTR_STS_13 0x4CC1674
#define mmCPU_IF_HMMU_SPI_INTR_STS_14 0x4CC1678
#define mmCPU_IF_HMMU_SPI_INTR_STS_15 0x4CC167C
#define mmCPU_IF_HMMU_SPI_INTR_CLR_0 0x4CC1680
#define mmCPU_IF_HMMU_SPI_INTR_CLR_1 0x4CC1684
#define mmCPU_IF_HMMU_SPI_INTR_CLR_2 0x4CC1688
#define mmCPU_IF_HMMU_SPI_INTR_CLR_3 0x4CC168C
#define mmCPU_IF_HMMU_SPI_INTR_CLR_4 0x4CC1690
#define mmCPU_IF_HMMU_SPI_INTR_CLR_5 0x4CC1694
#define mmCPU_IF_HMMU_SPI_INTR_CLR_6 0x4CC1698
#define mmCPU_IF_HMMU_SPI_INTR_CLR_7 0x4CC169C
#define mmCPU_IF_HMMU_SPI_INTR_CLR_8 0x4CC16A0
#define mmCPU_IF_HMMU_SPI_INTR_CLR_9 0x4CC16A4
#define mmCPU_IF_HMMU_SPI_INTR_CLR_10 0x4CC16A8
#define mmCPU_IF_HMMU_SPI_INTR_CLR_11 0x4CC16AC
#define mmCPU_IF_HMMU_SPI_INTR_CLR_12 0x4CC16B0
#define mmCPU_IF_HMMU_SPI_INTR_CLR_13 0x4CC16B4
#define mmCPU_IF_HMMU_SPI_INTR_CLR_14 0x4CC16B8
#define mmCPU_IF_HMMU_SPI_INTR_CLR_15 0x4CC16BC
#define mmCPU_IF_HMMU_SPI_INTR_MASK_0 0x4CC16C0
#define mmCPU_IF_HMMU_SPI_INTR_MASK_1 0x4CC16C4
#define mmCPU_IF_HMMU_SPI_INTR_MASK_2 0x4CC16C8
#define mmCPU_IF_HMMU_SPI_INTR_MASK_3 0x4CC16CC
#define mmCPU_IF_HMMU_SPI_INTR_MASK_4 0x4CC16D0
#define mmCPU_IF_HMMU_SPI_INTR_MASK_5 0x4CC16D4
#define mmCPU_IF_HMMU_SPI_INTR_MASK_6 0x4CC16D8
#define mmCPU_IF_HMMU_SPI_INTR_MASK_7 0x4CC16DC
#define mmCPU_IF_HMMU_SPI_INTR_MASK_8 0x4CC16E0
#define mmCPU_IF_HMMU_SPI_INTR_MASK_9 0x4CC16E4
#define mmCPU_IF_HMMU_SPI_INTR_MASK_10 0x4CC16E8
#define mmCPU_IF_HMMU_SPI_INTR_MASK_11 0x4CC16EC
#define mmCPU_IF_HMMU_SPI_INTR_MASK_12 0x4CC16F0
#define mmCPU_IF_HMMU_SPI_INTR_MASK_13 0x4CC16F4
#define mmCPU_IF_HMMU_SPI_INTR_MASK_14 0x4CC16F8
#define mmCPU_IF_HMMU_SPI_INTR_MASK_15 0x4CC16FC
#define mmCPU_IF_DEC_SPI_INTR_STS_0 0x4CC1700
#define mmCPU_IF_DEC_SPI_INTR_STS_1 0x4CC1704
#define mmCPU_IF_DEC_SPI_INTR_STS_2 0x4CC1708
#define mmCPU_IF_DEC_SPI_INTR_STS_3 0x4CC170C
#define mmCPU_IF_DEC_SPI_INTR_STS_4 0x4CC1710
#define mmCPU_IF_DEC_SPI_INTR_STS_5 0x4CC1714
#define mmCPU_IF_DEC_SPI_INTR_STS_6 0x4CC1718
#define mmCPU_IF_DEC_SPI_INTR_STS_7 0x4CC171C
#define mmCPU_IF_DEC_SPI_INTR_STS_8 0x4CC1720
#define mmCPU_IF_DEC_SPI_INTR_STS_9 0x4CC1724
#define mmCPU_IF_DEC_SPI_INTR_CLR_0 0x4CC1730
#define mmCPU_IF_DEC_SPI_INTR_CLR_1 0x4CC1734
#define mmCPU_IF_DEC_SPI_INTR_CLR_2 0x4CC1738
#define mmCPU_IF_DEC_SPI_INTR_CLR_3 0x4CC173C
#define mmCPU_IF_DEC_SPI_INTR_CLR_4 0x4CC1740
#define mmCPU_IF_DEC_SPI_INTR_CLR_5 0x4CC1744
#define mmCPU_IF_DEC_SPI_INTR_CLR_6 0x4CC1748
#define mmCPU_IF_DEC_SPI_INTR_CLR_7 0x4CC174C
#define mmCPU_IF_DEC_SPI_INTR_CLR_8 0x4CC1750
#define mmCPU_IF_DEC_SPI_INTR_CLR_9 0x4CC1754
#define mmCPU_IF_DEC_SPI_INTR_MASK_0 0x4CC1760
#define mmCPU_IF_DEC_SPI_INTR_MASK_1 0x4CC1764
#define mmCPU_IF_DEC_SPI_INTR_MASK_2 0x4CC1768
#define mmCPU_IF_DEC_SPI_INTR_MASK_3 0x4CC176C
#define mmCPU_IF_DEC_SPI_INTR_MASK_4 0x4CC1770
#define mmCPU_IF_DEC_SPI_INTR_MASK_5 0x4CC1774
#define mmCPU_IF_DEC_SPI_INTR_MASK_6 0x4CC1778
#define mmCPU_IF_DEC_SPI_INTR_MASK_7 0x4CC177C
#define mmCPU_IF_DEC_SPI_INTR_MASK_8 0x4CC1780
#define mmCPU_IF_DEC_SPI_INTR_MASK_9 0x4CC1784
#define mmCPU_IF_HIF_SPI_INTR_STS 0x4CC17A0
#define mmCPU_IF_HIF_SPI_INTR_CLR 0x4CC17A4
#define mmCPU_IF_HIF_SPI_INTR_MASK 0x4CC17A8
#define mmCPU_IF_NIC_SPI_INTR_STS_0 0x4CC17B0
#define mmCPU_IF_NIC_SPI_INTR_STS_1 0x4CC17B4
#define mmCPU_IF_NIC_SPI_INTR_STS_2 0x4CC17B8
#define mmCPU_IF_NIC_SPI_INTR_STS_3 0x4CC17BC
#define mmCPU_IF_NIC_SPI_INTR_STS_4 0x4CC17C0
#define mmCPU_IF_NIC_SPI_INTR_STS_5 0x4CC17C4
#define mmCPU_IF_NIC_SPI_INTR_STS_6 0x4CC17C8
#define mmCPU_IF_NIC_SPI_INTR_STS_7 0x4CC17CC
#define mmCPU_IF_NIC_SPI_INTR_STS_8 0x4CC17D0
#define mmCPU_IF_NIC_SPI_INTR_STS_9 0x4CC17D4
#define mmCPU_IF_NIC_SPI_INTR_STS_10 0x4CC17D8
#define mmCPU_IF_NIC_SPI_INTR_STS_11 0x4CC17DC
#define mmCPU_IF_NIC_SPI_INTR_CLR_0 0x4CC17E0
#define mmCPU_IF_NIC_SPI_INTR_CLR_1 0x4CC17E4
#define mmCPU_IF_NIC_SPI_INTR_CLR_2 0x4CC17E8
#define mmCPU_IF_NIC_SPI_INTR_CLR_3 0x4CC17EC
#define mmCPU_IF_NIC_SPI_INTR_CLR_4 0x4CC17F0
#define mmCPU_IF_NIC_SPI_INTR_CLR_5 0x4CC17F4
#define mmCPU_IF_NIC_SPI_INTR_CLR_6 0x4CC17F8
#define mmCPU_IF_NIC_SPI_INTR_CLR_7 0x4CC17FC
#define mmCPU_IF_NIC_SPI_INTR_CLR_8 0x4CC1800
#define mmCPU_IF_NIC_SPI_INTR_CLR_9 0x4CC1804
#define mmCPU_IF_NIC_SPI_INTR_CLR_10 0x4CC1808
#define mmCPU_IF_NIC_SPI_INTR_CLR_11 0x4CC180C
#define mmCPU_IF_NIC_SPI_INTR_MASK_0 0x4CC1810
#define mmCPU_IF_NIC_SPI_INTR_MASK_1 0x4CC1814
#define mmCPU_IF_NIC_SPI_INTR_MASK_2 0x4CC1818
#define mmCPU_IF_NIC_SPI_INTR_MASK_3 0x4CC181C
#define mmCPU_IF_NIC_SPI_INTR_MASK_4 0x4CC1820
#define mmCPU_IF_NIC_SPI_INTR_MASK_5 0x4CC1824
#define mmCPU_IF_NIC_SPI_INTR_MASK_6 0x4CC1828
#define mmCPU_IF_NIC_SPI_INTR_MASK_7 0x4CC182C
#define mmCPU_IF_NIC_SPI_INTR_MASK_8 0x4CC1830
#define mmCPU_IF_NIC_SPI_INTR_MASK_9 0x4CC1834
#define mmCPU_IF_NIC_SPI_INTR_MASK_10 0x4CC1838
#define mmCPU_IF_NIC_SPI_INTR_MASK_11 0x4CC183C
#define mmCPU_IF_DEC_ECO_INTR_STS 0x4CC1840
#define mmCPU_IF_DEC_ECO_INTR_CLR 0x4CC1844
#define mmCPU_IF_DEC_ECO_INTR_MASK 0x4CC1848
#define mmCPU_IF_HIF_ECO_INTR_STS 0x4CC1850
#define mmCPU_IF_HIF_ECO_INTR_CLR 0x4CC1854
#define mmCPU_IF_HIF_ECO_INTR_MASK 0x4CC1858
#define mmCPU_IF_HMMU_ECO_INTR_STS 0x4CC1860
#define mmCPU_IF_HMMU_ECO_INTR_CLR 0x4CC1864
#define mmCPU_IF_HMMU_ECO_INTR_MASK 0x4CC1868
#define mmCPU_IF_NIC_ECO_INTR_STS 0x4CC1870
#define mmCPU_IF_NIC_ECO_INTR_CLR 0x4CC1874
#define mmCPU_IF_NIC_ECO_INTR_MASK 0x4CC1878
#define mmCPU_IF_MSI_X_INTR_STS_0 0x4CC1900
#define mmCPU_IF_MSI_X_INTR_STS_1 0x4CC1904
#define mmCPU_IF_MSI_X_INTR_STS_2 0x4CC1908
#define mmCPU_IF_MSI_X_INTR_STS_3 0x4CC190C
#define mmCPU_IF_MSI_X_INTR_STS_4 0x4CC1910
#define mmCPU_IF_MSI_X_INTR_STS_5 0x4CC1914
#define mmCPU_IF_MSI_X_INTR_STS_6 0x4CC1918
#define mmCPU_IF_MSI_X_INTR_STS_7 0x4CC191C
#define mmCPU_IF_MSI_X_INTR_STS_8 0x4CC1920
#define mmCPU_IF_MSI_X_INTR_STS_9 0x4CC1924
#define mmCPU_IF_MSI_X_INTR_STS_10 0x4CC1928
#define mmCPU_IF_MSI_X_INTR_STS_11 0x4CC192C
#define mmCPU_IF_MSI_X_INTR_STS_12 0x4CC1930
#define mmCPU_IF_MSI_X_INTR_STS_13 0x4CC1934
#define mmCPU_IF_MSI_X_INTR_STS_14 0x4CC1938
#define mmCPU_IF_MSI_X_INTR_STS_15 0x4CC193C
#define mmCPU_IF_MSI_X_INTR_CLR_0 0x4CC1940
#define mmCPU_IF_MSI_X_INTR_CLR_1 0x4CC1944
#define mmCPU_IF_MSI_X_INTR_CLR_2 0x4CC1948
#define mmCPU_IF_MSI_X_INTR_CLR_3 0x4CC194C
#define mmCPU_IF_MSI_X_INTR_CLR_4 0x4CC1950
#define mmCPU_IF_MSI_X_INTR_CLR_5 0x4CC1954
#define mmCPU_IF_MSI_X_INTR_CLR_6 0x4CC1958
#define mmCPU_IF_MSI_X_INTR_CLR_7 0x4CC195C
#define mmCPU_IF_MSI_X_INTR_CLR_8 0x4CC1960
#define mmCPU_IF_MSI_X_INTR_CLR_9 0x4CC1964
#define mmCPU_IF_MSI_X_INTR_CLR_10 0x4CC1968
#define mmCPU_IF_MSI_X_INTR_CLR_11 0x4CC196C
#define mmCPU_IF_MSI_X_INTR_CLR_12 0x4CC1970
#define mmCPU_IF_MSI_X_INTR_CLR_13 0x4CC1974
#define mmCPU_IF_MSI_X_INTR_CLR_14 0x4CC1978
#define mmCPU_IF_MSI_X_INTR_CLR_15 0x4CC197C
#define mmCPU_IF_MSI_X_INTR_MASK_0 0x4CC1980
#define mmCPU_IF_MSI_X_INTR_MASK_1 0x4CC1984
#define mmCPU_IF_MSI_X_INTR_MASK_2 0x4CC1988
#define mmCPU_IF_MSI_X_INTR_MASK_3 0x4CC198C
#define mmCPU_IF_MSI_X_INTR_MASK_4 0x4CC1990
#define mmCPU_IF_MSI_X_INTR_MASK_5 0x4CC1994
#define mmCPU_IF_MSI_X_INTR_MASK_6 0x4CC1998
#define mmCPU_IF_MSI_X_INTR_MASK_7 0x4CC199C
#define mmCPU_IF_MSI_X_INTR_MASK_8 0x4CC19A0
#define mmCPU_IF_MSI_X_INTR_MASK_9 0x4CC19A4
#define mmCPU_IF_MSI_X_INTR_MASK_10 0x4CC19A8
#define mmCPU_IF_MSI_X_INTR_MASK_11 0x4CC19AC
#define mmCPU_IF_MSI_X_INTR_MASK_12 0x4CC19B0
#define mmCPU_IF_MSI_X_INTR_MASK_13 0x4CC19B4
#define mmCPU_IF_MSI_X_INTR_MASK_14 0x4CC19B8
#define mmCPU_IF_MSI_X_INTR_MASK_15 0x4CC19BC
#define mmCPU_IF_MSI_X_BUSY_INTR_STS 0x4CC19C0
#define mmCPU_IF_MSI_X_BUSY_INTR_CLR 0x4CC19C4
#define mmCPU_IF_MSI_X_BUSY_INTR_MASK 0x4CC19C8
#define mmCPU_IF_MSI_X_GEN_ADDR 0x4CC19D0
#define mmCPU_IF_MSI_X_GEN_DATA 0x4CC19D4
#define mmCPU_IF_MSI_X_GEN_AWPROT 0x4CC19D8
#endif /* ASIC_REG_CPU_IF_REGS_H_ */

View File

@@ -0,0 +1,229 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */
/*
 * Bit-field SHIFT/MASK pairs for the 32-bit SWREG registers of the
 * DCORE0 decoder-0 command unit (prototype: VSI_CMD).  Note that the
 * SWREG numbering is sparse: SWREG27..SWREG63 are not defined in this
 * block, and SWREG64..SWREG67 are scratch registers (SW_DUMMY0..3).
 * Generated file - do not hand-edit the definitions below.
 */
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_
#define ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_
/*
*****************************************
* DCORE0_DEC0_CMD
* (Prototype: VSI_CMD)
*****************************************
*/
/* DCORE0_DEC0_CMD_SWREG0 */
#define DCORE0_DEC0_CMD_SWREG0_SW_HW_VERSION_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG0_SW_HW_VERSION_MASK 0xFFFF
#define DCORE0_DEC0_CMD_SWREG0_SW_HW_ID_SHIFT 16
#define DCORE0_DEC0_CMD_SWREG0_SW_HW_ID_MASK 0xFFFF0000
/* DCORE0_DEC0_CMD_SWREG1 */
#define DCORE0_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG1_SW_HW_BUILDDATE_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG2 */
#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_NORM_INTR_SRC_MASK 0xFFFF
#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_SHIFT 16
#define DCORE0_DEC0_CMD_SWREG2_SW_EXT_ABN_INTR_SRC_MASK 0xFFFF0000
/* DCORE0_DEC0_CMD_SWREG3 */
#define DCORE0_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG3_SW_EXE_CMDBUF_COUNT_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG4 */
#define DCORE0_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG4_SW_CMD_EXE_LSB_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG5 */
#define DCORE0_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG5_SW_CMD_EXE_MSB_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG6 */
#define DCORE0_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG6_SW_AXI_TOTALARLEN_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG7 */
#define DCORE0_DEC0_CMD_SWREG7_SW_AXI_TOTALR_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG7_SW_AXI_TOTALR_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG8 */
#define DCORE0_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG8_SW_AXI_TOTALAR_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG9 */
#define DCORE0_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG9_SW_AXI_TOTALRLAST_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG10 */
#define DCORE0_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG10_SW_AXI_TOTALAWLEN_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG11 */
#define DCORE0_DEC0_CMD_SWREG11_SW_AXI_TOTALW_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG11_SW_AXI_TOTALW_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG12 */
#define DCORE0_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG12_SW_AXI_TOTALAW_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG13 */
#define DCORE0_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG13_SW_AXI_TOTALWLAST_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG14 */
#define DCORE0_DEC0_CMD_SWREG14_SW_AXI_TOTALB_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG14_SW_AXI_TOTALB_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG15 */
#define DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK 0x7
#define DCORE0_DEC0_CMD_SWREG15_RSV_SHIFT 3
#define DCORE0_DEC0_CMD_SWREG15_RSV_MASK 0x3FFFF8
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BREADY_SHIFT 22
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BREADY_MASK 0x400000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BVALID_SHIFT 23
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_BVALID_MASK 0x800000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WREADY_SHIFT 24
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WREADY_MASK 0x1000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WVALID_SHIFT 25
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_WVALID_MASK 0x2000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWREADY_SHIFT 26
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWREADY_MASK 0x4000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWVALID_SHIFT 27
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_AWVALID_MASK 0x8000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RREADY_SHIFT 28
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RREADY_MASK 0x10000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RVALID_SHIFT 29
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_RVALID_MASK 0x20000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARREADY_SHIFT 30
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARREADY_MASK 0x40000000
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARVALID_SHIFT 31
#define DCORE0_DEC0_CMD_SWREG15_SW_AXI_ARVALID_MASK 0x80000000
/* DCORE0_DEC0_CMD_SWREG16 */
#define DCORE0_DEC0_CMD_SWREG16_SW_START_TRIGGER_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG16_SW_START_TRIGGER_MASK 0x1
#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_ALL_SHIFT 1
#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_ALL_MASK 0x2
#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_CORE_SHIFT 2
#define DCORE0_DEC0_CMD_SWREG16_SW_RESET_CORE_MASK 0x4
#define DCORE0_DEC0_CMD_SWREG16_SW_ABORT_MODE_SHIFT 3
#define DCORE0_DEC0_CMD_SWREG16_SW_ABORT_MODE_MASK 0x8
#define DCORE0_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_SHIFT 4
#define DCORE0_DEC0_CMD_SWREG16_SW_CORE_CLK_GATE_DISABLE_MASK 0x10
#define DCORE0_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_SHIFT 5
#define DCORE0_DEC0_CMD_SWREG16_SW_MASTER_OUT_CLK_GATE_DISABLE_MASK 0x20
#define DCORE0_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_SHIFT 6
#define DCORE0_DEC0_CMD_SWREG16_SW_AXI_CLK_GATE_DISABLE_MASK 0x40
#define DCORE0_DEC0_CMD_SWREG16_RSV_SHIFT 7
#define DCORE0_DEC0_CMD_SWREG16_RSV_MASK 0xFFFFFF80
/* DCORE0_DEC0_CMD_SWREG17 */
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ENDCMD_MASK 0x1
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_SHIFT 1
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_BUSERR_MASK 0x2
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_SHIFT 2
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_TIMEOUT_MASK 0x4
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_SHIFT 3
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_CMDERR_MASK 0x8
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ABORT_SHIFT 4
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_ABORT_MASK 0x10
#define DCORE0_DEC0_CMD_SWREG17_RSV_1_SHIFT 5
#define DCORE0_DEC0_CMD_SWREG17_RSV_1_MASK 0x20
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_JMP_SHIFT 6
#define DCORE0_DEC0_CMD_SWREG17_SW_IRQ_JMP_MASK 0x40
#define DCORE0_DEC0_CMD_SWREG17_RSV_SHIFT 7
#define DCORE0_DEC0_CMD_SWREG17_RSV_MASK 0xFFFFFF80
/* DCORE0_DEC0_CMD_SWREG18 */
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ENDCMD_EN_MASK 0x1
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_SHIFT 1
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_BUSERR_EN_MASK 0x2
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_SHIFT 2
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_TIMEOUT_EN_MASK 0x4
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_SHIFT 3
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_CMDERR_EN_MASK 0x8
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_SHIFT 4
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_ABORT_EN_MASK 0x10
#define DCORE0_DEC0_CMD_SWREG18_RSV_1_SHIFT 5
#define DCORE0_DEC0_CMD_SWREG18_RSV_1_MASK 0x20
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_SHIFT 6
#define DCORE0_DEC0_CMD_SWREG18_SW_IRQ_JMP_EN_MASK 0x40
#define DCORE0_DEC0_CMD_SWREG18_RSV_SHIFT 7
#define DCORE0_DEC0_CMD_SWREG18_RSV_MASK 0xFFFFFF80
/* DCORE0_DEC0_CMD_SWREG19 */
#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_CYCLES_MASK 0x7FFFFFFF
#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_SHIFT 31
#define DCORE0_DEC0_CMD_SWREG19_SW_TIMEOUT_ENABLE_MASK 0x80000000
/* DCORE0_DEC0_CMD_SWREG20 */
#define DCORE0_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG20_SW_CMDBUF_EXE_ADDR_LSB_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG21 */
#define DCORE0_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG21_SW_CMDBUF_EXE_ADDR_MSB_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG22 */
#define DCORE0_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG22_SW_CMDBUF_EXE_LENGTH_MASK 0xFFFF
#define DCORE0_DEC0_CMD_SWREG22_RSV_SHIFT 16
#define DCORE0_DEC0_CMD_SWREG22_RSV_MASK 0xFFFF0000
/* DCORE0_DEC0_CMD_SWREG23 */
#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_WR_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_WR_MASK 0xFF
#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_RD_SHIFT 8
#define DCORE0_DEC0_CMD_SWREG23_SW_AXI_ID_RD_MASK 0xFF00
#define DCORE0_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_SHIFT 16
#define DCORE0_DEC0_CMD_SWREG23_SW_MAX_BURST_LEN_MASK 0xFF0000
#define DCORE0_DEC0_CMD_SWREG23_RSV_SHIFT 24
#define DCORE0_DEC0_CMD_SWREG23_RSV_MASK 0xF000000
#define DCORE0_DEC0_CMD_SWREG23_SW_CMD_SWAP_SHIFT 28
#define DCORE0_DEC0_CMD_SWREG23_SW_CMD_SWAP_MASK 0xF0000000
/* DCORE0_DEC0_CMD_SWREG24 */
#define DCORE0_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG24_SW_RDY_CMDBUF_COUNT_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG25 */
#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_NORM_INTR_GATE_MASK 0xFFFF
#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_SHIFT 16
#define DCORE0_DEC0_CMD_SWREG25_SW_EXT_ABN_INTR_GATE_MASK 0xFFFF0000
/* DCORE0_DEC0_CMD_SWREG26 */
#define DCORE0_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG26_SW_CMDBUF_EXE_ID_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG64 */
#define DCORE0_DEC0_CMD_SWREG64_SW_DUMMY0_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG64_SW_DUMMY0_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG65 */
#define DCORE0_DEC0_CMD_SWREG65_SW_DUMMY1_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG65_SW_DUMMY1_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG66 */
#define DCORE0_DEC0_CMD_SWREG66_SW_DUMMY2_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG66_SW_DUMMY2_MASK 0xFFFFFFFF
/* DCORE0_DEC0_CMD_SWREG67 */
#define DCORE0_DEC0_CMD_SWREG67_SW_DUMMY3_SHIFT 0
#define DCORE0_DEC0_CMD_SWREG67_SW_DUMMY3_MASK 0xFFFFFFFF
#endif /* ASIC_REG_DCORE0_DEC0_CMD_MASKS_H_ */

View File

@@ -0,0 +1,85 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */
/*
 * MMIO addresses for the DCORE0 decoder-0 command unit (prototype:
 * VSI_CMD), base 0x41E0000 with a 4-byte register stride.  The SWREG
 * numbering is sparse: SWREG27..SWREG63 are not defined, and
 * SWREG64..SWREG67 start at offset 0x100 from the base.
 * Generated file - do not hand-edit the definitions below.
 */
/************************************
** This is an auto-generated file **
** DO NOT EDIT BELOW **
************************************/
#ifndef ASIC_REG_DCORE0_DEC0_CMD_REGS_H_
#define ASIC_REG_DCORE0_DEC0_CMD_REGS_H_
/*
*****************************************
* DCORE0_DEC0_CMD
* (Prototype: VSI_CMD)
*****************************************
*/
#define mmDCORE0_DEC0_CMD_SWREG0 0x41E0000
#define mmDCORE0_DEC0_CMD_SWREG1 0x41E0004
#define mmDCORE0_DEC0_CMD_SWREG2 0x41E0008
#define mmDCORE0_DEC0_CMD_SWREG3 0x41E000C
#define mmDCORE0_DEC0_CMD_SWREG4 0x41E0010
#define mmDCORE0_DEC0_CMD_SWREG5 0x41E0014
#define mmDCORE0_DEC0_CMD_SWREG6 0x41E0018
#define mmDCORE0_DEC0_CMD_SWREG7 0x41E001C
#define mmDCORE0_DEC0_CMD_SWREG8 0x41E0020
#define mmDCORE0_DEC0_CMD_SWREG9 0x41E0024
#define mmDCORE0_DEC0_CMD_SWREG10 0x41E0028
#define mmDCORE0_DEC0_CMD_SWREG11 0x41E002C
#define mmDCORE0_DEC0_CMD_SWREG12 0x41E0030
#define mmDCORE0_DEC0_CMD_SWREG13 0x41E0034
#define mmDCORE0_DEC0_CMD_SWREG14 0x41E0038
#define mmDCORE0_DEC0_CMD_SWREG15 0x41E003C
#define mmDCORE0_DEC0_CMD_SWREG16 0x41E0040
#define mmDCORE0_DEC0_CMD_SWREG17 0x41E0044
#define mmDCORE0_DEC0_CMD_SWREG18 0x41E0048
#define mmDCORE0_DEC0_CMD_SWREG19 0x41E004C
#define mmDCORE0_DEC0_CMD_SWREG20 0x41E0050
#define mmDCORE0_DEC0_CMD_SWREG21 0x41E0054
#define mmDCORE0_DEC0_CMD_SWREG22 0x41E0058
#define mmDCORE0_DEC0_CMD_SWREG23 0x41E005C
#define mmDCORE0_DEC0_CMD_SWREG24 0x41E0060
#define mmDCORE0_DEC0_CMD_SWREG25 0x41E0064
#define mmDCORE0_DEC0_CMD_SWREG26 0x41E0068
#define mmDCORE0_DEC0_CMD_SWREG64 0x41E0100
#define mmDCORE0_DEC0_CMD_SWREG65 0x41E0104
#define mmDCORE0_DEC0_CMD_SWREG66 0x41E0108
#define mmDCORE0_DEC0_CMD_SWREG67 0x41E010C
#endif /* ASIC_REG_DCORE0_DEC0_CMD_REGS_H_ */

Some files were not shown because too many files have changed in this diff Show More