Merge tag 'pull-ppc-20250721' of https://github.com/legoater/qemu into staging

ppc/xive queue:

* Various bug fixes, particularly around lost interrupts.
* Major group interrupt work, in particular around redistributing
  interrupts. Upstream group support is not yet in a complete or
  usable state.
* Significant context push/pull improvements; pool and phys context
  handling in particular was quite incomplete beyond the trivial OPAL
  case that pushes at boot.
* Improved tracing and checking for unimplemented and guest-error
  situations.
* Support for various other missing features.

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEoPZlSPBIlev+awtgUaNDx8/77KEFAmh951cACgkQUaNDx8/7
# 7KFK6w//SAmZpNmE380UN4OxMBcjsT5m5Cf2hy+Wq9pSEcwWckBFT03HyR86JAv3
# QLR1d6yx7dY0aVWAHtFC24vlU2jpv0Io97wfX9VbgG7e4TY/i1vRMSXYYehXuU/Y
# gLrwuJGxAMKWrd+4ymvHOyXHRAq3LMGQQYfqLCB77b8UJ18JyCL8FwAl/D6EsZ1y
# nUW8WlDy6qQ/SJQHZZ664kyJEv7Qw4xd81ZnmoPsy3xVd7c4ASNBWvDTjRoUn2EN
# sfJW76UqqFn3EqASaKsqoNPHu3kklQ/AX3KlE1wFCBjYoXwl/051wIX4RIb+b2S4
# SLtc/YSAie1n2Pp1sghfLRFiRpjrmnqaLlw04Buw1TXY2OaQbFc9zTkc9rvFSez1
# cNjdJcvm3myAWy2Pg//Nt3FgCqfMlrrdTlyGsdqmrEaplBy6pHnas+82o5tPGC3t
# SBMgTDqNMq0v/V/gOIsmHc5/9f+FS5s+v/nvm0xJDfLkY39qP73W+YZllYyyuTHY
# HiLVjD7x5BSGZAsP9EN6EnL7DPXKPIIQSfNwo2564tAhe3/IyJo8hpGhMeiZ83Hf
# G9oPiLa4YljsHzP0UPRNhID5IYyngEDoh2j3AXnew1tkikHd5LIpNCdbtW5x52RR
# kik4hBmqJU6sYpO0O9yCd6YWv/Bpm4bDs6tQOSWMc6uWqP0qN8M=
# =65BL
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 21 Jul 2025 03:08:07 EDT
# gpg:                using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1
# gpg: Good signature from "Cédric Le Goater <clg@redhat.com>" [full]
# gpg:                 aka "Cédric Le Goater <clg@kaod.org>" [full]
# Primary key fingerprint: A0F6 6548 F048 95EB FE6B  0B60 51A3 43C7 CFFB ECA1

* tag 'pull-ppc-20250721' of https://github.com/legoater/qemu: (50 commits)
  ppc/xive2: Enable lower level contexts on VP push
  ppc/xive: Split need_resend into restore_nvp
  ppc/xive2: Implement PHYS ring VP push TIMA op
  ppc/xive2: Implement POOL LGS push TIMA op
  ppc/xive2: Implement set_os_pending TIMA op
  ppc/xive2: redistribute group interrupts on context push
  ppc/xive2: Implement pool context push TIMA op
  ppc/xive: Check TIMA operations validity
  ppc/xive: Redistribute phys after pulling of pool context
  ppc/xive2: Prevent pulling of pool context losing phys interrupt
  ppc/xive2: implement NVP context save restore for POOL ring
  ppc/xive: Assert group interrupts were redistributed
  ppc/xive2: Avoid needless interrupt re-check on CPPR set
  ppc/xive2: Consolidate presentation processing in context push
  ppc/xive2: split tctx presentation processing from set CPPR
  ppc/xive: Add xive_tctx_pipr_set() helper function
  ppc/xive: tctx_accept only lower irq line if an interrupt was presented
  ppc/xive: tctx signaling registers rework
  ppc/xive: Split xive recompute from IPB function
  ppc/xive: Fix high prio group interrupt being preempted by low prio VP
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Stefan Hajnoczi 2025-07-21 14:55:31 -04:00
commit 4bc8fb0135
12 changed files with 1148 additions and 494 deletions
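
A recurring refactor across the file diffs below changes the presenter
matching hooks (the match_nvt methods) from returning an int count to
returning bool, with results accumulated in the XiveTCTXMatch structure
(tctx, ring, count, precluded). The standalone sketch below is
illustrative only -- the FakeTCTX/TCTXMatch types and fake_match_nvt()
are stand-ins, not QEMU code -- but it shows the shape of the new
pattern: the first matching thread context is kept for delivery and any
further matches are only counted.

/*
 * Illustrative sketch of the new match_nvt() calling convention.
 * All types below are stand-ins for the QEMU ones.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct FakeTCTX {
    int cpu_index;
} FakeTCTX;

typedef struct TCTXMatch {      /* stand-in for XiveTCTXMatch */
    FakeTCTX *tctx;
    int count;
    uint8_t ring;
    bool precluded;
} TCTXMatch;

/* A fake presenter: record every thread context that matches nvt_idx */
static bool fake_match_nvt(FakeTCTX *threads, int nr, uint32_t nvt_idx,
                           TCTXMatch *match)
{
    for (int i = 0; i < nr; i++) {
        if ((uint32_t)threads[i].cpu_index == nvt_idx) {
            if (!match->tctx) {
                /* first hit wins the delivery */
                match->tctx = &threads[i];
                match->ring = 0x10;     /* e.g. TM_QW1_OS */
            }
            /* duplicates are only counted (a guest error in the real code) */
            match->count++;
        }
    }
    return match->count != 0;
}

int main(void)
{
    FakeTCTX threads[] = { { 0 }, { 1 }, { 1 } };   /* duplicate on purpose */
    TCTXMatch match = { 0 };

    if (fake_match_nvt(threads, 3, 1, &match)) {
        printf("matched cpu %d on ring 0x%x, count=%d\n",
               match.tctx->cpu_index, match.ring, match.count);
    }
    return 0;
}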


@ -470,14 +470,13 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu)
return xive->regs[reg >> 3] & PPC_BIT(bit);
}
static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
static bool pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive *xive = PNV_XIVE(xptr);
PnvChip *chip = xive->chip;
int count = 0;
int i, j;
for (i = 0; i < chip->nr_cores; i++) {
@ -510,17 +509,18 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format,
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
"thread context NVT %x/%x\n",
nvt_blk, nvt_idx);
return -1;
match->count++;
continue;
}
match->ring = ring;
match->tctx = tctx;
count++;
match->count++;
}
}
}
return count;
return !!match->count;
}
static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr)


@ -101,12 +101,10 @@ static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
}
/*
* Remote access to controllers. HW uses MMIOs. For now, a simple scan
* of the chips is good enough.
*
* TODO: Block scope support
* Remote access to INT controllers. HW uses MMIOs(?). For now, a simple
* scan of all the chips INT controller is good enough.
*/
static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
static PnvXive2 *pnv_xive2_get_remote(uint32_t vsd_type, hwaddr fwd_addr)
{
PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
int i;
@ -115,10 +113,23 @@ static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
PnvXive2 *xive = &chip10->xive;
if (pnv_xive2_block_id(xive) == blk) {
/*
* Is this the XIVE matching the forwarded VSD address is for this
* VSD type
*/
if ((vsd_type == VST_ESB && fwd_addr == xive->esb_base) ||
(vsd_type == VST_END && fwd_addr == xive->end_base) ||
((vsd_type == VST_NVP ||
vsd_type == VST_NVG) && fwd_addr == xive->nvpg_base) ||
(vsd_type == VST_NVC && fwd_addr == xive->nvc_base)) {
return xive;
}
}
qemu_log_mask(LOG_GUEST_ERROR,
"XIVE: >>>>> %s vsd_type %u fwd_addr 0x%"HWADDR_PRIx
" NOT FOUND\n",
__func__, vsd_type, fwd_addr);
return NULL;
}
@ -251,8 +262,7 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
/* Remote VST access */
if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
xive = pnv_xive2_get_remote(blk);
xive = pnv_xive2_get_remote(type, (vsd & VSD_ADDRESS_MASK));
return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
}
@ -595,20 +605,28 @@ static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
{
PnvXive2 *xive = PNV_XIVE2(xrtr);
uint32_t cfg = 0;
uint64_t reg = xive->cq_regs[CQ_XIVE_CFG >> 3];
if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
if (reg & CQ_XIVE_CFG_GEN1_TIMA_OS) {
cfg |= XIVE2_GEN1_TIMA_OS;
}
if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
if (reg & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
cfg |= XIVE2_VP_SAVE_RESTORE;
}
if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, reg) ==
CQ_XIVE_CFG_THREADID_8BITS) {
cfg |= XIVE2_THREADID_8BITS;
}
if (reg & CQ_XIVE_CFG_EN_VP_GRP_PRIORITY) {
cfg |= XIVE2_EN_VP_GRP_PRIORITY;
}
cfg = SETFIELD(XIVE2_VP_INT_PRIO, cfg,
GETFIELD(CQ_XIVE_CFG_VP_INT_PRIO, reg));
return cfg;
}
@ -622,24 +640,28 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
}
static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
static bool pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
PnvXive2 *xive = PNV_XIVE2(xptr);
PnvChip *chip = xive->chip;
int count = 0;
int i, j;
bool gen1_tima_os =
xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
static int next_start_core;
static int next_start_thread;
int start_core = next_start_core;
int start_thread = next_start_thread;
for (i = 0; i < chip->nr_cores; i++) {
PnvCore *pc = chip->cores[i];
PnvCore *pc = chip->cores[(i + start_core) % chip->nr_cores];
CPUCore *cc = CPU_CORE(pc);
for (j = 0; j < cc->nr_threads; j++) {
PowerPCCPU *cpu = pc->threads[j];
/* Start search for match with different thread each call */
PowerPCCPU *cpu = pc->threads[(j + start_thread) % cc->nr_threads];
XiveTCTX *tctx;
int ring;
@ -669,7 +691,8 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
"thread context NVT %x/%x\n",
nvt_blk, nvt_idx);
/* Should set a FIR if we ever model it */
return -1;
match->count++;
continue;
}
/*
* For a group notification, we need to know if the
@ -684,14 +707,23 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
if (!match->tctx) {
match->ring = ring;
match->tctx = tctx;
next_start_thread = j + start_thread + 1;
if (next_start_thread >= cc->nr_threads) {
next_start_thread = 0;
next_start_core = i + start_core + 1;
if (next_start_core >= chip->nr_cores) {
next_start_core = 0;
}
}
}
count++;
match->count++;
}
}
}
}
return count;
return !!match->count;
}
static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr)
@ -1173,7 +1205,8 @@ static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
case CQ_FIRMASK_OR: /* FIR error reporting */
break;
default:
xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx" value 0x%"PRIx64,
offset, val);
return;
}
@ -1304,7 +1337,6 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
case VC_ENDC_WATCH2_SPEC:
case VC_ENDC_WATCH3_SPEC:
watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6;
xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
pnv_xive2_endc_cache_watch_release(xive, watch_engine);
val = xive->vc_regs[reg];
break;
@ -1315,10 +1347,11 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
case VC_ENDC_WATCH3_DATA0:
/*
* Load DATA registers from cache with data requested by the
* SPEC register
* SPEC register. Clear gen_flipped bit in word 1.
*/
watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6;
pnv_xive2_end_cache_load(xive, watch_engine);
xive->vc_regs[reg] &= ~(uint64_t)END2_W1_GEN_FLIPPED;
val = xive->vc_regs[reg];
break;
@ -1386,7 +1419,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/*
* ESB cache updates (not modeled)
*/
/* case VC_ESBC_FLUSH_CTRL: */
case VC_ESBC_FLUSH_CTRL:
if (val & VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
" value 0x%"PRIx64" bit[2] poll_want_cache_disable",
offset, val);
return;
}
break;
case VC_ESBC_FLUSH_POLL:
xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
/* ESB update */
@ -1402,7 +1442,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
/*
* EAS cache updates (not modeled)
*/
/* case VC_EASC_FLUSH_CTRL: */
case VC_EASC_FLUSH_CTRL:
if (val & VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
" value 0x%"PRIx64" bit[2] poll_want_cache_disable",
offset, val);
return;
}
break;
case VC_EASC_FLUSH_POLL:
xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
/* EAS update */
@ -1441,7 +1488,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
break;
/* case VC_ENDC_FLUSH_CTRL: */
case VC_ENDC_FLUSH_CTRL:
if (val & VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
" value 0x%"PRIx64" bit[2] poll_want_cache_disable",
offset, val);
return;
}
break;
case VC_ENDC_FLUSH_POLL:
xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
break;
@ -1470,7 +1524,8 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
break;
default:
xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "VC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64,
offset, val);
return;
}
@ -1661,7 +1716,14 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
pnv_xive2_nxc_update(xive, watch_engine);
break;
/* case PC_NXC_FLUSH_CTRL: */
case PC_NXC_FLUSH_CTRL:
if (val & PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE) {
xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx
" value 0x%"PRIx64" bit[2] poll_want_cache_disable",
offset, val);
return;
}
break;
case PC_NXC_FLUSH_POLL:
xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
break;
@ -1678,7 +1740,8 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
break;
default:
xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "PC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64,
offset, val);
return;
}
@ -1765,7 +1828,8 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
xive->tctxt_regs[reg] = val;
break;
default:
xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "TCTXT: invalid write @0x%"HWADDR_PRIx
" data 0x%"PRIx64, offset, val);
return;
}
}
@ -1836,7 +1900,8 @@ static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
break;
default:
xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx
" value 0x%"PRIx64, offset, val);
}
}
@ -1904,7 +1969,8 @@ static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
break;
default:
xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx
" value 0x%"PRIx64, offset, val);
}
}
@ -1946,7 +2012,8 @@ static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
{
PnvXive2 *xive = PNV_XIVE2(opaque);
xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64,
offset, val);
}
static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
@ -2049,7 +2116,8 @@ static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI;
break;
default:
xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64,
offset, val);
return;
}
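
One behavioural change in the hunks above is that pnv_xive2_match_nvt()
now starts its core/thread scan from a remembered position, so that
successive group deliveries are spread across threads instead of always
landing on the first match. A minimal standalone sketch of that
round-robin scan pattern follows; the core/thread counts and the
matches() predicate are made up for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NR_CORES   4
#define NR_THREADS 2

/* Remember where the last match was found, as in the diff above */
static int next_start_core;
static int next_start_thread;

static bool matches(int core, int thread)
{
    (void)core;
    (void)thread;
    return true;                /* pretend every thread is a candidate */
}

static void find_target(int *out_core, int *out_thread)
{
    for (int i = 0; i < NR_CORES; i++) {
        int core = (i + next_start_core) % NR_CORES;
        for (int j = 0; j < NR_THREADS; j++) {
            int thread = (j + next_start_thread) % NR_THREADS;
            if (matches(core, thread)) {
                *out_core = core;
                *out_thread = thread;
                /* the next search starts just past this hit */
                next_start_thread = thread + 1;
                if (next_start_thread >= NR_THREADS) {
                    next_start_thread = 0;
                    next_start_core = (core + 1) % NR_CORES;
                }
                return;
            }
        }
    }
    *out_core = -1;
    *out_thread = -1;
}

int main(void)
{
    for (int n = 0; n < 5; n++) {
        int c, t;
        find_target(&c, &t);
        printf("delivery %d -> core %d thread %d\n", n, c, t);
    }
    return 0;
}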


@ -66,6 +66,7 @@
#define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */
#define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */
#define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28)
#define CQ_XIVE_CFG_EN_VP_GRP_PRIORITY PPC_BIT(32) /* 0 if bit[25]=1 */
#define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */
#define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */


@ -428,14 +428,13 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk,
g_assert_not_reached();
}
static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore,
uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
static bool spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore,
uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
CPUState *cs;
int count = 0;
CPU_FOREACH(cs) {
PowerPCCPU *cpu = POWERPC_CPU(cs);
@ -463,16 +462,17 @@ static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format,
if (match->tctx) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread "
"context NVT %x/%x\n", nvt_blk, nvt_idx);
return -1;
match->count++;
continue;
}
match->ring = ring;
match->tctx = tctx;
count++;
match->count++;
}
}
return count;
return !!match->count;
}
static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr)


@ -274,11 +274,13 @@ kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device"
kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x"
# xive.c
xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK"
xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !"
xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x"
xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK"
xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !"
xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x"
xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64
xive_source_notify(uint32_t srcno) "Processing notification for queued IRQ 0x%x"
xive_source_blocked(uint32_t srcno) "No action needed for IRQ 0x%x currently"
xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x"
xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
@ -289,6 +291,10 @@ xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x
# xive2.c
xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d"
xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d"
xive_redistribute(uint32_t index, uint8_t ring, uint8_t end_blk, uint32_t end_idx) "Redistribute from target=%d ring=0x%x NVP 0x%x/0x%x"
xive_end_enqueue(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "Queue event for END 0x%x/0x%x data=0x%x"
xive_escalate_end(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t esc_data) "Escalate from END 0x%x/0x%x to END 0x%x/0x%x data=0x%x"
xive_escalate_esb(uint8_t end_blk, uint32_t end_idx, uint32_t lisn) "Escalate from END 0x%x/0x%x to LISN=0x%x"
# pnv_xive.c
pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64


@ -25,6 +25,58 @@
/*
* XIVE Thread Interrupt Management context
*/
bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring)
{
uint8_t cur_ring;
for (cur_ring = ring; cur_ring <= TM_QW3_HV_PHYS;
cur_ring += XIVE_TM_RING_SIZE) {
if (!(tctx->regs[cur_ring + TM_WORD2] & 0x80)) {
return false;
}
}
return true;
}
bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr)
{
switch (ring) {
case TM_QW1_OS:
return !!(nsr & TM_QW1_NSR_EO);
case TM_QW2_HV_POOL:
case TM_QW3_HV_PHYS:
return !!(nsr & TM_QW3_NSR_HE);
default:
g_assert_not_reached();
}
}
bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr)
{
if ((nsr & TM_NSR_GRP_LVL) > 0) {
g_assert(xive_nsr_indicates_exception(ring, nsr));
return true;
}
return false;
}
uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr)
{
/* NSR determines if pool/phys ring is for phys or pool interrupt */
if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) {
uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6;
if (he == TM_QW3_NSR_HE_PHYS) {
return TM_QW3_HV_PHYS;
} else if (he == TM_QW3_NSR_HE_POOL) {
return TM_QW2_HV_POOL;
} else {
/* Don't support LSI mode */
g_assert_not_reached();
}
}
return ring;
}
static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
{
@ -41,74 +93,83 @@ static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
}
}
static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
/*
* interrupt is accepted on the presentation ring, for PHYS ring the NSR
* directs it to the PHYS or POOL rings.
*/
uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t sig_ring)
{
uint8_t *regs = &tctx->regs[ring];
uint8_t nsr = regs[TM_NSR];
uint8_t *sig_regs = &tctx->regs[sig_ring];
uint8_t nsr = sig_regs[TM_NSR];
qemu_irq_lower(xive_tctx_output(tctx, ring));
g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS);
if (regs[TM_NSR] != 0) {
uint8_t cppr = regs[TM_PIPR];
uint8_t alt_ring;
uint8_t *alt_regs;
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
/* POOL interrupt uses IPB in QW2, POOL ring */
if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) {
alt_ring = TM_QW2_HV_POOL;
} else {
alt_ring = ring;
}
alt_regs = &tctx->regs[alt_ring];
if (xive_nsr_indicates_exception(sig_ring, nsr)) {
uint8_t cppr = sig_regs[TM_PIPR];
uint8_t ring;
uint8_t *regs;
regs[TM_CPPR] = cppr;
ring = xive_nsr_exception_ring(sig_ring, nsr);
regs = &tctx->regs[ring];
sig_regs[TM_CPPR] = cppr;
/*
* If the interrupt was for a specific VP, reset the pending
* buffer bit, otherwise clear the logical server indicator
*/
if (regs[TM_NSR] & TM_NSR_GRP_LVL) {
regs[TM_NSR] &= ~TM_NSR_GRP_LVL;
} else {
alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
if (!xive_nsr_indicates_group_exception(sig_ring, nsr)) {
regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
}
/* Drop the exception bit and any group/crowd */
regs[TM_NSR] = 0;
/* Clear the exception from NSR */
sig_regs[TM_NSR] = 0;
qemu_irq_lower(xive_tctx_output(tctx, sig_ring));
trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
alt_regs[TM_IPB], regs[TM_PIPR],
regs[TM_CPPR], regs[TM_NSR]);
trace_xive_tctx_accept(tctx->cs->cpu_index, ring,
regs[TM_IPB], sig_regs[TM_PIPR],
sig_regs[TM_CPPR], sig_regs[TM_NSR]);
}
return ((uint64_t)nsr << 8) | regs[TM_CPPR];
return ((uint64_t)nsr << 8) | sig_regs[TM_CPPR];
}
void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
/* Change PIPR and calculate NSR and irq based on PIPR, CPPR, group */
void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t pipr,
uint8_t group_level)
{
/* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
uint8_t *alt_regs = &tctx->regs[alt_ring];
uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
uint8_t *regs = &tctx->regs[ring];
if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR]));
sig_regs[TM_PIPR] = pipr;
if (pipr < sig_regs[TM_CPPR]) {
switch (ring) {
case TM_QW1_OS:
regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
sig_regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
break;
case TM_QW2_HV_POOL:
alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
sig_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
break;
case TM_QW3_HV_PHYS:
regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
sig_regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
break;
default:
g_assert_not_reached();
}
trace_xive_tctx_notify(tctx->cs->cpu_index, ring,
regs[TM_IPB], alt_regs[TM_PIPR],
alt_regs[TM_CPPR], alt_regs[TM_NSR]);
regs[TM_IPB], pipr,
sig_regs[TM_CPPR], sig_regs[TM_NSR]);
qemu_irq_raise(xive_tctx_output(tctx, ring));
} else {
sig_regs[TM_NSR] = 0;
qemu_irq_lower(xive_tctx_output(tctx, ring));
}
}
@ -124,25 +185,32 @@ void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring)
static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
{
uint8_t *regs = &tctx->regs[ring];
uint8_t *sig_regs = &tctx->regs[ring];
uint8_t pipr_min;
uint8_t ring_min;
g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
/* XXX: should show pool IPB for PHYS ring */
trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring,
regs[TM_IPB], regs[TM_PIPR],
cppr, regs[TM_NSR]);
sig_regs[TM_IPB], sig_regs[TM_PIPR],
cppr, sig_regs[TM_NSR]);
if (cppr > XIVE_PRIORITY_MAX) {
cppr = 0xff;
}
tctx->regs[ring + TM_CPPR] = cppr;
sig_regs[TM_CPPR] = cppr;
/*
* Recompute the PIPR based on local pending interrupts. The PHYS
* ring must take the minimum of both the PHYS and POOL PIPR values.
*/
pipr_min = xive_ipb_to_pipr(regs[TM_IPB]);
pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]);
ring_min = ring;
/* PHYS updates also depend on POOL values */
@ -151,7 +219,6 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
/* POOL values only matter if POOL ctx is valid */
if (pool_regs[TM_WORD2] & 0x80) {
uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]);
/*
@ -165,30 +232,39 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
}
}
regs[TM_PIPR] = pipr_min;
/* CPPR has changed, check if we need to raise a pending exception */
xive_tctx_notify(tctx, ring_min, 0);
/* CPPR has changed, this may present or preclude a pending exception */
xive_tctx_pipr_set(tctx, ring_min, pipr_min, 0);
}
void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
uint8_t group_level)
{
/* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
uint8_t *alt_regs = &tctx->regs[alt_ring];
static void xive_tctx_pipr_recompute_from_ipb(XiveTCTX *tctx, uint8_t ring)
{
uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
uint8_t *regs = &tctx->regs[ring];
/* Does not support a presented group interrupt */
g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR]));
xive_tctx_pipr_set(tctx, ring, xive_ipb_to_pipr(regs[TM_IPB]), 0);
}
void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
uint8_t group_level)
{
uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring);
uint8_t *regs = &tctx->regs[ring];
uint8_t pipr = xive_priority_to_pipr(priority);
if (group_level == 0) {
/* VP-specific */
regs[TM_IPB] |= xive_priority_to_ipb(priority);
alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
} else {
/* VP-group */
alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
if (pipr >= sig_regs[TM_PIPR]) {
/* VP interrupts can come here with lower priority than PIPR */
return;
}
}
xive_tctx_notify(tctx, ring, group_level);
}
g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB]));
g_assert(pipr < sig_regs[TM_PIPR]);
xive_tctx_pipr_set(tctx, ring, pipr, group_level);
}
/*
* XIVE Thread Interrupt Management Area (TIMA)
@ -206,25 +282,78 @@ static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx,
return xive_tctx_accept(tctx, TM_QW3_HV_PHYS);
}
static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk,
uint32_t *nvt_idx, bool *vp)
{
if (nvt_blk) {
*nvt_blk = xive_nvt_blk(cam);
}
if (nvt_idx) {
*nvt_idx = xive_nvt_idx(cam);
}
if (vp) {
*vp = !!(cam & TM_QW2W2_VP);
}
}
static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk,
uint32_t *nvt_idx, bool *vp)
{
uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
uint32_t cam = be32_to_cpu(qw2w2);
xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp);
return qw2w2;
}
static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2)
{
memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
}
static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size)
{
uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
uint32_t qw2w2;
uint32_t qw2w2_new;
uint8_t nvt_blk;
uint32_t nvt_idx;
bool vp;
qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp);
if (!vp) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n",
nvt_blk, nvt_idx);
}
/* Invalidate CAM line */
qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0);
xive_tctx_set_pool_cam(tctx, qw2w2_new);
xive_tctx_reset_signal(tctx, TM_QW1_OS);
xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL);
/* Re-check phys for interrupts if pool was disabled */
xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW3_HV_PHYS);
qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0);
memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4);
return qw2w2;
}
static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size)
{
uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
uint8_t qw3b8;
uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
uint8_t qw3b8_new;
qw3b8 = qw3b8_prev & ~TM_QW3B8_VT;
tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8;
qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2];
if (!(qw3b8 & TM_QW3B8_VT)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n");
}
qw3b8_new = qw3b8 & ~TM_QW3B8_VT;
tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new;
xive_tctx_reset_signal(tctx, TM_QW1_OS);
xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS);
return qw3b8;
}
@ -255,14 +384,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx,
static const uint8_t xive_tm_hw_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */
0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */
};
static const uint8_t xive_tm_hv_view[] = {
3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */
3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */
0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */
3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */
};
@ -326,7 +455,7 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value,
*/
if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%"
HWADDR_PRIx"\n", offset);
HWADDR_PRIx" size %d\n", offset, size);
return;
}
@ -357,7 +486,7 @@ static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size)
*/
if (size < 4 || !mask || ring_offset == TM_QW0_USER) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%"
HWADDR_PRIx"\n", offset);
HWADDR_PRIx" size %d\n", offset, size);
return -1;
}
@ -403,6 +532,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff);
}
static void xive_tm_set_pool_lgs(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive_tctx_set_lgs(tctx, TM_QW2_HV_POOL, value & 0xff);
}
/*
* Adjust the PIPR to allow a CPU to process event queues of other
* priorities during one physical interrupt cycle.
@ -410,7 +545,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size)
{
xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0);
uint8_t ring = TM_QW1_OS;
uint8_t *regs = &tctx->regs[ring];
/* XXX: how should this work exactly? */
regs[TM_IPB] |= xive_priority_to_ipb(value & 0xff);
xive_tctx_pipr_recompute_from_ipb(tctx, ring);
}
static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
@ -454,7 +594,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo);
if (!vo) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n",
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n",
nvt_blk, nvt_idx);
}
@ -466,7 +606,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
return qw1w2;
}
static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
static void xive_tctx_restore_nvp(XiveRouter *xrtr, XiveTCTX *tctx,
uint8_t nvt_blk, uint32_t nvt_idx)
{
XiveNVT nvt;
@ -492,16 +632,6 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
uint8_t *regs = &tctx->regs[TM_QW1_OS];
regs[TM_IPB] |= ipb;
}
/*
* Always call xive_tctx_pipr_update(). Even if there were no
* escalation triggered, there could be a pending interrupt which
* was saved when the context was pulled and that we need to take
* into account by recalculating the PIPR (which is not
* saved/restored).
* It will also raise the External interrupt signal if needed.
*/
xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */
}
/*
@ -523,7 +653,17 @@ static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
/* Check the interrupt pending bits */
if (vo) {
xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
xive_tctx_restore_nvp(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx);
/*
* Always call xive_tctx_recompute_from_ipb(). Even if there were no
* escalation triggered, there could be a pending interrupt which
* was saved when the context was pulled and that we need to take
* into account by recalculating the PIPR (which is not
* saved/restored).
* It will also raise the External interrupt signal if needed.
*/
xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW1_OS); /* fxb */
}
}
@ -542,6 +682,8 @@ typedef struct XiveTmOp {
uint8_t page_offset;
uint32_t op_offset;
unsigned size;
bool hw_ok;
bool sw_ok;
void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset,
uint64_t value, unsigned size);
@ -554,34 +696,34 @@ static const XiveTmOp xive_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
xive_tm_vt_poll },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true,
xive_tm_set_os_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true,
xive_tm_push_os_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true,
xive_tm_set_hv_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true,
xive_tm_vt_push, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true,
NULL, xive_tm_vt_poll },
/* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
xive_tm_pull_phys_ctx },
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false,
NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false,
xive_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false,
NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false,
NULL, xive_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false,
NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false,
NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false,
NULL, xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false,
NULL, xive_tm_pull_phys_ctx },
};
static const XiveTmOp xive2_tm_operations[] = {
@ -589,50 +731,58 @@ static const XiveTmOp xive2_tm_operations[] = {
* MMIOs below 2K : raw values and special operations without side
* effects
*/
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx,
NULL },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push,
NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL,
xive_tm_vt_poll },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target,
NULL },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true,
xive2_tm_set_os_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true,
xive2_tm_push_os_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, true, true,
xive2_tm_push_os_ctx, NULL },
{ XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, true, true,
xive_tm_set_os_lgs, NULL },
{ XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 4, true, true,
xive2_tm_push_pool_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 8, true, true,
xive2_tm_push_pool_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_LGS, 1, true, true,
xive_tm_set_pool_lgs, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true,
xive2_tm_set_hv_cppr, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true,
xive2_tm_push_phys_ctx, NULL },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true,
NULL, xive_tm_vt_poll },
{ XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, true, true,
xive2_tm_set_hv_target, NULL },
/* MMIOs above 2K : special operations with side effects */
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL,
xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL,
xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL,
xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL,
xive_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol,
NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL,
xive_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL,
xive_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol,
NULL },
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false,
NULL, xive_tm_ack_os_reg },
{ XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false,
xive2_tm_set_os_pending, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, true, false,
NULL, xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false,
NULL, xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false,
NULL, xive2_tm_pull_os_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false,
NULL, xive_tm_ack_hv_reg },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, true, false,
NULL, xive2_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false,
NULL, xive2_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false,
NULL, xive2_tm_pull_pool_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, true, false,
xive2_tm_pull_os_ctx_ol, NULL },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, true, false,
NULL, xive2_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false,
NULL, xive2_tm_pull_phys_ctx },
{ XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, true, false,
xive2_tm_pull_phys_ctx_ol, NULL },
{ XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, true, false,
xive2_tm_ack_os_el, NULL },
};
static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset,
@ -674,21 +824,31 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size)
{
const XiveTmOp *xto;
uint8_t ring = offset & TM_RING_OFFSET;
bool is_valid = xive_ring_valid(tctx, ring);
bool hw_owned = is_valid;
trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value);
/*
* TODO: check V bit in Q[0-3]W2
*/
/*
* First, check for special operations in the 2K region
*/
xto = xive_tm_find_op(tctx->xptr, offset, size, true);
if (xto) {
if (hw_owned && !xto->hw_ok) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA "
"@%"HWADDR_PRIx" size %d\n", offset, size);
}
if (!hw_owned && !xto->sw_ok) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to SW TIMA "
"@%"HWADDR_PRIx" size %d\n", offset, size);
}
}
if (offset & TM_SPECIAL_OP) {
xto = xive_tm_find_op(tctx->xptr, offset, size, true);
if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA "
"@%"HWADDR_PRIx"\n", offset);
"@%"HWADDR_PRIx" size %d\n", offset, size);
} else {
xto->write_handler(xptr, tctx, offset, value, size);
}
@ -698,7 +858,6 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
/*
* Then, for special operations in the region below 2K.
*/
xto = xive_tm_find_op(tctx->xptr, offset, size, true);
if (xto) {
xto->write_handler(xptr, tctx, offset, value, size);
return;
@ -707,6 +866,11 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
/*
* Finish with raw access to the register values
*/
if (hw_owned) {
/* Store context operations are dangerous when context is valid */
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA "
"@%"HWADDR_PRIx" size %d\n", offset, size);
}
xive_tm_raw_write(tctx, offset, value, size);
}
@ -714,20 +878,30 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
unsigned size)
{
const XiveTmOp *xto;
uint8_t ring = offset & TM_RING_OFFSET;
bool is_valid = xive_ring_valid(tctx, ring);
bool hw_owned = is_valid;
uint64_t ret;
/*
* TODO: check V bit in Q[0-3]W2
*/
xto = xive_tm_find_op(tctx->xptr, offset, size, false);
if (xto) {
if (hw_owned && !xto->hw_ok) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to HW TIMA "
"@%"HWADDR_PRIx" size %d\n", offset, size);
}
if (!hw_owned && !xto->sw_ok) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to SW TIMA "
"@%"HWADDR_PRIx" size %d\n", offset, size);
}
}
/*
* First, check for special operations in the 2K region
*/
if (offset & TM_SPECIAL_OP) {
xto = xive_tm_find_op(tctx->xptr, offset, size, false);
if (!xto) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA"
"@%"HWADDR_PRIx"\n", offset);
"@%"HWADDR_PRIx" size %d\n", offset, size);
return -1;
}
ret = xto->read_handler(xptr, tctx, offset, size);
@ -737,7 +911,6 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
/*
* Then, for special operations in the region below 2K.
*/
xto = xive_tm_find_op(tctx->xptr, offset, size, false);
if (xto) {
ret = xto->read_handler(xptr, tctx, offset, size);
goto out;
@ -1191,6 +1364,7 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size)
/* Forward the source event notification for routing */
if (ret) {
trace_xive_source_notify(srcno);
xive_source_notify(xsrc, srcno);
}
break;
@ -1286,6 +1460,8 @@ out:
/* Forward the source event notification for routing */
if (notify) {
xive_source_notify(xsrc, srcno);
} else {
trace_xive_source_blocked(srcno);
}
}
@ -1672,8 +1848,8 @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index)
return 1U << (first_zero + 1);
}
static uint8_t xive_get_group_level(bool crowd, bool ignore,
uint32_t nvp_blk, uint32_t nvp_index)
uint8_t xive_get_group_level(bool crowd, bool ignore,
uint32_t nvp_blk, uint32_t nvp_index)
{
int first_zero;
uint8_t level;
@ -1791,15 +1967,14 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
* This is our simple Xive Presenter Engine model. It is merged in the
* Router as it does not require an extra object.
*/
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
bool xive_presenter_match(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, bool *precluded)
uint32_t logic_serv, XiveTCTXMatch *match)
{
XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
uint8_t group_level;
int count;
memset(match, 0, sizeof(*match));
/*
* Ask the machine to scan the interrupt controllers for a match.
@ -1824,22 +1999,8 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
* a new command to the presenters (the equivalent of the "assign"
* power bus command in the documented full notify sequence.
*/
count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, &match);
if (count < 0) {
return false;
}
/* handle CPU exception delivery */
if (count) {
group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx);
trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
} else {
*precluded = match.precluded;
}
return !!count;
return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, match);
}
/*
@ -1876,7 +2037,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
uint8_t nvt_blk;
uint32_t nvt_idx;
XiveNVT nvt;
bool found, precluded;
XiveTCTXMatch match;
uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
@ -1956,16 +2117,16 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
return;
}
found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
false /* crowd */,
xive_get_field32(END_W7_F0_IGNORE, end.w7),
priority,
xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
&precluded);
/* we don't support VP-group notification on P9, so precluded is not used */
/* TODO: Auto EOI. */
if (found) {
/* we don't support VP-group notification on P9, so precluded is not used */
if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx,
false /* crowd */,
xive_get_field32(END_W7_F0_IGNORE, end.w7),
priority,
xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
&match)) {
trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0);
xive_tctx_pipr_present(match.tctx, match.ring, priority, 0);
return;
}

File diff suppressed because it is too large.


@ -2608,62 +2608,46 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf)
}
}
static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
static bool pnv_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
PnvMachineState *pnv = PNV_MACHINE(xfb);
int total_count = 0;
int i;
for (i = 0; i < pnv->num_chips; i++) {
Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]);
XivePresenter *xptr = XIVE_PRESENTER(&chip9->xive);
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
}
total_count += count;
xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
cam_ignore, priority, logic_serv, match);
}
return total_count;
return !!match->count;
}
static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
static bool pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv,
XiveTCTXMatch *match)
{
PnvMachineState *pnv = PNV_MACHINE(xfb);
int total_count = 0;
int i;
for (i = 0; i < pnv->num_chips; i++) {
Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
cam_ignore, priority, logic_serv, match);
if (count < 0) {
return count;
}
total_count += count;
xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd,
cam_ignore, priority, logic_serv, match);
}
return total_count;
return !!match->count;
}
static int pnv10_xive_broadcast(XiveFabric *xfb,


@ -4468,21 +4468,14 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf)
/*
* This is a XIVE only operation
*/
static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
static bool spapr_match_nvt(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match)
{
SpaprMachineState *spapr = SPAPR_MACHINE(xfb);
XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc);
XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
int count;
count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, match);
if (count < 0) {
return count;
}
/*
* When we implement the save and restore of the thread interrupt
@ -4493,12 +4486,14 @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format,
* Until this is done, the sPAPR machine should find at least one
* matching context always.
*/
if (count == 0) {
if (!xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore,
priority, logic_serv, match)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n",
nvt_blk, nvt_idx);
return false;
}
return count;
return true;
}
int spapr_get_vcpu_id(PowerPCCPU *cpu)


@ -365,6 +365,11 @@ static inline uint32_t xive_tctx_word2(uint8_t *ring)
return *((uint32_t *) &ring[TM_WORD2]);
}
bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring);
bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr);
bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr);
uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr);
/*
* XIVE Router
*/
@ -421,6 +426,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
typedef struct XiveTCTXMatch {
XiveTCTX *tctx;
int count;
uint8_t ring;
bool precluded;
} XiveTCTXMatch;
@ -436,10 +442,10 @@ DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER,
struct XivePresenterClass {
InterfaceClass parent;
int (*match_nvt)(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
bool (*match_nvt)(XivePresenter *xptr, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
bool (*in_kernel)(const XivePresenter *xptr);
uint32_t (*get_config)(XivePresenter *xptr);
int (*broadcast)(XivePresenter *xptr,
@ -451,12 +457,14 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool cam_ignore, uint32_t logic_serv);
bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, bool *precluded);
bool xive_presenter_match(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
uint32_t xive_get_vpgroup_size(uint32_t nvp_index);
uint8_t xive_get_group_level(bool crowd, bool ignore,
uint32_t nvp_blk, uint32_t nvp_index);
/*
* XIVE Fabric (Interface between Interrupt Controller and Machine)
@ -471,10 +479,10 @@ DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC,
struct XiveFabricClass {
InterfaceClass parent;
int (*match_nvt)(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
bool (*match_nvt)(XiveFabric *xfb, uint8_t format,
uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority,
uint32_t logic_serv, XiveTCTXMatch *match);
int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx,
bool crowd, bool cam_ignore, uint8_t priority);
};
@ -532,7 +540,7 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
}
/*
* XIVE Thread Interrupt Management Aera (TIMA)
* XIVE Thread Interrupt Management Area (TIMA)
*
* This region gives access to the registers of the thread interrupt
* management context. It is four page wide, each page providing a
@ -544,6 +552,30 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp)
#define XIVE_TM_OS_PAGE 0x2
#define XIVE_TM_USER_PAGE 0x3
/*
* The TCTX (TIMA) has 4 rings (phys, pool, os, user), but only signals
* (raises an interrupt on) the CPU from 3 of them. Phys and pool both
* cause a hypervisor privileged interrupt so interrupts presented on
* those rings signal using the phys ring. This helper returns the signal
* regs from the given ring.
*/
static inline uint8_t *xive_tctx_signal_regs(XiveTCTX *tctx, uint8_t ring)
{
/*
* This is a good point to add invariants to ensure nothing has tried to
* signal using the POOL ring.
*/
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0);
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0);
if (ring == TM_QW2_HV_POOL) {
/* POOL and PHYS rings share the signal regs (PIPR, NSR, CPPR) */
ring = TM_QW3_HV_PHYS;
}
return &tctx->regs[ring];
}
void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
@ -553,10 +585,12 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf);
Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
void xive_tctx_reset(XiveTCTX *tctx);
void xive_tctx_destroy(XiveTCTX *tctx);
void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
uint8_t group_level);
void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
uint8_t group_level);
void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
uint8_t group_level);
void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level);
uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring);
/*
* KVM XIVE device helpers
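
The context push/pull changes in this series repeatedly recompute PIPR
from the IPB and compare it against CPPR to decide whether an interrupt
may be signalled. The standalone sketch below (not part of the diff)
illustrates that arithmetic; the two helpers mirror what
xive_priority_to_ipb() and xive_ipb_to_pipr() are assumed to do, with
XIVE_PRIORITY_MAX taken to be 7.

#include <stdint.h>
#include <stdio.h>

#define PRIORITY_MAX 7

/* One IPB bit per priority: priority 0 (most favored) -> bit 0x80 */
static uint8_t priority_to_ipb(uint8_t prio)
{
    return prio > PRIORITY_MAX ? 0 : (uint8_t)(1 << (PRIORITY_MAX - prio));
}

/* PIPR is the most favored (lowest) priority with an IPB bit set */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    for (uint8_t prio = 0; prio <= PRIORITY_MAX; prio++) {
        if (ipb & priority_to_ipb(prio)) {
            return prio;
        }
    }
    return 0xff;                /* nothing pending */
}

int main(void)
{
    uint8_t ipb = 0;
    uint8_t cppr = 4;

    ipb |= priority_to_ipb(6);  /* a low-priority event is pending */
    ipb |= priority_to_ipb(2);  /* then a more favored one arrives */

    uint8_t pipr = ipb_to_pipr(ipb);    /* -> 2 */

    /* An interrupt is only signalled when PIPR is more favored than CPPR */
    printf("IPB=0x%02x PIPR=%d CPPR=%d -> %s\n",
           (unsigned)ipb, pipr, cppr, pipr < cppr ? "present" : "precluded");
    return 0;
}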


@ -29,9 +29,11 @@ OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER);
* Configuration flags
*/
#define XIVE2_GEN1_TIMA_OS 0x00000001
#define XIVE2_VP_SAVE_RESTORE 0x00000002
#define XIVE2_THREADID_8BITS 0x00000004
#define XIVE2_GEN1_TIMA_OS 0x00000001
#define XIVE2_VP_SAVE_RESTORE 0x00000002
#define XIVE2_THREADID_8BITS 0x00000004
#define XIVE2_EN_VP_GRP_PRIORITY 0x00000008
#define XIVE2_VP_INT_PRIO 0x00000030
typedef struct Xive2RouterClass {
SysBusDeviceClass parent;
@ -80,6 +82,7 @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
uint32_t xive2_router_get_config(Xive2Router *xrtr);
void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked);
void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked);
/*
* XIVE2 Presenter (POWER10)
@ -127,6 +130,8 @@ void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset,
uint64_t value, unsigned size);
uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
@ -137,7 +142,16 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority);
void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority);
void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
uint64_t xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, unsigned size);
void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx,
hwaddr offset, uint64_t value, unsigned size);
#endif /* PPC_XIVE2_H */


@ -39,15 +39,18 @@
typedef struct Xive2Eas {
uint64_t w;
#define EAS2_VALID PPC_BIT(0)
#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */
#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */
#define EAS2_MASKED PPC_BIT(32) /* Masked */
#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */
#define EAS2_VALID PPC_BIT(0)
#define EAS2_QOS PPC_BIT(1, 2) /* Quality of Service(unimp) */
#define EAS2_RESUME PPC_BIT(3) /* END Resume(unimp) */
#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */
#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */
#define EAS2_MASKED PPC_BIT(32) /* Masked */
#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */
} Xive2Eas;
#define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID)
#define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED)
#define xive2_eas_is_resume(eas) (be64_to_cpu((eas)->w) & EAS2_RESUME)
void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf);
@ -87,6 +90,7 @@ typedef struct Xive2End {
#define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31)
uint32_t w3;
#define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24)
#define END2_W3_CL PPC_BIT32(27)
#define END2_W3_QSIZE PPC_BITMASK32(28, 31)
uint32_t w4;
#define END2_W4_END_BLOCK PPC_BITMASK32(4, 7)
@ -154,6 +158,7 @@ typedef struct Xive2Nvp {
#define NVP2_W0_L PPC_BIT32(8)
#define NVP2_W0_G PPC_BIT32(9)
#define NVP2_W0_T PPC_BIT32(10)
#define NVP2_W0_P PPC_BIT32(11)
#define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */
#define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31)
uint32_t w1;
@ -205,9 +210,9 @@ static inline uint32_t xive2_nvp_idx(uint32_t cam_line)
return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1);
}
static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
static inline uint8_t xive2_nvp_blk(uint32_t cam_line)
{
return (cam_line >> XIVE2_NVP_SHIFT) & 0xf;
return (uint8_t)((cam_line >> XIVE2_NVP_SHIFT) & 0xf);
}
void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf);
@ -220,6 +225,9 @@ typedef struct Xive2Nvgc {
#define NVGC2_W0_VALID PPC_BIT32(0)
#define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31)
uint32_t w1;
#define NVGC2_W1_PSIZE PPC_BITMASK32(0, 1)
#define NVGC2_W1_END_BLK PPC_BITMASK32(4, 7)
#define NVGC2_W1_END_IDX PPC_BITMASK32(8, 31)
uint32_t w2;
uint32_t w3;
uint32_t w4;