// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_reg_sr.h"

#include <linux/align.h>
#include <linux/string_helpers.h>
#include <linux/xarray.h>

#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
#include "xe_device_types.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_macros.h"
#include "xe_mmio.h"
#include "xe_rtp_types.h"

#define XE_REG_SR_GROW_STEP_DEFAULT	16

static void reg_sr_fini(struct drm_device *drm, void *arg)
{
	struct xe_reg_sr *sr = arg;

	xa_destroy(&sr->xa);
	kfree(sr->pool.arr);
	memset(&sr->pool, 0, sizeof(sr->pool));
}

int xe_reg_sr_init(struct xe_reg_sr *sr, const char *name, struct xe_device *xe)
{
	xa_init(&sr->xa);
	memset(&sr->pool, 0, sizeof(sr->pool));
	sr->pool.grow_step = XE_REG_SR_GROW_STEP_DEFAULT;
	sr->name = name;

	return drmm_add_action_or_reset(&xe->drm, reg_sr_fini, sr);
}

int xe_reg_sr_dump_kv(struct xe_reg_sr *sr,
		      struct xe_reg_sr_kv **dst)
{
	struct xe_reg_sr_kv *iter;
	struct xe_reg_sr_entry *entry;
	unsigned long idx;

	if (xa_empty(&sr->xa)) {
		*dst = NULL;
		return 0;
	}

	*dst = kmalloc_array(sr->pool.used, sizeof(**dst), GFP_KERNEL);
	if (!*dst)
		return -ENOMEM;

	iter = *dst;
	xa_for_each(&sr->xa, idx, entry) {
		iter->k = idx;
		iter->v = *entry;
		iter++;
	}

	return 0;
}

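/*
 * Allocate a save-restore entry from the backing pool, growing the array by
 * sr->pool.grow_step entries whenever it runs out of space.
 */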
static struct xe_reg_sr_entry *alloc_entry(struct xe_reg_sr *sr)
{
	if (sr->pool.used == sr->pool.allocated) {
		struct xe_reg_sr_entry *arr;

		arr = krealloc_array(sr->pool.arr,
				     ALIGN(sr->pool.allocated + 1, sr->pool.grow_step),
				     sizeof(*arr), GFP_KERNEL);
		if (!arr)
			return NULL;

		sr->pool.arr = arr;
		sr->pool.allocated += sr->pool.grow_step;
	}

	return &sr->pool.arr[sr->pool.used++];
}

static bool compatible_entries(const struct xe_reg_sr_entry *e1,
			       const struct xe_reg_sr_entry *e2)
{
	/*
	 * Don't allow overwriting values: clr_bits/set_bits should be disjoint
	 * when operating in the same register
	 */
	if (e1->clr_bits & e2->clr_bits || e1->set_bits & e2->set_bits ||
	    e1->clr_bits & e2->set_bits || e1->set_bits & e2->clr_bits)
		return false;

	if (e1->masked_reg != e2->masked_reg)
		return false;

	if (e1->reg_type != e2->reg_type)
		return false;

	return true;
}

int xe_reg_sr_add(struct xe_reg_sr *sr, u32 reg,
		  const struct xe_reg_sr_entry *e)
{
	unsigned long idx = reg;
	struct xe_reg_sr_entry *pentry = xa_load(&sr->xa, idx);
	int ret;

	if (pentry) {
		if (!compatible_entries(pentry, e)) {
			ret = -EINVAL;
			goto fail;
		}

		pentry->clr_bits |= e->clr_bits;
		pentry->set_bits |= e->set_bits;
		pentry->read_mask |= e->read_mask;

		return 0;
	}

	pentry = alloc_entry(sr);
	if (!pentry) {
		ret = -ENOMEM;
		goto fail;
	}

	*pentry = *e;
	ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
	if (ret)
		goto fail;

	return 0;

fail:
	DRM_ERROR("Discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s): ret=%d\n",
		  idx, e->clr_bits, e->set_bits,
		  str_yes_no(e->masked_reg), ret);

	return ret;
}

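/*
 * Write one save-restore entry: compute the final value from clr_bits/set_bits
 * (reading back from hardware when needed) and write it via multicast MCR or
 * plain MMIO, depending on the entry's register type.
 */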
static void apply_one_mmio(struct xe_gt *gt, u32 reg,
			   struct xe_reg_sr_entry *entry)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 val;

	/*
	 * If this is a masked register, need to figure what goes on the upper
	 * 16 bits: it's either the clr_bits (when using FIELD_SET and WR) or
	 * the set_bits, when using SET.
	 *
	 * When it's not masked, we have to read it from hardware, unless we are
	 * supposed to set all bits.
	 */
	if (entry->masked_reg)
		val = (entry->clr_bits ?: entry->set_bits << 16);
	else if (entry->clr_bits + 1)
		val = (entry->reg_type == XE_RTP_REG_MCR ?
		       xe_gt_mcr_unicast_read_any(gt, MCR_REG(reg)) :
		       xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
	else
		val = 0;

	/*
	 * TODO: add selftest to validate all tables, regardless of platform:
	 *  - Masked registers can't have set_bits with upper bits set
	 *  - set_bits must be contained in clr_bits
	 */
	val |= entry->set_bits;

	drm_dbg(&xe->drm, "REG[0x%x] = 0x%08x", reg, val);

	if (entry->reg_type == XE_RTP_REG_MCR)
		xe_gt_mcr_multicast_write(gt, MCR_REG(reg), val);
	else
		xe_mmio_write32(gt, reg, val);
}

void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	int err;

	drm_dbg(&xe->drm, "Applying %s save-restore MMIOs\n", sr->name);

	err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
	if (err)
		goto err_force_wake;

	xa_for_each(&sr->xa, reg, entry)
		apply_one_mmio(gt, reg, entry);

	err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return;

err_force_wake:
	drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
}

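/*
 * Program the engine's RING_FORCE_TO_NONPRIV whitelist slots from the
 * save-restore list, pointing any remaining slots at RING_NOPID so no stale
 * entries are left behind.
 */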
void xe_reg_sr_apply_whitelist(struct xe_reg_sr *sr, u32 mmio_base,
			       struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_reg_sr_entry *entry;
	unsigned long reg;
	unsigned int slot = 0;
	int err;

	drm_dbg(&xe->drm, "Whitelisting %s registers\n", sr->name);

	err = xe_force_wake_get(&gt->mmio.fw, XE_FORCEWAKE_ALL);
	if (err)
		goto err_force_wake;

	xa_for_each(&sr->xa, reg, entry) {
		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg,
				reg | entry->set_bits);
		slot++;
	}

	/* And clear the rest just in case of garbage */
	for (; slot < RING_MAX_NONPRIV_SLOTS; slot++)
		xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg,
				RING_NOPID(mmio_base).reg);

	err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
	XE_WARN_ON(err);

	return;

err_force_wake:
	drm_err(&xe->drm, "Failed to apply, err=%d\n", err);
}