17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 500d0963fSdilpreet * Common Development and Distribution License (the "License"). 600d0963fSdilpreet * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 2286a9c507SGuoli Shu * Copyright 2010 Sun Microsystems, Inc. All rights reserved. 237c478bd9Sstevel@tonic-gate * Use is subject to license terms. 247c478bd9Sstevel@tonic-gate */ 257c478bd9Sstevel@tonic-gate 267c478bd9Sstevel@tonic-gate /* 2712f080e7Smrj * x86 root nexus driver 287c478bd9Sstevel@tonic-gate */ 297c478bd9Sstevel@tonic-gate 307c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 317c478bd9Sstevel@tonic-gate #include <sys/conf.h> 327c478bd9Sstevel@tonic-gate #include <sys/autoconf.h> 337c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 347c478bd9Sstevel@tonic-gate #include <sys/debug.h> 357c478bd9Sstevel@tonic-gate #include <sys/psw.h> 367c478bd9Sstevel@tonic-gate #include <sys/ddidmareq.h> 377c478bd9Sstevel@tonic-gate #include <sys/promif.h> 387c478bd9Sstevel@tonic-gate #include <sys/devops.h> 397c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 407c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 417c478bd9Sstevel@tonic-gate #include <vm/seg.h> 427c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 437c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h> 447c478bd9Sstevel@tonic-gate #include <sys/vmem.h> 457c478bd9Sstevel@tonic-gate #include <sys/mman.h> 467c478bd9Sstevel@tonic-gate #include <vm/hat.h> 477c478bd9Sstevel@tonic-gate #include <vm/as.h> 487c478bd9Sstevel@tonic-gate #include <vm/page.h> 497c478bd9Sstevel@tonic-gate #include <sys/avintr.h> 507c478bd9Sstevel@tonic-gate #include <sys/errno.h> 517c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 527c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h> 537c478bd9Sstevel@tonic-gate #include <sys/sunddi.h> 547c478bd9Sstevel@tonic-gate #include <sys/sunndi.h> 557a364d25Sschwartz #include <sys/mach_intr.h> 567c478bd9Sstevel@tonic-gate #include <sys/psm.h> 577c478bd9Sstevel@tonic-gate #include <sys/ontrap.h> 5812f080e7Smrj #include <sys/atomic.h> 5912f080e7Smrj #include <sys/sdt.h> 6012f080e7Smrj #include <sys/rootnex.h> 6112f080e7Smrj #include <vm/hat_i86.h> 6200d0963fSdilpreet #include <sys/ddifm.h> 6336945f79Smrj #include <sys/ddi_isa.h> 647c478bd9Sstevel@tonic-gate 65843e1988Sjohnlev #ifdef __xpv 66843e1988Sjohnlev #include <sys/bootinfo.h> 67843e1988Sjohnlev 
#include <sys/hypervisor.h> 68843e1988Sjohnlev #include <sys/bootconf.h> 69843e1988Sjohnlev #include <vm/kboot_mmu.h> 703a634bfcSVikram Hegde #endif 713a634bfcSVikram Hegde 723a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 733a634bfcSVikram Hegde #include <sys/immu.h> 74843e1988Sjohnlev #endif 75843e1988Sjohnlev 7686c1f4dcSVikram Hegde 7712f080e7Smrj /* 7812f080e7Smrj * enable/disable extra checking of function parameters. Useful for debugging 7912f080e7Smrj * drivers. 8012f080e7Smrj */ 8112f080e7Smrj #ifdef DEBUG 8212f080e7Smrj int rootnex_alloc_check_parms = 1; 8312f080e7Smrj int rootnex_bind_check_parms = 1; 8412f080e7Smrj int rootnex_bind_check_inuse = 1; 8512f080e7Smrj int rootnex_unbind_verify_buffer = 0; 8612f080e7Smrj int rootnex_sync_check_parms = 1; 8712f080e7Smrj #else 8812f080e7Smrj int rootnex_alloc_check_parms = 0; 8912f080e7Smrj int rootnex_bind_check_parms = 0; 9012f080e7Smrj int rootnex_bind_check_inuse = 0; 9112f080e7Smrj int rootnex_unbind_verify_buffer = 0; 9212f080e7Smrj int rootnex_sync_check_parms = 0; 9312f080e7Smrj #endif 947c478bd9Sstevel@tonic-gate 953a634bfcSVikram Hegde boolean_t rootnex_dmar_not_setup; 963a634bfcSVikram Hegde 977aec1d6eScindi /* Master Abort and Target Abort panic flag */ 987aec1d6eScindi int rootnex_fm_ma_ta_panic_flag = 0; 997aec1d6eScindi 10012f080e7Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */ 1017c478bd9Sstevel@tonic-gate int rootnex_bind_fail = 1; 1027c478bd9Sstevel@tonic-gate int rootnex_bind_warn = 1; 1037c478bd9Sstevel@tonic-gate uint8_t *rootnex_warn_list; 1047c478bd9Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */ 1057c478bd9Sstevel@tonic-gate #define ROOTNEX_BIND_WARNING (0x1 << 0) 1067c478bd9Sstevel@tonic-gate 1077c478bd9Sstevel@tonic-gate /* 10812f080e7Smrj * revert back to old broken behavior of always sync'ing entire copy buffer. 10912f080e7Smrj * This is useful if be have a buggy driver which doesn't correctly pass in 11012f080e7Smrj * the offset and size into ddi_dma_sync(). 1117c478bd9Sstevel@tonic-gate */ 11212f080e7Smrj int rootnex_sync_ignore_params = 0; 1137c478bd9Sstevel@tonic-gate 1147c478bd9Sstevel@tonic-gate /* 11512f080e7Smrj * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1 11612f080e7Smrj * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a 11712f080e7Smrj * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit 11812f080e7Smrj * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65 11912f080e7Smrj * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages 12012f080e7Smrj * (< 8K). We will still need to allocate the copy buffer during bind though 12112f080e7Smrj * (if we need one). These can only be modified in /etc/system before rootnex 12212f080e7Smrj * attach. 
1237c478bd9Sstevel@tonic-gate */ 12412f080e7Smrj #if defined(__amd64) 12512f080e7Smrj int rootnex_prealloc_cookies = 65; 12612f080e7Smrj int rootnex_prealloc_windows = 4; 12712f080e7Smrj int rootnex_prealloc_copybuf = 2; 12812f080e7Smrj #else 12912f080e7Smrj int rootnex_prealloc_cookies = 33; 13012f080e7Smrj int rootnex_prealloc_windows = 4; 13112f080e7Smrj int rootnex_prealloc_copybuf = 2; 13212f080e7Smrj #endif 1337c478bd9Sstevel@tonic-gate 13412f080e7Smrj /* driver global state */ 13512f080e7Smrj static rootnex_state_t *rootnex_state; 13612f080e7Smrj 13712f080e7Smrj /* shortcut to rootnex counters */ 13812f080e7Smrj static uint64_t *rootnex_cnt; 1397c478bd9Sstevel@tonic-gate 1407c478bd9Sstevel@tonic-gate /* 14112f080e7Smrj * XXX - does x86 even need these or are they left over from the SPARC days? 1427c478bd9Sstevel@tonic-gate */ 14312f080e7Smrj /* statically defined integer/boolean properties for the root node */ 14412f080e7Smrj static rootnex_intprop_t rootnex_intprp[] = { 14512f080e7Smrj { "PAGESIZE", PAGESIZE }, 14612f080e7Smrj { "MMU_PAGESIZE", MMU_PAGESIZE }, 14712f080e7Smrj { "MMU_PAGEOFFSET", MMU_PAGEOFFSET }, 14812f080e7Smrj { DDI_RELATIVE_ADDRESSING, 1 }, 14912f080e7Smrj }; 15012f080e7Smrj #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t)) 1517c478bd9Sstevel@tonic-gate 152843e1988Sjohnlev #ifdef __xpv 153843e1988Sjohnlev typedef maddr_t rootnex_addr_t; 154843e1988Sjohnlev #define ROOTNEX_PADDR_TO_RBASE(xinfo, pa) \ 155843e1988Sjohnlev (DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa)) 156843e1988Sjohnlev #else 157843e1988Sjohnlev typedef paddr_t rootnex_addr_t; 158843e1988Sjohnlev #endif 159843e1988Sjohnlev 16020906b23SVikram Hegde #if !defined(__xpv) 1617e301000SVikram Hegde char _depends_on[] = "mach/pcplusmp misc/iommulib misc/acpica"; 16220906b23SVikram Hegde #endif 1637c478bd9Sstevel@tonic-gate 16412f080e7Smrj static struct cb_ops rootnex_cb_ops = { 16512f080e7Smrj nodev, /* open */ 16612f080e7Smrj nodev, /* close */ 16712f080e7Smrj nodev, /* strategy */ 16812f080e7Smrj nodev, /* print */ 16912f080e7Smrj nodev, /* dump */ 17012f080e7Smrj nodev, /* read */ 17112f080e7Smrj nodev, /* write */ 17212f080e7Smrj nodev, /* ioctl */ 17312f080e7Smrj nodev, /* devmap */ 17412f080e7Smrj nodev, /* mmap */ 17512f080e7Smrj nodev, /* segmap */ 17612f080e7Smrj nochpoll, /* chpoll */ 17712f080e7Smrj ddi_prop_op, /* cb_prop_op */ 17812f080e7Smrj NULL, /* struct streamtab */ 17912f080e7Smrj D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */ 18012f080e7Smrj CB_REV, /* Rev */ 18112f080e7Smrj nodev, /* cb_aread */ 18212f080e7Smrj nodev /* cb_awrite */ 18312f080e7Smrj }; 1847c478bd9Sstevel@tonic-gate 18512f080e7Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 1867c478bd9Sstevel@tonic-gate off_t offset, off_t len, caddr_t *vaddrp); 18712f080e7Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, 1887c478bd9Sstevel@tonic-gate struct hat *hat, struct seg *seg, caddr_t addr, 1897c478bd9Sstevel@tonic-gate struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock); 19012f080e7Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 1917c478bd9Sstevel@tonic-gate struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep); 19212f080e7Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 19312f080e7Smrj ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 19412f080e7Smrj ddi_dma_handle_t *handlep); 19512f080e7Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 19612f080e7Smrj 
ddi_dma_handle_t handle); 19712f080e7Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 19812f080e7Smrj ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 19912f080e7Smrj ddi_dma_cookie_t *cookiep, uint_t *ccountp); 20012f080e7Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 20112f080e7Smrj ddi_dma_handle_t handle); 20212f080e7Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, 20312f080e7Smrj ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 20412f080e7Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, 20512f080e7Smrj ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 20612f080e7Smrj ddi_dma_cookie_t *cookiep, uint_t *ccountp); 20712f080e7Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 2087c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 2097c478bd9Sstevel@tonic-gate off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags); 21012f080e7Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, 21112f080e7Smrj ddi_ctl_enum_t ctlop, void *arg, void *result); 21200d0963fSdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 21300d0963fSdilpreet ddi_iblock_cookie_t *ibc); 21412f080e7Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, 21512f080e7Smrj ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result); 2167c478bd9Sstevel@tonic-gate 21720906b23SVikram Hegde static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 21820906b23SVikram Hegde ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 21920906b23SVikram Hegde ddi_dma_handle_t *handlep); 22020906b23SVikram Hegde static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 22120906b23SVikram Hegde ddi_dma_handle_t handle); 22220906b23SVikram Hegde static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 22320906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 22420906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp); 22520906b23SVikram Hegde static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 22620906b23SVikram Hegde ddi_dma_handle_t handle); 2273a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 22820906b23SVikram Hegde static void rootnex_coredma_reset_cookies(dev_info_t *dip, 22920906b23SVikram Hegde ddi_dma_handle_t handle); 23020906b23SVikram Hegde static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 23194f1124eSVikram Hegde ddi_dma_cookie_t **cookiepp, uint_t *ccountp); 23294f1124eSVikram Hegde static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 23394f1124eSVikram Hegde ddi_dma_cookie_t *cookiep, uint_t ccount); 23494f1124eSVikram Hegde static int rootnex_coredma_clear_cookies(dev_info_t *dip, 23594f1124eSVikram Hegde ddi_dma_handle_t handle); 23694f1124eSVikram Hegde static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle); 2375dfdb46bSVikram Hegde #endif 23820906b23SVikram Hegde static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, 23920906b23SVikram Hegde ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 24020906b23SVikram Hegde static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, 24120906b23SVikram Hegde ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 24220906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp); 2437c478bd9Sstevel@tonic-gate 
2447c478bd9Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = { 2457c478bd9Sstevel@tonic-gate BUSO_REV, 2467c478bd9Sstevel@tonic-gate rootnex_map, 2477c478bd9Sstevel@tonic-gate NULL, 2487c478bd9Sstevel@tonic-gate NULL, 2497c478bd9Sstevel@tonic-gate NULL, 2507c478bd9Sstevel@tonic-gate rootnex_map_fault, 2517c478bd9Sstevel@tonic-gate rootnex_dma_map, 2527c478bd9Sstevel@tonic-gate rootnex_dma_allochdl, 2537c478bd9Sstevel@tonic-gate rootnex_dma_freehdl, 2547c478bd9Sstevel@tonic-gate rootnex_dma_bindhdl, 2557c478bd9Sstevel@tonic-gate rootnex_dma_unbindhdl, 25612f080e7Smrj rootnex_dma_sync, 2577c478bd9Sstevel@tonic-gate rootnex_dma_win, 2587c478bd9Sstevel@tonic-gate rootnex_dma_mctl, 2597c478bd9Sstevel@tonic-gate rootnex_ctlops, 2607c478bd9Sstevel@tonic-gate ddi_bus_prop_op, 2617c478bd9Sstevel@tonic-gate i_ddi_rootnex_get_eventcookie, 2627c478bd9Sstevel@tonic-gate i_ddi_rootnex_add_eventcall, 2637c478bd9Sstevel@tonic-gate i_ddi_rootnex_remove_eventcall, 2647c478bd9Sstevel@tonic-gate i_ddi_rootnex_post_event, 2657c478bd9Sstevel@tonic-gate 0, /* bus_intr_ctl */ 2667c478bd9Sstevel@tonic-gate 0, /* bus_config */ 2677c478bd9Sstevel@tonic-gate 0, /* bus_unconfig */ 26800d0963fSdilpreet rootnex_fm_init, /* bus_fm_init */ 2697c478bd9Sstevel@tonic-gate NULL, /* bus_fm_fini */ 2707c478bd9Sstevel@tonic-gate NULL, /* bus_fm_access_enter */ 2717c478bd9Sstevel@tonic-gate NULL, /* bus_fm_access_exit */ 2727c478bd9Sstevel@tonic-gate NULL, /* bus_powr */ 2737c478bd9Sstevel@tonic-gate rootnex_intr_ops /* bus_intr_op */ 2747c478bd9Sstevel@tonic-gate }; 2757c478bd9Sstevel@tonic-gate 27612f080e7Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 27712f080e7Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 2783a634bfcSVikram Hegde static int rootnex_quiesce(dev_info_t *dip); 2797c478bd9Sstevel@tonic-gate 2807c478bd9Sstevel@tonic-gate static struct dev_ops rootnex_ops = { 2817c478bd9Sstevel@tonic-gate DEVO_REV, 28212f080e7Smrj 0, 28312f080e7Smrj ddi_no_info, 2847c478bd9Sstevel@tonic-gate nulldev, 28512f080e7Smrj nulldev, 2867c478bd9Sstevel@tonic-gate rootnex_attach, 28712f080e7Smrj rootnex_detach, 28812f080e7Smrj nulldev, 28912f080e7Smrj &rootnex_cb_ops, 29019397407SSherry Moore &rootnex_bus_ops, 29119397407SSherry Moore NULL, 2923a634bfcSVikram Hegde rootnex_quiesce, /* quiesce */ 2937c478bd9Sstevel@tonic-gate }; 2947c478bd9Sstevel@tonic-gate 29512f080e7Smrj static struct modldrv rootnex_modldrv = { 29612f080e7Smrj &mod_driverops, 297613b2871SRichard Bean "i86pc root nexus", 29812f080e7Smrj &rootnex_ops 2997c478bd9Sstevel@tonic-gate }; 3007c478bd9Sstevel@tonic-gate 30112f080e7Smrj static struct modlinkage rootnex_modlinkage = { 30212f080e7Smrj MODREV_1, 30312f080e7Smrj (void *)&rootnex_modldrv, 30412f080e7Smrj NULL 3057c478bd9Sstevel@tonic-gate }; 3067c478bd9Sstevel@tonic-gate 3073a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 30820906b23SVikram Hegde static iommulib_nexops_t iommulib_nexops = { 30920906b23SVikram Hegde IOMMU_NEXOPS_VERSION, 31020906b23SVikram Hegde "Rootnex IOMMU ops Vers 1.1", 31120906b23SVikram Hegde NULL, 31220906b23SVikram Hegde rootnex_coredma_allochdl, 31320906b23SVikram Hegde rootnex_coredma_freehdl, 31420906b23SVikram Hegde rootnex_coredma_bindhdl, 31520906b23SVikram Hegde rootnex_coredma_unbindhdl, 31620906b23SVikram Hegde rootnex_coredma_reset_cookies, 31720906b23SVikram Hegde rootnex_coredma_get_cookies, 31894f1124eSVikram Hegde rootnex_coredma_set_cookies, 31994f1124eSVikram Hegde rootnex_coredma_clear_cookies, 
32094f1124eSVikram Hegde rootnex_coredma_get_sleep_flags, 32120906b23SVikram Hegde rootnex_coredma_sync, 32220906b23SVikram Hegde rootnex_coredma_win, 323b51bbbf5SVikram Hegde rootnex_dma_map, 324b51bbbf5SVikram Hegde rootnex_dma_mctl 32520906b23SVikram Hegde }; 3265dfdb46bSVikram Hegde #endif 3277c478bd9Sstevel@tonic-gate 32812f080e7Smrj /* 32912f080e7Smrj * extern hacks 33012f080e7Smrj */ 33112f080e7Smrj extern struct seg_ops segdev_ops; 33212f080e7Smrj extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */ 33312f080e7Smrj #ifdef DDI_MAP_DEBUG 33412f080e7Smrj extern int ddi_map_debug_flag; 33512f080e7Smrj #define ddi_map_debug if (ddi_map_debug_flag) prom_printf 33612f080e7Smrj #endif 33712f080e7Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr); 33812f080e7Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr); 33912f080e7Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, 34012f080e7Smrj psm_intr_op_t, int *); 34112f080e7Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip); 34212f080e7Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip); 34336945f79Smrj 34412f080e7Smrj /* 34512f080e7Smrj * Use device arena to use for device control register mappings. 34612f080e7Smrj * Various kernel memory walkers (debugger, dtrace) need to know 34712f080e7Smrj * to avoid this address range to prevent undesired device activity. 34812f080e7Smrj */ 34912f080e7Smrj extern void *device_arena_alloc(size_t size, int vm_flag); 35012f080e7Smrj extern void device_arena_free(void * vaddr, size_t size); 35112f080e7Smrj 35212f080e7Smrj 35312f080e7Smrj /* 35412f080e7Smrj * Internal functions 35512f080e7Smrj */ 35612f080e7Smrj static int rootnex_dma_init(); 35712f080e7Smrj static void rootnex_add_props(dev_info_t *); 35812f080e7Smrj static int rootnex_ctl_reportdev(dev_info_t *dip); 35912f080e7Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum); 36012f080e7Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 36112f080e7Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 36212f080e7Smrj static int rootnex_map_handle(ddi_map_req_t *mp); 36312f080e7Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp); 36412f080e7Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize); 36512f080e7Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, 36612f080e7Smrj ddi_dma_attr_t *attr); 36712f080e7Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 36812f080e7Smrj rootnex_sglinfo_t *sglinfo); 36912f080e7Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 37012f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag); 37112f080e7Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 37212f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr); 37312f080e7Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma); 37412f080e7Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 37512f080e7Smrj ddi_dma_attr_t *attr, int kmflag); 37612f080e7Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma); 37712f080e7Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 37812f080e7Smrj rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset); 37912f080e7Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, 38012f080e7Smrj rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset, 
38112f080e7Smrj size_t *copybuf_used, page_t **cur_pp); 38212f080e7Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, 38312f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, 38412f080e7Smrj ddi_dma_attr_t *attr, off_t cur_offset); 38512f080e7Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, 38612f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, 38712f080e7Smrj ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used); 38812f080e7Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, 38912f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie); 39012f080e7Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 39112f080e7Smrj off_t offset, size_t size, uint_t cache_flags); 39212f080e7Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma); 39300d0963fSdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle, 39400d0963fSdilpreet const void *comp_addr, const void *not_used); 395*07c6692fSMark Johnson static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, 396*07c6692fSMark Johnson rootnex_sglinfo_t *sglinfo); 39712f080e7Smrj 39812f080e7Smrj /* 39912f080e7Smrj * _init() 40012f080e7Smrj * 40112f080e7Smrj */ 4027c478bd9Sstevel@tonic-gate int 4037c478bd9Sstevel@tonic-gate _init(void) 4047c478bd9Sstevel@tonic-gate { 40512f080e7Smrj 40612f080e7Smrj rootnex_state = NULL; 40712f080e7Smrj return (mod_install(&rootnex_modlinkage)); 4087c478bd9Sstevel@tonic-gate } 4097c478bd9Sstevel@tonic-gate 41012f080e7Smrj 41112f080e7Smrj /* 41212f080e7Smrj * _info() 41312f080e7Smrj * 41412f080e7Smrj */ 41512f080e7Smrj int 41612f080e7Smrj _info(struct modinfo *modinfop) 41712f080e7Smrj { 41812f080e7Smrj return (mod_info(&rootnex_modlinkage, modinfop)); 41912f080e7Smrj } 42012f080e7Smrj 42112f080e7Smrj 42212f080e7Smrj /* 42312f080e7Smrj * _fini() 42412f080e7Smrj * 42512f080e7Smrj */ 4267c478bd9Sstevel@tonic-gate int 4277c478bd9Sstevel@tonic-gate _fini(void) 4287c478bd9Sstevel@tonic-gate { 4297c478bd9Sstevel@tonic-gate return (EBUSY); 4307c478bd9Sstevel@tonic-gate } 4317c478bd9Sstevel@tonic-gate 43212f080e7Smrj 43312f080e7Smrj /* 43412f080e7Smrj * rootnex_attach() 43512f080e7Smrj * 43612f080e7Smrj */ 43712f080e7Smrj static int 43812f080e7Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4397c478bd9Sstevel@tonic-gate { 4407aec1d6eScindi int fmcap; 44112f080e7Smrj int e; 44212f080e7Smrj 44312f080e7Smrj switch (cmd) { 44412f080e7Smrj case DDI_ATTACH: 44512f080e7Smrj break; 44612f080e7Smrj case DDI_RESUME: 4473a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4483a634bfcSVikram Hegde return (immu_unquiesce()); 4493a634bfcSVikram Hegde #else 45012f080e7Smrj return (DDI_SUCCESS); 4513a634bfcSVikram Hegde #endif 45212f080e7Smrj default: 45312f080e7Smrj return (DDI_FAILURE); 4547c478bd9Sstevel@tonic-gate } 4557c478bd9Sstevel@tonic-gate 4567c478bd9Sstevel@tonic-gate /* 45712f080e7Smrj * We should only have one instance of rootnex. Save it away since we 45812f080e7Smrj * don't have an easy way to get it back later. 
4597c478bd9Sstevel@tonic-gate */ 46012f080e7Smrj ASSERT(rootnex_state == NULL); 46112f080e7Smrj rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP); 4627c478bd9Sstevel@tonic-gate 46312f080e7Smrj rootnex_state->r_dip = dip; 4647aec1d6eScindi rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15); 46512f080e7Smrj rootnex_state->r_reserved_msg_printed = B_FALSE; 46612f080e7Smrj rootnex_cnt = &rootnex_state->r_counters[0]; 4677c478bd9Sstevel@tonic-gate 4687aec1d6eScindi /* 4697aec1d6eScindi * Set minimum fm capability level for i86pc platforms and then 4707aec1d6eScindi * initialize error handling. Since we're the rootnex, we don't 4717aec1d6eScindi * care what's returned in the fmcap field. 4727aec1d6eScindi */ 47300d0963fSdilpreet ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE | 47400d0963fSdilpreet DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE; 4757aec1d6eScindi fmcap = ddi_system_fmcap; 4767aec1d6eScindi ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc); 4777aec1d6eScindi 47812f080e7Smrj /* initialize DMA related state */ 47912f080e7Smrj e = rootnex_dma_init(); 48012f080e7Smrj if (e != DDI_SUCCESS) { 48112f080e7Smrj kmem_free(rootnex_state, sizeof (rootnex_state_t)); 48212f080e7Smrj return (DDI_FAILURE); 48312f080e7Smrj } 48412f080e7Smrj 48512f080e7Smrj /* Add static root node properties */ 48612f080e7Smrj rootnex_add_props(dip); 48712f080e7Smrj 48812f080e7Smrj /* since we can't call ddi_report_dev() */ 48912f080e7Smrj cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip)); 49012f080e7Smrj 49112f080e7Smrj /* Initialize rootnex event handle */ 49212f080e7Smrj i_ddi_rootnex_init_events(dip); 49312f080e7Smrj 4943a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 49520906b23SVikram Hegde e = iommulib_nexus_register(dip, &iommulib_nexops, 49620906b23SVikram Hegde &rootnex_state->r_iommulib_handle); 49720906b23SVikram Hegde 49820906b23SVikram Hegde ASSERT(e == DDI_SUCCESS); 49920906b23SVikram Hegde #endif 50020906b23SVikram Hegde 50112f080e7Smrj return (DDI_SUCCESS); 50212f080e7Smrj } 50312f080e7Smrj 50412f080e7Smrj 50512f080e7Smrj /* 50612f080e7Smrj * rootnex_detach() 50712f080e7Smrj * 50812f080e7Smrj */ 5097c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 5107c478bd9Sstevel@tonic-gate static int 51112f080e7Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 5127c478bd9Sstevel@tonic-gate { 51312f080e7Smrj switch (cmd) { 51412f080e7Smrj case DDI_SUSPEND: 5153a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 5163a634bfcSVikram Hegde return (immu_quiesce()); 5173a634bfcSVikram Hegde #else 5183a634bfcSVikram Hegde return (DDI_SUCCESS); 5193a634bfcSVikram Hegde #endif 52012f080e7Smrj default: 52112f080e7Smrj return (DDI_FAILURE); 52212f080e7Smrj } 5233a634bfcSVikram Hegde /*NOTREACHED*/ 5247c478bd9Sstevel@tonic-gate 52512f080e7Smrj } 5267c478bd9Sstevel@tonic-gate 5277c478bd9Sstevel@tonic-gate 52812f080e7Smrj /* 52912f080e7Smrj * rootnex_dma_init() 53012f080e7Smrj * 53112f080e7Smrj */ 53212f080e7Smrj /*ARGSUSED*/ 53312f080e7Smrj static int 53412f080e7Smrj rootnex_dma_init() 53512f080e7Smrj { 53612f080e7Smrj size_t bufsize; 53712f080e7Smrj 53812f080e7Smrj 53912f080e7Smrj /* 54012f080e7Smrj * size of our cookie/window/copybuf state needed in dma bind that we 54112f080e7Smrj * pre-alloc in dma_alloc_handle 54212f080e7Smrj */ 54312f080e7Smrj rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies; 54412f080e7Smrj rootnex_state->r_prealloc_size = 54512f080e7Smrj (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) + 54612f080e7Smrj 
(rootnex_prealloc_windows * sizeof (rootnex_window_t)) + 54712f080e7Smrj (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t)); 54812f080e7Smrj 54912f080e7Smrj /* 55012f080e7Smrj * setup DDI DMA handle kmem cache, align each handle on 64 bytes, 55112f080e7Smrj * allocate 16 extra bytes for struct pointer alignment 55212f080e7Smrj * (p->dmai_private & dma->dp_prealloc_buffer) 55312f080e7Smrj */ 55412f080e7Smrj bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) + 55512f080e7Smrj rootnex_state->r_prealloc_size + 0x10; 55612f080e7Smrj rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl", 55712f080e7Smrj bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0); 55812f080e7Smrj if (rootnex_state->r_dmahdl_cache == NULL) { 55912f080e7Smrj return (DDI_FAILURE); 56012f080e7Smrj } 5617c478bd9Sstevel@tonic-gate 5627c478bd9Sstevel@tonic-gate /* 5637c478bd9Sstevel@tonic-gate * allocate array to track which major numbers we have printed warnings 5647c478bd9Sstevel@tonic-gate * for. 5657c478bd9Sstevel@tonic-gate */ 5667c478bd9Sstevel@tonic-gate rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list), 5677c478bd9Sstevel@tonic-gate KM_SLEEP); 5687c478bd9Sstevel@tonic-gate 5697c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 5707c478bd9Sstevel@tonic-gate } 5717c478bd9Sstevel@tonic-gate 5727c478bd9Sstevel@tonic-gate 5737c478bd9Sstevel@tonic-gate /* 57412f080e7Smrj * rootnex_add_props() 57512f080e7Smrj * 5767c478bd9Sstevel@tonic-gate */ 5777c478bd9Sstevel@tonic-gate static void 57812f080e7Smrj rootnex_add_props(dev_info_t *dip) 5797c478bd9Sstevel@tonic-gate { 58012f080e7Smrj rootnex_intprop_t *rpp; 5817c478bd9Sstevel@tonic-gate int i; 5827c478bd9Sstevel@tonic-gate 58312f080e7Smrj /* Add static integer/boolean properties to the root node */ 58412f080e7Smrj rpp = rootnex_intprp; 58512f080e7Smrj for (i = 0; i < NROOT_INTPROPS; i++) { 58612f080e7Smrj (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, 58712f080e7Smrj rpp[i].prop_name, rpp[i].prop_value); 58812f080e7Smrj } 5897c478bd9Sstevel@tonic-gate } 5907c478bd9Sstevel@tonic-gate 59112f080e7Smrj 59212f080e7Smrj 5937c478bd9Sstevel@tonic-gate /* 59412f080e7Smrj * ************************* 59512f080e7Smrj * ctlops related routines 59612f080e7Smrj * ************************* 59712f080e7Smrj */ 59812f080e7Smrj 59912f080e7Smrj /* 60012f080e7Smrj * rootnex_ctlops() 6017c478bd9Sstevel@tonic-gate * 6027c478bd9Sstevel@tonic-gate */ 603a195726fSgovinda /*ARGSUSED*/ 6047c478bd9Sstevel@tonic-gate static int 60512f080e7Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop, 60612f080e7Smrj void *arg, void *result) 6077c478bd9Sstevel@tonic-gate { 60812f080e7Smrj int n, *ptr; 60912f080e7Smrj struct ddi_parent_private_data *pdp; 6107c478bd9Sstevel@tonic-gate 61112f080e7Smrj switch (ctlop) { 61212f080e7Smrj case DDI_CTLOPS_DMAPMAPC: 6137c478bd9Sstevel@tonic-gate /* 61412f080e7Smrj * Return 'partial' to indicate that dma mapping 61512f080e7Smrj * has to be done in the main MMU. 6167c478bd9Sstevel@tonic-gate */ 61712f080e7Smrj return (DDI_DMA_PARTIAL); 6187c478bd9Sstevel@tonic-gate 61912f080e7Smrj case DDI_CTLOPS_BTOP: 6207c478bd9Sstevel@tonic-gate /* 62112f080e7Smrj * Convert byte count input to physical page units. 
62212f080e7Smrj * (byte counts that are not a page-size multiple 62312f080e7Smrj * are rounded down) 6247c478bd9Sstevel@tonic-gate */ 62512f080e7Smrj *(ulong_t *)result = btop(*(ulong_t *)arg); 6267c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6277c478bd9Sstevel@tonic-gate 62812f080e7Smrj case DDI_CTLOPS_PTOB: 6297c478bd9Sstevel@tonic-gate /* 63012f080e7Smrj * Convert size in physical pages to bytes 6317c478bd9Sstevel@tonic-gate */ 63212f080e7Smrj *(ulong_t *)result = ptob(*(ulong_t *)arg); 6337c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6347c478bd9Sstevel@tonic-gate 63512f080e7Smrj case DDI_CTLOPS_BTOPR: 6367c478bd9Sstevel@tonic-gate /* 63712f080e7Smrj * Convert byte count input to physical page units 63812f080e7Smrj * (byte counts that are not a page-size multiple 63912f080e7Smrj * are rounded up) 6407c478bd9Sstevel@tonic-gate */ 64112f080e7Smrj *(ulong_t *)result = btopr(*(ulong_t *)arg); 64212f080e7Smrj return (DDI_SUCCESS); 64312f080e7Smrj 64412f080e7Smrj case DDI_CTLOPS_INITCHILD: 64512f080e7Smrj return (impl_ddi_sunbus_initchild(arg)); 64612f080e7Smrj 64712f080e7Smrj case DDI_CTLOPS_UNINITCHILD: 64812f080e7Smrj impl_ddi_sunbus_removechild(arg); 64912f080e7Smrj return (DDI_SUCCESS); 65012f080e7Smrj 65112f080e7Smrj case DDI_CTLOPS_REPORTDEV: 65212f080e7Smrj return (rootnex_ctl_reportdev(rdip)); 65312f080e7Smrj 65412f080e7Smrj case DDI_CTLOPS_IOMIN: 6557c478bd9Sstevel@tonic-gate /* 65612f080e7Smrj * Nothing to do here but reflect back.. 6577c478bd9Sstevel@tonic-gate */ 6587c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6597c478bd9Sstevel@tonic-gate 66012f080e7Smrj case DDI_CTLOPS_REGSIZE: 66112f080e7Smrj case DDI_CTLOPS_NREGS: 66212f080e7Smrj break; 6637c478bd9Sstevel@tonic-gate 66412f080e7Smrj case DDI_CTLOPS_SIDDEV: 66512f080e7Smrj if (ndi_dev_is_prom_node(rdip)) 6667c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 66712f080e7Smrj if (ndi_dev_is_persistent_node(rdip)) 66812f080e7Smrj return (DDI_SUCCESS); 6697c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 6707c478bd9Sstevel@tonic-gate 67112f080e7Smrj case DDI_CTLOPS_POWER: 67212f080e7Smrj return ((*pm_platform_power)((power_req_t *)arg)); 67312f080e7Smrj 674a195726fSgovinda case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */ 67512f080e7Smrj case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */ 67612f080e7Smrj case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */ 67712f080e7Smrj case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */ 678a195726fSgovinda case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */ 679a195726fSgovinda case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */ 68012f080e7Smrj if (!rootnex_state->r_reserved_msg_printed) { 68112f080e7Smrj rootnex_state->r_reserved_msg_printed = B_TRUE; 68212f080e7Smrj cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for " 68312f080e7Smrj "1 or more reserved/obsolete operations."); 6847c478bd9Sstevel@tonic-gate } 68512f080e7Smrj return (DDI_FAILURE); 6867c478bd9Sstevel@tonic-gate 6877c478bd9Sstevel@tonic-gate default: 6887c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 6897c478bd9Sstevel@tonic-gate } 69012f080e7Smrj /* 69112f080e7Smrj * The rest are for "hardware" properties 69212f080e7Smrj */ 69312f080e7Smrj if ((pdp = ddi_get_parent_data(rdip)) == NULL) 69412f080e7Smrj return (DDI_FAILURE); 6957c478bd9Sstevel@tonic-gate 69612f080e7Smrj if (ctlop == DDI_CTLOPS_NREGS) { 69712f080e7Smrj ptr = (int *)result; 69812f080e7Smrj *ptr = pdp->par_nreg; 69912f080e7Smrj } else { 70012f080e7Smrj off_t *size = 
(off_t *)result; 7017c478bd9Sstevel@tonic-gate 70212f080e7Smrj ptr = (int *)arg; 70312f080e7Smrj n = *ptr; 70412f080e7Smrj if (n >= pdp->par_nreg) { 70512f080e7Smrj return (DDI_FAILURE); 70612f080e7Smrj } 70712f080e7Smrj *size = (off_t)pdp->par_reg[n].regspec_size; 70812f080e7Smrj } 7097c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 7107c478bd9Sstevel@tonic-gate } 7117c478bd9Sstevel@tonic-gate 71212f080e7Smrj 71312f080e7Smrj /* 71412f080e7Smrj * rootnex_ctl_reportdev() 71512f080e7Smrj * 71612f080e7Smrj */ 7177c478bd9Sstevel@tonic-gate static int 71812f080e7Smrj rootnex_ctl_reportdev(dev_info_t *dev) 71912f080e7Smrj { 72012f080e7Smrj int i, n, len, f_len = 0; 72112f080e7Smrj char *buf; 72212f080e7Smrj 72312f080e7Smrj buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP); 72412f080e7Smrj f_len += snprintf(buf, REPORTDEV_BUFSIZE, 72512f080e7Smrj "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev)); 72612f080e7Smrj len = strlen(buf); 72712f080e7Smrj 72812f080e7Smrj for (i = 0; i < sparc_pd_getnreg(dev); i++) { 72912f080e7Smrj 73012f080e7Smrj struct regspec *rp = sparc_pd_getreg(dev, i); 73112f080e7Smrj 73212f080e7Smrj if (i == 0) 73312f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 73412f080e7Smrj ": "); 73512f080e7Smrj else 73612f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 73712f080e7Smrj " and "); 73812f080e7Smrj len = strlen(buf); 73912f080e7Smrj 74012f080e7Smrj switch (rp->regspec_bustype) { 74112f080e7Smrj 74212f080e7Smrj case BTEISA: 74312f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 74412f080e7Smrj "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr); 74512f080e7Smrj break; 74612f080e7Smrj 74712f080e7Smrj case BTISA: 74812f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 74912f080e7Smrj "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr); 75012f080e7Smrj break; 75112f080e7Smrj 75212f080e7Smrj default: 75312f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 75412f080e7Smrj "space %x offset %x", 75512f080e7Smrj rp->regspec_bustype, rp->regspec_addr); 75612f080e7Smrj break; 75712f080e7Smrj } 75812f080e7Smrj len = strlen(buf); 75912f080e7Smrj } 76012f080e7Smrj for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) { 76112f080e7Smrj int pri; 76212f080e7Smrj 76312f080e7Smrj if (i != 0) { 76412f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 76512f080e7Smrj ","); 76612f080e7Smrj len = strlen(buf); 76712f080e7Smrj } 76812f080e7Smrj pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri); 76912f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 77012f080e7Smrj " sparc ipl %d", pri); 77112f080e7Smrj len = strlen(buf); 77212f080e7Smrj } 77312f080e7Smrj #ifdef DEBUG 77412f080e7Smrj if (f_len + 1 >= REPORTDEV_BUFSIZE) { 77512f080e7Smrj cmn_err(CE_NOTE, "next message is truncated: " 77612f080e7Smrj "printed length 1024, real length %d", f_len); 77712f080e7Smrj } 77812f080e7Smrj #endif /* DEBUG */ 77912f080e7Smrj cmn_err(CE_CONT, "?%s\n", buf); 78012f080e7Smrj kmem_free(buf, REPORTDEV_BUFSIZE); 78112f080e7Smrj return (DDI_SUCCESS); 78212f080e7Smrj } 78312f080e7Smrj 78412f080e7Smrj 78512f080e7Smrj /* 78612f080e7Smrj * ****************** 78712f080e7Smrj * map related code 78812f080e7Smrj * ****************** 78912f080e7Smrj */ 79012f080e7Smrj 79112f080e7Smrj /* 79212f080e7Smrj * rootnex_map() 79312f080e7Smrj * 79412f080e7Smrj */ 79512f080e7Smrj static int 79612f080e7Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset, 79712f080e7Smrj off_t len, caddr_t *vaddrp) 
7987c478bd9Sstevel@tonic-gate { 7997c478bd9Sstevel@tonic-gate struct regspec *rp, tmp_reg; 8007c478bd9Sstevel@tonic-gate ddi_map_req_t mr = *mp; /* Get private copy of request */ 8017c478bd9Sstevel@tonic-gate int error; 8027c478bd9Sstevel@tonic-gate 8037c478bd9Sstevel@tonic-gate mp = &mr; 8047c478bd9Sstevel@tonic-gate 8057c478bd9Sstevel@tonic-gate switch (mp->map_op) { 8067c478bd9Sstevel@tonic-gate case DDI_MO_MAP_LOCKED: 8077c478bd9Sstevel@tonic-gate case DDI_MO_UNMAP: 8087c478bd9Sstevel@tonic-gate case DDI_MO_MAP_HANDLE: 8097c478bd9Sstevel@tonic-gate break; 8107c478bd9Sstevel@tonic-gate default: 8117c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8127c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.", 8137c478bd9Sstevel@tonic-gate mp->map_op); 8147c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8157c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 8167c478bd9Sstevel@tonic-gate } 8177c478bd9Sstevel@tonic-gate 8187c478bd9Sstevel@tonic-gate if (mp->map_flags & DDI_MF_USER_MAPPING) { 8197c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8207c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user."); 8217c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8227c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 8237c478bd9Sstevel@tonic-gate } 8247c478bd9Sstevel@tonic-gate 8257c478bd9Sstevel@tonic-gate /* 8267c478bd9Sstevel@tonic-gate * First, if given an rnumber, convert it to a regspec... 8277c478bd9Sstevel@tonic-gate * (Presumably, this is on behalf of a child of the root node?) 8287c478bd9Sstevel@tonic-gate */ 8297c478bd9Sstevel@tonic-gate 8307c478bd9Sstevel@tonic-gate if (mp->map_type == DDI_MT_RNUMBER) { 8317c478bd9Sstevel@tonic-gate 8327c478bd9Sstevel@tonic-gate int rnumber = mp->map_obj.rnumber; 8337c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8347c478bd9Sstevel@tonic-gate static char *out_of_range = 8357c478bd9Sstevel@tonic-gate "rootnex_map: Out of range rnumber <%d>, device <%s>"; 8367c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8377c478bd9Sstevel@tonic-gate 8387c478bd9Sstevel@tonic-gate rp = i_ddi_rnumber_to_regspec(rdip, rnumber); 8397c478bd9Sstevel@tonic-gate if (rp == NULL) { 8407c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8417c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, out_of_range, rnumber, 8427c478bd9Sstevel@tonic-gate ddi_get_name(rdip)); 8437c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8447c478bd9Sstevel@tonic-gate return (DDI_ME_RNUMBER_RANGE); 8457c478bd9Sstevel@tonic-gate } 8467c478bd9Sstevel@tonic-gate 8477c478bd9Sstevel@tonic-gate /* 8487c478bd9Sstevel@tonic-gate * Convert the given ddi_map_req_t from rnumber to regspec... 8497c478bd9Sstevel@tonic-gate */ 8507c478bd9Sstevel@tonic-gate 8517c478bd9Sstevel@tonic-gate mp->map_type = DDI_MT_REGSPEC; 8527c478bd9Sstevel@tonic-gate mp->map_obj.rp = rp; 8537c478bd9Sstevel@tonic-gate } 8547c478bd9Sstevel@tonic-gate 8557c478bd9Sstevel@tonic-gate /* 8567c478bd9Sstevel@tonic-gate * Adjust offset and length correspnding to called values... 8577c478bd9Sstevel@tonic-gate * XXX: A non-zero length means override the one in the regspec 8587c478bd9Sstevel@tonic-gate * XXX: (regardless of what's in the parent's range?) 
8597c478bd9Sstevel@tonic-gate */ 8607c478bd9Sstevel@tonic-gate 8617c478bd9Sstevel@tonic-gate tmp_reg = *(mp->map_obj.rp); /* Preserve underlying data */ 8627c478bd9Sstevel@tonic-gate rp = mp->map_obj.rp = &tmp_reg; /* Use tmp_reg in request */ 8637c478bd9Sstevel@tonic-gate 8647c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 865843e1988Sjohnlev cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d " 866843e1988Sjohnlev "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 867843e1988Sjohnlev rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset, 868843e1988Sjohnlev len, mp->map_handlep); 8697c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8707c478bd9Sstevel@tonic-gate 8717c478bd9Sstevel@tonic-gate /* 8727c478bd9Sstevel@tonic-gate * I/O or memory mapping: 8737c478bd9Sstevel@tonic-gate * 8747c478bd9Sstevel@tonic-gate * <bustype=0, addr=x, len=x>: memory 8757c478bd9Sstevel@tonic-gate * <bustype=1, addr=x, len=x>: i/o 8767c478bd9Sstevel@tonic-gate * <bustype>1, addr=0, len=x>: x86-compatibility i/o 8777c478bd9Sstevel@tonic-gate */ 8787c478bd9Sstevel@tonic-gate 8797c478bd9Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 8807c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "<%s,%s> invalid register spec" 8817c478bd9Sstevel@tonic-gate " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip), 8827c478bd9Sstevel@tonic-gate ddi_get_name(rdip), rp->regspec_bustype, 8837c478bd9Sstevel@tonic-gate rp->regspec_addr, rp->regspec_size); 8847c478bd9Sstevel@tonic-gate return (DDI_ME_INVAL); 8857c478bd9Sstevel@tonic-gate } 8867c478bd9Sstevel@tonic-gate 8877c478bd9Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) { 8887c478bd9Sstevel@tonic-gate /* 8897c478bd9Sstevel@tonic-gate * compatibility i/o mapping 8907c478bd9Sstevel@tonic-gate */ 8917c478bd9Sstevel@tonic-gate rp->regspec_bustype += (uint_t)offset; 8927c478bd9Sstevel@tonic-gate } else { 8937c478bd9Sstevel@tonic-gate /* 8947c478bd9Sstevel@tonic-gate * Normal memory or i/o mapping 8957c478bd9Sstevel@tonic-gate */ 8967c478bd9Sstevel@tonic-gate rp->regspec_addr += (uint_t)offset; 8977c478bd9Sstevel@tonic-gate } 8987c478bd9Sstevel@tonic-gate 8997c478bd9Sstevel@tonic-gate if (len != 0) 9007c478bd9Sstevel@tonic-gate rp->regspec_size = (uint_t)len; 9017c478bd9Sstevel@tonic-gate 9027c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 903843e1988Sjohnlev cmn_err(CE_CONT, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d " 904843e1988Sjohnlev "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 9057c478bd9Sstevel@tonic-gate rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 9067c478bd9Sstevel@tonic-gate offset, len, mp->map_handlep); 9077c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9087c478bd9Sstevel@tonic-gate 9097c478bd9Sstevel@tonic-gate /* 9107c478bd9Sstevel@tonic-gate * Apply any parent ranges at this level, if applicable. 9117c478bd9Sstevel@tonic-gate * (This is where nexus specific regspec translation takes place. 9127c478bd9Sstevel@tonic-gate * Use of this function is implicit agreement that translation is 9137c478bd9Sstevel@tonic-gate * provided via ddi_apply_range.) 
9147c478bd9Sstevel@tonic-gate */ 9157c478bd9Sstevel@tonic-gate 9167c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 9177c478bd9Sstevel@tonic-gate ddi_map_debug("applying range of parent <%s> to child <%s>...\n", 9187c478bd9Sstevel@tonic-gate ddi_get_name(dip), ddi_get_name(rdip)); 9197c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9207c478bd9Sstevel@tonic-gate 9217c478bd9Sstevel@tonic-gate if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0) 9227c478bd9Sstevel@tonic-gate return (error); 9237c478bd9Sstevel@tonic-gate 9247c478bd9Sstevel@tonic-gate switch (mp->map_op) { 9257c478bd9Sstevel@tonic-gate case DDI_MO_MAP_LOCKED: 9267c478bd9Sstevel@tonic-gate 9277c478bd9Sstevel@tonic-gate /* 9287c478bd9Sstevel@tonic-gate * Set up the locked down kernel mapping to the regspec... 9297c478bd9Sstevel@tonic-gate */ 9307c478bd9Sstevel@tonic-gate 9317c478bd9Sstevel@tonic-gate return (rootnex_map_regspec(mp, vaddrp)); 9327c478bd9Sstevel@tonic-gate 9337c478bd9Sstevel@tonic-gate case DDI_MO_UNMAP: 9347c478bd9Sstevel@tonic-gate 9357c478bd9Sstevel@tonic-gate /* 9367c478bd9Sstevel@tonic-gate * Release mapping... 9377c478bd9Sstevel@tonic-gate */ 9387c478bd9Sstevel@tonic-gate 9397c478bd9Sstevel@tonic-gate return (rootnex_unmap_regspec(mp, vaddrp)); 9407c478bd9Sstevel@tonic-gate 9417c478bd9Sstevel@tonic-gate case DDI_MO_MAP_HANDLE: 9427c478bd9Sstevel@tonic-gate 9437c478bd9Sstevel@tonic-gate return (rootnex_map_handle(mp)); 9447c478bd9Sstevel@tonic-gate 9457c478bd9Sstevel@tonic-gate default: 9467c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 9477c478bd9Sstevel@tonic-gate } 9487c478bd9Sstevel@tonic-gate } 9497c478bd9Sstevel@tonic-gate 9507c478bd9Sstevel@tonic-gate 9517c478bd9Sstevel@tonic-gate /* 95212f080e7Smrj * rootnex_map_fault() 9537c478bd9Sstevel@tonic-gate * 9547c478bd9Sstevel@tonic-gate * fault in mappings for requestors 9557c478bd9Sstevel@tonic-gate */ 9567c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 9577c478bd9Sstevel@tonic-gate static int 95812f080e7Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat, 95912f080e7Smrj struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, 96012f080e7Smrj uint_t lock) 9617c478bd9Sstevel@tonic-gate { 9627c478bd9Sstevel@tonic-gate 9637c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 9647c478bd9Sstevel@tonic-gate ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn); 9657c478bd9Sstevel@tonic-gate ddi_map_debug(" Seg <%s>\n", 9667c478bd9Sstevel@tonic-gate seg->s_ops == &segdev_ops ? "segdev" : 9677c478bd9Sstevel@tonic-gate seg == &kvseg ? "segkmem" : "NONE!"); 9687c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9697c478bd9Sstevel@tonic-gate 9707c478bd9Sstevel@tonic-gate /* 9717c478bd9Sstevel@tonic-gate * This is all terribly broken, but it is a start 9727c478bd9Sstevel@tonic-gate * 9737c478bd9Sstevel@tonic-gate * XXX Note that this test means that segdev_ops 9747c478bd9Sstevel@tonic-gate * must be exported from seg_dev.c. 9757c478bd9Sstevel@tonic-gate * XXX What about devices with their own segment drivers? 9767c478bd9Sstevel@tonic-gate */ 9777c478bd9Sstevel@tonic-gate if (seg->s_ops == &segdev_ops) { 978843e1988Sjohnlev struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 9797c478bd9Sstevel@tonic-gate 9807c478bd9Sstevel@tonic-gate if (hat == NULL) { 9817c478bd9Sstevel@tonic-gate /* 9827c478bd9Sstevel@tonic-gate * This is one plausible interpretation of 9837c478bd9Sstevel@tonic-gate * a null hat i.e. 
use the first hat on the 9847c478bd9Sstevel@tonic-gate * address space hat list which by convention is 9857c478bd9Sstevel@tonic-gate * the hat of the system MMU. At alternative 9867c478bd9Sstevel@tonic-gate * would be to panic .. this might well be better .. 9877c478bd9Sstevel@tonic-gate */ 9887c478bd9Sstevel@tonic-gate ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); 9897c478bd9Sstevel@tonic-gate hat = seg->s_as->a_hat; 9907c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); 9917c478bd9Sstevel@tonic-gate } 9927c478bd9Sstevel@tonic-gate hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr, 9937c478bd9Sstevel@tonic-gate (lock ? HAT_LOAD_LOCK : HAT_LOAD)); 9947c478bd9Sstevel@tonic-gate } else if (seg == &kvseg && dp == NULL) { 9957c478bd9Sstevel@tonic-gate hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot, 9967c478bd9Sstevel@tonic-gate HAT_LOAD_LOCK); 9977c478bd9Sstevel@tonic-gate } else 9987c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 9997c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 10007c478bd9Sstevel@tonic-gate } 10017c478bd9Sstevel@tonic-gate 10027c478bd9Sstevel@tonic-gate 10037c478bd9Sstevel@tonic-gate /* 100412f080e7Smrj * rootnex_map_regspec() 100512f080e7Smrj * we don't support mapping of I/O cards above 4Gb 10067c478bd9Sstevel@tonic-gate */ 10077c478bd9Sstevel@tonic-gate static int 100812f080e7Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 10097c478bd9Sstevel@tonic-gate { 1010843e1988Sjohnlev rootnex_addr_t rbase; 101112f080e7Smrj void *cvaddr; 101212f080e7Smrj uint_t npages, pgoffset; 101312f080e7Smrj struct regspec *rp; 101412f080e7Smrj ddi_acc_hdl_t *hp; 101512f080e7Smrj ddi_acc_impl_t *ap; 101612f080e7Smrj uint_t hat_acc_flags; 1017843e1988Sjohnlev paddr_t pbase; 10187c478bd9Sstevel@tonic-gate 101912f080e7Smrj rp = mp->map_obj.rp; 102012f080e7Smrj hp = mp->map_handlep; 102112f080e7Smrj 102212f080e7Smrj #ifdef DDI_MAP_DEBUG 102312f080e7Smrj ddi_map_debug( 102412f080e7Smrj "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n", 102512f080e7Smrj rp->regspec_bustype, rp->regspec_addr, 102612f080e7Smrj rp->regspec_size, mp->map_handlep); 102712f080e7Smrj #endif /* DDI_MAP_DEBUG */ 10287c478bd9Sstevel@tonic-gate 10297c478bd9Sstevel@tonic-gate /* 103012f080e7Smrj * I/O or memory mapping 103112f080e7Smrj * 103212f080e7Smrj * <bustype=0, addr=x, len=x>: memory 103312f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 103412f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 10357c478bd9Sstevel@tonic-gate */ 103612f080e7Smrj 103712f080e7Smrj if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 103812f080e7Smrj cmn_err(CE_WARN, "rootnex: invalid register spec" 103912f080e7Smrj " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype, 104012f080e7Smrj rp->regspec_addr, rp->regspec_size); 104112f080e7Smrj return (DDI_FAILURE); 10427c478bd9Sstevel@tonic-gate } 104312f080e7Smrj 104412f080e7Smrj if (rp->regspec_bustype != 0) { 10457c478bd9Sstevel@tonic-gate /* 104612f080e7Smrj * I/O space - needs a handle. 
10477c478bd9Sstevel@tonic-gate */ 10487c478bd9Sstevel@tonic-gate if (hp == NULL) { 104912f080e7Smrj return (DDI_FAILURE); 10507c478bd9Sstevel@tonic-gate } 105112f080e7Smrj ap = (ddi_acc_impl_t *)hp->ah_platform_private; 105212f080e7Smrj ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE; 105312f080e7Smrj impl_acc_hdl_init(hp); 10547c478bd9Sstevel@tonic-gate 105512f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 105612f080e7Smrj #ifdef DDI_MAP_DEBUG 1057843e1988Sjohnlev ddi_map_debug("rootnex_map_regspec: mmap() " 1058843e1988Sjohnlev "to I/O space is not supported.\n"); 105912f080e7Smrj #endif /* DDI_MAP_DEBUG */ 106012f080e7Smrj return (DDI_ME_INVAL); 10617c478bd9Sstevel@tonic-gate } else { 10627c478bd9Sstevel@tonic-gate /* 106312f080e7Smrj * 1275-compliant vs. compatibility i/o mapping 10647c478bd9Sstevel@tonic-gate */ 106512f080e7Smrj *vaddrp = 106612f080e7Smrj (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ? 106712f080e7Smrj ((caddr_t)(uintptr_t)rp->regspec_bustype) : 106812f080e7Smrj ((caddr_t)(uintptr_t)rp->regspec_addr); 1069843e1988Sjohnlev #ifdef __xpv 1070843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1071843e1988Sjohnlev hp->ah_pfn = xen_assign_pfn( 1072843e1988Sjohnlev mmu_btop((ulong_t)rp->regspec_addr & 1073843e1988Sjohnlev MMU_PAGEMASK)); 1074843e1988Sjohnlev } else { 1075843e1988Sjohnlev hp->ah_pfn = mmu_btop( 1076843e1988Sjohnlev (ulong_t)rp->regspec_addr & MMU_PAGEMASK); 1077843e1988Sjohnlev } 1078843e1988Sjohnlev #else 107900d0963fSdilpreet hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr & 1080843e1988Sjohnlev MMU_PAGEMASK); 1081843e1988Sjohnlev #endif 108200d0963fSdilpreet hp->ah_pnum = mmu_btopr(rp->regspec_size + 108300d0963fSdilpreet (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET); 10847c478bd9Sstevel@tonic-gate } 10857c478bd9Sstevel@tonic-gate 108612f080e7Smrj #ifdef DDI_MAP_DEBUG 108712f080e7Smrj ddi_map_debug( 108812f080e7Smrj "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n", 108912f080e7Smrj rp->regspec_size, *vaddrp); 109012f080e7Smrj #endif /* DDI_MAP_DEBUG */ 109112f080e7Smrj return (DDI_SUCCESS); 10927c478bd9Sstevel@tonic-gate } 10937c478bd9Sstevel@tonic-gate 10947c478bd9Sstevel@tonic-gate /* 109512f080e7Smrj * Memory space 109612f080e7Smrj */ 109712f080e7Smrj 109812f080e7Smrj if (hp != NULL) { 109912f080e7Smrj /* 110012f080e7Smrj * hat layer ignores 110112f080e7Smrj * hp->ah_acc.devacc_attr_endian_flags. 
110212f080e7Smrj */ 110312f080e7Smrj switch (hp->ah_acc.devacc_attr_dataorder) { 110412f080e7Smrj case DDI_STRICTORDER_ACC: 110512f080e7Smrj hat_acc_flags = HAT_STRICTORDER; 110612f080e7Smrj break; 110712f080e7Smrj case DDI_UNORDERED_OK_ACC: 110812f080e7Smrj hat_acc_flags = HAT_UNORDERED_OK; 110912f080e7Smrj break; 111012f080e7Smrj case DDI_MERGING_OK_ACC: 111112f080e7Smrj hat_acc_flags = HAT_MERGING_OK; 111212f080e7Smrj break; 111312f080e7Smrj case DDI_LOADCACHING_OK_ACC: 111412f080e7Smrj hat_acc_flags = HAT_LOADCACHING_OK; 111512f080e7Smrj break; 111612f080e7Smrj case DDI_STORECACHING_OK_ACC: 111712f080e7Smrj hat_acc_flags = HAT_STORECACHING_OK; 111812f080e7Smrj break; 111912f080e7Smrj } 112012f080e7Smrj ap = (ddi_acc_impl_t *)hp->ah_platform_private; 112112f080e7Smrj ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR; 112212f080e7Smrj impl_acc_hdl_init(hp); 112312f080e7Smrj hp->ah_hat_flags = hat_acc_flags; 112412f080e7Smrj } else { 112512f080e7Smrj hat_acc_flags = HAT_STRICTORDER; 112612f080e7Smrj } 112712f080e7Smrj 1128843e1988Sjohnlev rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK); 1129843e1988Sjohnlev #ifdef __xpv 1130843e1988Sjohnlev /* 1131843e1988Sjohnlev * If we're dom0, we're using a real device so we need to translate 1132843e1988Sjohnlev * the MA to a PA. 1133843e1988Sjohnlev */ 1134843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1135843e1988Sjohnlev pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))); 1136843e1988Sjohnlev } else { 1137843e1988Sjohnlev pbase = rbase; 1138843e1988Sjohnlev } 1139843e1988Sjohnlev #else 1140843e1988Sjohnlev pbase = rbase; 1141843e1988Sjohnlev #endif 1142843e1988Sjohnlev pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 114312f080e7Smrj 114412f080e7Smrj if (rp->regspec_size == 0) { 114512f080e7Smrj #ifdef DDI_MAP_DEBUG 114612f080e7Smrj ddi_map_debug("rootnex_map_regspec: zero regspec_size\n"); 114712f080e7Smrj #endif /* DDI_MAP_DEBUG */ 114812f080e7Smrj return (DDI_ME_INVAL); 114912f080e7Smrj } 115012f080e7Smrj 115112f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1152843e1988Sjohnlev /* extra cast to make gcc happy */ 1153843e1988Sjohnlev *vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase)); 115412f080e7Smrj } else { 115512f080e7Smrj npages = mmu_btopr(rp->regspec_size + pgoffset); 115612f080e7Smrj 115712f080e7Smrj #ifdef DDI_MAP_DEBUG 1158843e1988Sjohnlev ddi_map_debug("rootnex_map_regspec: Mapping %d pages " 1159843e1988Sjohnlev "physical %llx", npages, pbase); 116012f080e7Smrj #endif /* DDI_MAP_DEBUG */ 116112f080e7Smrj 116212f080e7Smrj cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP); 116312f080e7Smrj if (cvaddr == NULL) 116412f080e7Smrj return (DDI_ME_NORESOURCES); 116512f080e7Smrj 116612f080e7Smrj /* 116712f080e7Smrj * Now map in the pages we've allocated... 
116812f080e7Smrj */ 1169843e1988Sjohnlev hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), 1170843e1988Sjohnlev mmu_btop(pbase), mp->map_prot | hat_acc_flags, 1171843e1988Sjohnlev HAT_LOAD_LOCK); 117212f080e7Smrj *vaddrp = (caddr_t)cvaddr + pgoffset; 117300d0963fSdilpreet 117400d0963fSdilpreet /* save away pfn and npages for FMA */ 117500d0963fSdilpreet hp = mp->map_handlep; 117600d0963fSdilpreet if (hp) { 1177843e1988Sjohnlev hp->ah_pfn = mmu_btop(pbase); 117800d0963fSdilpreet hp->ah_pnum = npages; 117900d0963fSdilpreet } 118012f080e7Smrj } 118112f080e7Smrj 118212f080e7Smrj #ifdef DDI_MAP_DEBUG 118312f080e7Smrj ddi_map_debug("at virtual 0x%x\n", *vaddrp); 118412f080e7Smrj #endif /* DDI_MAP_DEBUG */ 118512f080e7Smrj return (DDI_SUCCESS); 118612f080e7Smrj } 118712f080e7Smrj 118812f080e7Smrj 118912f080e7Smrj /* 119012f080e7Smrj * rootnex_unmap_regspec() 11917c478bd9Sstevel@tonic-gate * 11927c478bd9Sstevel@tonic-gate */ 11937c478bd9Sstevel@tonic-gate static int 119412f080e7Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 11957c478bd9Sstevel@tonic-gate { 119612f080e7Smrj caddr_t addr = (caddr_t)*vaddrp; 119712f080e7Smrj uint_t npages, pgoffset; 119812f080e7Smrj struct regspec *rp; 11997c478bd9Sstevel@tonic-gate 120012f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) 120112f080e7Smrj return (0); 12027c478bd9Sstevel@tonic-gate 120312f080e7Smrj rp = mp->map_obj.rp; 12047c478bd9Sstevel@tonic-gate 120512f080e7Smrj if (rp->regspec_size == 0) { 120612f080e7Smrj #ifdef DDI_MAP_DEBUG 120712f080e7Smrj ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n"); 120812f080e7Smrj #endif /* DDI_MAP_DEBUG */ 120912f080e7Smrj return (DDI_ME_INVAL); 12107c478bd9Sstevel@tonic-gate } 12117c478bd9Sstevel@tonic-gate 12127c478bd9Sstevel@tonic-gate /* 121312f080e7Smrj * I/O or memory mapping: 12147c478bd9Sstevel@tonic-gate * 121512f080e7Smrj * <bustype=0, addr=x, len=x>: memory 121612f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 121712f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 12187c478bd9Sstevel@tonic-gate */ 121912f080e7Smrj if (rp->regspec_bustype != 0) { 12207c478bd9Sstevel@tonic-gate /* 122112f080e7Smrj * This is I/O space, which requires no particular 122212f080e7Smrj * processing on unmap since it isn't mapped in the 122312f080e7Smrj * first place. 
12247c478bd9Sstevel@tonic-gate */ 12257c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12267c478bd9Sstevel@tonic-gate } 12277c478bd9Sstevel@tonic-gate 12287c478bd9Sstevel@tonic-gate /* 122912f080e7Smrj * Memory space 12307c478bd9Sstevel@tonic-gate */ 123112f080e7Smrj pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET; 123212f080e7Smrj npages = mmu_btopr(rp->regspec_size + pgoffset); 123312f080e7Smrj hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK); 123412f080e7Smrj device_arena_free(addr - pgoffset, ptob(npages)); 12357c478bd9Sstevel@tonic-gate 12367c478bd9Sstevel@tonic-gate /* 123712f080e7Smrj * Destroy the pointer - the mapping has logically gone 12387c478bd9Sstevel@tonic-gate */ 123912f080e7Smrj *vaddrp = NULL; 12407c478bd9Sstevel@tonic-gate 12417c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12427c478bd9Sstevel@tonic-gate } 12437c478bd9Sstevel@tonic-gate 124412f080e7Smrj 124512f080e7Smrj /* 124612f080e7Smrj * rootnex_map_handle() 124712f080e7Smrj * 124812f080e7Smrj */ 12497c478bd9Sstevel@tonic-gate static int 125012f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp) 12517c478bd9Sstevel@tonic-gate { 1252843e1988Sjohnlev rootnex_addr_t rbase; 125312f080e7Smrj ddi_acc_hdl_t *hp; 125412f080e7Smrj uint_t pgoffset; 125512f080e7Smrj struct regspec *rp; 1256843e1988Sjohnlev paddr_t pbase; 12577c478bd9Sstevel@tonic-gate 125812f080e7Smrj rp = mp->map_obj.rp; 12597c478bd9Sstevel@tonic-gate 126012f080e7Smrj #ifdef DDI_MAP_DEBUG 126112f080e7Smrj ddi_map_debug( 126212f080e7Smrj "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n", 126312f080e7Smrj rp->regspec_bustype, rp->regspec_addr, 126412f080e7Smrj rp->regspec_size, mp->map_handlep); 126512f080e7Smrj #endif /* DDI_MAP_DEBUG */ 12667c478bd9Sstevel@tonic-gate 12677c478bd9Sstevel@tonic-gate /* 126812f080e7Smrj * I/O or memory mapping: 126912f080e7Smrj * 127012f080e7Smrj * <bustype=0, addr=x, len=x>: memory 127112f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 127212f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 12737c478bd9Sstevel@tonic-gate */ 127412f080e7Smrj if (rp->regspec_bustype != 0) { 127512f080e7Smrj /* 127612f080e7Smrj * This refers to I/O space, and we don't support "mapping" 127712f080e7Smrj * I/O space to a user. 127812f080e7Smrj */ 12797c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12807c478bd9Sstevel@tonic-gate } 12817c478bd9Sstevel@tonic-gate 12827c478bd9Sstevel@tonic-gate /* 128312f080e7Smrj * Set up the hat_flags for the mapping. 
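 *
 * These attributes come from the ddi_device_acc_attr_t the driver passed
 * in when it set up the access handle. Purely as an illustration (the
 * "xx_" name below is hypothetical, not from this file), a driver
 * typically declares the version, endian flags and data order like:
 *
 *	static ddi_device_acc_attr_t xx_acc_attr = {
 *		DDI_DEVICE_ATTR_V0,
 *		DDI_STRUCTURE_LE_ACC,
 *		DDI_STRICTORDER_ACC
 *	};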
12847c478bd9Sstevel@tonic-gate */ 128512f080e7Smrj hp = mp->map_handlep; 12867c478bd9Sstevel@tonic-gate 128712f080e7Smrj switch (hp->ah_acc.devacc_attr_endian_flags) { 128812f080e7Smrj case DDI_NEVERSWAP_ACC: 128912f080e7Smrj hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER; 12907c478bd9Sstevel@tonic-gate break; 129112f080e7Smrj case DDI_STRUCTURE_LE_ACC: 129212f080e7Smrj hp->ah_hat_flags = HAT_STRUCTURE_LE; 12937c478bd9Sstevel@tonic-gate break; 129412f080e7Smrj case DDI_STRUCTURE_BE_ACC: 12957c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12967c478bd9Sstevel@tonic-gate default: 129712f080e7Smrj return (DDI_REGS_ACC_CONFLICT); 12987c478bd9Sstevel@tonic-gate } 12997c478bd9Sstevel@tonic-gate 130012f080e7Smrj switch (hp->ah_acc.devacc_attr_dataorder) { 130112f080e7Smrj case DDI_STRICTORDER_ACC: 13027c478bd9Sstevel@tonic-gate break; 130312f080e7Smrj case DDI_UNORDERED_OK_ACC: 130412f080e7Smrj hp->ah_hat_flags |= HAT_UNORDERED_OK; 13057c478bd9Sstevel@tonic-gate break; 130612f080e7Smrj case DDI_MERGING_OK_ACC: 130712f080e7Smrj hp->ah_hat_flags |= HAT_MERGING_OK; 13087c478bd9Sstevel@tonic-gate break; 130912f080e7Smrj case DDI_LOADCACHING_OK_ACC: 131012f080e7Smrj hp->ah_hat_flags |= HAT_LOADCACHING_OK; 131112f080e7Smrj break; 131212f080e7Smrj case DDI_STORECACHING_OK_ACC: 131312f080e7Smrj hp->ah_hat_flags |= HAT_STORECACHING_OK; 131412f080e7Smrj break; 13157c478bd9Sstevel@tonic-gate default: 13167c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13177c478bd9Sstevel@tonic-gate } 13187c478bd9Sstevel@tonic-gate 1319843e1988Sjohnlev rbase = (rootnex_addr_t)rp->regspec_addr & 1320843e1988Sjohnlev (~(rootnex_addr_t)MMU_PAGEOFFSET); 1321843e1988Sjohnlev pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 13227c478bd9Sstevel@tonic-gate 132312f080e7Smrj if (rp->regspec_size == 0) 132412f080e7Smrj return (DDI_ME_INVAL); 13257c478bd9Sstevel@tonic-gate 1326843e1988Sjohnlev #ifdef __xpv 1327843e1988Sjohnlev /* 1328843e1988Sjohnlev * If we're dom0, we're using a real device so we need to translate 1329843e1988Sjohnlev * the MA to a PA. 
1330843e1988Sjohnlev */ 1331843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1332843e1988Sjohnlev pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) | 1333843e1988Sjohnlev (rbase & MMU_PAGEOFFSET); 1334843e1988Sjohnlev } else { 1335843e1988Sjohnlev pbase = rbase; 1336843e1988Sjohnlev } 1337843e1988Sjohnlev #else 1338843e1988Sjohnlev pbase = rbase; 1339843e1988Sjohnlev #endif 1340843e1988Sjohnlev 1341843e1988Sjohnlev hp->ah_pfn = mmu_btop(pbase); 134212f080e7Smrj hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset); 13437c478bd9Sstevel@tonic-gate 13447c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 13457c478bd9Sstevel@tonic-gate } 13467c478bd9Sstevel@tonic-gate 134712f080e7Smrj 134812f080e7Smrj 13497c478bd9Sstevel@tonic-gate /* 135012f080e7Smrj * ************************ 135112f080e7Smrj * interrupt related code 135212f080e7Smrj * ************************ 13537c478bd9Sstevel@tonic-gate */ 13547c478bd9Sstevel@tonic-gate 13557c478bd9Sstevel@tonic-gate /* 135612f080e7Smrj * rootnex_intr_ops() 13577c478bd9Sstevel@tonic-gate * bus_intr_op() function for interrupt support 13587c478bd9Sstevel@tonic-gate */ 13597c478bd9Sstevel@tonic-gate /* ARGSUSED */ 13607c478bd9Sstevel@tonic-gate static int 13617c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op, 13627c478bd9Sstevel@tonic-gate ddi_intr_handle_impl_t *hdlp, void *result) 13637c478bd9Sstevel@tonic-gate { 13647c478bd9Sstevel@tonic-gate struct intrspec *ispec; 13657c478bd9Sstevel@tonic-gate struct ddi_parent_private_data *pdp; 13667c478bd9Sstevel@tonic-gate 13677c478bd9Sstevel@tonic-gate DDI_INTR_NEXDBG((CE_CONT, 13687c478bd9Sstevel@tonic-gate "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n", 13697c478bd9Sstevel@tonic-gate (void *)pdip, (void *)rdip, intr_op, (void *)hdlp)); 13707c478bd9Sstevel@tonic-gate 13717c478bd9Sstevel@tonic-gate /* Process the interrupt operation */ 13727c478bd9Sstevel@tonic-gate switch (intr_op) { 13737c478bd9Sstevel@tonic-gate case DDI_INTROP_GETCAP: 13747c478bd9Sstevel@tonic-gate /* First check with pcplusmp */ 13757c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 13767c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13777c478bd9Sstevel@tonic-gate 13787c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) { 13797c478bd9Sstevel@tonic-gate *(int *)result = 0; 13807c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13817c478bd9Sstevel@tonic-gate } 13827c478bd9Sstevel@tonic-gate break; 13837c478bd9Sstevel@tonic-gate case DDI_INTROP_SETCAP: 13847c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 13857c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13867c478bd9Sstevel@tonic-gate 13877c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) 13887c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13897c478bd9Sstevel@tonic-gate break; 13907c478bd9Sstevel@tonic-gate case DDI_INTROP_ALLOC: 13917c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 13927c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13937c478bd9Sstevel@tonic-gate hdlp->ih_pri = ispec->intrspec_pri; 13947c478bd9Sstevel@tonic-gate *(int *)result = hdlp->ih_scratch1; 13957c478bd9Sstevel@tonic-gate break; 13967c478bd9Sstevel@tonic-gate case DDI_INTROP_FREE: 13977c478bd9Sstevel@tonic-gate pdp = ddi_get_parent_data(rdip); 13987c478bd9Sstevel@tonic-gate /* 13997c478bd9Sstevel@tonic-gate * Special case for 'pcic' driver' only. 
14007c478bd9Sstevel@tonic-gate * If an intrspec was created for it, clean it up here 14017c478bd9Sstevel@tonic-gate * See detailed comments on this in the function 14027c478bd9Sstevel@tonic-gate * rootnex_get_ispec(). 14037c478bd9Sstevel@tonic-gate */ 14047c478bd9Sstevel@tonic-gate if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 14057c478bd9Sstevel@tonic-gate kmem_free(pdp->par_intr, sizeof (struct intrspec) * 14067c478bd9Sstevel@tonic-gate pdp->par_nintr); 14077c478bd9Sstevel@tonic-gate /* 14087c478bd9Sstevel@tonic-gate * Set it to zero; so that 14097c478bd9Sstevel@tonic-gate * DDI framework doesn't free it again 14107c478bd9Sstevel@tonic-gate */ 14117c478bd9Sstevel@tonic-gate pdp->par_intr = NULL; 14127c478bd9Sstevel@tonic-gate pdp->par_nintr = 0; 14137c478bd9Sstevel@tonic-gate } 14147c478bd9Sstevel@tonic-gate break; 14157c478bd9Sstevel@tonic-gate case DDI_INTROP_GETPRI: 14167c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14177c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14187c478bd9Sstevel@tonic-gate *(int *)result = ispec->intrspec_pri; 14197c478bd9Sstevel@tonic-gate break; 14207c478bd9Sstevel@tonic-gate case DDI_INTROP_SETPRI: 14217c478bd9Sstevel@tonic-gate /* Validate the interrupt priority passed to us */ 14227c478bd9Sstevel@tonic-gate if (*(int *)result > LOCK_LEVEL) 14237c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14247c478bd9Sstevel@tonic-gate 14257c478bd9Sstevel@tonic-gate /* Ensure that PSM is all initialized and ispec is ok */ 14267c478bd9Sstevel@tonic-gate if ((psm_intr_ops == NULL) || 14277c478bd9Sstevel@tonic-gate ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)) 14287c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14297c478bd9Sstevel@tonic-gate 14307c478bd9Sstevel@tonic-gate /* Change the priority */ 14317c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) == 14327c478bd9Sstevel@tonic-gate PSM_FAILURE) 14337c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14347c478bd9Sstevel@tonic-gate 14357c478bd9Sstevel@tonic-gate /* update the ispec with the new priority */ 14367c478bd9Sstevel@tonic-gate ispec->intrspec_pri = *(int *)result; 14377c478bd9Sstevel@tonic-gate break; 14387c478bd9Sstevel@tonic-gate case DDI_INTROP_ADDISR: 14397c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14407c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14417c478bd9Sstevel@tonic-gate ispec->intrspec_func = hdlp->ih_cb_func; 14427c478bd9Sstevel@tonic-gate break; 14437c478bd9Sstevel@tonic-gate case DDI_INTROP_REMISR: 14447c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14457c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14467c478bd9Sstevel@tonic-gate ispec->intrspec_func = (uint_t (*)()) 0; 14477c478bd9Sstevel@tonic-gate break; 14487c478bd9Sstevel@tonic-gate case DDI_INTROP_ENABLE: 14497c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14507c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14517c478bd9Sstevel@tonic-gate 14527c478bd9Sstevel@tonic-gate /* Call psmi to translate irq with the dip */ 14537c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14547c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14557c478bd9Sstevel@tonic-gate 14567a364d25Sschwartz ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 145786a9c507SGuoli Shu if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, 145886a9c507SGuoli Shu (int *)&hdlp->ih_vector) == PSM_FAILURE) 145986a9c507SGuoli Shu return 
(DDI_FAILURE);
14607c478bd9Sstevel@tonic-gate
14617c478bd9Sstevel@tonic-gate /* Add the interrupt handler */
14627c478bd9Sstevel@tonic-gate if (!add_avintr((void *)hdlp, ispec->intrspec_pri,
14637c478bd9Sstevel@tonic-gate hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector,
14647a364d25Sschwartz hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip))
14657c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14667c478bd9Sstevel@tonic-gate break;
14677c478bd9Sstevel@tonic-gate case DDI_INTROP_DISABLE:
14687c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)
14697c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14707c478bd9Sstevel@tonic-gate
14717c478bd9Sstevel@tonic-gate /* Call psm_ops() to translate irq with the dip */
14727c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL)
14737c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14747c478bd9Sstevel@tonic-gate
14757a364d25Sschwartz ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
14767c478bd9Sstevel@tonic-gate (void) (*psm_intr_ops)(rdip, hdlp,
14777c478bd9Sstevel@tonic-gate PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector);
14787c478bd9Sstevel@tonic-gate
14797c478bd9Sstevel@tonic-gate /* Remove the interrupt handler */
14807c478bd9Sstevel@tonic-gate rem_avintr((void *)hdlp, ispec->intrspec_pri,
14817c478bd9Sstevel@tonic-gate hdlp->ih_cb_func, hdlp->ih_vector);
14827c478bd9Sstevel@tonic-gate break;
14837c478bd9Sstevel@tonic-gate case DDI_INTROP_SETMASK:
14847c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL)
14857c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14867c478bd9Sstevel@tonic-gate
14877c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL))
14887c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14897c478bd9Sstevel@tonic-gate break;
14907c478bd9Sstevel@tonic-gate case DDI_INTROP_CLRMASK:
14917c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL)
14927c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14937c478bd9Sstevel@tonic-gate
14947c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL))
14957c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
14967c478bd9Sstevel@tonic-gate break;
14977c478bd9Sstevel@tonic-gate case DDI_INTROP_GETPENDING:
14987c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL)
14997c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
15007c478bd9Sstevel@tonic-gate
15017c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING,
15027c478bd9Sstevel@tonic-gate result)) {
15037c478bd9Sstevel@tonic-gate *(int *)result = 0;
15047c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
15057c478bd9Sstevel@tonic-gate }
15067c478bd9Sstevel@tonic-gate break;
1507a54f81fbSanish case DDI_INTROP_NAVAIL:
15087c478bd9Sstevel@tonic-gate case DDI_INTROP_NINTRS:
1509a54f81fbSanish *(int *)result = i_ddi_get_intx_nintrs(rdip);
1510a54f81fbSanish if (*(int *)result == 0) {
15117c478bd9Sstevel@tonic-gate /*
15127c478bd9Sstevel@tonic-gate * Special case for 'pcic' driver only. This driver
15137c478bd9Sstevel@tonic-gate * is a child of 'isa' and 'rootnex' drivers.
15147c478bd9Sstevel@tonic-gate *
15157c478bd9Sstevel@tonic-gate * See detailed comments on this in the function
15167c478bd9Sstevel@tonic-gate * rootnex_get_ispec().
15177c478bd9Sstevel@tonic-gate *
15187c478bd9Sstevel@tonic-gate * Children of 'pcic' send 'NINTRS' request all the
15197c478bd9Sstevel@tonic-gate * way to rootnex driver. But, the 'pdp->par_nintr'
15207c478bd9Sstevel@tonic-gate * field may not be initialized.
So, we fake it here
15217c478bd9Sstevel@tonic-gate * to return 1 (a la what PCMCIA nexus does).
15227c478bd9Sstevel@tonic-gate */
15237c478bd9Sstevel@tonic-gate if (strcmp(ddi_get_name(rdip), "pcic") == 0)
15247c478bd9Sstevel@tonic-gate *(int *)result = 1;
1525a54f81fbSanish else
1526a54f81fbSanish return (DDI_FAILURE);
15277c478bd9Sstevel@tonic-gate }
15287c478bd9Sstevel@tonic-gate break;
15297c478bd9Sstevel@tonic-gate case DDI_INTROP_SUPPORTED_TYPES:
1530a54f81fbSanish *(int *)result = DDI_INTR_TYPE_FIXED; /* Always ... */
15317c478bd9Sstevel@tonic-gate break;
15327c478bd9Sstevel@tonic-gate default:
15337c478bd9Sstevel@tonic-gate return (DDI_FAILURE);
15347c478bd9Sstevel@tonic-gate }
15357c478bd9Sstevel@tonic-gate
15367c478bd9Sstevel@tonic-gate return (DDI_SUCCESS);
15377c478bd9Sstevel@tonic-gate }
15387c478bd9Sstevel@tonic-gate
15397c478bd9Sstevel@tonic-gate
15407c478bd9Sstevel@tonic-gate /*
154112f080e7Smrj * rootnex_get_ispec()
154212f080e7Smrj * convert an interrupt number to an interrupt specification.
154312f080e7Smrj * The interrupt number determines which interrupt spec will be
154412f080e7Smrj * returned if more than one exists.
154512f080e7Smrj *
154612f080e7Smrj * Look into the parent private data area of the 'rdip' to find out
154712f080e7Smrj * the interrupt specification. First check to make sure there is
154812f080e7Smrj * one that matches "inumber" and then return a pointer to it.
154912f080e7Smrj *
155012f080e7Smrj * Return NULL if one could not be found.
155112f080e7Smrj *
155212f080e7Smrj * NOTE: This is needed for rootnex_intr_ops()
15537c478bd9Sstevel@tonic-gate */
155412f080e7Smrj static struct intrspec *
155512f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum)
15567c478bd9Sstevel@tonic-gate {
155712f080e7Smrj struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip);
15587c478bd9Sstevel@tonic-gate
15597c478bd9Sstevel@tonic-gate /*
156012f080e7Smrj * Special case handling for drivers that provide their own
156112f080e7Smrj * intrspec structures instead of relying on the DDI framework.
156212f080e7Smrj *
156312f080e7Smrj * A broken hardware driver in ON could potentially provide its
156412f080e7Smrj * own intrspec structure, instead of relying on the hardware.
156512f080e7Smrj * If these drivers are children of 'rootnex' then we need to
156612f080e7Smrj * continue to provide backward compatibility to them here.
156712f080e7Smrj *
156812f080e7Smrj * Following check is a special case for 'pcic' driver which
156912f080e7Smrj * was found to have broken hardware and provides its own intrspec.
157012f080e7Smrj *
157112f080e7Smrj * Verbatim comments from this driver are shown here:
157212f080e7Smrj * "Don't use the ddi_add_intr since we don't have a
157312f080e7Smrj * default intrspec in all cases."
157412f080e7Smrj *
157512f080e7Smrj * Since an 'ispec' may not always be created for it,
157612f080e7Smrj * check for that and create one if needed.
157712f080e7Smrj *
157812f080e7Smrj * NOTE: Currently 'pcic' is the only driver found to do this.
15797c478bd9Sstevel@tonic-gate */ 158012f080e7Smrj if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 158112f080e7Smrj pdp->par_nintr = 1; 158212f080e7Smrj pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) * 158312f080e7Smrj pdp->par_nintr, KM_SLEEP); 158412f080e7Smrj } 158512f080e7Smrj 158612f080e7Smrj /* Validate the interrupt number */ 158712f080e7Smrj if (inum >= pdp->par_nintr) 158812f080e7Smrj return (NULL); 158912f080e7Smrj 159012f080e7Smrj /* Get the interrupt structure pointer and return that */ 159112f080e7Smrj return ((struct intrspec *)&pdp->par_intr[inum]); 159212f080e7Smrj } 159312f080e7Smrj 159412f080e7Smrj 159512f080e7Smrj /* 159612f080e7Smrj * ****************** 159712f080e7Smrj * dma related code 159812f080e7Smrj * ****************** 159912f080e7Smrj */ 160012f080e7Smrj 160112f080e7Smrj /*ARGSUSED*/ 160212f080e7Smrj static int 160320906b23SVikram Hegde rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 160420906b23SVikram Hegde ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 160520906b23SVikram Hegde ddi_dma_handle_t *handlep) 160612f080e7Smrj { 160712f080e7Smrj uint64_t maxsegmentsize_ll; 160812f080e7Smrj uint_t maxsegmentsize; 160912f080e7Smrj ddi_dma_impl_t *hp; 161012f080e7Smrj rootnex_dma_t *dma; 161112f080e7Smrj uint64_t count_max; 161212f080e7Smrj uint64_t seg; 161312f080e7Smrj int kmflag; 161412f080e7Smrj int e; 161512f080e7Smrj 161612f080e7Smrj 161712f080e7Smrj /* convert our sleep flags */ 161812f080e7Smrj if (waitfp == DDI_DMA_SLEEP) { 161912f080e7Smrj kmflag = KM_SLEEP; 162012f080e7Smrj } else { 162112f080e7Smrj kmflag = KM_NOSLEEP; 162212f080e7Smrj } 162312f080e7Smrj 162412f080e7Smrj /* 162512f080e7Smrj * We try to do only one memory allocation here. We'll do a little 162612f080e7Smrj * pointer manipulation later. If the bind ends up taking more than 162712f080e7Smrj * our prealloc's space, we'll have to allocate more memory in the 162812f080e7Smrj * bind operation. Not great, but much better than before and the 162912f080e7Smrj * best we can do with the current bind interfaces. 
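 *
 * Rough sketch of that single allocation (alignment per the +0x7/&~0x7
 * math below; not to scale):
 *
 *	+----------------+---------------+---------------------------+
 *	| ddi_dma_impl_t | rootnex_dma_t | dp_prealloc_buffer        |
 *	+----------------+---------------+---------------------------+
 *	^ hp             ^ dmai_private    (pre-allocated cookie space)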
163012f080e7Smrj */ 163112f080e7Smrj hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag); 163212f080e7Smrj if (hp == NULL) { 163312f080e7Smrj if (waitfp != DDI_DMA_DONTWAIT) { 163412f080e7Smrj ddi_set_callback(waitfp, arg, 163512f080e7Smrj &rootnex_state->r_dvma_call_list_id); 163612f080e7Smrj } 163712f080e7Smrj return (DDI_DMA_NORESOURCES); 163812f080e7Smrj } 163912f080e7Smrj 164012f080e7Smrj /* Do our pointer manipulation now, align the structures */ 164112f080e7Smrj hp->dmai_private = (void *)(((uintptr_t)hp + 164212f080e7Smrj (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7); 164312f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 164412f080e7Smrj dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma + 164512f080e7Smrj sizeof (rootnex_dma_t) + 0x7) & ~0x7); 164612f080e7Smrj 164712f080e7Smrj /* setup the handle */ 164812f080e7Smrj rootnex_clean_dmahdl(hp); 164912f080e7Smrj dma->dp_dip = rdip; 165012f080e7Smrj dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo; 165112f080e7Smrj dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi; 165212f080e7Smrj hp->dmai_minxfer = attr->dma_attr_minxfer; 165312f080e7Smrj hp->dmai_burstsizes = attr->dma_attr_burstsizes; 165412f080e7Smrj hp->dmai_rdip = rdip; 165512f080e7Smrj hp->dmai_attr = *attr; 165612f080e7Smrj 165712f080e7Smrj /* we don't need to worry about the SPL since we do a tryenter */ 165812f080e7Smrj mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL); 165912f080e7Smrj 166012f080e7Smrj /* 166112f080e7Smrj * Figure out our maximum segment size. If the segment size is greater 166212f080e7Smrj * than 4G, we will limit it to (4G - 1) since the max size of a dma 166312f080e7Smrj * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and 166412f080e7Smrj * dma_attr_count_max are size-1 type values. 166512f080e7Smrj * 166612f080e7Smrj * Maximum segment size is the largest physically contiguous chunk of 166712f080e7Smrj * memory that we can return from a bind (i.e. the maximum size of a 166812f080e7Smrj * single cookie). 166912f080e7Smrj */ 167012f080e7Smrj 167112f080e7Smrj /* handle the rollover cases */ 167212f080e7Smrj seg = attr->dma_attr_seg + 1; 167312f080e7Smrj if (seg < attr->dma_attr_seg) { 167412f080e7Smrj seg = attr->dma_attr_seg; 167512f080e7Smrj } 167612f080e7Smrj count_max = attr->dma_attr_count_max + 1; 167712f080e7Smrj if (count_max < attr->dma_attr_count_max) { 167812f080e7Smrj count_max = attr->dma_attr_count_max; 167912f080e7Smrj } 168012f080e7Smrj 168112f080e7Smrj /* 168212f080e7Smrj * granularity may or may not be a power of two. If it isn't, we can't 168312f080e7Smrj * use a simple mask. 168412f080e7Smrj */ 168512f080e7Smrj if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) { 168612f080e7Smrj dma->dp_granularity_power_2 = B_FALSE; 168712f080e7Smrj } else { 168812f080e7Smrj dma->dp_granularity_power_2 = B_TRUE; 168912f080e7Smrj } 169012f080e7Smrj 169112f080e7Smrj /* 169212f080e7Smrj * maxxfer should be a whole multiple of granularity. If we're going to 169312f080e7Smrj * break up a window because we're greater than maxxfer, we might as 169412f080e7Smrj * well make sure it's maxxfer is a whole multiple so we don't have to 169512f080e7Smrj * worry about triming the window later on for this case. 
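 * For example (purely hypothetical attribute values), dma_attr_maxxfer =
 * 0x10000 with a non-power-of-two dma_attr_granular = 0x600 would leave
 * dp_maxxfer = 0x10000 - (0x10000 % 0x600) = 0xFC00.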
169612f080e7Smrj */ 169712f080e7Smrj if (attr->dma_attr_granular > 1) { 169812f080e7Smrj if (dma->dp_granularity_power_2) { 169912f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer - 170012f080e7Smrj (attr->dma_attr_maxxfer & 170112f080e7Smrj (attr->dma_attr_granular - 1)); 170212f080e7Smrj } else { 170312f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer - 170412f080e7Smrj (attr->dma_attr_maxxfer % attr->dma_attr_granular); 170512f080e7Smrj } 170612f080e7Smrj } else { 170712f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer; 170812f080e7Smrj } 170912f080e7Smrj 171012f080e7Smrj maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer); 171112f080e7Smrj maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max); 171212f080e7Smrj if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) { 171312f080e7Smrj maxsegmentsize = 0xFFFFFFFF; 171412f080e7Smrj } else { 171512f080e7Smrj maxsegmentsize = maxsegmentsize_ll; 171612f080e7Smrj } 171712f080e7Smrj dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize; 171812f080e7Smrj dma->dp_sglinfo.si_segmask = attr->dma_attr_seg; 1719*07c6692fSMark Johnson dma->dp_sglinfo.si_flags = attr->dma_attr_flags; 172012f080e7Smrj 172112f080e7Smrj /* check the ddi_dma_attr arg to make sure it makes a little sense */ 172212f080e7Smrj if (rootnex_alloc_check_parms) { 172312f080e7Smrj e = rootnex_valid_alloc_parms(attr, maxsegmentsize); 172412f080e7Smrj if (e != DDI_SUCCESS) { 172512f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]); 172612f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, 172712f080e7Smrj (ddi_dma_handle_t)hp); 172812f080e7Smrj return (e); 172912f080e7Smrj } 173012f080e7Smrj } 173112f080e7Smrj 173212f080e7Smrj *handlep = (ddi_dma_handle_t)hp; 173312f080e7Smrj 17340b7ba611SMark Johnson ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 17350b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t, 173612f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 173712f080e7Smrj 173812f080e7Smrj return (DDI_SUCCESS); 173912f080e7Smrj } 174012f080e7Smrj 174112f080e7Smrj 174212f080e7Smrj /* 174320906b23SVikram Hegde * rootnex_dma_allochdl() 174420906b23SVikram Hegde * called from ddi_dma_alloc_handle(). 
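 *
 * Illustrative only (driver-side code, not part of this file): the call
 * that lands here usually looks like
 *
 *	ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP, NULL,
 *	    &xx_dma_handle);
 *
 * where xx_dma_attr and xx_dma_handle are the driver's own ddi_dma_attr_t
 * and ddi_dma_handle_t (the names are hypothetical).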
174512f080e7Smrj */ 174620906b23SVikram Hegde static int 174720906b23SVikram Hegde rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 174820906b23SVikram Hegde int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 174920906b23SVikram Hegde { 17503a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 175120906b23SVikram Hegde uint_t error = ENOTSUP; 175220906b23SVikram Hegde int retval; 175320906b23SVikram Hegde 175420906b23SVikram Hegde retval = iommulib_nex_open(rdip, &error); 175520906b23SVikram Hegde 175620906b23SVikram Hegde if (retval != DDI_SUCCESS && error == ENOTSUP) { 175720906b23SVikram Hegde /* No IOMMU */ 175820906b23SVikram Hegde return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 175920906b23SVikram Hegde handlep)); 176020906b23SVikram Hegde } else if (retval != DDI_SUCCESS) { 176120906b23SVikram Hegde return (DDI_FAILURE); 176220906b23SVikram Hegde } 176320906b23SVikram Hegde 1764b51bbbf5SVikram Hegde ASSERT(IOMMU_USED(rdip)); 176520906b23SVikram Hegde 176620906b23SVikram Hegde /* has an IOMMU */ 176720906b23SVikram Hegde return (iommulib_nexdma_allochdl(dip, rdip, attr, 176820906b23SVikram Hegde waitfp, arg, handlep)); 176920906b23SVikram Hegde #else 177020906b23SVikram Hegde return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 177120906b23SVikram Hegde handlep)); 177220906b23SVikram Hegde #endif 177320906b23SVikram Hegde } 177420906b23SVikram Hegde 177512f080e7Smrj /*ARGSUSED*/ 177612f080e7Smrj static int 177720906b23SVikram Hegde rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 177820906b23SVikram Hegde ddi_dma_handle_t handle) 177912f080e7Smrj { 178012f080e7Smrj ddi_dma_impl_t *hp; 178112f080e7Smrj rootnex_dma_t *dma; 178212f080e7Smrj 178312f080e7Smrj 178412f080e7Smrj hp = (ddi_dma_impl_t *)handle; 178512f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 178612f080e7Smrj 178712f080e7Smrj /* unbind should have been called first */ 178812f080e7Smrj ASSERT(!dma->dp_inuse); 178912f080e7Smrj 179012f080e7Smrj mutex_destroy(&dma->dp_mutex); 179112f080e7Smrj kmem_cache_free(rootnex_state->r_dmahdl_cache, hp); 179212f080e7Smrj 17930b7ba611SMark Johnson ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 17940b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t, 179512f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 179612f080e7Smrj 179712f080e7Smrj if (rootnex_state->r_dvma_call_list_id) 179812f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 179912f080e7Smrj 180012f080e7Smrj return (DDI_SUCCESS); 180112f080e7Smrj } 180212f080e7Smrj 180312f080e7Smrj /* 180420906b23SVikram Hegde * rootnex_dma_freehdl() 180520906b23SVikram Hegde * called from ddi_dma_free_handle(). 
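 *
 * Driver-side counterpart (illustrative, hypothetical name):
 *	ddi_dma_free_handle(&xx_dma_handle);
 * which is only legal once the handle has been unbound.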
180612f080e7Smrj */ 180720906b23SVikram Hegde static int 180820906b23SVikram Hegde rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 180920906b23SVikram Hegde { 18103a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 1811b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 181220906b23SVikram Hegde return (iommulib_nexdma_freehdl(dip, rdip, handle)); 181320906b23SVikram Hegde } 181420906b23SVikram Hegde #endif 181520906b23SVikram Hegde return (rootnex_coredma_freehdl(dip, rdip, handle)); 181620906b23SVikram Hegde } 181720906b23SVikram Hegde 181812f080e7Smrj /*ARGSUSED*/ 181912f080e7Smrj static int 182020906b23SVikram Hegde rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 182120906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 182220906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp) 182312f080e7Smrj { 182412f080e7Smrj rootnex_sglinfo_t *sinfo; 182512f080e7Smrj ddi_dma_attr_t *attr; 182612f080e7Smrj ddi_dma_impl_t *hp; 182712f080e7Smrj rootnex_dma_t *dma; 182812f080e7Smrj int kmflag; 182912f080e7Smrj int e; 183012f080e7Smrj 183112f080e7Smrj hp = (ddi_dma_impl_t *)handle; 183212f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 183312f080e7Smrj sinfo = &dma->dp_sglinfo; 183412f080e7Smrj attr = &hp->dmai_attr; 183512f080e7Smrj 183694f1124eSVikram Hegde if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 183794f1124eSVikram Hegde dma->dp_sleep_flags = KM_SLEEP; 183894f1124eSVikram Hegde } else { 183994f1124eSVikram Hegde dma->dp_sleep_flags = KM_NOSLEEP; 184094f1124eSVikram Hegde } 184194f1124eSVikram Hegde 184212f080e7Smrj hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 184312f080e7Smrj 184412f080e7Smrj /* 184512f080e7Smrj * This is useful for debugging a driver. Not as useful in a production 184612f080e7Smrj * system. The only time this will fail is if you have a driver bug. 184712f080e7Smrj */ 184812f080e7Smrj if (rootnex_bind_check_inuse) { 184912f080e7Smrj /* 185012f080e7Smrj * No one else should ever have this lock unless someone else 185112f080e7Smrj * is trying to use this handle. So contention on the lock 185212f080e7Smrj * is the same as inuse being set. 
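 * A failed mutex_tryenter() below is therefore reported to the caller
 * as DDI_DMA_INUSE, exactly as if dp_inuse had been found set.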
185312f080e7Smrj */ 185412f080e7Smrj e = mutex_tryenter(&dma->dp_mutex); 185512f080e7Smrj if (e == 0) { 185612f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 185712f080e7Smrj return (DDI_DMA_INUSE); 185812f080e7Smrj } 185912f080e7Smrj if (dma->dp_inuse) { 186012f080e7Smrj mutex_exit(&dma->dp_mutex); 186112f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 186212f080e7Smrj return (DDI_DMA_INUSE); 186312f080e7Smrj } 186412f080e7Smrj dma->dp_inuse = B_TRUE; 186512f080e7Smrj mutex_exit(&dma->dp_mutex); 186612f080e7Smrj } 186712f080e7Smrj 186812f080e7Smrj /* check the ddi_dma_attr arg to make sure it makes a little sense */ 186912f080e7Smrj if (rootnex_bind_check_parms) { 187012f080e7Smrj e = rootnex_valid_bind_parms(dmareq, attr); 187112f080e7Smrj if (e != DDI_SUCCESS) { 187212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 187312f080e7Smrj rootnex_clean_dmahdl(hp); 187412f080e7Smrj return (e); 187512f080e7Smrj } 187612f080e7Smrj } 187712f080e7Smrj 187812f080e7Smrj /* save away the original bind info */ 187912f080e7Smrj dma->dp_dma = dmareq->dmar_object; 188012f080e7Smrj 18813a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 18823a634bfcSVikram Hegde e = immu_map_sgl(hp, dmareq, rootnex_prealloc_cookies, rdip); 188386c1f4dcSVikram Hegde switch (e) { 18843a634bfcSVikram Hegde case DDI_DMA_MAPPED: 18853a634bfcSVikram Hegde goto out; 18863a634bfcSVikram Hegde case DDI_DMA_USE_PHYSICAL: 18873a634bfcSVikram Hegde break; 18883a634bfcSVikram Hegde case DDI_DMA_PARTIAL: 18893a634bfcSVikram Hegde ddi_err(DER_PANIC, rdip, "Partial DVMA map"); 18903a634bfcSVikram Hegde e = DDI_DMA_NORESOURCES; 18913a634bfcSVikram Hegde /*FALLTHROUGH*/ 189286c1f4dcSVikram Hegde default: 18933a634bfcSVikram Hegde ddi_err(DER_MODE, rdip, "DVMA map failed"); 18943a634bfcSVikram Hegde ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 189586c1f4dcSVikram Hegde rootnex_clean_dmahdl(hp); 18963a634bfcSVikram Hegde return (e); 189786c1f4dcSVikram Hegde } 189820906b23SVikram Hegde #endif 189986c1f4dcSVikram Hegde 190012f080e7Smrj /* 190112f080e7Smrj * Figure out a rough estimate of what maximum number of pages this 190212f080e7Smrj * buffer could use (a high estimate of course). 190312f080e7Smrj */ 190412f080e7Smrj sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1; 190512f080e7Smrj 190612f080e7Smrj /* 190712f080e7Smrj * We'll use the pre-allocated cookies for any bind that will *always* 190812f080e7Smrj * fit (more important to be consistent, we don't want to create 190912f080e7Smrj * additional degenerate cases). 191012f080e7Smrj */ 191112f080e7Smrj if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) { 191212f080e7Smrj dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer; 191312f080e7Smrj dma->dp_need_to_free_cookie = B_FALSE; 191412f080e7Smrj DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip, 191512f080e7Smrj uint_t, sinfo->si_max_pages); 191612f080e7Smrj 191712f080e7Smrj /* 191812f080e7Smrj * For anything larger than that, we'll go ahead and allocate the 191912f080e7Smrj * maximum number of pages we expect to see. Hopefuly, we won't be 192012f080e7Smrj * seeing this path in the fast path for high performance devices very 192112f080e7Smrj * frequently. 192212f080e7Smrj * 192312f080e7Smrj * a ddi bind interface that allowed the driver to provide storage to 192412f080e7Smrj * the bind interface would speed this case up. 
192512f080e7Smrj */ 192612f080e7Smrj } else { 192712f080e7Smrj /* convert the sleep flags */ 192812f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 192912f080e7Smrj kmflag = KM_SLEEP; 193012f080e7Smrj } else { 193112f080e7Smrj kmflag = KM_NOSLEEP; 193212f080e7Smrj } 193312f080e7Smrj 193412f080e7Smrj /* 193512f080e7Smrj * Save away how much memory we allocated. If we're doing a 193612f080e7Smrj * nosleep, the alloc could fail... 193712f080e7Smrj */ 193812f080e7Smrj dma->dp_cookie_size = sinfo->si_max_pages * 193912f080e7Smrj sizeof (ddi_dma_cookie_t); 194012f080e7Smrj dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag); 194112f080e7Smrj if (dma->dp_cookies == NULL) { 194212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 194312f080e7Smrj rootnex_clean_dmahdl(hp); 194412f080e7Smrj return (DDI_DMA_NORESOURCES); 194512f080e7Smrj } 194612f080e7Smrj dma->dp_need_to_free_cookie = B_TRUE; 194712f080e7Smrj DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t, 194812f080e7Smrj sinfo->si_max_pages); 194912f080e7Smrj } 195012f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 195112f080e7Smrj 195212f080e7Smrj /* 195312f080e7Smrj * Get the real sgl. rootnex_get_sgl will fill in cookie array while 19543a634bfcSVikram Hegde * looking at the constraints in the dma structure. It will then put 19553a634bfcSVikram Hegde * some additional state about the sgl in the dma struct (i.e. is 19563a634bfcSVikram Hegde * the sgl clean, or do we need to do some munging; how many pages 19573a634bfcSVikram Hegde * need to be copied, etc.) 195812f080e7Smrj */ 195912f080e7Smrj rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies, 196012f080e7Smrj &dma->dp_sglinfo); 196112f080e7Smrj 19623a634bfcSVikram Hegde out: 196386c1f4dcSVikram Hegde ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages); 196412f080e7Smrj /* if we don't need a copy buffer, we don't need to sync */ 196512f080e7Smrj if (sinfo->si_copybuf_req == 0) { 196612f080e7Smrj hp->dmai_rflags |= DMP_NOSYNC; 196712f080e7Smrj } 196812f080e7Smrj 196912f080e7Smrj /* 197012f080e7Smrj * if we don't need the copybuf and we don't need to do a partial, we 197112f080e7Smrj * hit the fast path. All the high performance devices should be trying 197212f080e7Smrj * to hit this path. To hit this path, a device should be able to reach 197312f080e7Smrj * all of memory, shouldn't try to bind more than it can transfer, and 197412f080e7Smrj * the buffer shouldn't require more cookies than the driver/device can 197512f080e7Smrj * handle [sgllen]). 197612f080e7Smrj */ 197712f080e7Smrj if ((sinfo->si_copybuf_req == 0) && 197812f080e7Smrj (sinfo->si_sgl_size <= attr->dma_attr_sgllen) && 197912f080e7Smrj (dma->dp_dma.dmao_size < dma->dp_maxxfer)) { 198012f080e7Smrj /* 198185c8e0e8Sstephh * If the driver supports FMA, insert the handle in the FMA DMA 198285c8e0e8Sstephh * handle cache. 198385c8e0e8Sstephh */ 198485c8e0e8Sstephh if (attr->dma_attr_flags & DDI_DMA_FLAGERR) { 198585c8e0e8Sstephh hp->dmai_error.err_cf = rootnex_dma_check; 198685c8e0e8Sstephh (void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL); 198785c8e0e8Sstephh } 198885c8e0e8Sstephh 198985c8e0e8Sstephh /* 199012f080e7Smrj * copy out the first cookie and ccountp, set the cookie 199112f080e7Smrj * pointer to the second cookie. The first cookie is passed 199212f080e7Smrj * back on the stack. 
Additional cookies are accessed via 199312f080e7Smrj * ddi_dma_nextcookie() 199412f080e7Smrj */ 199512f080e7Smrj *cookiep = dma->dp_cookies[0]; 199612f080e7Smrj *ccountp = sinfo->si_sgl_size; 199712f080e7Smrj hp->dmai_cookie++; 199812f080e7Smrj hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 19993a634bfcSVikram Hegde ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 20003a634bfcSVikram Hegde DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, 20013a634bfcSVikram Hegde uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], 20023a634bfcSVikram Hegde uint_t, dma->dp_dma.dmao_size); 20033a634bfcSVikram Hegde 20043a634bfcSVikram Hegde 200512f080e7Smrj return (DDI_DMA_MAPPED); 200612f080e7Smrj } 200712f080e7Smrj 200812f080e7Smrj /* 200912f080e7Smrj * go to the slow path, we may need to alloc more memory, create 201012f080e7Smrj * multiple windows, and munge up a sgl to make the device happy. 201112f080e7Smrj */ 201212f080e7Smrj e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag); 201312f080e7Smrj if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 201412f080e7Smrj if (dma->dp_need_to_free_cookie) { 201512f080e7Smrj kmem_free(dma->dp_cookies, dma->dp_cookie_size); 201612f080e7Smrj } 201712f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 201812f080e7Smrj rootnex_clean_dmahdl(hp); /* must be after free cookie */ 201912f080e7Smrj return (e); 202012f080e7Smrj } 202112f080e7Smrj 202285c8e0e8Sstephh /* 202385c8e0e8Sstephh * If the driver supports FMA, insert the handle in the FMA DMA handle 202485c8e0e8Sstephh * cache. 202585c8e0e8Sstephh */ 202685c8e0e8Sstephh if (attr->dma_attr_flags & DDI_DMA_FLAGERR) { 202785c8e0e8Sstephh hp->dmai_error.err_cf = rootnex_dma_check; 202885c8e0e8Sstephh (void) ndi_fmc_insert(rdip, DMA_HANDLE, hp, NULL); 202985c8e0e8Sstephh } 203085c8e0e8Sstephh 203112f080e7Smrj /* if the first window uses the copy buffer, sync it for the device */ 203212f080e7Smrj if ((dma->dp_window[dma->dp_current_win].wd_dosync) && 203312f080e7Smrj (hp->dmai_rflags & DDI_DMA_WRITE)) { 203494f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 203512f080e7Smrj DDI_DMA_SYNC_FORDEV); 203612f080e7Smrj } 203712f080e7Smrj 203812f080e7Smrj /* 203912f080e7Smrj * copy out the first cookie and ccountp, set the cookie pointer to the 204012f080e7Smrj * second cookie. Make sure the partial flag is set/cleared correctly. 204112f080e7Smrj * If we have a partial map (i.e. multiple windows), the number of 204212f080e7Smrj * cookies we return is the number of cookies in the first window. 
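 *
 * Sketch of the consuming side (hypothetical driver code, shown only to
 * illustrate the cookie/ccount contract; a real driver would program each
 * cookie into its hardware before fetching the next, and would also handle
 * DDI_DMA_PARTIAL_MAP and errors):
 *
 *	if (ddi_dma_addr_bind_handle(hdl, NULL, kaddr, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) == DDI_DMA_MAPPED) {
 *		for (i = 1; i < ccount; i++)
 *			ddi_dma_nextcookie(hdl, &cookie);
 *	}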
204312f080e7Smrj */ 204412f080e7Smrj if (e == DDI_DMA_MAPPED) { 204512f080e7Smrj hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 204612f080e7Smrj *ccountp = sinfo->si_sgl_size; 20473a634bfcSVikram Hegde hp->dmai_nwin = 1; 204812f080e7Smrj } else { 204912f080e7Smrj hp->dmai_rflags |= DDI_DMA_PARTIAL; 205012f080e7Smrj *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt; 205112f080e7Smrj ASSERT(hp->dmai_nwin <= dma->dp_max_win); 205212f080e7Smrj } 205312f080e7Smrj *cookiep = dma->dp_cookies[0]; 205412f080e7Smrj hp->dmai_cookie++; 205512f080e7Smrj 20560b7ba611SMark Johnson ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 20570b7ba611SMark Johnson ROOTNEX_DPROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t, 205812f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 205912f080e7Smrj dma->dp_dma.dmao_size); 206012f080e7Smrj return (e); 206112f080e7Smrj } 206212f080e7Smrj 206312f080e7Smrj /* 206420906b23SVikram Hegde * rootnex_dma_bindhdl() 206520906b23SVikram Hegde * called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle(). 206612f080e7Smrj */ 206720906b23SVikram Hegde static int 206820906b23SVikram Hegde rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 206920906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 207020906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp) 207120906b23SVikram Hegde { 20723a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 2073b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 207420906b23SVikram Hegde return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq, 207520906b23SVikram Hegde cookiep, ccountp)); 207620906b23SVikram Hegde } 207720906b23SVikram Hegde #endif 207820906b23SVikram Hegde return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq, 207920906b23SVikram Hegde cookiep, ccountp)); 208020906b23SVikram Hegde } 208120906b23SVikram Hegde 20823a634bfcSVikram Hegde 20833a634bfcSVikram Hegde 208412f080e7Smrj /*ARGSUSED*/ 208512f080e7Smrj static int 208620906b23SVikram Hegde rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 208712f080e7Smrj ddi_dma_handle_t handle) 208812f080e7Smrj { 208912f080e7Smrj ddi_dma_impl_t *hp; 209012f080e7Smrj rootnex_dma_t *dma; 209112f080e7Smrj int e; 209212f080e7Smrj 209312f080e7Smrj hp = (ddi_dma_impl_t *)handle; 209412f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 209512f080e7Smrj 209612f080e7Smrj /* make sure the buffer wasn't free'd before calling unbind */ 209712f080e7Smrj if (rootnex_unbind_verify_buffer) { 209812f080e7Smrj e = rootnex_verify_buffer(dma); 209912f080e7Smrj if (e != DDI_SUCCESS) { 210012f080e7Smrj ASSERT(0); 210112f080e7Smrj return (DDI_FAILURE); 210212f080e7Smrj } 210312f080e7Smrj } 210412f080e7Smrj 210512f080e7Smrj /* sync the current window before unbinding the buffer */ 210612f080e7Smrj if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync && 210712f080e7Smrj (hp->dmai_rflags & DDI_DMA_READ)) { 210894f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 210912f080e7Smrj DDI_DMA_SYNC_FORCPU); 211012f080e7Smrj } 211112f080e7Smrj 211212f080e7Smrj /* 211300d0963fSdilpreet * If the driver supports FMA, remove the handle in the FMA DMA handle 211400d0963fSdilpreet * cache. 
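 * (This undoes the ndi_fmc_insert() done at bind time for drivers that
 * set DDI_DMA_FLAGERR.)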
211500d0963fSdilpreet */
211600d0963fSdilpreet if (hp->dmai_attr.dma_attr_flags & DDI_DMA_FLAGERR) {
211700d0963fSdilpreet if ((DEVI(rdip)->devi_fmhdl != NULL) &&
211800d0963fSdilpreet (DDI_FM_DMA_ERR_CAP(DEVI(rdip)->devi_fmhdl->fh_cap))) {
211900d0963fSdilpreet (void) ndi_fmc_remove(rdip, DMA_HANDLE, hp);
212000d0963fSdilpreet }
212100d0963fSdilpreet }
212200d0963fSdilpreet
212300d0963fSdilpreet /*
212412f080e7Smrj * clean up any copy buffer or window state. If we didn't use the copy
212512f080e7Smrj * buffer or windows, there won't be much to do :-)
212612f080e7Smrj */
212712f080e7Smrj rootnex_teardown_copybuf(dma);
212812f080e7Smrj rootnex_teardown_windows(dma);
212912f080e7Smrj
21303a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv)
213112f080e7Smrj /*
21323a634bfcSVikram Hegde * Clean up the page tables and free the dvma
213386c1f4dcSVikram Hegde */
21343a634bfcSVikram Hegde e = immu_unmap_sgl(hp, rdip);
21353a634bfcSVikram Hegde if (e != DDI_DMA_USE_PHYSICAL && e != DDI_SUCCESS) {
21363a634bfcSVikram Hegde return (e);
213786c1f4dcSVikram Hegde }
213820906b23SVikram Hegde #endif
213986c1f4dcSVikram Hegde
214086c1f4dcSVikram Hegde /*
214112f080e7Smrj * If we had to allocate space for the worst case sgl (it didn't
214212f080e7Smrj * fit into our pre-allocated buffer), free that up now
214312f080e7Smrj */
214412f080e7Smrj if (dma->dp_need_to_free_cookie) {
214512f080e7Smrj kmem_free(dma->dp_cookies, dma->dp_cookie_size);
214612f080e7Smrj }
214712f080e7Smrj
214812f080e7Smrj /*
214912f080e7Smrj * clean up the handle so it's ready for the next bind (i.e. if the
215012f080e7Smrj * handle is reused).
215112f080e7Smrj */
215212f080e7Smrj rootnex_clean_dmahdl(hp);
215312f080e7Smrj
215412f080e7Smrj if (rootnex_state->r_dvma_call_list_id)
215512f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
215612f080e7Smrj
21570b7ba611SMark Johnson ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
21580b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
215912f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
216012f080e7Smrj
216112f080e7Smrj return (DDI_SUCCESS);
216212f080e7Smrj }
216312f080e7Smrj
216420906b23SVikram Hegde /*
216520906b23SVikram Hegde * rootnex_dma_unbindhdl()
216620906b23SVikram Hegde * called from ddi_dma_unbind_handle()
216720906b23SVikram Hegde */
216820906b23SVikram Hegde /*ARGSUSED*/
216920906b23SVikram Hegde static int
217020906b23SVikram Hegde rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
217120906b23SVikram Hegde ddi_dma_handle_t handle)
217220906b23SVikram Hegde {
21733a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv)
2174b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) {
217520906b23SVikram Hegde return (iommulib_nexdma_unbindhdl(dip, rdip, handle));
217620906b23SVikram Hegde }
217720906b23SVikram Hegde #endif
217820906b23SVikram Hegde return (rootnex_coredma_unbindhdl(dip, rdip, handle));
217920906b23SVikram Hegde }
218020906b23SVikram Hegde
21813a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv)
218294f1124eSVikram Hegde
218394f1124eSVikram Hegde static int
218494f1124eSVikram Hegde rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
218594f1124eSVikram Hegde {
218694f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
218794f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
218894f1124eSVikram Hegde
218994f1124eSVikram Hegde if (dma->dp_sleep_flags != KM_SLEEP &&
219094f1124eSVikram Hegde dma->dp_sleep_flags != KM_NOSLEEP)
219194f1124eSVikram Hegde
cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle"); 219294f1124eSVikram Hegde return (dma->dp_sleep_flags); 219394f1124eSVikram Hegde } 219420906b23SVikram Hegde /*ARGSUSED*/ 219520906b23SVikram Hegde static void 219620906b23SVikram Hegde rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 219720906b23SVikram Hegde { 219820906b23SVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 219920906b23SVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 220094f1124eSVikram Hegde rootnex_window_t *window; 220120906b23SVikram Hegde 220294f1124eSVikram Hegde if (dma->dp_window) { 220394f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 220494f1124eSVikram Hegde hp->dmai_cookie = window->wd_first_cookie; 220594f1124eSVikram Hegde } else { 220694f1124eSVikram Hegde hp->dmai_cookie = dma->dp_cookies; 220794f1124eSVikram Hegde } 220820906b23SVikram Hegde hp->dmai_cookie++; 220920906b23SVikram Hegde } 221020906b23SVikram Hegde 221120906b23SVikram Hegde /*ARGSUSED*/ 221220906b23SVikram Hegde static int 221320906b23SVikram Hegde rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 221494f1124eSVikram Hegde ddi_dma_cookie_t **cookiepp, uint_t *ccountp) 221520906b23SVikram Hegde { 221694f1124eSVikram Hegde int i; 221794f1124eSVikram Hegde int km_flags; 221820906b23SVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 221920906b23SVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 222094f1124eSVikram Hegde rootnex_window_t *window; 222194f1124eSVikram Hegde ddi_dma_cookie_t *cp; 222294f1124eSVikram Hegde ddi_dma_cookie_t *cookie; 222320906b23SVikram Hegde 222494f1124eSVikram Hegde ASSERT(*cookiepp == NULL); 222594f1124eSVikram Hegde ASSERT(*ccountp == 0); 222620906b23SVikram Hegde 222794f1124eSVikram Hegde if (dma->dp_window) { 222894f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 222994f1124eSVikram Hegde cp = window->wd_first_cookie; 223094f1124eSVikram Hegde *ccountp = window->wd_cookie_cnt; 223120906b23SVikram Hegde } else { 223294f1124eSVikram Hegde cp = dma->dp_cookies; 223320906b23SVikram Hegde *ccountp = dma->dp_sglinfo.si_sgl_size; 223420906b23SVikram Hegde } 223520906b23SVikram Hegde 223694f1124eSVikram Hegde km_flags = rootnex_coredma_get_sleep_flags(handle); 223794f1124eSVikram Hegde cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags); 223894f1124eSVikram Hegde if (cookie == NULL) { 223994f1124eSVikram Hegde return (DDI_DMA_NORESOURCES); 224094f1124eSVikram Hegde } 224194f1124eSVikram Hegde 224294f1124eSVikram Hegde for (i = 0; i < *ccountp; i++) { 224394f1124eSVikram Hegde cookie[i].dmac_notused = cp[i].dmac_notused; 224494f1124eSVikram Hegde cookie[i].dmac_type = cp[i].dmac_type; 224594f1124eSVikram Hegde cookie[i].dmac_address = cp[i].dmac_address; 224694f1124eSVikram Hegde cookie[i].dmac_size = cp[i].dmac_size; 224794f1124eSVikram Hegde } 224894f1124eSVikram Hegde 224994f1124eSVikram Hegde *cookiepp = cookie; 225020906b23SVikram Hegde 225120906b23SVikram Hegde return (DDI_SUCCESS); 225220906b23SVikram Hegde } 225394f1124eSVikram Hegde 225494f1124eSVikram Hegde /*ARGSUSED*/ 225594f1124eSVikram Hegde static int 225694f1124eSVikram Hegde rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 225794f1124eSVikram Hegde ddi_dma_cookie_t *cookiep, uint_t ccount) 225894f1124eSVikram Hegde { 225994f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 226094f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t 
*)hp->dmai_private; 226194f1124eSVikram Hegde rootnex_window_t *window; 226294f1124eSVikram Hegde ddi_dma_cookie_t *cur_cookiep; 226394f1124eSVikram Hegde 226494f1124eSVikram Hegde ASSERT(cookiep); 226594f1124eSVikram Hegde ASSERT(ccount != 0); 226694f1124eSVikram Hegde ASSERT(dma->dp_need_to_switch_cookies == B_FALSE); 226794f1124eSVikram Hegde 226894f1124eSVikram Hegde if (dma->dp_window) { 226994f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 227094f1124eSVikram Hegde dma->dp_saved_cookies = window->wd_first_cookie; 227194f1124eSVikram Hegde window->wd_first_cookie = cookiep; 227294f1124eSVikram Hegde ASSERT(ccount == window->wd_cookie_cnt); 227394f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 227494f1124eSVikram Hegde + window->wd_first_cookie; 227594f1124eSVikram Hegde } else { 227694f1124eSVikram Hegde dma->dp_saved_cookies = dma->dp_cookies; 227794f1124eSVikram Hegde dma->dp_cookies = cookiep; 227894f1124eSVikram Hegde ASSERT(ccount == dma->dp_sglinfo.si_sgl_size); 227994f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 228094f1124eSVikram Hegde + dma->dp_cookies; 228194f1124eSVikram Hegde } 228294f1124eSVikram Hegde 228394f1124eSVikram Hegde dma->dp_need_to_switch_cookies = B_TRUE; 228494f1124eSVikram Hegde hp->dmai_cookie = cur_cookiep; 228594f1124eSVikram Hegde 228694f1124eSVikram Hegde return (DDI_SUCCESS); 228794f1124eSVikram Hegde } 228894f1124eSVikram Hegde 228994f1124eSVikram Hegde /*ARGSUSED*/ 229094f1124eSVikram Hegde static int 229194f1124eSVikram Hegde rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 229294f1124eSVikram Hegde { 229394f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 229494f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 229594f1124eSVikram Hegde rootnex_window_t *window; 229694f1124eSVikram Hegde ddi_dma_cookie_t *cur_cookiep; 229794f1124eSVikram Hegde ddi_dma_cookie_t *cookie_array; 229894f1124eSVikram Hegde uint_t ccount; 229994f1124eSVikram Hegde 230094f1124eSVikram Hegde /* check if cookies have not been switched */ 230194f1124eSVikram Hegde if (dma->dp_need_to_switch_cookies == B_FALSE) 230294f1124eSVikram Hegde return (DDI_SUCCESS); 230394f1124eSVikram Hegde 230494f1124eSVikram Hegde ASSERT(dma->dp_saved_cookies); 230594f1124eSVikram Hegde 230694f1124eSVikram Hegde if (dma->dp_window) { 230794f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 230894f1124eSVikram Hegde cookie_array = window->wd_first_cookie; 230994f1124eSVikram Hegde window->wd_first_cookie = dma->dp_saved_cookies; 231094f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 231194f1124eSVikram Hegde ccount = window->wd_cookie_cnt; 231294f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - cookie_array) 231394f1124eSVikram Hegde + window->wd_first_cookie; 231494f1124eSVikram Hegde } else { 231594f1124eSVikram Hegde cookie_array = dma->dp_cookies; 231694f1124eSVikram Hegde dma->dp_cookies = dma->dp_saved_cookies; 231794f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 231894f1124eSVikram Hegde ccount = dma->dp_sglinfo.si_sgl_size; 231994f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - cookie_array) 232094f1124eSVikram Hegde + dma->dp_cookies; 232194f1124eSVikram Hegde } 232294f1124eSVikram Hegde 232394f1124eSVikram Hegde kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount); 232494f1124eSVikram Hegde 232594f1124eSVikram Hegde hp->dmai_cookie = cur_cookiep; 232694f1124eSVikram Hegde 232794f1124eSVikram Hegde 
dma->dp_need_to_switch_cookies = B_FALSE; 232894f1124eSVikram Hegde 232994f1124eSVikram Hegde return (DDI_SUCCESS); 233094f1124eSVikram Hegde } 233194f1124eSVikram Hegde 23325dfdb46bSVikram Hegde #endif 233312f080e7Smrj 233412f080e7Smrj /* 233512f080e7Smrj * rootnex_verify_buffer() 233612f080e7Smrj * verify buffer wasn't free'd 233712f080e7Smrj */ 233812f080e7Smrj static int 233912f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma) 234012f080e7Smrj { 234112f080e7Smrj page_t **pplist; 234212f080e7Smrj caddr_t vaddr; 234312f080e7Smrj uint_t pcnt; 234412f080e7Smrj uint_t poff; 234512f080e7Smrj page_t *pp; 234600d0963fSdilpreet char b; 234712f080e7Smrj int i; 234812f080e7Smrj 234912f080e7Smrj /* Figure out how many pages this buffer occupies */ 235012f080e7Smrj if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) { 235112f080e7Smrj poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET; 235212f080e7Smrj } else { 235312f080e7Smrj vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr; 235412f080e7Smrj poff = (uintptr_t)vaddr & MMU_PAGEOFFSET; 235512f080e7Smrj } 235612f080e7Smrj pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff); 235712f080e7Smrj 235812f080e7Smrj switch (dma->dp_dma.dmao_type) { 235912f080e7Smrj case DMA_OTYP_PAGES: 236012f080e7Smrj /* 236112f080e7Smrj * for a linked list of pp's walk through them to make sure 236212f080e7Smrj * they're locked and not free. 236312f080e7Smrj */ 236412f080e7Smrj pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp; 236512f080e7Smrj for (i = 0; i < pcnt; i++) { 236612f080e7Smrj if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) { 236712f080e7Smrj return (DDI_FAILURE); 236812f080e7Smrj } 23697c478bd9Sstevel@tonic-gate pp = pp->p_next; 23707c478bd9Sstevel@tonic-gate } 23717c478bd9Sstevel@tonic-gate break; 237212f080e7Smrj 23737c478bd9Sstevel@tonic-gate case DMA_OTYP_VADDR: 23747c478bd9Sstevel@tonic-gate case DMA_OTYP_BUFVADDR: 237512f080e7Smrj pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv; 237612f080e7Smrj /* 237712f080e7Smrj * for an array of pp's walk through them to make sure they're 237812f080e7Smrj * not free. It's possible that they may not be locked. 237912f080e7Smrj */ 238012f080e7Smrj if (pplist) { 238112f080e7Smrj for (i = 0; i < pcnt; i++) { 238212f080e7Smrj if (PP_ISFREE(pplist[i])) { 238312f080e7Smrj return (DDI_FAILURE); 238412f080e7Smrj } 238512f080e7Smrj } 238612f080e7Smrj 238712f080e7Smrj /* For a virtual address, try to peek at each page */ 238812f080e7Smrj } else { 238912f080e7Smrj if (dma->dp_sglinfo.si_asp == &kas) { 239012f080e7Smrj for (i = 0; i < pcnt; i++) { 239100d0963fSdilpreet if (ddi_peek8(NULL, vaddr, &b) == 239200d0963fSdilpreet DDI_FAILURE) 239312f080e7Smrj return (DDI_FAILURE); 239400d0963fSdilpreet vaddr += MMU_PAGESIZE; 239512f080e7Smrj } 239612f080e7Smrj } 239712f080e7Smrj } 239812f080e7Smrj break; 239912f080e7Smrj 240012f080e7Smrj default: 240112f080e7Smrj ASSERT(0); 240212f080e7Smrj break; 240312f080e7Smrj } 240412f080e7Smrj 240512f080e7Smrj return (DDI_SUCCESS); 240612f080e7Smrj } 240712f080e7Smrj 240812f080e7Smrj 240912f080e7Smrj /* 241012f080e7Smrj * rootnex_clean_dmahdl() 241112f080e7Smrj * Clean the dma handle. This should be called on a handle alloc and an 241212f080e7Smrj * unbind handle. Set the handle state to the default settings. 
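 * Note that dp_sleep_flags is reset to KM_PANIC below; if a later caller
 * asks rootnex_coredma_get_sleep_flags() for a handle whose bind-time
 * flags were never set, that sentinel trips its CE_PANIC check instead
 * of silently returning a stale value.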
241312f080e7Smrj */ 241412f080e7Smrj static void 241512f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp) 241612f080e7Smrj { 241712f080e7Smrj rootnex_dma_t *dma; 241812f080e7Smrj 241912f080e7Smrj 242012f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 242112f080e7Smrj 242212f080e7Smrj hp->dmai_nwin = 0; 242312f080e7Smrj dma->dp_current_cookie = 0; 242412f080e7Smrj dma->dp_copybuf_size = 0; 242512f080e7Smrj dma->dp_window = NULL; 242612f080e7Smrj dma->dp_cbaddr = NULL; 242712f080e7Smrj dma->dp_inuse = B_FALSE; 242812f080e7Smrj dma->dp_need_to_free_cookie = B_FALSE; 242994f1124eSVikram Hegde dma->dp_need_to_switch_cookies = B_FALSE; 243094f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 243194f1124eSVikram Hegde dma->dp_sleep_flags = KM_PANIC; 243212f080e7Smrj dma->dp_need_to_free_window = B_FALSE; 243312f080e7Smrj dma->dp_partial_required = B_FALSE; 243412f080e7Smrj dma->dp_trim_required = B_FALSE; 243512f080e7Smrj dma->dp_sglinfo.si_copybuf_req = 0; 243612f080e7Smrj #if !defined(__amd64) 243712f080e7Smrj dma->dp_cb_remaping = B_FALSE; 243812f080e7Smrj dma->dp_kva = NULL; 243912f080e7Smrj #endif 244012f080e7Smrj 244112f080e7Smrj /* FMA related initialization */ 244212f080e7Smrj hp->dmai_fault = 0; 244312f080e7Smrj hp->dmai_fault_check = NULL; 244412f080e7Smrj hp->dmai_fault_notify = NULL; 244512f080e7Smrj hp->dmai_error.err_ena = 0; 244612f080e7Smrj hp->dmai_error.err_status = DDI_FM_OK; 244712f080e7Smrj hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 244812f080e7Smrj hp->dmai_error.err_ontrap = NULL; 244912f080e7Smrj hp->dmai_error.err_fep = NULL; 245000d0963fSdilpreet hp->dmai_error.err_cf = NULL; 245112f080e7Smrj } 245212f080e7Smrj 245312f080e7Smrj 245412f080e7Smrj /* 245512f080e7Smrj * rootnex_valid_alloc_parms() 245612f080e7Smrj * Called in ddi_dma_alloc_handle path to validate its parameters. 245712f080e7Smrj */ 245812f080e7Smrj static int 245912f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize) 246012f080e7Smrj { 246112f080e7Smrj if ((attr->dma_attr_seg < MMU_PAGEOFFSET) || 246212f080e7Smrj (attr->dma_attr_count_max < MMU_PAGEOFFSET) || 246312f080e7Smrj (attr->dma_attr_granular > MMU_PAGESIZE) || 246412f080e7Smrj (attr->dma_attr_maxxfer < MMU_PAGESIZE)) { 246512f080e7Smrj return (DDI_DMA_BADATTR); 246612f080e7Smrj } 246712f080e7Smrj 246812f080e7Smrj if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) { 246912f080e7Smrj return (DDI_DMA_BADATTR); 247012f080e7Smrj } 247112f080e7Smrj 247212f080e7Smrj if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 247312f080e7Smrj MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 247412f080e7Smrj attr->dma_attr_sgllen <= 0) { 247512f080e7Smrj return (DDI_DMA_BADATTR); 247612f080e7Smrj } 247712f080e7Smrj 247812f080e7Smrj /* We should be able to DMA into every byte offset in a page */ 247912f080e7Smrj if (maxsegmentsize < MMU_PAGESIZE) { 248012f080e7Smrj return (DDI_DMA_BADATTR); 248112f080e7Smrj } 248212f080e7Smrj 2483*07c6692fSMark Johnson /* if we're bouncing on seg, seg must be <= addr_hi */ 2484*07c6692fSMark Johnson if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) && 2485*07c6692fSMark Johnson (attr->dma_attr_seg > attr->dma_attr_addr_hi)) { 2486*07c6692fSMark Johnson return (DDI_DMA_BADATTR); 2487*07c6692fSMark Johnson } 248812f080e7Smrj return (DDI_SUCCESS); 248912f080e7Smrj } 249012f080e7Smrj 249112f080e7Smrj /* 249212f080e7Smrj * rootnex_valid_bind_parms() 249312f080e7Smrj * Called in ddi_dma_*_bind_handle path to validate its parameters. 
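 *
 *    For illustration only (hypothetical values, not taken from any
 *    driver): the attribute checks in rootnex_valid_alloc_parms() above
 *    would accept an attr roughly like
 *        dma_attr_addr_lo   = 0x0
 *        dma_attr_addr_hi   = 0xFFFFFFFFull
 *        dma_attr_seg       = 0xFFFFFFFFull   (low bits cover a page offset)
 *        dma_attr_count_max = 0xFFFFFFFFull
 *        dma_attr_granular  = 512             (<= MMU_PAGESIZE, divides it)
 *        dma_attr_sgllen    = 17
 *        dma_attr_maxxfer   = 0xFFFFFFFFull   (>= MMU_PAGESIZE)
 *    and would reject, e.g., addr_hi <= addr_lo or a granularity larger
 *    than a page.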
249412f080e7Smrj */ 249512f080e7Smrj /* ARGSUSED */ 249612f080e7Smrj static int 249712f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr) 249812f080e7Smrj { 249912f080e7Smrj #if !defined(__amd64) 250012f080e7Smrj /* 250112f080e7Smrj * we only support up to a 2G-1 transfer size on 32-bit kernels so 250212f080e7Smrj * we can track the offset for the obsoleted interfaces. 250312f080e7Smrj */ 250412f080e7Smrj if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) { 250512f080e7Smrj return (DDI_DMA_TOOBIG); 250612f080e7Smrj } 250712f080e7Smrj #endif 250812f080e7Smrj 250912f080e7Smrj return (DDI_SUCCESS); 251012f080e7Smrj } 251112f080e7Smrj 251212f080e7Smrj 251312f080e7Smrj /* 2514*07c6692fSMark Johnson * rootnex_need_bounce_seg() 2515*07c6692fSMark Johnson * check to see if the buffer lives on both side of the seg. 2516*07c6692fSMark Johnson */ 2517*07c6692fSMark Johnson static boolean_t 2518*07c6692fSMark Johnson rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo) 2519*07c6692fSMark Johnson { 2520*07c6692fSMark Johnson ddi_dma_atyp_t buftype; 2521*07c6692fSMark Johnson rootnex_addr_t raddr; 2522*07c6692fSMark Johnson boolean_t lower_addr; 2523*07c6692fSMark Johnson boolean_t upper_addr; 2524*07c6692fSMark Johnson uint64_t offset; 2525*07c6692fSMark Johnson page_t **pplist; 2526*07c6692fSMark Johnson uint64_t paddr; 2527*07c6692fSMark Johnson uint32_t psize; 2528*07c6692fSMark Johnson uint32_t size; 2529*07c6692fSMark Johnson caddr_t vaddr; 2530*07c6692fSMark Johnson uint_t pcnt; 2531*07c6692fSMark Johnson page_t *pp; 2532*07c6692fSMark Johnson 2533*07c6692fSMark Johnson 2534*07c6692fSMark Johnson /* shortcuts */ 2535*07c6692fSMark Johnson pplist = dmar_object->dmao_obj.virt_obj.v_priv; 2536*07c6692fSMark Johnson vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 2537*07c6692fSMark Johnson buftype = dmar_object->dmao_type; 2538*07c6692fSMark Johnson size = dmar_object->dmao_size; 2539*07c6692fSMark Johnson 2540*07c6692fSMark Johnson lower_addr = B_FALSE; 2541*07c6692fSMark Johnson upper_addr = B_FALSE; 2542*07c6692fSMark Johnson pcnt = 0; 2543*07c6692fSMark Johnson 2544*07c6692fSMark Johnson /* 2545*07c6692fSMark Johnson * Process the first page to handle the initial offset of the buffer. 2546*07c6692fSMark Johnson * We'll use the base address we get later when we loop through all 2547*07c6692fSMark Johnson * the pages. 
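 *
 *    For example (hypothetical numbers, 4K pages): a VADDR buffer that
 *    starts 0x200 into its first page with size 0x3000 gives
 *    offset = 0x200, so the first chunk checked is
 *    psize = MMU_PAGESIZE - 0x200 = 0xe00; the remaining 0x2200 bytes
 *    are then walked a page at a time in the loop below.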
2548*07c6692fSMark Johnson */ 2549*07c6692fSMark Johnson if (buftype == DMA_OTYP_PAGES) { 2550*07c6692fSMark Johnson pp = dmar_object->dmao_obj.pp_obj.pp_pp; 2551*07c6692fSMark Johnson offset = dmar_object->dmao_obj.pp_obj.pp_offset & 2552*07c6692fSMark Johnson MMU_PAGEOFFSET; 2553*07c6692fSMark Johnson paddr = pfn_to_pa(pp->p_pagenum) + offset; 2554*07c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 2555*07c6692fSMark Johnson pp = pp->p_next; 2556*07c6692fSMark Johnson sglinfo->si_asp = NULL; 2557*07c6692fSMark Johnson } else if (pplist != NULL) { 2558*07c6692fSMark Johnson offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2559*07c6692fSMark Johnson sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2560*07c6692fSMark Johnson if (sglinfo->si_asp == NULL) { 2561*07c6692fSMark Johnson sglinfo->si_asp = &kas; 2562*07c6692fSMark Johnson } 2563*07c6692fSMark Johnson paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2564*07c6692fSMark Johnson paddr += offset; 2565*07c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 2566*07c6692fSMark Johnson pcnt++; 2567*07c6692fSMark Johnson } else { 2568*07c6692fSMark Johnson offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 2569*07c6692fSMark Johnson sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 2570*07c6692fSMark Johnson if (sglinfo->si_asp == NULL) { 2571*07c6692fSMark Johnson sglinfo->si_asp = &kas; 2572*07c6692fSMark Johnson } 2573*07c6692fSMark Johnson paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 2574*07c6692fSMark Johnson paddr += offset; 2575*07c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 2576*07c6692fSMark Johnson vaddr += psize; 2577*07c6692fSMark Johnson } 2578*07c6692fSMark Johnson 2579*07c6692fSMark Johnson #ifdef __xpv 2580*07c6692fSMark Johnson /* 2581*07c6692fSMark Johnson * If we're dom0, we're using a real device so we need to load 2582*07c6692fSMark Johnson * the cookies with MFNs instead of PFNs. 2583*07c6692fSMark Johnson */ 2584*07c6692fSMark Johnson raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2585*07c6692fSMark Johnson #else 2586*07c6692fSMark Johnson raddr = paddr; 2587*07c6692fSMark Johnson #endif 2588*07c6692fSMark Johnson 2589*07c6692fSMark Johnson if ((raddr + psize) > sglinfo->si_segmask) { 2590*07c6692fSMark Johnson upper_addr = B_TRUE; 2591*07c6692fSMark Johnson } else { 2592*07c6692fSMark Johnson lower_addr = B_TRUE; 2593*07c6692fSMark Johnson } 2594*07c6692fSMark Johnson size -= psize; 2595*07c6692fSMark Johnson 2596*07c6692fSMark Johnson /* 2597*07c6692fSMark Johnson * Walk through the rest of the pages in the buffer. Track to see 2598*07c6692fSMark Johnson * if we have pages on both sides of the segment boundary. 
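 *
 *    Illustration (hypothetical values): with si_segmask = 0xFFFFFF
 *    (a 16MB segment limit), a buffer with one page at physical
 *    0xF00000 and another at 0x1100000 ends up with both lower_addr
 *    and upper_addr set, so we return B_TRUE and the pages above the
 *    segment get bounced through the copy buffer.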
2599*07c6692fSMark Johnson */ 2600*07c6692fSMark Johnson while (size > 0) { 2601*07c6692fSMark Johnson /* partial or full page */ 2602*07c6692fSMark Johnson psize = MIN(size, MMU_PAGESIZE); 2603*07c6692fSMark Johnson 2604*07c6692fSMark Johnson if (buftype == DMA_OTYP_PAGES) { 2605*07c6692fSMark Johnson /* get the paddr from the page_t */ 2606*07c6692fSMark Johnson ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2607*07c6692fSMark Johnson paddr = pfn_to_pa(pp->p_pagenum); 2608*07c6692fSMark Johnson pp = pp->p_next; 2609*07c6692fSMark Johnson } else if (pplist != NULL) { 2610*07c6692fSMark Johnson /* index into the array of page_t's to get the paddr */ 2611*07c6692fSMark Johnson ASSERT(!PP_ISFREE(pplist[pcnt])); 2612*07c6692fSMark Johnson paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 2613*07c6692fSMark Johnson pcnt++; 2614*07c6692fSMark Johnson } else { 2615*07c6692fSMark Johnson /* call into the VM to get the paddr */ 2616*07c6692fSMark Johnson paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 2617*07c6692fSMark Johnson vaddr)); 2618*07c6692fSMark Johnson vaddr += psize; 2619*07c6692fSMark Johnson } 2620*07c6692fSMark Johnson 2621*07c6692fSMark Johnson #ifdef __xpv 2622*07c6692fSMark Johnson /* 2623*07c6692fSMark Johnson * If we're dom0, we're using a real device so we need to load 2624*07c6692fSMark Johnson * the cookies with MFNs instead of PFNs. 2625*07c6692fSMark Johnson */ 2626*07c6692fSMark Johnson raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2627*07c6692fSMark Johnson #else 2628*07c6692fSMark Johnson raddr = paddr; 2629*07c6692fSMark Johnson #endif 2630*07c6692fSMark Johnson 2631*07c6692fSMark Johnson if ((raddr + psize) > sglinfo->si_segmask) { 2632*07c6692fSMark Johnson upper_addr = B_TRUE; 2633*07c6692fSMark Johnson } else { 2634*07c6692fSMark Johnson lower_addr = B_TRUE; 2635*07c6692fSMark Johnson } 2636*07c6692fSMark Johnson /* 2637*07c6692fSMark Johnson * if the buffer lives both above and below the segment 2638*07c6692fSMark Johnson * boundary, or the current page is the page immediately 2639*07c6692fSMark Johnson * after the segment, we will use a copy/bounce buffer for 2640*07c6692fSMark Johnson * all pages > seg. 2641*07c6692fSMark Johnson */ 2642*07c6692fSMark Johnson if ((lower_addr && upper_addr) || 2643*07c6692fSMark Johnson (raddr == (sglinfo->si_segmask + 1))) { 2644*07c6692fSMark Johnson return (B_TRUE); 2645*07c6692fSMark Johnson } 2646*07c6692fSMark Johnson 2647*07c6692fSMark Johnson size -= psize; 2648*07c6692fSMark Johnson } 2649*07c6692fSMark Johnson 2650*07c6692fSMark Johnson return (B_FALSE); 2651*07c6692fSMark Johnson } 2652*07c6692fSMark Johnson 2653*07c6692fSMark Johnson 2654*07c6692fSMark Johnson /* 265512f080e7Smrj * rootnex_get_sgl() 265612f080e7Smrj * Called in bind fastpath to get the sgl. Most of this will be replaced 265712f080e7Smrj * with a call to the vm layer when vm2.0 comes around... 
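 *
 *    Sketch of the result (hypothetical numbers, 4K pages, no copybuf
 *    needed): binding a 0x2200 byte buffer that starts 0x200 into its
 *    first page and is physically contiguous can come back as a single
 *    cookie { dmac_laddress = paddr, dmac_size = 0x2200 }; if a page in
 *    the middle is not contiguous, it is split into separate cookies
 *    instead.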
265812f080e7Smrj */ 265912f080e7Smrj static void 266012f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 266112f080e7Smrj rootnex_sglinfo_t *sglinfo) 266212f080e7Smrj { 266312f080e7Smrj ddi_dma_atyp_t buftype; 2664843e1988Sjohnlev rootnex_addr_t raddr; 266512f080e7Smrj uint64_t last_page; 266612f080e7Smrj uint64_t offset; 266712f080e7Smrj uint64_t addrhi; 266812f080e7Smrj uint64_t addrlo; 266912f080e7Smrj uint64_t maxseg; 267012f080e7Smrj page_t **pplist; 267112f080e7Smrj uint64_t paddr; 267212f080e7Smrj uint32_t psize; 267312f080e7Smrj uint32_t size; 267412f080e7Smrj caddr_t vaddr; 267512f080e7Smrj uint_t pcnt; 267612f080e7Smrj page_t *pp; 267712f080e7Smrj uint_t cnt; 267812f080e7Smrj 267912f080e7Smrj 268012f080e7Smrj /* shortcuts */ 268112f080e7Smrj pplist = dmar_object->dmao_obj.virt_obj.v_priv; 268212f080e7Smrj vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 268312f080e7Smrj maxseg = sglinfo->si_max_cookie_size; 268412f080e7Smrj buftype = dmar_object->dmao_type; 268512f080e7Smrj addrhi = sglinfo->si_max_addr; 268612f080e7Smrj addrlo = sglinfo->si_min_addr; 268712f080e7Smrj size = dmar_object->dmao_size; 268812f080e7Smrj 268912f080e7Smrj pcnt = 0; 269012f080e7Smrj cnt = 0; 269112f080e7Smrj 2692*07c6692fSMark Johnson 2693*07c6692fSMark Johnson /* 2694*07c6692fSMark Johnson * check to see if we need to use the copy buffer for pages over 2695*07c6692fSMark Johnson * the segment attr. 2696*07c6692fSMark Johnson */ 2697*07c6692fSMark Johnson sglinfo->si_bounce_on_seg = B_FALSE; 2698*07c6692fSMark Johnson if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) { 2699*07c6692fSMark Johnson sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg( 2700*07c6692fSMark Johnson dmar_object, sglinfo); 2701*07c6692fSMark Johnson } 2702*07c6692fSMark Johnson 270312f080e7Smrj /* 270412f080e7Smrj * if we were passed down a linked list of pages, i.e. pointer to 270512f080e7Smrj * page_t, use this to get our physical address and buf offset. 270612f080e7Smrj */ 270712f080e7Smrj if (buftype == DMA_OTYP_PAGES) { 270812f080e7Smrj pp = dmar_object->dmao_obj.pp_obj.pp_pp; 270912f080e7Smrj ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 271012f080e7Smrj offset = dmar_object->dmao_obj.pp_obj.pp_offset & 271112f080e7Smrj MMU_PAGEOFFSET; 2712843e1988Sjohnlev paddr = pfn_to_pa(pp->p_pagenum) + offset; 271312f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 271412f080e7Smrj pp = pp->p_next; 271512f080e7Smrj sglinfo->si_asp = NULL; 271612f080e7Smrj 271712f080e7Smrj /* 271812f080e7Smrj * We weren't passed down a linked list of pages, but if we were passed 271912f080e7Smrj * down an array of pages, use this to get our physical address and buf 272012f080e7Smrj * offset. 272112f080e7Smrj */ 272212f080e7Smrj } else if (pplist != NULL) { 272312f080e7Smrj ASSERT((buftype == DMA_OTYP_VADDR) || 272412f080e7Smrj (buftype == DMA_OTYP_BUFVADDR)); 272512f080e7Smrj 272612f080e7Smrj offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 272712f080e7Smrj sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 272812f080e7Smrj if (sglinfo->si_asp == NULL) { 272912f080e7Smrj sglinfo->si_asp = &kas; 273012f080e7Smrj } 273112f080e7Smrj 273212f080e7Smrj ASSERT(!PP_ISFREE(pplist[pcnt])); 2733843e1988Sjohnlev paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 273412f080e7Smrj paddr += offset; 273512f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 273612f080e7Smrj pcnt++; 273712f080e7Smrj 273812f080e7Smrj /* 273912f080e7Smrj * All we have is a virtual address, we'll need to call into the VM 274012f080e7Smrj * to get the physical address. 
274112f080e7Smrj */ 274212f080e7Smrj } else { 274312f080e7Smrj ASSERT((buftype == DMA_OTYP_VADDR) || 274412f080e7Smrj (buftype == DMA_OTYP_BUFVADDR)); 274512f080e7Smrj 274612f080e7Smrj offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 274712f080e7Smrj sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 274812f080e7Smrj if (sglinfo->si_asp == NULL) { 274912f080e7Smrj sglinfo->si_asp = &kas; 275012f080e7Smrj } 275112f080e7Smrj 2752843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 275312f080e7Smrj paddr += offset; 275412f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 275512f080e7Smrj vaddr += psize; 275612f080e7Smrj } 275712f080e7Smrj 2758843e1988Sjohnlev #ifdef __xpv 2759843e1988Sjohnlev /* 2760843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 2761843e1988Sjohnlev * the cookies with MFNs instead of PFNs. 2762843e1988Sjohnlev */ 2763843e1988Sjohnlev raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2764843e1988Sjohnlev #else 2765843e1988Sjohnlev raddr = paddr; 2766843e1988Sjohnlev #endif 2767843e1988Sjohnlev 276812f080e7Smrj /* 276912f080e7Smrj * Setup the first cookie with the physical address of the page and the 277012f080e7Smrj * size of the page (which takes into account the initial offset into 277112f080e7Smrj * the page. 277212f080e7Smrj */ 2773843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 277412f080e7Smrj sgl[cnt].dmac_size = psize; 277512f080e7Smrj sgl[cnt].dmac_type = 0; 277612f080e7Smrj 277712f080e7Smrj /* 277812f080e7Smrj * Save away the buffer offset into the page. We'll need this later in 277912f080e7Smrj * the copy buffer code to help figure out the page index within the 278012f080e7Smrj * buffer and the offset into the current page. 278112f080e7Smrj */ 278212f080e7Smrj sglinfo->si_buf_offset = offset; 278312f080e7Smrj 278412f080e7Smrj /* 2785*07c6692fSMark Johnson * If we are using the copy buffer for anything over the segment 2786*07c6692fSMark Johnson * boundary, and this page is over the segment boundary. 2787*07c6692fSMark Johnson * OR 2788*07c6692fSMark Johnson * if the DMA engine can't reach the physical address. 278912f080e7Smrj */ 2790*07c6692fSMark Johnson if (((sglinfo->si_bounce_on_seg) && 2791*07c6692fSMark Johnson ((raddr + psize) > sglinfo->si_segmask)) || 2792*07c6692fSMark Johnson ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 2793*07c6692fSMark Johnson /* 2794*07c6692fSMark Johnson * Increase how much copy buffer we use. We always increase by 2795*07c6692fSMark Johnson * pagesize so we don't have to worry about converting offsets. 2796*07c6692fSMark Johnson * Set a flag in the cookies dmac_type to indicate that it uses 2797*07c6692fSMark Johnson * the copy buffer. If this isn't the last cookie, go to the 2798*07c6692fSMark Johnson * next cookie (since we separate each page which uses the copy 2799*07c6692fSMark Johnson * buffer in case the copy buffer is not physically contiguous. 2800*07c6692fSMark Johnson */ 280112f080e7Smrj sglinfo->si_copybuf_req += MMU_PAGESIZE; 280212f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 280312f080e7Smrj if ((cnt + 1) < sglinfo->si_max_pages) { 280412f080e7Smrj cnt++; 280512f080e7Smrj sgl[cnt].dmac_laddress = 0; 280612f080e7Smrj sgl[cnt].dmac_size = 0; 280712f080e7Smrj sgl[cnt].dmac_type = 0; 280812f080e7Smrj } 280912f080e7Smrj } 281012f080e7Smrj 281112f080e7Smrj /* 281212f080e7Smrj * save this page's physical address so we can figure out if the next 281312f080e7Smrj * page is physically contiguous. 
Keep decrementing size until we are 281412f080e7Smrj * done with the buffer. 281512f080e7Smrj */ 2816843e1988Sjohnlev last_page = raddr & MMU_PAGEMASK; 281712f080e7Smrj size -= psize; 281812f080e7Smrj 281912f080e7Smrj while (size > 0) { 282012f080e7Smrj /* Get the size for this page (i.e. partial or full page) */ 282112f080e7Smrj psize = MIN(size, MMU_PAGESIZE); 282212f080e7Smrj 282312f080e7Smrj if (buftype == DMA_OTYP_PAGES) { 282412f080e7Smrj /* get the paddr from the page_t */ 282512f080e7Smrj ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2826843e1988Sjohnlev paddr = pfn_to_pa(pp->p_pagenum); 282712f080e7Smrj pp = pp->p_next; 282812f080e7Smrj } else if (pplist != NULL) { 282912f080e7Smrj /* index into the array of page_t's to get the paddr */ 283012f080e7Smrj ASSERT(!PP_ISFREE(pplist[pcnt])); 2831843e1988Sjohnlev paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 283212f080e7Smrj pcnt++; 283312f080e7Smrj } else { 283412f080e7Smrj /* call into the VM to get the paddr */ 2835843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 283612f080e7Smrj vaddr)); 283712f080e7Smrj vaddr += psize; 283812f080e7Smrj } 283912f080e7Smrj 2840843e1988Sjohnlev #ifdef __xpv 2841843e1988Sjohnlev /* 2842843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 2843843e1988Sjohnlev * the cookies with MFNs instead of PFNs. 2844843e1988Sjohnlev */ 2845843e1988Sjohnlev raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2846843e1988Sjohnlev #else 2847843e1988Sjohnlev raddr = paddr; 2848843e1988Sjohnlev #endif 2849*07c6692fSMark Johnson 2850*07c6692fSMark Johnson /* 2851*07c6692fSMark Johnson * If we are using the copy buffer for anything over the 2852*07c6692fSMark Johnson * segment boundary, and this page is over the segment 2853*07c6692fSMark Johnson * boundary. 2854*07c6692fSMark Johnson * OR 2855*07c6692fSMark Johnson * if the DMA engine can't reach the physical address. 2856*07c6692fSMark Johnson */ 2857*07c6692fSMark Johnson if (((sglinfo->si_bounce_on_seg) && 2858*07c6692fSMark Johnson ((raddr + psize) > sglinfo->si_segmask)) || 2859*07c6692fSMark Johnson ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 2860*07c6692fSMark Johnson 286112f080e7Smrj sglinfo->si_copybuf_req += MMU_PAGESIZE; 286212f080e7Smrj 286312f080e7Smrj /* 286412f080e7Smrj * if there is something in the current cookie, go to 286512f080e7Smrj * the next one. We only want one page in a cookie which 286612f080e7Smrj * uses the copybuf since the copybuf doesn't have to 286712f080e7Smrj * be physically contiguous. 286812f080e7Smrj */ 286912f080e7Smrj if (sgl[cnt].dmac_size != 0) { 287012f080e7Smrj cnt++; 287112f080e7Smrj } 2872843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 287312f080e7Smrj sgl[cnt].dmac_size = psize; 287412f080e7Smrj #if defined(__amd64) 287512f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 287612f080e7Smrj #else 287712f080e7Smrj /* 287812f080e7Smrj * save the buf offset for 32-bit kernel. used in the 287912f080e7Smrj * obsoleted interfaces. 
288012f080e7Smrj */ 288112f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF | 288212f080e7Smrj (dmar_object->dmao_size - size); 288312f080e7Smrj #endif 288412f080e7Smrj /* if this isn't the last cookie, go to the next one */ 288512f080e7Smrj if ((cnt + 1) < sglinfo->si_max_pages) { 288612f080e7Smrj cnt++; 288712f080e7Smrj sgl[cnt].dmac_laddress = 0; 288812f080e7Smrj sgl[cnt].dmac_size = 0; 288912f080e7Smrj sgl[cnt].dmac_type = 0; 289012f080e7Smrj } 289112f080e7Smrj 289212f080e7Smrj /* 289312f080e7Smrj * this page didn't need the copy buffer, if it's not physically 289412f080e7Smrj * contiguous, or it would put us over a segment boundary, or it 289512f080e7Smrj * puts us over the max cookie size, or the current sgl doesn't 289612f080e7Smrj * have anything in it. 289712f080e7Smrj */ 2898843e1988Sjohnlev } else if (((last_page + MMU_PAGESIZE) != raddr) || 2899843e1988Sjohnlev !(raddr & sglinfo->si_segmask) || 290012f080e7Smrj ((sgl[cnt].dmac_size + psize) > maxseg) || 290112f080e7Smrj (sgl[cnt].dmac_size == 0)) { 290212f080e7Smrj /* 290312f080e7Smrj * if we're not already in a new cookie, go to the next 290412f080e7Smrj * cookie. 290512f080e7Smrj */ 290612f080e7Smrj if (sgl[cnt].dmac_size != 0) { 290712f080e7Smrj cnt++; 290812f080e7Smrj } 290912f080e7Smrj 291012f080e7Smrj /* save the cookie information */ 2911843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 291212f080e7Smrj sgl[cnt].dmac_size = psize; 291312f080e7Smrj #if defined(__amd64) 291412f080e7Smrj sgl[cnt].dmac_type = 0; 291512f080e7Smrj #else 291612f080e7Smrj /* 291712f080e7Smrj * save the buf offset for 32-bit kernel. used in the 291812f080e7Smrj * obsoleted interfaces. 291912f080e7Smrj */ 292012f080e7Smrj sgl[cnt].dmac_type = dmar_object->dmao_size - size; 292112f080e7Smrj #endif 292212f080e7Smrj 292312f080e7Smrj /* 292412f080e7Smrj * this page didn't need the copy buffer, it is physically 292512f080e7Smrj * contiguous with the last page, and it's <= the max cookie 292612f080e7Smrj * size. 292712f080e7Smrj */ 292812f080e7Smrj } else { 292912f080e7Smrj sgl[cnt].dmac_size += psize; 293012f080e7Smrj 293112f080e7Smrj /* 293212f080e7Smrj * if this exactly == the maximum cookie size, and 293312f080e7Smrj * it isn't the last cookie, go to the next cookie. 293412f080e7Smrj */ 293512f080e7Smrj if (((sgl[cnt].dmac_size + psize) == maxseg) && 293612f080e7Smrj ((cnt + 1) < sglinfo->si_max_pages)) { 293712f080e7Smrj cnt++; 293812f080e7Smrj sgl[cnt].dmac_laddress = 0; 293912f080e7Smrj sgl[cnt].dmac_size = 0; 294012f080e7Smrj sgl[cnt].dmac_type = 0; 294112f080e7Smrj } 294212f080e7Smrj } 294312f080e7Smrj 294412f080e7Smrj /* 294512f080e7Smrj * save this page's physical address so we can figure out if the 294612f080e7Smrj * next page is physically contiguous. Keep decrementing size 294712f080e7Smrj * until we are done with the buffer. 294812f080e7Smrj */ 2949843e1988Sjohnlev last_page = raddr; 295012f080e7Smrj size -= psize; 295112f080e7Smrj } 295212f080e7Smrj 295312f080e7Smrj /* we're done, save away how many cookies the sgl has */ 295412f080e7Smrj if (sgl[cnt].dmac_size == 0) { 295512f080e7Smrj ASSERT(cnt < sglinfo->si_max_pages); 295612f080e7Smrj sglinfo->si_sgl_size = cnt; 295712f080e7Smrj } else { 295812f080e7Smrj sglinfo->si_sgl_size = cnt + 1; 295912f080e7Smrj } 296012f080e7Smrj } 296112f080e7Smrj 296212f080e7Smrj /* 296312f080e7Smrj * rootnex_bind_slowpath() 296412f080e7Smrj * Call in the bind path if the calling driver can't use the sgl without 296512f080e7Smrj * modifying it. 
We either need to use the copy buffer and/or we will end up 296612f080e7Smrj * with a partial bind. 296712f080e7Smrj */ 296812f080e7Smrj static int 296912f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 297012f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag) 297112f080e7Smrj { 297212f080e7Smrj rootnex_sglinfo_t *sinfo; 297312f080e7Smrj rootnex_window_t *window; 297412f080e7Smrj ddi_dma_cookie_t *cookie; 297512f080e7Smrj size_t copybuf_used; 297612f080e7Smrj size_t dmac_size; 297712f080e7Smrj boolean_t partial; 297812f080e7Smrj off_t cur_offset; 297912f080e7Smrj page_t *cur_pp; 298012f080e7Smrj major_t mnum; 298112f080e7Smrj int e; 298212f080e7Smrj int i; 298312f080e7Smrj 298412f080e7Smrj 298512f080e7Smrj sinfo = &dma->dp_sglinfo; 298612f080e7Smrj copybuf_used = 0; 298712f080e7Smrj partial = B_FALSE; 298812f080e7Smrj 298912f080e7Smrj /* 299012f080e7Smrj * If we're using the copybuf, set the copybuf state in dma struct. 299112f080e7Smrj * Needs to be first since it sets the copy buffer size. 299212f080e7Smrj */ 299312f080e7Smrj if (sinfo->si_copybuf_req != 0) { 299412f080e7Smrj e = rootnex_setup_copybuf(hp, dmareq, dma, attr); 299512f080e7Smrj if (e != DDI_SUCCESS) { 299612f080e7Smrj return (e); 299712f080e7Smrj } 299812f080e7Smrj } else { 299912f080e7Smrj dma->dp_copybuf_size = 0; 300012f080e7Smrj } 300112f080e7Smrj 300212f080e7Smrj /* 300312f080e7Smrj * Figure out if we need to do a partial mapping. If so, figure out 300412f080e7Smrj * if we need to trim the buffers when we munge the sgl. 300512f080e7Smrj */ 300612f080e7Smrj if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) || 300712f080e7Smrj (dma->dp_dma.dmao_size > dma->dp_maxxfer) || 300812f080e7Smrj (attr->dma_attr_sgllen < sinfo->si_sgl_size)) { 300912f080e7Smrj dma->dp_partial_required = B_TRUE; 301012f080e7Smrj if (attr->dma_attr_granular != 1) { 301112f080e7Smrj dma->dp_trim_required = B_TRUE; 301212f080e7Smrj } 301312f080e7Smrj } else { 301412f080e7Smrj dma->dp_partial_required = B_FALSE; 301512f080e7Smrj dma->dp_trim_required = B_FALSE; 301612f080e7Smrj } 301712f080e7Smrj 301812f080e7Smrj /* If we need to do a partial bind, make sure the driver supports it */ 301912f080e7Smrj if (dma->dp_partial_required && 302012f080e7Smrj !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) { 302112f080e7Smrj 302212f080e7Smrj mnum = ddi_driver_major(dma->dp_dip); 302312f080e7Smrj /* 302412f080e7Smrj * patchable which allows us to print one warning per major 302512f080e7Smrj * number. 302612f080e7Smrj */ 302712f080e7Smrj if ((rootnex_bind_warn) && 302812f080e7Smrj ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 302912f080e7Smrj rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 303012f080e7Smrj cmn_err(CE_WARN, "!%s: coding error detected, the " 303112f080e7Smrj "driver is using ddi_dma_attr(9S) incorrectly. " 303212f080e7Smrj "There is a small risk of data corruption in " 303312f080e7Smrj "particular with large I/Os. The driver should be " 303412f080e7Smrj "replaced with a corrected version for proper " 303512f080e7Smrj "system operation. To disable this warning, add " 303612f080e7Smrj "'set rootnex:rootnex_bind_warn=0' to " 303712f080e7Smrj "/etc/system(4).", ddi_driver_name(dma->dp_dip)); 303812f080e7Smrj } 303912f080e7Smrj return (DDI_DMA_TOOBIG); 304012f080e7Smrj } 304112f080e7Smrj 304212f080e7Smrj /* 304312f080e7Smrj * we might need multiple windows, setup state to handle them. In this 304412f080e7Smrj * code path, we will have at least one window. 
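 *
 *    In the cookie loop below, a new window is started whenever one of
 *    three limits is hit: the copy buffer space for this bind is used
 *    up, the current window already holds dma_attr_sgllen cookies, or
 *    adding the cookie would push the window past dma_attr_maxxfer.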
304512f080e7Smrj */ 304612f080e7Smrj e = rootnex_setup_windows(hp, dma, attr, kmflag); 304712f080e7Smrj if (e != DDI_SUCCESS) { 304812f080e7Smrj rootnex_teardown_copybuf(dma); 304912f080e7Smrj return (e); 305012f080e7Smrj } 305112f080e7Smrj 305212f080e7Smrj window = &dma->dp_window[0]; 305312f080e7Smrj cookie = &dma->dp_cookies[0]; 305412f080e7Smrj cur_offset = 0; 305512f080e7Smrj rootnex_init_win(hp, dma, window, cookie, cur_offset); 305612f080e7Smrj if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) { 305712f080e7Smrj cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 305812f080e7Smrj } 305912f080e7Smrj 306012f080e7Smrj /* loop though all the cookies we got back from get_sgl() */ 306112f080e7Smrj for (i = 0; i < sinfo->si_sgl_size; i++) { 306212f080e7Smrj /* 306312f080e7Smrj * If we're using the copy buffer, check this cookie and setup 306412f080e7Smrj * its associated copy buffer state. If this cookie uses the 306512f080e7Smrj * copy buffer, make sure we sync this window during dma_sync. 306612f080e7Smrj */ 306712f080e7Smrj if (dma->dp_copybuf_size > 0) { 306812f080e7Smrj rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie, 306912f080e7Smrj cur_offset, ©buf_used, &cur_pp); 307012f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 307112f080e7Smrj window->wd_dosync = B_TRUE; 307212f080e7Smrj } 307312f080e7Smrj } 307412f080e7Smrj 307512f080e7Smrj /* 307612f080e7Smrj * save away the cookie size, since it could be modified in 307712f080e7Smrj * the windowing code. 307812f080e7Smrj */ 307912f080e7Smrj dmac_size = cookie->dmac_size; 308012f080e7Smrj 308112f080e7Smrj /* if we went over max copybuf size */ 308212f080e7Smrj if (dma->dp_copybuf_size && 308312f080e7Smrj (copybuf_used > dma->dp_copybuf_size)) { 308412f080e7Smrj partial = B_TRUE; 308512f080e7Smrj e = rootnex_copybuf_window_boundary(hp, dma, &window, 308612f080e7Smrj cookie, cur_offset, ©buf_used); 308712f080e7Smrj if (e != DDI_SUCCESS) { 308812f080e7Smrj rootnex_teardown_copybuf(dma); 308912f080e7Smrj rootnex_teardown_windows(dma); 309012f080e7Smrj return (e); 309112f080e7Smrj } 309212f080e7Smrj 309312f080e7Smrj /* 309412f080e7Smrj * if the coookie uses the copy buffer, make sure the 309512f080e7Smrj * new window we just moved to is set to sync. 309612f080e7Smrj */ 309712f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 309812f080e7Smrj window->wd_dosync = B_TRUE; 309912f080e7Smrj } 310012f080e7Smrj DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *, 310112f080e7Smrj dma->dp_dip); 310212f080e7Smrj 310312f080e7Smrj /* if the cookie cnt == max sgllen, move to the next window */ 310412f080e7Smrj } else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) { 310512f080e7Smrj partial = B_TRUE; 310612f080e7Smrj ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen); 310712f080e7Smrj e = rootnex_sgllen_window_boundary(hp, dma, &window, 310812f080e7Smrj cookie, attr, cur_offset); 310912f080e7Smrj if (e != DDI_SUCCESS) { 311012f080e7Smrj rootnex_teardown_copybuf(dma); 311112f080e7Smrj rootnex_teardown_windows(dma); 311212f080e7Smrj return (e); 311312f080e7Smrj } 311412f080e7Smrj 311512f080e7Smrj /* 311612f080e7Smrj * if the coookie uses the copy buffer, make sure the 311712f080e7Smrj * new window we just moved to is set to sync. 
311812f080e7Smrj */ 311912f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 312012f080e7Smrj window->wd_dosync = B_TRUE; 312112f080e7Smrj } 312212f080e7Smrj DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *, 312312f080e7Smrj dma->dp_dip); 312412f080e7Smrj 312512f080e7Smrj /* else if we will be over maxxfer */ 312612f080e7Smrj } else if ((window->wd_size + dmac_size) > 312712f080e7Smrj dma->dp_maxxfer) { 312812f080e7Smrj partial = B_TRUE; 312912f080e7Smrj e = rootnex_maxxfer_window_boundary(hp, dma, &window, 313012f080e7Smrj cookie); 313112f080e7Smrj if (e != DDI_SUCCESS) { 313212f080e7Smrj rootnex_teardown_copybuf(dma); 313312f080e7Smrj rootnex_teardown_windows(dma); 313412f080e7Smrj return (e); 313512f080e7Smrj } 313612f080e7Smrj 313712f080e7Smrj /* 313812f080e7Smrj * if the coookie uses the copy buffer, make sure the 313912f080e7Smrj * new window we just moved to is set to sync. 314012f080e7Smrj */ 314112f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 314212f080e7Smrj window->wd_dosync = B_TRUE; 314312f080e7Smrj } 314412f080e7Smrj DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *, 314512f080e7Smrj dma->dp_dip); 314612f080e7Smrj 314712f080e7Smrj /* else this cookie fits in the current window */ 314812f080e7Smrj } else { 314912f080e7Smrj window->wd_cookie_cnt++; 315012f080e7Smrj window->wd_size += dmac_size; 315112f080e7Smrj } 315212f080e7Smrj 315312f080e7Smrj /* track our offset into the buffer, go to the next cookie */ 315412f080e7Smrj ASSERT(dmac_size <= dma->dp_dma.dmao_size); 315512f080e7Smrj ASSERT(cookie->dmac_size <= dmac_size); 315612f080e7Smrj cur_offset += dmac_size; 315712f080e7Smrj cookie++; 315812f080e7Smrj } 315912f080e7Smrj 316012f080e7Smrj /* if we ended up with a zero sized window in the end, clean it up */ 316112f080e7Smrj if (window->wd_size == 0) { 316212f080e7Smrj hp->dmai_nwin--; 316312f080e7Smrj window--; 316412f080e7Smrj } 316512f080e7Smrj 316612f080e7Smrj ASSERT(window->wd_trim.tr_trim_last == B_FALSE); 316712f080e7Smrj 316812f080e7Smrj if (!partial) { 316912f080e7Smrj return (DDI_DMA_MAPPED); 317012f080e7Smrj } 317112f080e7Smrj 317212f080e7Smrj ASSERT(dma->dp_partial_required); 317312f080e7Smrj return (DDI_DMA_PARTIAL_MAP); 317412f080e7Smrj } 317512f080e7Smrj 317612f080e7Smrj 317712f080e7Smrj /* 317812f080e7Smrj * rootnex_setup_copybuf() 317912f080e7Smrj * Called in bind slowpath. Figures out if we're going to use the copy 318012f080e7Smrj * buffer, and if we do, sets up the basic state to handle it. 
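 *
 *    Rough sizing example (hypothetical numbers): if the sgl needs 12
 *    pages of copy buffer (si_copybuf_req) but i_ddi_copybuf_size()
 *    only allows 8 pages, dp_copybuf_size is clamped to 8 pages and
 *    the bind later becomes a partial mapping with multiple windows.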
318112f080e7Smrj */ 318212f080e7Smrj static int 318312f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 318412f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr) 318512f080e7Smrj { 318612f080e7Smrj rootnex_sglinfo_t *sinfo; 318712f080e7Smrj ddi_dma_attr_t lattr; 318812f080e7Smrj size_t max_copybuf; 318912f080e7Smrj int cansleep; 319012f080e7Smrj int e; 319112f080e7Smrj #if !defined(__amd64) 319212f080e7Smrj int vmflag; 319312f080e7Smrj #endif 319412f080e7Smrj 319512f080e7Smrj 319612f080e7Smrj sinfo = &dma->dp_sglinfo; 319712f080e7Smrj 319836945f79Smrj /* read this first so it's consistent through the routine */ 319936945f79Smrj max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK; 320012f080e7Smrj 320112f080e7Smrj /* We need to call into the rootnex on ddi_dma_sync() */ 320212f080e7Smrj hp->dmai_rflags &= ~DMP_NOSYNC; 320312f080e7Smrj 320412f080e7Smrj /* make sure the copybuf size <= the max size */ 320512f080e7Smrj dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf); 320612f080e7Smrj ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0); 320712f080e7Smrj 320812f080e7Smrj #if !defined(__amd64) 320912f080e7Smrj /* 321012f080e7Smrj * if we don't have kva space to copy to/from, allocate the KVA space 321112f080e7Smrj * now. We only do this for the 32-bit kernel. We use seg kpm space for 321212f080e7Smrj * the 64-bit kernel. 321312f080e7Smrj */ 321412f080e7Smrj if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) || 321512f080e7Smrj (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) { 321612f080e7Smrj 321712f080e7Smrj /* convert the sleep flags */ 321812f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 321912f080e7Smrj vmflag = VM_SLEEP; 322012f080e7Smrj } else { 322112f080e7Smrj vmflag = VM_NOSLEEP; 322212f080e7Smrj } 322312f080e7Smrj 322412f080e7Smrj /* allocate Kernel VA space that we can bcopy to/from */ 322512f080e7Smrj dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size, 322612f080e7Smrj vmflag); 322712f080e7Smrj if (dma->dp_kva == NULL) { 322812f080e7Smrj return (DDI_DMA_NORESOURCES); 322912f080e7Smrj } 323012f080e7Smrj } 323112f080e7Smrj #endif 323212f080e7Smrj 323312f080e7Smrj /* convert the sleep flags */ 323412f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 323512f080e7Smrj cansleep = 1; 323612f080e7Smrj } else { 323712f080e7Smrj cansleep = 0; 323812f080e7Smrj } 323912f080e7Smrj 324012f080e7Smrj /* 3241d21b39ddSmrj * Allocate the actual copy buffer. This needs to fit within the DMA 3242d21b39ddSmrj * engine limits, so we can't use kmem_alloc... We don't need 3243d21b39ddSmrj * contiguous memory (sgllen) since we will be forcing windows on 3244d21b39ddSmrj * sgllen anyway. 324512f080e7Smrj */ 324612f080e7Smrj lattr = *attr; 324712f080e7Smrj lattr.dma_attr_align = MMU_PAGESIZE; 3248d21b39ddSmrj /* 3249d21b39ddSmrj * this should be < 0 to indicate no limit, but due to a bug in 3250d21b39ddSmrj * the rootnex, we'll set it to the maximum positive int. 3251d21b39ddSmrj */ 3252d21b39ddSmrj lattr.dma_attr_sgllen = 0x7fffffff; 3253*07c6692fSMark Johnson /* 3254*07c6692fSMark Johnson * if we're using the copy buffer because of seg, use that for our 3255*07c6692fSMark Johnson * upper address limit. 
3256*07c6692fSMark Johnson */ 3257*07c6692fSMark Johnson if (sinfo->si_bounce_on_seg) { 3258*07c6692fSMark Johnson lattr.dma_attr_addr_hi = lattr.dma_attr_seg; 3259*07c6692fSMark Johnson } 326012f080e7Smrj e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep, 326112f080e7Smrj 0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL); 326212f080e7Smrj if (e != DDI_SUCCESS) { 326312f080e7Smrj #if !defined(__amd64) 326412f080e7Smrj if (dma->dp_kva != NULL) { 326512f080e7Smrj vmem_free(heap_arena, dma->dp_kva, 326612f080e7Smrj dma->dp_copybuf_size); 326712f080e7Smrj } 326812f080e7Smrj #endif 326912f080e7Smrj return (DDI_DMA_NORESOURCES); 327012f080e7Smrj } 327112f080e7Smrj 327212f080e7Smrj DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip, 327312f080e7Smrj size_t, dma->dp_copybuf_size); 327412f080e7Smrj 327512f080e7Smrj return (DDI_SUCCESS); 327612f080e7Smrj } 327712f080e7Smrj 327812f080e7Smrj 327912f080e7Smrj /* 328012f080e7Smrj * rootnex_setup_windows() 328112f080e7Smrj * Called in bind slowpath to setup the window state. We always have windows 328212f080e7Smrj * in the slowpath. Even if the window count = 1. 328312f080e7Smrj */ 328412f080e7Smrj static int 328512f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 328612f080e7Smrj ddi_dma_attr_t *attr, int kmflag) 328712f080e7Smrj { 328812f080e7Smrj rootnex_window_t *windowp; 328912f080e7Smrj rootnex_sglinfo_t *sinfo; 329012f080e7Smrj size_t copy_state_size; 329112f080e7Smrj size_t win_state_size; 329212f080e7Smrj size_t state_available; 329312f080e7Smrj size_t space_needed; 329412f080e7Smrj uint_t copybuf_win; 329512f080e7Smrj uint_t maxxfer_win; 329612f080e7Smrj size_t space_used; 329712f080e7Smrj uint_t sglwin; 329812f080e7Smrj 329912f080e7Smrj 330012f080e7Smrj sinfo = &dma->dp_sglinfo; 330112f080e7Smrj 330212f080e7Smrj dma->dp_current_win = 0; 330312f080e7Smrj hp->dmai_nwin = 0; 330412f080e7Smrj 330512f080e7Smrj /* If we don't need to do a partial, we only have one window */ 330612f080e7Smrj if (!dma->dp_partial_required) { 330712f080e7Smrj dma->dp_max_win = 1; 330812f080e7Smrj 330912f080e7Smrj /* 331012f080e7Smrj * we need multiple windows, need to figure out the worse case number 331112f080e7Smrj * of windows. 331212f080e7Smrj */ 33137c478bd9Sstevel@tonic-gate } else { 33147c478bd9Sstevel@tonic-gate /* 331512f080e7Smrj * if we need windows because we need more copy buffer that 331612f080e7Smrj * we allow, the worse case number of windows we could need 331712f080e7Smrj * here would be (copybuf space required / copybuf space that 331812f080e7Smrj * we have) plus one for remainder, and plus 2 to handle the 331912f080e7Smrj * extra pages on the trim for the first and last pages of the 332012f080e7Smrj * buffer (a page is the minimum window size so under the right 332112f080e7Smrj * attr settings, you could have a window for each page). 332212f080e7Smrj * The last page will only be hit here if the size is not a 332312f080e7Smrj * multiple of the granularity (which theoretically shouldn't 332412f080e7Smrj * be the case but never has been enforced, so we could have 332512f080e7Smrj * broken things without it). 
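 *
 *    Worked example (hypothetical numbers): with si_copybuf_req = 40K
 *    and dp_copybuf_size = 16K, copybuf_win = (40K / 16K) + 1 + 2 = 5;
 *    with si_sgl_size = 70 cookies and dma_attr_sgllen = 17,
 *    sglwin = (70 / 17) + 1 + 2 = 7. dp_max_win is the sum of these
 *    worst cases (plus maxxfer_win when maxxfer is exceeded).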
33267c478bd9Sstevel@tonic-gate */ 332712f080e7Smrj if (sinfo->si_copybuf_req > dma->dp_copybuf_size) { 332812f080e7Smrj ASSERT(dma->dp_copybuf_size > 0); 332912f080e7Smrj copybuf_win = (sinfo->si_copybuf_req / 333012f080e7Smrj dma->dp_copybuf_size) + 1 + 2; 33317c478bd9Sstevel@tonic-gate } else { 333212f080e7Smrj copybuf_win = 0; 33337c478bd9Sstevel@tonic-gate } 333412f080e7Smrj 333512f080e7Smrj /* 333612f080e7Smrj * if we need windows because we have more cookies than the H/W 333712f080e7Smrj * can handle, the number of windows we would need here would 333812f080e7Smrj * be (cookie count / cookies count H/W supports) plus one for 333912f080e7Smrj * remainder, and plus 2 to handle the extra pages on the trim 334012f080e7Smrj * (see above comment about trim) 334112f080e7Smrj */ 334212f080e7Smrj if (attr->dma_attr_sgllen < sinfo->si_sgl_size) { 334312f080e7Smrj sglwin = ((sinfo->si_sgl_size / attr->dma_attr_sgllen) 334412f080e7Smrj + 1) + 2; 33457c478bd9Sstevel@tonic-gate } else { 334612f080e7Smrj sglwin = 0; 33477c478bd9Sstevel@tonic-gate } 334812f080e7Smrj 334912f080e7Smrj /* 335012f080e7Smrj * if we need windows because we're binding more memory than the 335112f080e7Smrj * H/W can transfer at once, the number of windows we would need 335212f080e7Smrj * here would be (xfer count / max xfer H/W supports) plus one 335312f080e7Smrj * for remainder, and plus 2 to handle the extra pages on the 335412f080e7Smrj * trim (see above comment about trim) 335512f080e7Smrj */ 335612f080e7Smrj if (dma->dp_dma.dmao_size > dma->dp_maxxfer) { 335712f080e7Smrj maxxfer_win = (dma->dp_dma.dmao_size / 335812f080e7Smrj dma->dp_maxxfer) + 1 + 2; 335912f080e7Smrj } else { 336012f080e7Smrj maxxfer_win = 0; 33617c478bd9Sstevel@tonic-gate } 336212f080e7Smrj dma->dp_max_win = copybuf_win + sglwin + maxxfer_win; 336312f080e7Smrj ASSERT(dma->dp_max_win > 0); 336412f080e7Smrj } 336512f080e7Smrj win_state_size = dma->dp_max_win * sizeof (rootnex_window_t); 336612f080e7Smrj 336712f080e7Smrj /* 336812f080e7Smrj * Get space for window and potential copy buffer state. Before we 336912f080e7Smrj * go and allocate memory, see if we can get away with using what's 337012f080e7Smrj * left in the pre-allocted state or the dynamically allocated sgl. 337112f080e7Smrj */ 337212f080e7Smrj space_used = (uintptr_t)(sinfo->si_sgl_size * 337312f080e7Smrj sizeof (ddi_dma_cookie_t)); 337412f080e7Smrj 337512f080e7Smrj /* if we dynamically allocated space for the cookies */ 337612f080e7Smrj if (dma->dp_need_to_free_cookie) { 337712f080e7Smrj /* if we have more space in the pre-allocted buffer, use it */ 337812f080e7Smrj ASSERT(space_used <= dma->dp_cookie_size); 337912f080e7Smrj if ((dma->dp_cookie_size - space_used) <= 338012f080e7Smrj rootnex_state->r_prealloc_size) { 338112f080e7Smrj state_available = rootnex_state->r_prealloc_size; 338212f080e7Smrj windowp = (rootnex_window_t *)dma->dp_prealloc_buffer; 338312f080e7Smrj 338412f080e7Smrj /* 338512f080e7Smrj * else, we have more free space in the dynamically allocated 338612f080e7Smrj * buffer, i.e. the buffer wasn't worse case fragmented so we 338712f080e7Smrj * didn't need a lot of cookies. 
338812f080e7Smrj */ 338912f080e7Smrj } else { 339012f080e7Smrj state_available = dma->dp_cookie_size - space_used; 339112f080e7Smrj windowp = (rootnex_window_t *) 339212f080e7Smrj &dma->dp_cookies[sinfo->si_sgl_size]; 339312f080e7Smrj } 339412f080e7Smrj 339512f080e7Smrj /* we used the pre-alloced buffer */ 339612f080e7Smrj } else { 339712f080e7Smrj ASSERT(space_used <= rootnex_state->r_prealloc_size); 339812f080e7Smrj state_available = rootnex_state->r_prealloc_size - space_used; 339912f080e7Smrj windowp = (rootnex_window_t *) 340012f080e7Smrj &dma->dp_cookies[sinfo->si_sgl_size]; 340112f080e7Smrj } 340212f080e7Smrj 340312f080e7Smrj /* 340412f080e7Smrj * figure out how much state we need to track the copy buffer. Add an 340512f080e7Smrj * addition 8 bytes for pointer alignemnt later. 340612f080e7Smrj */ 340712f080e7Smrj if (dma->dp_copybuf_size > 0) { 340812f080e7Smrj copy_state_size = sinfo->si_max_pages * 340912f080e7Smrj sizeof (rootnex_pgmap_t); 341012f080e7Smrj } else { 341112f080e7Smrj copy_state_size = 0; 341212f080e7Smrj } 341312f080e7Smrj /* add an additional 8 bytes for pointer alignment */ 341412f080e7Smrj space_needed = win_state_size + copy_state_size + 0x8; 341512f080e7Smrj 341612f080e7Smrj /* if we have enough space already, use it */ 341712f080e7Smrj if (state_available >= space_needed) { 341812f080e7Smrj dma->dp_window = windowp; 341912f080e7Smrj dma->dp_need_to_free_window = B_FALSE; 342012f080e7Smrj 342112f080e7Smrj /* not enough space, need to allocate more. */ 342212f080e7Smrj } else { 342312f080e7Smrj dma->dp_window = kmem_alloc(space_needed, kmflag); 342412f080e7Smrj if (dma->dp_window == NULL) { 342512f080e7Smrj return (DDI_DMA_NORESOURCES); 342612f080e7Smrj } 342712f080e7Smrj dma->dp_need_to_free_window = B_TRUE; 342812f080e7Smrj dma->dp_window_size = space_needed; 342912f080e7Smrj DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *, 343012f080e7Smrj dma->dp_dip, size_t, space_needed); 343112f080e7Smrj } 343212f080e7Smrj 343312f080e7Smrj /* 343412f080e7Smrj * we allocate copy buffer state and window state at the same time. 343512f080e7Smrj * setup our copy buffer state pointers. Make sure it's aligned. 343612f080e7Smrj */ 343712f080e7Smrj if (dma->dp_copybuf_size > 0) { 343812f080e7Smrj dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t) 343912f080e7Smrj &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7); 344012f080e7Smrj 344112f080e7Smrj #if !defined(__amd64) 344212f080e7Smrj /* 344312f080e7Smrj * make sure all pm_mapped, pm_vaddr, and pm_pp are set to 344412f080e7Smrj * false/NULL. Should be quicker to bzero vs loop and set. 344512f080e7Smrj */ 344612f080e7Smrj bzero(dma->dp_pgmap, copy_state_size); 344712f080e7Smrj #endif 344812f080e7Smrj } else { 344912f080e7Smrj dma->dp_pgmap = NULL; 345012f080e7Smrj } 345112f080e7Smrj 345212f080e7Smrj return (DDI_SUCCESS); 345312f080e7Smrj } 345412f080e7Smrj 345512f080e7Smrj 345612f080e7Smrj /* 345712f080e7Smrj * rootnex_teardown_copybuf() 345812f080e7Smrj * cleans up after rootnex_setup_copybuf() 345912f080e7Smrj */ 346012f080e7Smrj static void 346112f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma) 346212f080e7Smrj { 346312f080e7Smrj #if !defined(__amd64) 346412f080e7Smrj int i; 346512f080e7Smrj 346612f080e7Smrj /* 346712f080e7Smrj * if we allocated kernel heap VMEM space, go through all the pages and 346812f080e7Smrj * map out any of the ones that we're mapped into the kernel heap VMEM 346912f080e7Smrj * arena. Then free the VMEM space. 
347012f080e7Smrj */ 347112f080e7Smrj if (dma->dp_kva != NULL) { 347212f080e7Smrj for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) { 347312f080e7Smrj if (dma->dp_pgmap[i].pm_mapped) { 347412f080e7Smrj hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr, 347512f080e7Smrj MMU_PAGESIZE, HAT_UNLOAD); 347612f080e7Smrj dma->dp_pgmap[i].pm_mapped = B_FALSE; 347712f080e7Smrj } 347812f080e7Smrj } 347912f080e7Smrj 348012f080e7Smrj vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size); 348112f080e7Smrj } 348212f080e7Smrj 348312f080e7Smrj #endif 348412f080e7Smrj 348512f080e7Smrj /* if we allocated a copy buffer, free it */ 348612f080e7Smrj if (dma->dp_cbaddr != NULL) { 34877b93957cSeota i_ddi_mem_free(dma->dp_cbaddr, NULL); 348812f080e7Smrj } 348912f080e7Smrj } 349012f080e7Smrj 349112f080e7Smrj 349212f080e7Smrj /* 349312f080e7Smrj * rootnex_teardown_windows() 349412f080e7Smrj * cleans up after rootnex_setup_windows() 349512f080e7Smrj */ 349612f080e7Smrj static void 349712f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma) 349812f080e7Smrj { 349912f080e7Smrj /* 350012f080e7Smrj * if we had to allocate window state on the last bind (because we 350112f080e7Smrj * didn't have enough pre-allocated space in the handle), free it. 350212f080e7Smrj */ 350312f080e7Smrj if (dma->dp_need_to_free_window) { 350412f080e7Smrj kmem_free(dma->dp_window, dma->dp_window_size); 350512f080e7Smrj } 350612f080e7Smrj } 350712f080e7Smrj 350812f080e7Smrj 350912f080e7Smrj /* 351012f080e7Smrj * rootnex_init_win() 351112f080e7Smrj * Called in bind slow path during creation of a new window. Initializes 351212f080e7Smrj * window state to default values. 351312f080e7Smrj */ 351412f080e7Smrj /*ARGSUSED*/ 351512f080e7Smrj static void 351612f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 351712f080e7Smrj rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset) 351812f080e7Smrj { 351912f080e7Smrj hp->dmai_nwin++; 352012f080e7Smrj window->wd_dosync = B_FALSE; 352112f080e7Smrj window->wd_offset = cur_offset; 352212f080e7Smrj window->wd_size = 0; 352312f080e7Smrj window->wd_first_cookie = cookie; 352412f080e7Smrj window->wd_cookie_cnt = 0; 352512f080e7Smrj window->wd_trim.tr_trim_first = B_FALSE; 352612f080e7Smrj window->wd_trim.tr_trim_last = B_FALSE; 352712f080e7Smrj window->wd_trim.tr_first_copybuf_win = B_FALSE; 352812f080e7Smrj window->wd_trim.tr_last_copybuf_win = B_FALSE; 352912f080e7Smrj #if !defined(__amd64) 353012f080e7Smrj window->wd_remap_copybuf = dma->dp_cb_remaping; 353112f080e7Smrj #endif 353212f080e7Smrj } 353312f080e7Smrj 353412f080e7Smrj 353512f080e7Smrj /* 353612f080e7Smrj * rootnex_setup_cookie() 353712f080e7Smrj * Called in the bind slow path when the sgl uses the copy buffer. If any of 353812f080e7Smrj * the sgl uses the copy buffer, we need to go through each cookie, figure 353912f080e7Smrj * out if it uses the copy buffer, and if it does, save away everything we'll 354012f080e7Smrj * need during sync. 
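 *
 *    Page index example (hypothetical numbers, 4K pages): with
 *    si_buf_offset = 0x200 and cur_offset = 0x2e00,
 *    pidx = (0x200 + 0x2e00) >> MMU_PAGESHIFT = 3, i.e. this cookie's
 *    copybuf state lands in dp_pgmap[3].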
354112f080e7Smrj */ 354212f080e7Smrj static void 354312f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma, 354412f080e7Smrj ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used, 354512f080e7Smrj page_t **cur_pp) 354612f080e7Smrj { 354712f080e7Smrj boolean_t copybuf_sz_power_2; 354812f080e7Smrj rootnex_sglinfo_t *sinfo; 3549843e1988Sjohnlev paddr_t paddr; 355012f080e7Smrj uint_t pidx; 355112f080e7Smrj uint_t pcnt; 355212f080e7Smrj off_t poff; 355312f080e7Smrj #if defined(__amd64) 355412f080e7Smrj pfn_t pfn; 355512f080e7Smrj #else 355612f080e7Smrj page_t **pplist; 355712f080e7Smrj #endif 355812f080e7Smrj 355912f080e7Smrj sinfo = &dma->dp_sglinfo; 356012f080e7Smrj 356112f080e7Smrj /* 356212f080e7Smrj * Calculate the page index relative to the start of the buffer. The 356312f080e7Smrj * index to the current page for our buffer is the offset into the 356412f080e7Smrj * first page of the buffer plus our current offset into the buffer 356512f080e7Smrj * itself, shifted of course... 356612f080e7Smrj */ 356712f080e7Smrj pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT; 356812f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 356912f080e7Smrj 357012f080e7Smrj /* if this cookie uses the copy buffer */ 357112f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 357212f080e7Smrj /* 357312f080e7Smrj * NOTE: we know that since this cookie uses the copy buffer, it 357412f080e7Smrj * is <= MMU_PAGESIZE. 357512f080e7Smrj */ 357612f080e7Smrj 357712f080e7Smrj /* 357812f080e7Smrj * get the offset into the page. For the 64-bit kernel, get the 357912f080e7Smrj * pfn which we'll use with seg kpm. 358012f080e7Smrj */ 3581843e1988Sjohnlev poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 358212f080e7Smrj #if defined(__amd64) 3583843e1988Sjohnlev /* mfn_to_pfn() is a NOP on i86pc */ 3584843e1988Sjohnlev pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT); 3585843e1988Sjohnlev #endif /* __amd64 */ 358612f080e7Smrj 358712f080e7Smrj /* figure out if the copybuf size is a power of 2 */ 358812f080e7Smrj if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) { 358912f080e7Smrj copybuf_sz_power_2 = B_FALSE; 359012f080e7Smrj } else { 359112f080e7Smrj copybuf_sz_power_2 = B_TRUE; 359212f080e7Smrj } 359312f080e7Smrj 359412f080e7Smrj /* This page uses the copy buffer */ 359512f080e7Smrj dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE; 359612f080e7Smrj 359712f080e7Smrj /* 359812f080e7Smrj * save the copy buffer KVA that we'll use with this page. 359912f080e7Smrj * if we still fit within the copybuf, it's a simple add. 360012f080e7Smrj * otherwise, we need to wrap over using & or % accordingly. 360112f080e7Smrj */ 360212f080e7Smrj if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) { 360312f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr + 360412f080e7Smrj *copybuf_used; 360512f080e7Smrj } else { 360612f080e7Smrj if (copybuf_sz_power_2) { 360712f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 360812f080e7Smrj (uintptr_t)dma->dp_cbaddr + 360912f080e7Smrj (*copybuf_used & 361012f080e7Smrj (dma->dp_copybuf_size - 1))); 361112f080e7Smrj } else { 361212f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 361312f080e7Smrj (uintptr_t)dma->dp_cbaddr + 361412f080e7Smrj (*copybuf_used % dma->dp_copybuf_size)); 361512f080e7Smrj } 361612f080e7Smrj } 361712f080e7Smrj 361812f080e7Smrj /* 361912f080e7Smrj * over write the cookie physical address with the address of 362012f080e7Smrj * the physical address of the copy buffer page that we will 362112f080e7Smrj * use. 
362212f080e7Smrj */ 3623843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 362412f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr)) + poff; 362512f080e7Smrj 3626843e1988Sjohnlev #ifdef __xpv 3627843e1988Sjohnlev /* 3628843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 3629843e1988Sjohnlev * the cookies with MAs instead of PAs. 3630843e1988Sjohnlev */ 3631843e1988Sjohnlev cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 3632843e1988Sjohnlev #else 3633843e1988Sjohnlev cookie->dmac_laddress = paddr; 3634843e1988Sjohnlev #endif 3635843e1988Sjohnlev 363612f080e7Smrj /* if we have a kernel VA, it's easy, just save that address */ 363712f080e7Smrj if ((dmar_object->dmao_type != DMA_OTYP_PAGES) && 363812f080e7Smrj (sinfo->si_asp == &kas)) { 363912f080e7Smrj /* 364012f080e7Smrj * save away the page aligned virtual address of the 364112f080e7Smrj * driver buffer. Offsets are handled in the sync code. 364212f080e7Smrj */ 364312f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t) 364412f080e7Smrj dmar_object->dmao_obj.virt_obj.v_addr + cur_offset) 364512f080e7Smrj & MMU_PAGEMASK); 364612f080e7Smrj #if !defined(__amd64) 364712f080e7Smrj /* 364812f080e7Smrj * we didn't need to, and will never need to map this 364912f080e7Smrj * page. 365012f080e7Smrj */ 365112f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 365212f080e7Smrj #endif 365312f080e7Smrj 365412f080e7Smrj /* we don't have a kernel VA. We need one for the bcopy. */ 365512f080e7Smrj } else { 365612f080e7Smrj #if defined(__amd64) 365712f080e7Smrj /* 365812f080e7Smrj * for the 64-bit kernel, it's easy. We use seg kpm to 365912f080e7Smrj * get a Kernel VA for the corresponding pfn. 366012f080e7Smrj */ 366112f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn); 366212f080e7Smrj #else 366312f080e7Smrj /* 366412f080e7Smrj * for the 32-bit kernel, this is a pain. First we'll 366512f080e7Smrj * save away the page_t or user VA for this page. This 366612f080e7Smrj * is needed in rootnex_dma_win() when we switch to a 366712f080e7Smrj * new window which requires us to re-map the copy 366812f080e7Smrj * buffer. 366912f080e7Smrj */ 367012f080e7Smrj pplist = dmar_object->dmao_obj.virt_obj.v_priv; 367112f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 367212f080e7Smrj dma->dp_pgmap[pidx].pm_pp = *cur_pp; 367312f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = NULL; 367412f080e7Smrj } else if (pplist != NULL) { 367512f080e7Smrj dma->dp_pgmap[pidx].pm_pp = pplist[pidx]; 367612f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = NULL; 367712f080e7Smrj } else { 367812f080e7Smrj dma->dp_pgmap[pidx].pm_pp = NULL; 367912f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = (caddr_t) 368012f080e7Smrj (((uintptr_t) 368112f080e7Smrj dmar_object->dmao_obj.virt_obj.v_addr + 368212f080e7Smrj cur_offset) & MMU_PAGEMASK); 368312f080e7Smrj } 368412f080e7Smrj 368512f080e7Smrj /* 368612f080e7Smrj * save away the page aligned virtual address which was 368712f080e7Smrj * allocated from the kernel heap arena (taking into 368812f080e7Smrj * account if we need more copy buffer than we alloced 368912f080e7Smrj * and use multiple windows to handle this, i.e. &,%). 369012f080e7Smrj * NOTE: there isn't and physical memory backing up this 369112f080e7Smrj * virtual address space currently. 
369212f080e7Smrj */ 369312f080e7Smrj if ((*copybuf_used + MMU_PAGESIZE) <= 369412f080e7Smrj dma->dp_copybuf_size) { 369512f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 369612f080e7Smrj (((uintptr_t)dma->dp_kva + *copybuf_used) & 369712f080e7Smrj MMU_PAGEMASK); 369812f080e7Smrj } else { 369912f080e7Smrj if (copybuf_sz_power_2) { 370012f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 370112f080e7Smrj (((uintptr_t)dma->dp_kva + 370212f080e7Smrj (*copybuf_used & 370312f080e7Smrj (dma->dp_copybuf_size - 1))) & 370412f080e7Smrj MMU_PAGEMASK); 370512f080e7Smrj } else { 370612f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 370712f080e7Smrj (((uintptr_t)dma->dp_kva + 370812f080e7Smrj (*copybuf_used % 370912f080e7Smrj dma->dp_copybuf_size)) & 371012f080e7Smrj MMU_PAGEMASK); 371112f080e7Smrj } 371212f080e7Smrj } 371312f080e7Smrj 371412f080e7Smrj /* 371512f080e7Smrj * if we haven't used up the available copy buffer yet, 371612f080e7Smrj * map the kva to the physical page. 371712f080e7Smrj */ 371812f080e7Smrj if (!dma->dp_cb_remaping && ((*copybuf_used + 371912f080e7Smrj MMU_PAGESIZE) <= dma->dp_copybuf_size)) { 372012f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_TRUE; 372112f080e7Smrj if (dma->dp_pgmap[pidx].pm_pp != NULL) { 372212f080e7Smrj i86_pp_map(dma->dp_pgmap[pidx].pm_pp, 372312f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr); 372412f080e7Smrj } else { 372512f080e7Smrj i86_va_map(dma->dp_pgmap[pidx].pm_vaddr, 372612f080e7Smrj sinfo->si_asp, 372712f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr); 372812f080e7Smrj } 372912f080e7Smrj 373012f080e7Smrj /* 373112f080e7Smrj * we've used up the available copy buffer, this page 373212f080e7Smrj * will have to be mapped during rootnex_dma_win() when 373312f080e7Smrj * we switch to a new window which requires a re-map 373412f080e7Smrj * the copy buffer. (32-bit kernel only) 373512f080e7Smrj */ 373612f080e7Smrj } else { 373712f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 373812f080e7Smrj } 373912f080e7Smrj #endif 374012f080e7Smrj /* go to the next page_t */ 374112f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 374212f080e7Smrj *cur_pp = (*cur_pp)->p_next; 374312f080e7Smrj } 374412f080e7Smrj } 374512f080e7Smrj 374612f080e7Smrj /* add to the copy buffer count */ 374712f080e7Smrj *copybuf_used += MMU_PAGESIZE; 374812f080e7Smrj 374912f080e7Smrj /* 375012f080e7Smrj * This cookie doesn't use the copy buffer. Walk through the pages this 375112f080e7Smrj * cookie occupies to reflect this. 375212f080e7Smrj */ 375312f080e7Smrj } else { 375412f080e7Smrj /* 375512f080e7Smrj * figure out how many pages the cookie occupies. We need to 375612f080e7Smrj * use the original page offset of the buffer and the cookies 375712f080e7Smrj * offset in the buffer to do this. 375812f080e7Smrj */ 375912f080e7Smrj poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET; 376012f080e7Smrj pcnt = mmu_btopr(cookie->dmac_size + poff); 376112f080e7Smrj 376212f080e7Smrj while (pcnt > 0) { 376312f080e7Smrj #if !defined(__amd64) 376412f080e7Smrj /* 376512f080e7Smrj * the 32-bit kernel doesn't have seg kpm, so we need 376612f080e7Smrj * to map in the driver buffer (if it didn't come down 376712f080e7Smrj * with a kernel VA) on the fly. Since this page doesn't 376812f080e7Smrj * use the copy buffer, it's not, or will it ever, have 376912f080e7Smrj * to be mapped in. 
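 *
 * For scale, a worked example with illustrative numbers (4K pages): a
 * cookie of size 0x1800 that starts at page offset 0xc00 gives
 * pcnt = mmu_btopr(0x1800 + 0xc00) = 3, so this loop marks three
 * consecutive pages as not using the copy buffer before moving on.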
377012f080e7Smrj */ 377112f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 377212f080e7Smrj #endif 377312f080e7Smrj dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE; 377412f080e7Smrj 377512f080e7Smrj /* 377612f080e7Smrj * we need to update pidx and cur_pp or we'll loose 377712f080e7Smrj * track of where we are. 377812f080e7Smrj */ 377912f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 378012f080e7Smrj *cur_pp = (*cur_pp)->p_next; 378112f080e7Smrj } 378212f080e7Smrj pidx++; 378312f080e7Smrj pcnt--; 378412f080e7Smrj } 378512f080e7Smrj } 378612f080e7Smrj } 378712f080e7Smrj 378812f080e7Smrj 378912f080e7Smrj /* 379012f080e7Smrj * rootnex_sgllen_window_boundary() 379112f080e7Smrj * Called in the bind slow path when the next cookie causes us to exceed (in 379212f080e7Smrj * this case == since we start at 0 and sgllen starts at 1) the maximum sgl 379312f080e7Smrj * length supported by the DMA H/W. 379412f080e7Smrj */ 379512f080e7Smrj static int 379612f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 379712f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr, 379812f080e7Smrj off_t cur_offset) 379912f080e7Smrj { 380012f080e7Smrj off_t new_offset; 380112f080e7Smrj size_t trim_sz; 380212f080e7Smrj off_t coffset; 380312f080e7Smrj 380412f080e7Smrj 380512f080e7Smrj /* 380612f080e7Smrj * if we know we'll never have to trim, it's pretty easy. Just move to 380712f080e7Smrj * the next window and init it. We're done. 380812f080e7Smrj */ 380912f080e7Smrj if (!dma->dp_trim_required) { 381012f080e7Smrj (*windowp)++; 381112f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 381212f080e7Smrj (*windowp)->wd_cookie_cnt++; 381312f080e7Smrj (*windowp)->wd_size = cookie->dmac_size; 381412f080e7Smrj return (DDI_SUCCESS); 381512f080e7Smrj } 381612f080e7Smrj 381712f080e7Smrj /* figure out how much we need to trim from the window */ 381812f080e7Smrj ASSERT(attr->dma_attr_granular != 0); 381912f080e7Smrj if (dma->dp_granularity_power_2) { 382012f080e7Smrj trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1); 382112f080e7Smrj } else { 382212f080e7Smrj trim_sz = (*windowp)->wd_size % attr->dma_attr_granular; 382312f080e7Smrj } 382412f080e7Smrj 382512f080e7Smrj /* The window's a whole multiple of granularity. We're done */ 382612f080e7Smrj if (trim_sz == 0) { 382712f080e7Smrj (*windowp)++; 382812f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 382912f080e7Smrj (*windowp)->wd_cookie_cnt++; 383012f080e7Smrj (*windowp)->wd_size = cookie->dmac_size; 383112f080e7Smrj return (DDI_SUCCESS); 383212f080e7Smrj } 383312f080e7Smrj 383412f080e7Smrj /* 383512f080e7Smrj * The window's not a whole multiple of granularity, since we know this 383612f080e7Smrj * is due to the sgllen, we need to go back to the last cookie and trim 383712f080e7Smrj * that one, add the left over part of the old cookie into the new 383812f080e7Smrj * window, and then add in the new cookie into the new window. 383912f080e7Smrj */ 384012f080e7Smrj 384112f080e7Smrj /* 384212f080e7Smrj * make sure the driver isn't making us do something bad... Trimming and 384312f080e7Smrj * sgllen == 1 don't go together. 384412f080e7Smrj */ 384512f080e7Smrj if (attr->dma_attr_sgllen == 1) { 384612f080e7Smrj return (DDI_DMA_NOMAPPING); 384712f080e7Smrj } 384812f080e7Smrj 384912f080e7Smrj /* 385012f080e7Smrj * first, setup the current window to account for the trim. Need to go 385112f080e7Smrj * back to the last cookie for this. 
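 *
 * Worked example with illustrative numbers: dma_attr_granular = 0x200
 * and a window that has grown to 0x10300 bytes gives trim_sz = 0x100.
 * The last cookie is shrunk by 0x100 so this window ends on a
 * granularity boundary, and that 0x100 is carried over as the first
 * (trimmed) piece of the next window.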
385212f080e7Smrj */ 385312f080e7Smrj cookie--; 385412f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 385512f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 3856843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 385712f080e7Smrj ASSERT(cookie->dmac_size > trim_sz); 385812f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 385912f080e7Smrj (*windowp)->wd_size -= trim_sz; 386012f080e7Smrj 386112f080e7Smrj /* save the buffer offsets for the next window */ 386212f080e7Smrj coffset = cookie->dmac_size - trim_sz; 386312f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 386412f080e7Smrj 386512f080e7Smrj /* 386612f080e7Smrj * set this now in case this is the first window. all other cases are 386712f080e7Smrj * set in dma_win() 386812f080e7Smrj */ 386912f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 387012f080e7Smrj 387112f080e7Smrj /* 387212f080e7Smrj * initialize the next window using what's left over in the previous 387312f080e7Smrj * cookie. 387412f080e7Smrj */ 387512f080e7Smrj (*windowp)++; 387612f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 387712f080e7Smrj (*windowp)->wd_cookie_cnt++; 387812f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3879843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 388012f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 388112f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 388212f080e7Smrj (*windowp)->wd_dosync = B_TRUE; 388312f080e7Smrj } 388412f080e7Smrj 388512f080e7Smrj /* 388612f080e7Smrj * now go back to the current cookie and add it to the new window. set 388712f080e7Smrj * the new window size to the what was left over from the previous 388812f080e7Smrj * cookie and what's in the current cookie. 388912f080e7Smrj */ 389012f080e7Smrj cookie++; 389112f080e7Smrj (*windowp)->wd_cookie_cnt++; 389212f080e7Smrj (*windowp)->wd_size = trim_sz + cookie->dmac_size; 389312f080e7Smrj 389412f080e7Smrj /* 389512f080e7Smrj * trim plus the next cookie could put us over maxxfer (a cookie can be 389612f080e7Smrj * a max size of maxxfer). Handle that case. 389712f080e7Smrj */ 389812f080e7Smrj if ((*windowp)->wd_size > dma->dp_maxxfer) { 389912f080e7Smrj /* 390012f080e7Smrj * maxxfer is already a whole multiple of granularity, and this 390112f080e7Smrj * trim will be <= the previous trim (since a cookie can't be 390212f080e7Smrj * larger than maxxfer). Make things simple here. 
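 *
 * Worked example with illustrative numbers: dp_maxxfer = 0x100000, the
 * carried-over trim is 0x300 and the current cookie is 0x100000
 * (== maxxfer), so the new window momentarily holds 0x100300. The code
 * below trims 0x300 back off, leaving wd_size == maxxfer and seeding
 * yet another window with the remaining 0x300.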
390312f080e7Smrj */ 390412f080e7Smrj trim_sz = (*windowp)->wd_size - dma->dp_maxxfer; 390512f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 390612f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 3907843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 390812f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 390912f080e7Smrj (*windowp)->wd_size -= trim_sz; 391012f080e7Smrj ASSERT((*windowp)->wd_size == dma->dp_maxxfer); 391112f080e7Smrj 391212f080e7Smrj /* save the buffer offsets for the next window */ 391312f080e7Smrj coffset = cookie->dmac_size - trim_sz; 391412f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 391512f080e7Smrj 391612f080e7Smrj /* setup the next window */ 391712f080e7Smrj (*windowp)++; 391812f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 391912f080e7Smrj (*windowp)->wd_cookie_cnt++; 392012f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3921843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 392212f080e7Smrj coffset; 392312f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 392412f080e7Smrj } 392512f080e7Smrj 392612f080e7Smrj return (DDI_SUCCESS); 392712f080e7Smrj } 392812f080e7Smrj 392912f080e7Smrj 393012f080e7Smrj /* 393112f080e7Smrj * rootnex_copybuf_window_boundary() 393212f080e7Smrj * Called in bind slowpath when we get to a window boundary because we used 393312f080e7Smrj * up all the copy buffer that we have. 393412f080e7Smrj */ 393512f080e7Smrj static int 393612f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 393712f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset, 393812f080e7Smrj size_t *copybuf_used) 393912f080e7Smrj { 394012f080e7Smrj rootnex_sglinfo_t *sinfo; 394112f080e7Smrj off_t new_offset; 394212f080e7Smrj size_t trim_sz; 3943843e1988Sjohnlev paddr_t paddr; 394412f080e7Smrj off_t coffset; 394512f080e7Smrj uint_t pidx; 394612f080e7Smrj off_t poff; 394712f080e7Smrj 394812f080e7Smrj 394912f080e7Smrj sinfo = &dma->dp_sglinfo; 395012f080e7Smrj 395112f080e7Smrj /* 395212f080e7Smrj * the copy buffer should be a whole multiple of page size. We know that 395312f080e7Smrj * this cookie is <= MMU_PAGESIZE. 395412f080e7Smrj */ 395512f080e7Smrj ASSERT(cookie->dmac_size <= MMU_PAGESIZE); 395612f080e7Smrj 395712f080e7Smrj /* 395812f080e7Smrj * from now on, all new windows in this bind need to be re-mapped during 395912f080e7Smrj * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf 396012f080e7Smrj * space... 396112f080e7Smrj */ 396212f080e7Smrj #if !defined(__amd64) 396312f080e7Smrj dma->dp_cb_remaping = B_TRUE; 396412f080e7Smrj #endif 396512f080e7Smrj 396612f080e7Smrj /* reset copybuf used */ 396712f080e7Smrj *copybuf_used = 0; 396812f080e7Smrj 396912f080e7Smrj /* 397012f080e7Smrj * if we don't have to trim (since granularity is set to 1), go to the 397112f080e7Smrj * next window and add the current cookie to it. We know the current 397212f080e7Smrj * cookie uses the copy buffer since we're in this code path. 
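 *
 * Rough intuition (not exact; sgllen and maxxfer boundaries can also
 * intervene): each window can consume at most dp_copybuf_size bytes of
 * copy buffer, so a bind whose copybuf-needing pages total N bytes ends
 * up split across roughly howmany(N, dma->dp_copybuf_size) windows by
 * this path, each one starting with *copybuf_used reset to 0 as above.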
397312f080e7Smrj */ 397412f080e7Smrj if (!dma->dp_trim_required) { 397512f080e7Smrj (*windowp)++; 397612f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 397712f080e7Smrj 397812f080e7Smrj /* Add this cookie to the new window */ 397912f080e7Smrj (*windowp)->wd_cookie_cnt++; 398012f080e7Smrj (*windowp)->wd_size += cookie->dmac_size; 398112f080e7Smrj *copybuf_used += MMU_PAGESIZE; 398212f080e7Smrj return (DDI_SUCCESS); 398312f080e7Smrj } 398412f080e7Smrj 398512f080e7Smrj /* 398612f080e7Smrj * *** may need to trim, figure it out. 398712f080e7Smrj */ 398812f080e7Smrj 398912f080e7Smrj /* figure out how much we need to trim from the window */ 399012f080e7Smrj if (dma->dp_granularity_power_2) { 399112f080e7Smrj trim_sz = (*windowp)->wd_size & 399212f080e7Smrj (hp->dmai_attr.dma_attr_granular - 1); 399312f080e7Smrj } else { 399412f080e7Smrj trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular; 399512f080e7Smrj } 399612f080e7Smrj 399712f080e7Smrj /* 399812f080e7Smrj * if the window's a whole multiple of granularity, go to the next 399912f080e7Smrj * window, init it, then add in the current cookie. We know the current 400012f080e7Smrj * cookie uses the copy buffer since we're in this code path. 400112f080e7Smrj */ 400212f080e7Smrj if (trim_sz == 0) { 400312f080e7Smrj (*windowp)++; 400412f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 400512f080e7Smrj 400612f080e7Smrj /* Add this cookie to the new window */ 400712f080e7Smrj (*windowp)->wd_cookie_cnt++; 400812f080e7Smrj (*windowp)->wd_size += cookie->dmac_size; 400912f080e7Smrj *copybuf_used += MMU_PAGESIZE; 401012f080e7Smrj return (DDI_SUCCESS); 401112f080e7Smrj } 401212f080e7Smrj 401312f080e7Smrj /* 401412f080e7Smrj * *** We figured it out, we definitly need to trim 401512f080e7Smrj */ 401612f080e7Smrj 401712f080e7Smrj /* 401812f080e7Smrj * make sure the driver isn't making us do something bad... 401912f080e7Smrj * Trimming and sgllen == 1 don't go together. 402012f080e7Smrj */ 402112f080e7Smrj if (hp->dmai_attr.dma_attr_sgllen == 1) { 402212f080e7Smrj return (DDI_DMA_NOMAPPING); 402312f080e7Smrj } 402412f080e7Smrj 402512f080e7Smrj /* 402612f080e7Smrj * first, setup the current window to account for the trim. Need to go 402712f080e7Smrj * back to the last cookie for this. Some of the last cookie will be in 402812f080e7Smrj * the current window, and some of the last cookie will be in the new 402912f080e7Smrj * window. All of the current cookie will be in the new window. 403012f080e7Smrj */ 403112f080e7Smrj cookie--; 403212f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 403312f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 4034843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 403512f080e7Smrj ASSERT(cookie->dmac_size > trim_sz); 403612f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 403712f080e7Smrj (*windowp)->wd_size -= trim_sz; 403812f080e7Smrj 403912f080e7Smrj /* 404012f080e7Smrj * we're trimming the last cookie (not the current cookie). So that 404112f080e7Smrj * last cookie may have or may not have been using the copy buffer ( 404212f080e7Smrj * we know the cookie passed in uses the copy buffer since we're in 404312f080e7Smrj * this code path). 404412f080e7Smrj * 404512f080e7Smrj * If the last cookie doesn't use the copy buffer, nothing special to 404612f080e7Smrj * do. 
However, if it does uses the copy buffer, it will be both the 404712f080e7Smrj * last page in the current window and the first page in the next 404812f080e7Smrj * window. Since we are reusing the copy buffer (and KVA space on the 404912f080e7Smrj * 32-bit kernel), this page will use the end of the copy buffer in the 405012f080e7Smrj * current window, and the start of the copy buffer in the next window. 405112f080e7Smrj * Track that info... The cookie physical address was already set to 405212f080e7Smrj * the copy buffer physical address in setup_cookie.. 405312f080e7Smrj */ 405412f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 405512f080e7Smrj pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset + 405612f080e7Smrj (*windowp)->wd_size) >> MMU_PAGESHIFT; 405712f080e7Smrj (*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE; 405812f080e7Smrj (*windowp)->wd_trim.tr_last_pidx = pidx; 405912f080e7Smrj (*windowp)->wd_trim.tr_last_cbaddr = 406012f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr; 406112f080e7Smrj #if !defined(__amd64) 406212f080e7Smrj (*windowp)->wd_trim.tr_last_kaddr = 406312f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr; 406412f080e7Smrj #endif 406512f080e7Smrj } 406612f080e7Smrj 406712f080e7Smrj /* save the buffer offsets for the next window */ 406812f080e7Smrj coffset = cookie->dmac_size - trim_sz; 406912f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 407012f080e7Smrj 407112f080e7Smrj /* 407212f080e7Smrj * set this now in case this is the first window. all other cases are 407312f080e7Smrj * set in dma_win() 407412f080e7Smrj */ 407512f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 407612f080e7Smrj 407712f080e7Smrj /* 407812f080e7Smrj * initialize the next window using what's left over in the previous 407912f080e7Smrj * cookie. 408012f080e7Smrj */ 408112f080e7Smrj (*windowp)++; 408212f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 408312f080e7Smrj (*windowp)->wd_cookie_cnt++; 408412f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4085843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 408612f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 408712f080e7Smrj 408812f080e7Smrj /* 408912f080e7Smrj * again, we're tracking if the last cookie uses the copy buffer. 409012f080e7Smrj * read the comment above for more info on why we need to track 409112f080e7Smrj * additional state. 409212f080e7Smrj * 409312f080e7Smrj * For the first cookie in the new window, we need reset the physical 409412f080e7Smrj * address to DMA into to the start of the copy buffer plus any 409512f080e7Smrj * initial page offset which may be present. 409612f080e7Smrj */ 409712f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 409812f080e7Smrj (*windowp)->wd_dosync = B_TRUE; 409912f080e7Smrj (*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE; 410012f080e7Smrj (*windowp)->wd_trim.tr_first_pidx = pidx; 410112f080e7Smrj (*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr; 410212f080e7Smrj poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET; 4103843e1988Sjohnlev 4104843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) + 4105843e1988Sjohnlev poff; 4106843e1988Sjohnlev #ifdef __xpv 4107843e1988Sjohnlev /* 4108843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 4109843e1988Sjohnlev * the cookies with MAs instead of PAs. 
4110843e1988Sjohnlev */ 4111843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = 4112843e1988Sjohnlev ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4113843e1988Sjohnlev #else 4114843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = paddr; 4115843e1988Sjohnlev #endif 4116843e1988Sjohnlev 411712f080e7Smrj #if !defined(__amd64) 411812f080e7Smrj (*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva; 411912f080e7Smrj #endif 412012f080e7Smrj /* account for the cookie copybuf usage in the new window */ 412112f080e7Smrj *copybuf_used += MMU_PAGESIZE; 412212f080e7Smrj 412312f080e7Smrj /* 412412f080e7Smrj * every piece of code has to have a hack, and here is this 412512f080e7Smrj * one :-) 412612f080e7Smrj * 412712f080e7Smrj * There is a complex interaction between setup_cookie and the 412812f080e7Smrj * copybuf window boundary. The complexity had to be in either 412912f080e7Smrj * the maxxfer window, or the copybuf window, and I chose the 413012f080e7Smrj * copybuf code. 413112f080e7Smrj * 413212f080e7Smrj * So in this code path, we have taken the last cookie, 413312f080e7Smrj * virtually broken it in half due to the trim, and it happens 413412f080e7Smrj * to use the copybuf which further complicates life. At the 413512f080e7Smrj * same time, we have already set up the current cookie, which 413612f080e7Smrj * is now wrong. More background info: the current cookie uses 413712f080e7Smrj * the copybuf, so it is only a page long max. So we need to 413812f080e7Smrj * fix the current cookie's copy buffer address, physical 413912f080e7Smrj * address, and kva for the 32-bit kernel. We do this by 414012f080e7Smrj * bumping them by page size (of course, we can't do this on 414112f080e7Smrj * the physical address since the copy buffer may not be 414212f080e7Smrj * physically contiguous). 414312f080e7Smrj */ 414412f080e7Smrj cookie++; 414512f080e7Smrj dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE; 4146843e1988Sjohnlev poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 4147843e1988Sjohnlev 4148843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 414912f080e7Smrj dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff; 4150843e1988Sjohnlev #ifdef __xpv 4151843e1988Sjohnlev /* 4152843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 4153843e1988Sjohnlev * the cookies with MAs instead of PAs. 4154843e1988Sjohnlev */ 4155843e1988Sjohnlev cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4156843e1988Sjohnlev #else 4157843e1988Sjohnlev cookie->dmac_laddress = paddr; 4158843e1988Sjohnlev #endif 4159843e1988Sjohnlev 416012f080e7Smrj #if !defined(__amd64) 416112f080e7Smrj ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE); 416212f080e7Smrj dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE; 416312f080e7Smrj #endif 416412f080e7Smrj } else { 416512f080e7Smrj /* go back to the current cookie */ 416612f080e7Smrj cookie++; 416712f080e7Smrj } 416812f080e7Smrj 416912f080e7Smrj /* 417012f080e7Smrj * add the current cookie to the new window. Set the new window size to 417112f080e7Smrj * what was left over from the previous cookie and what's in the 417212f080e7Smrj * current cookie. 417312f080e7Smrj */ 417412f080e7Smrj (*windowp)->wd_cookie_cnt++; 417512f080e7Smrj (*windowp)->wd_size = trim_sz + cookie->dmac_size; 417612f080e7Smrj ASSERT((*windowp)->wd_size < dma->dp_maxxfer); 417712f080e7Smrj 417812f080e7Smrj /* 417912f080e7Smrj * we know that the cookie passed in always uses the copy buffer. We 418012f080e7Smrj * wouldn't be here if it didn't.
418112f080e7Smrj */ 418212f080e7Smrj *copybuf_used += MMU_PAGESIZE; 418312f080e7Smrj 418412f080e7Smrj return (DDI_SUCCESS); 418512f080e7Smrj } 418612f080e7Smrj 418712f080e7Smrj 418812f080e7Smrj /* 418912f080e7Smrj * rootnex_maxxfer_window_boundary() 419012f080e7Smrj * Called in bind slowpath when we get to a window boundary because we will 419112f080e7Smrj * go over maxxfer. 419212f080e7Smrj */ 419312f080e7Smrj static int 419412f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 419512f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie) 419612f080e7Smrj { 419712f080e7Smrj size_t dmac_size; 419812f080e7Smrj off_t new_offset; 419912f080e7Smrj size_t trim_sz; 420012f080e7Smrj off_t coffset; 420112f080e7Smrj 420212f080e7Smrj 420312f080e7Smrj /* 420412f080e7Smrj * calculate how much we have to trim off of the current cookie to equal 420512f080e7Smrj * maxxfer. We don't have to account for granularity here since our 420612f080e7Smrj * maxxfer already takes that into account. 420712f080e7Smrj */ 420812f080e7Smrj trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer; 420912f080e7Smrj ASSERT(trim_sz <= cookie->dmac_size); 421012f080e7Smrj ASSERT(trim_sz <= dma->dp_maxxfer); 421112f080e7Smrj 421212f080e7Smrj /* save cookie size since we need it later and we might change it */ 421312f080e7Smrj dmac_size = cookie->dmac_size; 421412f080e7Smrj 421512f080e7Smrj /* 421612f080e7Smrj * if we're not trimming the entire cookie, setup the current window to 421712f080e7Smrj * account for the trim. 421812f080e7Smrj */ 421912f080e7Smrj if (trim_sz < cookie->dmac_size) { 422012f080e7Smrj (*windowp)->wd_cookie_cnt++; 422112f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 422212f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 4223843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 422412f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 422512f080e7Smrj (*windowp)->wd_size = dma->dp_maxxfer; 422612f080e7Smrj 422712f080e7Smrj /* 422812f080e7Smrj * set the adjusted cookie size now in case this is the first 422912f080e7Smrj * window. All other windows are taken care of in get win 423012f080e7Smrj */ 423112f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 423212f080e7Smrj } 423312f080e7Smrj 423412f080e7Smrj /* 423512f080e7Smrj * coffset is the current offset within the cookie, new_offset is the 423612f080e7Smrj * current offset with the entire buffer. 
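 *
 * Worked example with illustrative numbers: dp_maxxfer = 0x100000, the
 * window already holds 0xfc000 and the cookie is 0x10000. trim_sz is
 * then 0xc000, so this window keeps the first 0x4000 of the cookie
 * (coffset = 0x4000) and the remaining 0xc000 becomes the trimmed first
 * piece of the next window set up below.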
423712f080e7Smrj */ 423812f080e7Smrj coffset = dmac_size - trim_sz; 423912f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 424012f080e7Smrj 424112f080e7Smrj /* initialize the next window */ 424212f080e7Smrj (*windowp)++; 424312f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 424412f080e7Smrj (*windowp)->wd_cookie_cnt++; 424512f080e7Smrj (*windowp)->wd_size = trim_sz; 424612f080e7Smrj if (trim_sz < dmac_size) { 424712f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4248843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 424912f080e7Smrj coffset; 425012f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 425112f080e7Smrj } 425212f080e7Smrj 425312f080e7Smrj return (DDI_SUCCESS); 425412f080e7Smrj } 425512f080e7Smrj 425612f080e7Smrj 425712f080e7Smrj /*ARGSUSED*/ 425812f080e7Smrj static int 425920906b23SVikram Hegde rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 426012f080e7Smrj off_t off, size_t len, uint_t cache_flags) 426112f080e7Smrj { 426212f080e7Smrj rootnex_sglinfo_t *sinfo; 426312f080e7Smrj rootnex_pgmap_t *cbpage; 426412f080e7Smrj rootnex_window_t *win; 426512f080e7Smrj ddi_dma_impl_t *hp; 426612f080e7Smrj rootnex_dma_t *dma; 426712f080e7Smrj caddr_t fromaddr; 426812f080e7Smrj caddr_t toaddr; 426912f080e7Smrj uint_t psize; 427012f080e7Smrj off_t offset; 427112f080e7Smrj uint_t pidx; 427212f080e7Smrj size_t size; 427312f080e7Smrj off_t poff; 427412f080e7Smrj int e; 427512f080e7Smrj 427612f080e7Smrj 427712f080e7Smrj hp = (ddi_dma_impl_t *)handle; 427812f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 427912f080e7Smrj sinfo = &dma->dp_sglinfo; 428012f080e7Smrj 428112f080e7Smrj /* 428212f080e7Smrj * if we don't have any windows, we don't need to sync. A copybuf 428312f080e7Smrj * will cause us to have at least one window. 428412f080e7Smrj */ 428512f080e7Smrj if (dma->dp_window == NULL) { 428612f080e7Smrj return (DDI_SUCCESS); 428712f080e7Smrj } 428812f080e7Smrj 428912f080e7Smrj /* This window may not need to be sync'd */ 429012f080e7Smrj win = &dma->dp_window[dma->dp_current_win]; 429112f080e7Smrj if (!win->wd_dosync) { 429212f080e7Smrj return (DDI_SUCCESS); 429312f080e7Smrj } 429412f080e7Smrj 429512f080e7Smrj /* handle off and len special cases */ 429612f080e7Smrj if ((off == 0) || (rootnex_sync_ignore_params)) { 429712f080e7Smrj offset = win->wd_offset; 429812f080e7Smrj } else { 429912f080e7Smrj offset = off; 430012f080e7Smrj } 430112f080e7Smrj if ((len == 0) || (rootnex_sync_ignore_params)) { 430212f080e7Smrj size = win->wd_size; 430312f080e7Smrj } else { 430412f080e7Smrj size = len; 430512f080e7Smrj } 430612f080e7Smrj 430712f080e7Smrj /* check the sync args to make sure they make a little sense */ 430812f080e7Smrj if (rootnex_sync_check_parms) { 430912f080e7Smrj e = rootnex_valid_sync_parms(hp, win, offset, size, 431012f080e7Smrj cache_flags); 431112f080e7Smrj if (e != DDI_SUCCESS) { 431212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]); 431312f080e7Smrj return (DDI_FAILURE); 431412f080e7Smrj } 431512f080e7Smrj } 431612f080e7Smrj 431712f080e7Smrj /* 431812f080e7Smrj * special case the first page to handle the offset into the page. The 431912f080e7Smrj * offset to the current page for our buffer is the offset into the 432012f080e7Smrj * first page of the buffer plus our current offset into the buffer 432112f080e7Smrj * itself, masked of course. 
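 *
 * Worked example with illustrative numbers (4K pages):
 * si_buf_offset = 0x300, offset = 0x40 and size = 0x2000 give
 * poff = 0x340, so the loop below copies 0xcc0 bytes out of the first
 * page, 0x1000 out of the second, and the final 0x340 out of the third.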
432212f080e7Smrj */ 432312f080e7Smrj poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET; 432412f080e7Smrj psize = MIN((MMU_PAGESIZE - poff), size); 432512f080e7Smrj 432612f080e7Smrj /* go through all the pages that we want to sync */ 432712f080e7Smrj while (size > 0) { 432812f080e7Smrj /* 432912f080e7Smrj * Calculate the page index relative to the start of the buffer. 433012f080e7Smrj * The index to the current page for our buffer is the offset 433112f080e7Smrj * into the first page of the buffer plus our current offset 433212f080e7Smrj * into the buffer itself, shifted of course... 433312f080e7Smrj */ 433412f080e7Smrj pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT; 433512f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 433612f080e7Smrj 433712f080e7Smrj /* 433812f080e7Smrj * if this page uses the copy buffer, we need to sync it, 433912f080e7Smrj * otherwise, go on to the next page. 434012f080e7Smrj */ 434112f080e7Smrj cbpage = &dma->dp_pgmap[pidx]; 434212f080e7Smrj ASSERT((cbpage->pm_uses_copybuf == B_TRUE) || 434312f080e7Smrj (cbpage->pm_uses_copybuf == B_FALSE)); 434412f080e7Smrj if (cbpage->pm_uses_copybuf) { 434512f080e7Smrj /* cbaddr and kaddr should be page aligned */ 434612f080e7Smrj ASSERT(((uintptr_t)cbpage->pm_cbaddr & 434712f080e7Smrj MMU_PAGEOFFSET) == 0); 434812f080e7Smrj ASSERT(((uintptr_t)cbpage->pm_kaddr & 434912f080e7Smrj MMU_PAGEOFFSET) == 0); 435012f080e7Smrj 435112f080e7Smrj /* 435212f080e7Smrj * if we're copying for the device, we are going to 435312f080e7Smrj * copy from the drivers buffer and to the rootnex 435412f080e7Smrj * allocated copy buffer. 435512f080e7Smrj */ 435612f080e7Smrj if (cache_flags == DDI_DMA_SYNC_FORDEV) { 435712f080e7Smrj fromaddr = cbpage->pm_kaddr + poff; 435812f080e7Smrj toaddr = cbpage->pm_cbaddr + poff; 435912f080e7Smrj DTRACE_PROBE2(rootnex__sync__dev, 436012f080e7Smrj dev_info_t *, dma->dp_dip, size_t, psize); 436112f080e7Smrj 436212f080e7Smrj /* 436312f080e7Smrj * if we're copying for the cpu/kernel, we are going to 436412f080e7Smrj * copy from the rootnex allocated copy buffer to the 436512f080e7Smrj * drivers buffer. 436612f080e7Smrj */ 436712f080e7Smrj } else { 436812f080e7Smrj fromaddr = cbpage->pm_cbaddr + poff; 436912f080e7Smrj toaddr = cbpage->pm_kaddr + poff; 437012f080e7Smrj DTRACE_PROBE2(rootnex__sync__cpu, 437112f080e7Smrj dev_info_t *, dma->dp_dip, size_t, psize); 437212f080e7Smrj } 437312f080e7Smrj 437412f080e7Smrj bcopy(fromaddr, toaddr, psize); 437512f080e7Smrj } 437612f080e7Smrj 437712f080e7Smrj /* 437812f080e7Smrj * decrement size until we're done, update our offset into the 437912f080e7Smrj * buffer, and get the next page size. 438012f080e7Smrj */ 438112f080e7Smrj size -= psize; 438212f080e7Smrj offset += psize; 438312f080e7Smrj psize = MIN(MMU_PAGESIZE, size); 438412f080e7Smrj 438512f080e7Smrj /* page offset is zero for the rest of this loop */ 438612f080e7Smrj poff = 0; 438712f080e7Smrj } 438812f080e7Smrj 438912f080e7Smrj return (DDI_SUCCESS); 439012f080e7Smrj } 439112f080e7Smrj 439220906b23SVikram Hegde /* 439320906b23SVikram Hegde * rootnex_dma_sync() 439420906b23SVikram Hegde * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags. 439520906b23SVikram Hegde * We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC 439620906b23SVikram Hegde * is set, ddi_dma_sync() returns immediately passing back success. 
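 *
 * Typical caller-side usage, sketched from a hypothetical leaf driver
 * (not rootnex code):
 *
 *	(void) ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORDEV);
 *		-- CPU filled the buffer; flush it before the device reads
 *	(void) ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORCPU);
 *		-- device filled the buffer; pull it in before the CPU reads
 *
 * Both calls funnel into this routine unless DMP_NOSYNC short-circuits
 * them as described above.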
439720906b23SVikram Hegde */ 439820906b23SVikram Hegde /*ARGSUSED*/ 439920906b23SVikram Hegde static int 440020906b23SVikram Hegde rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 440120906b23SVikram Hegde off_t off, size_t len, uint_t cache_flags) 440220906b23SVikram Hegde { 44033a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4404b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 440520906b23SVikram Hegde return (iommulib_nexdma_sync(dip, rdip, handle, off, len, 440620906b23SVikram Hegde cache_flags)); 440720906b23SVikram Hegde } 440820906b23SVikram Hegde #endif 440920906b23SVikram Hegde return (rootnex_coredma_sync(dip, rdip, handle, off, len, 441020906b23SVikram Hegde cache_flags)); 441120906b23SVikram Hegde } 441212f080e7Smrj 441312f080e7Smrj /* 441412f080e7Smrj * rootnex_valid_sync_parms() 441512f080e7Smrj * checks the parameters passed to sync to verify they are correct. 441612f080e7Smrj */ 441712f080e7Smrj static int 441812f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 441912f080e7Smrj off_t offset, size_t size, uint_t cache_flags) 442012f080e7Smrj { 442112f080e7Smrj off_t woffset; 442212f080e7Smrj 442312f080e7Smrj 442412f080e7Smrj /* 442512f080e7Smrj * the first part of the test to make sure the offset passed in is 442612f080e7Smrj * within the window. 442712f080e7Smrj */ 442812f080e7Smrj if (offset < win->wd_offset) { 442912f080e7Smrj return (DDI_FAILURE); 443012f080e7Smrj } 443112f080e7Smrj 443212f080e7Smrj /* 443312f080e7Smrj * second and last part of the test to make sure the offset and length 443412f080e7Smrj * passed in is within the window. 443512f080e7Smrj */ 443612f080e7Smrj woffset = offset - win->wd_offset; 443712f080e7Smrj if ((woffset + size) > win->wd_size) { 443812f080e7Smrj return (DDI_FAILURE); 443912f080e7Smrj } 444012f080e7Smrj 444112f080e7Smrj /* 444212f080e7Smrj * if we are sync'ing for the device, the DDI_DMA_WRITE flag should 444312f080e7Smrj * be set too. 444412f080e7Smrj */ 444512f080e7Smrj if ((cache_flags == DDI_DMA_SYNC_FORDEV) && 444612f080e7Smrj (hp->dmai_rflags & DDI_DMA_WRITE)) { 444712f080e7Smrj return (DDI_SUCCESS); 444812f080e7Smrj } 444912f080e7Smrj 445012f080e7Smrj /* 445112f080e7Smrj * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL 445212f080e7Smrj * should be set. Also DDI_DMA_READ should be set in the flags. 
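 *
 * In summary, the combinations this routine accepts (offset and size
 * must also fall within the current window):
 *
 *	DDI_DMA_SYNC_FORDEV			requires DDI_DMA_WRITE
 *	DDI_DMA_SYNC_FORCPU / _FORKERNEL	requires DDI_DMA_READ
 *
 * everything else returns DDI_FAILURE.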
445312f080e7Smrj */ 445412f080e7Smrj if (((cache_flags == DDI_DMA_SYNC_FORCPU) || 445512f080e7Smrj (cache_flags == DDI_DMA_SYNC_FORKERNEL)) && 445612f080e7Smrj (hp->dmai_rflags & DDI_DMA_READ)) { 445712f080e7Smrj return (DDI_SUCCESS); 445812f080e7Smrj } 445912f080e7Smrj 446012f080e7Smrj return (DDI_FAILURE); 446112f080e7Smrj } 446212f080e7Smrj 446312f080e7Smrj 446412f080e7Smrj /*ARGSUSED*/ 446512f080e7Smrj static int 446620906b23SVikram Hegde rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 446712f080e7Smrj uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 446812f080e7Smrj uint_t *ccountp) 446912f080e7Smrj { 447012f080e7Smrj rootnex_window_t *window; 447112f080e7Smrj rootnex_trim_t *trim; 447212f080e7Smrj ddi_dma_impl_t *hp; 447312f080e7Smrj rootnex_dma_t *dma; 447412f080e7Smrj #if !defined(__amd64) 447512f080e7Smrj rootnex_sglinfo_t *sinfo; 447612f080e7Smrj rootnex_pgmap_t *pmap; 447712f080e7Smrj uint_t pidx; 447812f080e7Smrj uint_t pcnt; 447912f080e7Smrj off_t poff; 448012f080e7Smrj int i; 448112f080e7Smrj #endif 448212f080e7Smrj 448312f080e7Smrj 448412f080e7Smrj hp = (ddi_dma_impl_t *)handle; 448512f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 448612f080e7Smrj #if !defined(__amd64) 448712f080e7Smrj sinfo = &dma->dp_sglinfo; 448812f080e7Smrj #endif 448912f080e7Smrj 449012f080e7Smrj /* If we try and get a window which doesn't exist, return failure */ 449112f080e7Smrj if (win >= hp->dmai_nwin) { 449212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 449312f080e7Smrj return (DDI_FAILURE); 449412f080e7Smrj } 449512f080e7Smrj 449612f080e7Smrj /* 449712f080e7Smrj * if we don't have any windows, and they're asking for the first 449812f080e7Smrj * window, setup the cookie pointer to the first cookie in the bind. 449912f080e7Smrj * setup our return values, then increment the cookie since we return 450012f080e7Smrj * the first cookie on the stack. 450112f080e7Smrj */ 450212f080e7Smrj if (dma->dp_window == NULL) { 450312f080e7Smrj if (win != 0) { 450412f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 450512f080e7Smrj return (DDI_FAILURE); 450612f080e7Smrj } 450712f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 450812f080e7Smrj *offp = 0; 450912f080e7Smrj *lenp = dma->dp_dma.dmao_size; 451012f080e7Smrj *ccountp = dma->dp_sglinfo.si_sgl_size; 451112f080e7Smrj *cookiep = hp->dmai_cookie[0]; 451212f080e7Smrj hp->dmai_cookie++; 451312f080e7Smrj return (DDI_SUCCESS); 451412f080e7Smrj } 451512f080e7Smrj 451612f080e7Smrj /* sync the old window before moving on to the new one */ 451712f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 451812f080e7Smrj if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) { 451994f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 452012f080e7Smrj DDI_DMA_SYNC_FORCPU); 452112f080e7Smrj } 452212f080e7Smrj 452312f080e7Smrj #if !defined(__amd64) 452412f080e7Smrj /* 452512f080e7Smrj * before we move to the next window, if we need to re-map, unmap all 452612f080e7Smrj * the pages in this window. 452712f080e7Smrj */ 452812f080e7Smrj if (dma->dp_cb_remaping) { 452912f080e7Smrj /* 453012f080e7Smrj * If we switch to this window again, we'll need to map in 453112f080e7Smrj * on the fly next time. 453212f080e7Smrj */ 453312f080e7Smrj window->wd_remap_copybuf = B_TRUE; 453412f080e7Smrj 453512f080e7Smrj /* 453612f080e7Smrj * calculate the page index into the buffer where this window 453712f080e7Smrj * starts, and the number of pages this window takes up. 
453812f080e7Smrj */ 453912f080e7Smrj pidx = (sinfo->si_buf_offset + window->wd_offset) >> 454012f080e7Smrj MMU_PAGESHIFT; 454112f080e7Smrj poff = (sinfo->si_buf_offset + window->wd_offset) & 454212f080e7Smrj MMU_PAGEOFFSET; 454312f080e7Smrj pcnt = mmu_btopr(window->wd_size + poff); 454412f080e7Smrj ASSERT((pidx + pcnt) <= sinfo->si_max_pages); 454512f080e7Smrj 454612f080e7Smrj /* unmap pages which are currently mapped in this window */ 454712f080e7Smrj for (i = 0; i < pcnt; i++) { 454812f080e7Smrj if (dma->dp_pgmap[pidx].pm_mapped) { 454912f080e7Smrj hat_unload(kas.a_hat, 455012f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE, 455112f080e7Smrj HAT_UNLOAD); 455212f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 455312f080e7Smrj } 455412f080e7Smrj pidx++; 455512f080e7Smrj } 455612f080e7Smrj } 455712f080e7Smrj #endif 455812f080e7Smrj 455912f080e7Smrj /* 456012f080e7Smrj * Move to the new window. 456112f080e7Smrj * NOTE: current_win must be set for sync to work right 456212f080e7Smrj */ 456312f080e7Smrj dma->dp_current_win = win; 456412f080e7Smrj window = &dma->dp_window[win]; 456512f080e7Smrj 456612f080e7Smrj /* if needed, adjust the first and/or last cookies for trim */ 456712f080e7Smrj trim = &window->wd_trim; 456812f080e7Smrj if (trim->tr_trim_first) { 4569843e1988Sjohnlev window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr; 457012f080e7Smrj window->wd_first_cookie->dmac_size = trim->tr_first_size; 457112f080e7Smrj #if !defined(__amd64) 457212f080e7Smrj window->wd_first_cookie->dmac_type = 457312f080e7Smrj (window->wd_first_cookie->dmac_type & 457412f080e7Smrj ROOTNEX_USES_COPYBUF) + window->wd_offset; 457512f080e7Smrj #endif 457612f080e7Smrj if (trim->tr_first_copybuf_win) { 457712f080e7Smrj dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr = 457812f080e7Smrj trim->tr_first_cbaddr; 457912f080e7Smrj #if !defined(__amd64) 458012f080e7Smrj dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr = 458112f080e7Smrj trim->tr_first_kaddr; 458212f080e7Smrj #endif 458312f080e7Smrj } 458412f080e7Smrj } 458512f080e7Smrj if (trim->tr_trim_last) { 4586843e1988Sjohnlev trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr; 458712f080e7Smrj trim->tr_last_cookie->dmac_size = trim->tr_last_size; 458812f080e7Smrj if (trim->tr_last_copybuf_win) { 458912f080e7Smrj dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr = 459012f080e7Smrj trim->tr_last_cbaddr; 459112f080e7Smrj #if !defined(__amd64) 459212f080e7Smrj dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr = 459312f080e7Smrj trim->tr_last_kaddr; 459412f080e7Smrj #endif 459512f080e7Smrj } 459612f080e7Smrj } 459712f080e7Smrj 459812f080e7Smrj /* 459912f080e7Smrj * setup the cookie pointer to the first cookie in the window. setup 460012f080e7Smrj * our return values, then increment the cookie since we return the 460112f080e7Smrj * first cookie on the stack. 460212f080e7Smrj */ 460312f080e7Smrj hp->dmai_cookie = window->wd_first_cookie; 460412f080e7Smrj *offp = window->wd_offset; 460512f080e7Smrj *lenp = window->wd_size; 460612f080e7Smrj *ccountp = window->wd_cookie_cnt; 460712f080e7Smrj *cookiep = hp->dmai_cookie[0]; 460812f080e7Smrj hp->dmai_cookie++; 460912f080e7Smrj 461012f080e7Smrj #if !defined(__amd64) 461112f080e7Smrj /* re-map copybuf if required for this window */ 461212f080e7Smrj if (dma->dp_cb_remaping) { 461312f080e7Smrj /* 461412f080e7Smrj * calculate the page index into the buffer where this 461512f080e7Smrj * window starts. 
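 *
 * (Aside: this routine sits behind ddi_dma_getwin(9F). A driver that
 * got DDI_DMA_PARTIAL_MAP back from the bind typically walks the
 * windows with something like the following sketch -- caller code with
 * hypothetical locals, not rootnex code, nwin from ddi_dma_numwin(9F):
 *
 *	for (w = 0; w < nwin; w++) {
 *		(void) ddi_dma_getwin(handle, w, &off, &len, &cookie,
 *		    &ccnt);
 *		... program the device with the ccnt cookies ...
 *	}
 * )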
461612f080e7Smrj */ 461712f080e7Smrj pidx = (sinfo->si_buf_offset + window->wd_offset) >> 461812f080e7Smrj MMU_PAGESHIFT; 461912f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 462012f080e7Smrj 462112f080e7Smrj /* 462212f080e7Smrj * the first page can get unmapped if it's shared with the 462312f080e7Smrj * previous window. Even if the rest of this window is already 462412f080e7Smrj * mapped in, we need to still check this one. 462512f080e7Smrj */ 462612f080e7Smrj pmap = &dma->dp_pgmap[pidx]; 462712f080e7Smrj if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) { 462812f080e7Smrj if (pmap->pm_pp != NULL) { 462912f080e7Smrj pmap->pm_mapped = B_TRUE; 463012f080e7Smrj i86_pp_map(pmap->pm_pp, pmap->pm_kaddr); 463112f080e7Smrj } else if (pmap->pm_vaddr != NULL) { 463212f080e7Smrj pmap->pm_mapped = B_TRUE; 463312f080e7Smrj i86_va_map(pmap->pm_vaddr, sinfo->si_asp, 463412f080e7Smrj pmap->pm_kaddr); 463512f080e7Smrj } 463612f080e7Smrj } 463712f080e7Smrj pidx++; 463812f080e7Smrj 463912f080e7Smrj /* map in the rest of the pages if required */ 464012f080e7Smrj if (window->wd_remap_copybuf) { 464112f080e7Smrj window->wd_remap_copybuf = B_FALSE; 464212f080e7Smrj 464312f080e7Smrj /* figure out many pages this window takes up */ 464412f080e7Smrj poff = (sinfo->si_buf_offset + window->wd_offset) & 464512f080e7Smrj MMU_PAGEOFFSET; 464612f080e7Smrj pcnt = mmu_btopr(window->wd_size + poff); 464712f080e7Smrj ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages); 464812f080e7Smrj 464912f080e7Smrj /* map pages which require it */ 465012f080e7Smrj for (i = 1; i < pcnt; i++) { 465112f080e7Smrj pmap = &dma->dp_pgmap[pidx]; 465212f080e7Smrj if (pmap->pm_uses_copybuf) { 465312f080e7Smrj ASSERT(pmap->pm_mapped == B_FALSE); 465412f080e7Smrj if (pmap->pm_pp != NULL) { 465512f080e7Smrj pmap->pm_mapped = B_TRUE; 465612f080e7Smrj i86_pp_map(pmap->pm_pp, 465712f080e7Smrj pmap->pm_kaddr); 465812f080e7Smrj } else if (pmap->pm_vaddr != NULL) { 465912f080e7Smrj pmap->pm_mapped = B_TRUE; 466012f080e7Smrj i86_va_map(pmap->pm_vaddr, 466112f080e7Smrj sinfo->si_asp, 466212f080e7Smrj pmap->pm_kaddr); 466312f080e7Smrj } 466412f080e7Smrj } 466512f080e7Smrj pidx++; 466612f080e7Smrj } 466712f080e7Smrj } 466812f080e7Smrj } 466912f080e7Smrj #endif 467012f080e7Smrj 467112f080e7Smrj /* if the new window uses the copy buffer, sync it for the device */ 467212f080e7Smrj if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) { 467394f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 467412f080e7Smrj DDI_DMA_SYNC_FORDEV); 467512f080e7Smrj } 467612f080e7Smrj 467712f080e7Smrj return (DDI_SUCCESS); 467812f080e7Smrj } 467912f080e7Smrj 468020906b23SVikram Hegde /* 468120906b23SVikram Hegde * rootnex_dma_win() 468220906b23SVikram Hegde * called from ddi_dma_getwin() 468320906b23SVikram Hegde */ 468420906b23SVikram Hegde /*ARGSUSED*/ 468520906b23SVikram Hegde static int 468620906b23SVikram Hegde rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 468720906b23SVikram Hegde uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 468820906b23SVikram Hegde uint_t *ccountp) 468920906b23SVikram Hegde { 46903a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4691b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 469220906b23SVikram Hegde return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp, 469320906b23SVikram Hegde cookiep, ccountp)); 469420906b23SVikram Hegde } 469520906b23SVikram Hegde #endif 469612f080e7Smrj 469720906b23SVikram Hegde return (rootnex_coredma_win(dip, rdip, 
handle, win, offp, lenp, 469820906b23SVikram Hegde cookiep, ccountp)); 469920906b23SVikram Hegde } 470012f080e7Smrj 470112f080e7Smrj /* 470212f080e7Smrj * ************************ 470312f080e7Smrj * obsoleted dma routines 470412f080e7Smrj * ************************ 470512f080e7Smrj */ 470612f080e7Smrj 4707b51bbbf5SVikram Hegde /* 4708b51bbbf5SVikram Hegde * rootnex_dma_map() 4709b51bbbf5SVikram Hegde * called from ddi_dma_setup() 4710b51bbbf5SVikram Hegde * NO IOMMU in 32 bit mode. The below routines doesn't work in 64 bit mode. 4711b51bbbf5SVikram Hegde */ 471212f080e7Smrj /* ARGSUSED */ 471312f080e7Smrj static int 4714b51bbbf5SVikram Hegde rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 471520906b23SVikram Hegde struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 471612f080e7Smrj { 471712f080e7Smrj #if defined(__amd64) 471812f080e7Smrj /* 471912f080e7Smrj * this interface is not supported in 64-bit x86 kernel. See comment in 472012f080e7Smrj * rootnex_dma_mctl() 472112f080e7Smrj */ 472212f080e7Smrj return (DDI_DMA_NORESOURCES); 472312f080e7Smrj 472412f080e7Smrj #else /* 32-bit x86 kernel */ 472512f080e7Smrj ddi_dma_handle_t *lhandlep; 472612f080e7Smrj ddi_dma_handle_t lhandle; 472712f080e7Smrj ddi_dma_cookie_t cookie; 472812f080e7Smrj ddi_dma_attr_t dma_attr; 472912f080e7Smrj ddi_dma_lim_t *dma_lim; 473012f080e7Smrj uint_t ccnt; 473112f080e7Smrj int e; 473212f080e7Smrj 473312f080e7Smrj 473412f080e7Smrj /* 473512f080e7Smrj * if the driver is just testing to see if it's possible to do the bind, 473612f080e7Smrj * we'll use local state. Otherwise, use the handle pointer passed in. 473712f080e7Smrj */ 473812f080e7Smrj if (handlep == NULL) { 473912f080e7Smrj lhandlep = &lhandle; 474012f080e7Smrj } else { 474112f080e7Smrj lhandlep = handlep; 474212f080e7Smrj } 474312f080e7Smrj 474412f080e7Smrj /* convert the limit structure to a dma_attr one */ 474512f080e7Smrj dma_lim = dmareq->dmar_limits; 474612f080e7Smrj dma_attr.dma_attr_version = DMA_ATTR_V0; 474712f080e7Smrj dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo; 474812f080e7Smrj dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi; 474912f080e7Smrj dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer; 475012f080e7Smrj dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max; 475112f080e7Smrj dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max; 475212f080e7Smrj dma_attr.dma_attr_granular = dma_lim->dlim_granular; 475312f080e7Smrj dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen; 475412f080e7Smrj dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize; 475512f080e7Smrj dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes; 475612f080e7Smrj dma_attr.dma_attr_align = MMU_PAGESIZE; 475712f080e7Smrj dma_attr.dma_attr_flags = 0; 475812f080e7Smrj 475912f080e7Smrj e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp, 476012f080e7Smrj dmareq->dmar_arg, lhandlep); 476112f080e7Smrj if (e != DDI_SUCCESS) { 476212f080e7Smrj return (e); 476312f080e7Smrj } 476412f080e7Smrj 476512f080e7Smrj e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt); 476612f080e7Smrj if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 476712f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 476812f080e7Smrj return (e); 476912f080e7Smrj } 477012f080e7Smrj 477112f080e7Smrj /* 477212f080e7Smrj * if the driver is just testing to see if it's possible to do the bind, 477312f080e7Smrj * free up the local state and return the result. 
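 *
 * (For context: this path backs the long-obsolete ddi_dma_setup(9F)
 * interface, where a caller may pass a NULL handle pointer purely to
 * ask whether the mapping could be made at all, e.g. the hypothetical
 * sketch
 *
 *	if (ddi_dma_setup(mydip, &dmareq, NULL) == DDI_DMA_MAPOK)
 *		... the real setup should succeed later ...
 *
 * which is why only local state is used above in that case.)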
477412f080e7Smrj */ 477512f080e7Smrj if (handlep == NULL) { 477612f080e7Smrj (void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep); 477712f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 477812f080e7Smrj if (e == DDI_DMA_MAPPED) { 477912f080e7Smrj return (DDI_DMA_MAPOK); 478012f080e7Smrj } else { 478112f080e7Smrj return (DDI_DMA_NOMAPPING); 478212f080e7Smrj } 478312f080e7Smrj } 478412f080e7Smrj 478512f080e7Smrj return (e); 478612f080e7Smrj #endif /* defined(__amd64) */ 478712f080e7Smrj } 478812f080e7Smrj 478920906b23SVikram Hegde /* 479012f080e7Smrj * rootnex_dma_mctl() 479112f080e7Smrj * 4792b51bbbf5SVikram Hegde * No IOMMU in 32 bit mode. The below routine doesn't work in 64 bit mode. 479312f080e7Smrj */ 479412f080e7Smrj /* ARGSUSED */ 479512f080e7Smrj static int 4796b51bbbf5SVikram Hegde rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 479712f080e7Smrj enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp, 479812f080e7Smrj uint_t cache_flags) 479912f080e7Smrj { 480012f080e7Smrj #if defined(__amd64) 480112f080e7Smrj /* 480212f080e7Smrj * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a 480312f080e7Smrj * common implementation in genunix, so they no longer have x86 480412f080e7Smrj * specific functionality which called into dma_ctl. 480512f080e7Smrj * 480612f080e7Smrj * The rest of the obsoleted interfaces were never supported in the 480712f080e7Smrj * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface 480812f080e7Smrj * was not ported to the x86 64-bit kernel due to serious x86 rootnex 480912f080e7Smrj * implementation issues. 481012f080e7Smrj * 481112f080e7Smrj * If you can't use DDI_DMA_SEGTOC, then DDI_DMA_NEXTSEG, DDI_DMA_FREE, 481212f080e7Smrj * and DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we 481312f080e7Smrj * reflect that now too... 481412f080e7Smrj * 481512f080e7Smrj * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are 481612f080e7Smrj * not going to put this functionality into the 64-bit x86 kernel now. 481712f080e7Smrj * It wasn't ported to the 64-bit kernel for s10, and there is no reason 481812f080e7Smrj * to change that in a future release. 481912f080e7Smrj */ 482012f080e7Smrj return (DDI_FAILURE); 482112f080e7Smrj 482212f080e7Smrj #else /* 32-bit x86 kernel */ 482312f080e7Smrj ddi_dma_cookie_t lcookie; 482412f080e7Smrj ddi_dma_cookie_t *cookie; 482512f080e7Smrj rootnex_window_t *window; 482612f080e7Smrj ddi_dma_impl_t *hp; 482712f080e7Smrj rootnex_dma_t *dma; 482812f080e7Smrj uint_t nwin; 482912f080e7Smrj uint_t ccnt; 483012f080e7Smrj size_t len; 483112f080e7Smrj off_t off; 483212f080e7Smrj int e; 483312f080e7Smrj 483412f080e7Smrj 483512f080e7Smrj /* 483612f080e7Smrj * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little 483712f080e7Smrj * hacky since we're optimizing for the current interfaces so we can 483812f080e7Smrj * clean up the mess in genunix. Hopefully we will remove these 483912f080e7Smrj * obsoleted routines someday soon. 484012f080e7Smrj */ 484112f080e7Smrj 484212f080e7Smrj switch (request) { 484312f080e7Smrj 484412f080e7Smrj case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */ 484512f080e7Smrj hp = (ddi_dma_impl_t *)handle; 484612f080e7Smrj cookie = (ddi_dma_cookie_t *)objpp; 484712f080e7Smrj 484812f080e7Smrj /* 484912f080e7Smrj * convert segment to cookie.
We don't distinguish between the 485012f080e7Smrj * two :-) 485112f080e7Smrj */ 485212f080e7Smrj *cookie = *hp->dmai_cookie; 485312f080e7Smrj *lenp = cookie->dmac_size; 485412f080e7Smrj *offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF; 485512f080e7Smrj return (DDI_SUCCESS); 485612f080e7Smrj 485712f080e7Smrj case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */ 485812f080e7Smrj hp = (ddi_dma_impl_t *)handle; 485912f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 486012f080e7Smrj 486112f080e7Smrj if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) { 486212f080e7Smrj return (DDI_DMA_STALE); 486312f080e7Smrj } 486412f080e7Smrj 486512f080e7Smrj /* handle the case where we don't have any windows */ 486612f080e7Smrj if (dma->dp_window == NULL) { 486712f080e7Smrj /* 486812f080e7Smrj * if seg == NULL, and we don't have any windows, 486912f080e7Smrj * return the first cookie in the sgl. 487012f080e7Smrj */ 487112f080e7Smrj if (*lenp == NULL) { 487212f080e7Smrj dma->dp_current_cookie = 0; 487312f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 487412f080e7Smrj *objpp = (caddr_t)handle; 487512f080e7Smrj return (DDI_SUCCESS); 487612f080e7Smrj 487712f080e7Smrj /* if we have more cookies, go to the next cookie */ 487812f080e7Smrj } else { 487912f080e7Smrj if ((dma->dp_current_cookie + 1) >= 488012f080e7Smrj dma->dp_sglinfo.si_sgl_size) { 488112f080e7Smrj return (DDI_DMA_DONE); 488212f080e7Smrj } 488312f080e7Smrj dma->dp_current_cookie++; 488412f080e7Smrj hp->dmai_cookie++; 488512f080e7Smrj return (DDI_SUCCESS); 488612f080e7Smrj } 488712f080e7Smrj } 488812f080e7Smrj 488912f080e7Smrj /* We have one or more windows */ 489012f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 489112f080e7Smrj 489212f080e7Smrj /* 489312f080e7Smrj * if seg == NULL, return the first cookie in the current 489412f080e7Smrj * window 489512f080e7Smrj */ 489612f080e7Smrj if (*lenp == NULL) { 489712f080e7Smrj dma->dp_current_cookie = 0; 4898cf4e9a1dSmrj hp->dmai_cookie = window->wd_first_cookie; 489912f080e7Smrj 490012f080e7Smrj /* 490112f080e7Smrj * go to the next cookie in the window then see if we done with 490212f080e7Smrj * this window. 490312f080e7Smrj */ 490412f080e7Smrj } else { 490512f080e7Smrj if ((dma->dp_current_cookie + 1) >= 490612f080e7Smrj window->wd_cookie_cnt) { 490712f080e7Smrj return (DDI_DMA_DONE); 490812f080e7Smrj } 490912f080e7Smrj dma->dp_current_cookie++; 491012f080e7Smrj hp->dmai_cookie++; 491112f080e7Smrj } 491212f080e7Smrj *objpp = (caddr_t)handle; 491312f080e7Smrj return (DDI_SUCCESS); 491412f080e7Smrj 491512f080e7Smrj case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */ 491612f080e7Smrj hp = (ddi_dma_impl_t *)handle; 491712f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 491812f080e7Smrj 491912f080e7Smrj if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) { 492012f080e7Smrj return (DDI_DMA_STALE); 492112f080e7Smrj } 492212f080e7Smrj 492312f080e7Smrj /* if win == NULL, return the first window in the bind */ 492412f080e7Smrj if (*offp == NULL) { 492512f080e7Smrj nwin = 0; 492612f080e7Smrj 492712f080e7Smrj /* 492812f080e7Smrj * else, go to the next window then see if we're done with all 492912f080e7Smrj * the windows. 
493012f080e7Smrj */ 493112f080e7Smrj } else { 493212f080e7Smrj nwin = dma->dp_current_win + 1; 493312f080e7Smrj if (nwin >= hp->dmai_nwin) { 493412f080e7Smrj return (DDI_DMA_DONE); 493512f080e7Smrj } 493612f080e7Smrj } 493712f080e7Smrj 493812f080e7Smrj /* switch to the next window */ 493912f080e7Smrj e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len, 494012f080e7Smrj &lcookie, &ccnt); 494112f080e7Smrj ASSERT(e == DDI_SUCCESS); 494212f080e7Smrj if (e != DDI_SUCCESS) { 494312f080e7Smrj return (DDI_DMA_STALE); 494412f080e7Smrj } 494512f080e7Smrj 494612f080e7Smrj /* reset the cookie back to the first cookie in the window */ 494712f080e7Smrj if (dma->dp_window != NULL) { 494812f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 494912f080e7Smrj hp->dmai_cookie = window->wd_first_cookie; 495012f080e7Smrj } else { 495112f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 495212f080e7Smrj } 495312f080e7Smrj 495412f080e7Smrj *objpp = (caddr_t)handle; 495512f080e7Smrj return (DDI_SUCCESS); 495612f080e7Smrj 495712f080e7Smrj case DDI_DMA_FREE: /* ddi_dma_free() */ 495812f080e7Smrj (void) rootnex_dma_unbindhdl(dip, rdip, handle); 495912f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, handle); 496012f080e7Smrj if (rootnex_state->r_dvma_call_list_id) { 496112f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 496212f080e7Smrj } 496312f080e7Smrj return (DDI_SUCCESS); 496412f080e7Smrj 496512f080e7Smrj case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */ 496612f080e7Smrj case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */ 496712f080e7Smrj /* should never get here, handled in genunix */ 496812f080e7Smrj ASSERT(0); 496912f080e7Smrj return (DDI_FAILURE); 497012f080e7Smrj 497112f080e7Smrj case DDI_DMA_KVADDR: 497212f080e7Smrj case DDI_DMA_GETERR: 497312f080e7Smrj case DDI_DMA_COFF: 497412f080e7Smrj return (DDI_FAILURE); 497512f080e7Smrj } 497612f080e7Smrj 497712f080e7Smrj return (DDI_FAILURE); 497812f080e7Smrj #endif /* defined(__amd64) */ 49797c478bd9Sstevel@tonic-gate } 49807aec1d6eScindi 498120906b23SVikram Hegde /* 498200d0963fSdilpreet * ********* 498300d0963fSdilpreet * FMA Code 498400d0963fSdilpreet * ********* 498500d0963fSdilpreet */ 498600d0963fSdilpreet 498700d0963fSdilpreet /* 498800d0963fSdilpreet * rootnex_fm_init() 498900d0963fSdilpreet * FMA init busop 499000d0963fSdilpreet */ 49917aec1d6eScindi /* ARGSUSED */ 49927aec1d6eScindi static int 499300d0963fSdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 499400d0963fSdilpreet ddi_iblock_cookie_t *ibc) 49957aec1d6eScindi { 499600d0963fSdilpreet *ibc = rootnex_state->r_err_ibc; 499700d0963fSdilpreet 499800d0963fSdilpreet return (ddi_system_fmcap); 499900d0963fSdilpreet } 500000d0963fSdilpreet 500100d0963fSdilpreet /* 500200d0963fSdilpreet * rootnex_dma_check() 500300d0963fSdilpreet * Function called after a dma fault occurred to find out whether the 500400d0963fSdilpreet * fault address is associated with a driver that is able to handle faults 500500d0963fSdilpreet * and recover from faults. 
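 *
 * The heart of the check is a simple containment test, sketched here
 * with the same names the code below uses:
 *
 *	if ((fault_addr >= start_addr) &&
 *	    (fault_addr <= (start_addr + csize)))
 *		return (DDI_FM_NONFATAL);
 *
 * applied to every cookie (with trimmed first/last cookies adjusted per
 * window); if nothing matches, DDI_FM_UNKNOWN is returned.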

/*
 * rootnex_dma_check()
 *    Called after a DMA fault to determine whether the fault address belongs
 *    to a DMA handle whose driver is able to handle and recover from the
 *    fault.
 */
/* ARGSUSED */
static int
rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
    const void *not_used)
{
	rootnex_window_t *window;
	uint64_t start_addr;
	uint64_t fault_addr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t end_addr;
	size_t csize;
	int i;
	int j;


	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
	hp = (ddi_dma_impl_t *)handle;
	ASSERT(hp);

	dma = (rootnex_dma_t *)hp->dmai_private;

	/* Get the address that we need to search for */
	fault_addr = *(uint64_t *)addr;

	/*
	 * if we don't have any windows, we can just walk through all the
	 * cookies.
	 */
	if (dma->dp_window == NULL) {
		/* for each cookie */
		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
			    dma->dp_cookies[i].dmac_size))) {
				return (DDI_FM_NONFATAL);
			}
		}

		/* fault_addr not within this DMA handle */
		return (DDI_FM_UNKNOWN);
	}
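
	/*
	 * Editorial note, not part of the original source: with multiple
	 * windows, each window references a slice of the shared cookie array
	 * through wd_first_cookie, and the first and/or last cookie of a
	 * window may be "trimmed", i.e. split with a neighbouring window.
	 * The loop below therefore substitutes tr_first_paddr/tr_first_size
	 * and tr_last_paddr/tr_last_size when computing the address range to
	 * test.  For example (illustrative numbers only), if a 4 KB cookie
	 * at 0x1000 is split at 0x1800, window N ends with a trimmed last
	 * cookie covering [0x1000, 0x1800) and window N+1 begins with a
	 * trimmed first cookie covering [0x1800, 0x2000).
	 */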

	/* we have multiple windows, walk through each window */
	for (i = 0; i < hp->dmai_nwin; i++) {
		window = &dma->dp_window[i];

		/* Go through all the cookies in the window */
		for (j = 0; j < window->wd_cookie_cnt; j++) {

			start_addr = window->wd_first_cookie[j].dmac_laddress;
			csize = window->wd_first_cookie[j].dmac_size;

			/*
			 * if we are trimming the first cookie in the window,
			 * and this is the first cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_first && (j == 0)) {
				start_addr = window->wd_trim.tr_first_paddr;
				csize = window->wd_trim.tr_first_size;
			}

			/*
			 * if we are trimming the last cookie in the window,
			 * and this is the last cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_last &&
			    (j == (window->wd_cookie_cnt - 1))) {
				start_addr = window->wd_trim.tr_last_paddr;
				csize = window->wd_trim.tr_last_size;
			}

			end_addr = start_addr + csize;

			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= start_addr) &&
			    (fault_addr <= end_addr)) {
				return (DDI_FM_NONFATAL);
			}
		}
	}

	/* fault_addr not within this DMA handle */
	return (DDI_FM_UNKNOWN);
}

/*ARGSUSED*/
static int
rootnex_quiesce(dev_info_t *dip)
{
#if defined(__amd64) && !defined(__xpv)
	return (immu_quiesce());
#else
	return (DDI_SUCCESS);
#endif
}

#if defined(__xpv)
void
immu_init(void)
{
	;
}

void
immu_startup(void)
{
	;
}
/*ARGSUSED*/
void
immu_physmem_update(uint64_t addr, uint64_t size)
{
	;
}
#endif
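
/*
 * Editorial note, not part of the original source: rootnex_quiesce() is the
 * root of the quiesce(9E) chain used by fast reboot; on bare-metal amd64 it
 * quiesces the Intel IOMMU through immu_quiesce().  Under the Xen hypervisor
 * (__xpv) the IOMMU is owned by the hypervisor rather than this kernel, so
 * the immu_* entry points referenced from common code are satisfied by the
 * empty stubs above.
 */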