ni.c (d6d2730c71a5d41a121a7b567bf7ff9c5d4cd3ab) -> ni.c (e32eb50dbe43862606a51caa94368ec6bd019434)
1/*
2 * Copyright 2010 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the

--- 1035 unchanged lines hidden ---

1044 WREG32(CP_PFP_UCODE_ADDR, 0);
1045 WREG32(CP_ME_RAM_WADDR, 0);
1046 WREG32(CP_ME_RAM_RADDR, 0);
1047 return 0;
1048}
1049
1050static int cayman_cp_start(struct radeon_device *rdev)
1051{
1052 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
1052 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1053 int r, i;
1054
1055 r = radeon_ring_lock(rdev, cp, 7);
1055 r = radeon_ring_lock(rdev, ring, 7);
1056 if (r) {
1057 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1058 return r;
1059 }
1060 radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
1061 radeon_ring_write(cp, 0x1);
1062 radeon_ring_write(cp, 0x0);
1063 radeon_ring_write(cp, rdev->config.cayman.max_hw_contexts - 1);
1064 radeon_ring_write(cp, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1065 radeon_ring_write(cp, 0);
1066 radeon_ring_write(cp, 0);
1067 radeon_ring_unlock_commit(rdev, cp);
1060 radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
1061 radeon_ring_write(ring, 0x1);
1062 radeon_ring_write(ring, 0x0);
1063 radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
1064 radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
1065 radeon_ring_write(ring, 0);
1066 radeon_ring_write(ring, 0);
1067 radeon_ring_unlock_commit(rdev, ring);
1068
1069 cayman_cp_enable(rdev, true);
1070
1071 r = radeon_ring_lock(rdev, cp, cayman_default_size + 19);
1071 r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
1072 if (r) {
1073 DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
1074 return r;
1075 }
1076
1077 /* setup clear context state */
1078 radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1079 radeon_ring_write(cp, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1078 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1079 radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
1080
1081 for (i = 0; i < cayman_default_size; i++)
1082 radeon_ring_write(cp, cayman_default_state[i]);
1082 radeon_ring_write(ring, cayman_default_state[i]);
1083
1084 radeon_ring_write(cp, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1085 radeon_ring_write(cp, PACKET3_PREAMBLE_END_CLEAR_STATE);
1084 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
1085 radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
1086
1087 /* set clear context state */
1088 radeon_ring_write(cp, PACKET3(PACKET3_CLEAR_STATE, 0));
1089 radeon_ring_write(cp, 0);
1088 radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
1089 radeon_ring_write(ring, 0);
1090
1091 /* SQ_VTX_BASE_VTX_LOC */
1092 radeon_ring_write(cp, 0xc0026f00);
1093 radeon_ring_write(cp, 0x00000000);
1094 radeon_ring_write(cp, 0x00000000);
1095 radeon_ring_write(cp, 0x00000000);
1092 radeon_ring_write(ring, 0xc0026f00);
1093 radeon_ring_write(ring, 0x00000000);
1094 radeon_ring_write(ring, 0x00000000);
1095 radeon_ring_write(ring, 0x00000000);
1096
1097 /* Clear consts */
1098 radeon_ring_write(cp, 0xc0036f00);
1099 radeon_ring_write(cp, 0x00000bc4);
1100 radeon_ring_write(cp, 0xffffffff);
1101 radeon_ring_write(cp, 0xffffffff);
1102 radeon_ring_write(cp, 0xffffffff);
1098 radeon_ring_write(ring, 0xc0036f00);
1099 radeon_ring_write(ring, 0x00000bc4);
1100 radeon_ring_write(ring, 0xffffffff);
1101 radeon_ring_write(ring, 0xffffffff);
1102 radeon_ring_write(ring, 0xffffffff);
1103
1104 radeon_ring_write(cp, 0xc0026900);
1105 radeon_ring_write(cp, 0x00000316);
1106 radeon_ring_write(cp, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1107 radeon_ring_write(cp, 0x00000010); /* */
1104 radeon_ring_write(ring, 0xc0026900);
1105 radeon_ring_write(ring, 0x00000316);
1106 radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
1107 radeon_ring_write(ring, 0x00000010); /* */
1108
1109 radeon_ring_unlock_commit(rdev, cp);
1109 radeon_ring_unlock_commit(rdev, ring);
1110
1111 /* XXX init other rings */
1112
1113 return 0;
1114}
1115
1116static void cayman_cp_fini(struct radeon_device *rdev)
1117{
1118 cayman_cp_enable(rdev, false);
1119 radeon_ring_fini(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
1119 radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1120}
1121
1122int cayman_cp_resume(struct radeon_device *rdev)
1123{
1124 struct radeon_cp *cp;
1124 struct radeon_ring *ring;
1125 u32 tmp;
1126 u32 rb_bufsz;
1127 int r;
1128
1129 /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
1130 WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1131 SOFT_RESET_PA |
1132 SOFT_RESET_SH |

--- 9 unchanged lines hidden ---

1142
1143 /* Set the write pointer delay */
1144 WREG32(CP_RB_WPTR_DELAY, 0);
1145
1146 WREG32(CP_DEBUG, (1 << 27));
1147
1148 /* ring 0 - compute and gfx */
1149 /* Set ring buffer size */
1150 cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
1151 rb_bufsz = drm_order(cp->ring_size / 8);
1150 ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1151 rb_bufsz = drm_order(ring->ring_size / 8);
1152 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1153#ifdef __BIG_ENDIAN
1154 tmp |= BUF_SWAP_32BIT;
1155#endif
1156 WREG32(CP_RB0_CNTL, tmp);
1157
1158 /* Initialize the ring buffer's read and write pointers */
1159 WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
1160 cp->wptr = 0;
1161 WREG32(CP_RB0_WPTR, cp->wptr);
1160 ring->wptr = 0;
1161 WREG32(CP_RB0_WPTR, ring->wptr);
1162
1163 /* set the wb address wether it's enabled or not */
1164 WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
1165 WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1166 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1167
1168 if (rdev->wb.enabled)
1169 WREG32(SCRATCH_UMSK, 0xff);
1170 else {
1171 tmp |= RB_NO_UPDATE;
1172 WREG32(SCRATCH_UMSK, 0);
1173 }
1174
1175 mdelay(1);
1176 WREG32(CP_RB0_CNTL, tmp);
1177
1178 WREG32(CP_RB0_BASE, cp->gpu_addr >> 8);
1178 WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
1179
1180 cp->rptr = RREG32(CP_RB0_RPTR);
1180 ring->rptr = RREG32(CP_RB0_RPTR);
1181
1182 /* ring1 - compute only */
1183 /* Set ring buffer size */
1184 cp = &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX];
1185 rb_bufsz = drm_order(cp->ring_size / 8);
1184 ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
1185 rb_bufsz = drm_order(ring->ring_size / 8);
1186 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1187#ifdef __BIG_ENDIAN
1188 tmp |= BUF_SWAP_32BIT;
1189#endif
1190 WREG32(CP_RB1_CNTL, tmp);
1191
1192 /* Initialize the ring buffer's read and write pointers */
1193 WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
1194 cp->wptr = 0;
1195 WREG32(CP_RB1_WPTR, cp->wptr);
1194 ring->wptr = 0;
1195 WREG32(CP_RB1_WPTR, ring->wptr);
1196
1197 /* set the wb address wether it's enabled or not */
1198 WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
1199 WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
1200
1201 mdelay(1);
1202 WREG32(CP_RB1_CNTL, tmp);
1203
1204 WREG32(CP_RB1_BASE, cp->gpu_addr >> 8);
1204 WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
1205
1206 cp->rptr = RREG32(CP_RB1_RPTR);
1206 ring->rptr = RREG32(CP_RB1_RPTR);
1207
1208 /* ring2 - compute only */
1209 /* Set ring buffer size */
1210 cp = &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX];
1211 rb_bufsz = drm_order(cp->ring_size / 8);
1210 ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
1211 rb_bufsz = drm_order(ring->ring_size / 8);
1212 tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
1213#ifdef __BIG_ENDIAN
1214 tmp |= BUF_SWAP_32BIT;
1215#endif
1216 WREG32(CP_RB2_CNTL, tmp);
1217
1218 /* Initialize the ring buffer's read and write pointers */
1219 WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
1220 cp->wptr = 0;
1221 WREG32(CP_RB2_WPTR, cp->wptr);
1220 ring->wptr = 0;
1221 WREG32(CP_RB2_WPTR, ring->wptr);
1222
1223 /* set the wb address wether it's enabled or not */
1224 WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
1225 WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
1226
1227 mdelay(1);
1228 WREG32(CP_RB2_CNTL, tmp);
1229
1230 WREG32(CP_RB2_BASE, cp->gpu_addr >> 8);
1230 WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
1231
1232 cp->rptr = RREG32(CP_RB2_RPTR);
1232 ring->rptr = RREG32(CP_RB2_RPTR);
1233
1234 /* start the rings */
1235 cayman_cp_start(rdev);
1236 rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1237 rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1238 rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1236 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
1237 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1238 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1239 /* this only test cp0 */
1240 r = radeon_ring_test(rdev, &rdev->cp[RADEON_RING_TYPE_GFX_INDEX]);
1240 r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
1241 if (r) {
1242 rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1243 rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1244 rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1242 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1243 rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
1244 rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
1245 return r;
1246 }
1247
1248 return 0;
1249}
1250
1251bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_cp *cp)
1251bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
1252{
1253 u32 srbm_status;
1254 u32 grbm_status;
1255 u32 grbm_status_se0, grbm_status_se1;
1256 struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
1257 int r;
1258
1259 srbm_status = RREG32(SRBM_STATUS);
1260 grbm_status = RREG32(GRBM_STATUS);
1261 grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
1262 grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
1263 if (!(grbm_status & GUI_ACTIVE)) {
1264 r100_gpu_lockup_update(lockup, cp);
1264 r100_gpu_lockup_update(lockup, ring);
1265 return false;
1266 }
1267 /* force CP activities */
1268 r = radeon_ring_lock(rdev, cp, 2);
1268 r = radeon_ring_lock(rdev, ring, 2);
1269 if (!r) {
1270 /* PACKET2 NOP */
1271 radeon_ring_write(cp, 0x80000000);
1272 radeon_ring_write(cp, 0x80000000);
1273 radeon_ring_unlock_commit(rdev, cp);
1271 radeon_ring_write(ring, 0x80000000);
1272 radeon_ring_write(ring, 0x80000000);
1273 radeon_ring_unlock_commit(rdev, ring);
1274 }
1275 /* XXX deal with CP0,1,2 */
1276 cp->rptr = RREG32(cp->rptr_reg);
1277 return r100_gpu_cp_is_lockup(rdev, lockup, cp);
1276 ring->rptr = RREG32(ring->rptr_reg);
1277 return r100_gpu_cp_is_lockup(rdev, lockup, ring);
1278}
1279
1280static int cayman_gpu_soft_reset(struct radeon_device *rdev)
1281{
1282 struct evergreen_mc_save save;
1283 u32 grbm_reset = 0;
1284
1285 if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))

--- 52 unchanged lines hidden ---

1338
1339int cayman_asic_reset(struct radeon_device *rdev)
1340{
1341 return cayman_gpu_soft_reset(rdev);
1342}
1343
1344static int cayman_startup(struct radeon_device *rdev)
1345{
1346 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
1346 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1347 int r;
1348
1349 /* enable pcie gen2 link */
1350 evergreen_pcie_gen2_enable(rdev);
1351
1352 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
1353 r = ni_init_microcode(rdev);
1354 if (r) {

--- 33 unchanged lines hidden ---

1388 r = r600_irq_init(rdev);
1389 if (r) {
1390 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1391 radeon_irq_kms_fini(rdev);
1392 return r;
1393 }
1394 evergreen_irq_set(rdev);
1395
1396 r = radeon_ring_init(rdev, cp, cp->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1396 r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
1397 CP_RB0_RPTR, CP_RB0_WPTR);
1398 if (r)
1399 return r;
1400 r = cayman_cp_load_microcode(rdev);
1401 if (r)
1402 return r;
1403 r = cayman_cp_resume(rdev);
1404 if (r)

--- 28 unchanged lines hidden ---

1433 return r;
1434
1435}
1436
1437int cayman_suspend(struct radeon_device *rdev)
1438{
1439 /* FIXME: we should wait for ring to be empty */
1440 cayman_cp_enable(rdev, false);
1441 rdev->cp[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1441 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
1442 evergreen_irq_suspend(rdev);
1443 radeon_wb_disable(rdev);
1444 cayman_pcie_gart_disable(rdev);
1445 r600_blit_suspend(rdev);
1446
1447 return 0;
1448}
1449
1450/* Plan is to move initialization in that function and use
1451 * helper function so that radeon_device_init pretty much
1452 * do nothing more than calling asic specific function. This
1453 * should also allow to remove a bunch of callback function
1454 * like vram_info.
1455 */
1456int cayman_init(struct radeon_device *rdev)
1457{
1458 struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
1458 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
1459 int r;
1460
1461 /* This don't do much */
1462 r = radeon_gem_init(rdev);
1463 if (r)
1464 return r;
1465 /* Read BIOS */
1466 if (!radeon_get_bios(rdev)) {

--- 36 unchanged lines hidden ---

1503 r = radeon_bo_init(rdev);
1504 if (r)
1505 return r;
1506
1507 r = radeon_irq_kms_init(rdev);
1508 if (r)
1509 return r;
1510
1511 cp->ring_obj = NULL;
1512 r600_ring_init(rdev, cp, 1024 * 1024);
1511 ring->ring_obj = NULL;
1512 r600_ring_init(rdev, ring, 1024 * 1024);
1513
1514 rdev->ih.ring_obj = NULL;
1515 r600_ih_ring_init(rdev, 64 * 1024);
1516
1517 r = r600_pcie_gart_init(rdev);
1518 if (r)
1519 return r;
1520

--- 55 unchanged lines hidden ---
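
Note: every hunk above is the same mechanical rename between the two revisions in the header. The left-hand revision (d6d2730c) keeps per-ring command-processor state in struct radeon_cp inside the rdev->cp[] array; the right-hand one (e32eb50d) calls it struct radeon_ring inside rdev->ring[], and the local variables follow (cp -> ring). The ring indices and the shape of the radeon_ring_* call sites are unchanged. A reduced before/after sketch of the access pattern, limited to calls that appear in the hunks (illustrative only, not a compilable excerpt of either revision; the real structures carry many more members):

	/* before (d6d2730c): per-ring state is struct radeon_cp in rdev->cp[] */
	struct radeon_cp *cp = &rdev->cp[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_lock(rdev, cp, 7);
	radeon_ring_write(cp, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_unlock_commit(rdev, cp);

	/* after (e32eb50d): same calls, same index macros; only the type name
	 * and the rdev member are renamed */
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_lock(rdev, ring, 7);
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_unlock_commit(rdev, ring);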