author		Christian König <[email protected]>	2011-02-24 22:02:42 +0100
committer	Christian König <[email protected]>	2011-02-24 22:02:42 +0100
commit		b922a0ce12916a91cfc3e56714913fcf63279ff2 (patch)
tree		e24fa039925220882d155ccb987f7914e83f4372 /src/gallium/drivers/r600
parent		f013b4f8f1329982727691a55cc263e3011d02bf (diff)
parent		c0ad70ae31ee5501281b434d56e389fc92b13a3a (diff)
Merge remote branch 'origin/master' into pipe-video
Conflicts:
	configure.ac
	src/gallium/auxiliary/Makefile
	src/gallium/auxiliary/SConscript
	src/gallium/drivers/r600/r600_asm.c
	src/gallium/drivers/r600/r600_asm.h
	src/gallium/drivers/r600/r600_shader.c
	src/gallium/drivers/r600/r600_state_inlines.h
	src/gallium/drivers/r600/r600_texture.c
Diffstat (limited to 'src/gallium/drivers/r600')
22 files changed, 1725 insertions, 1973 deletions
diff --git a/src/gallium/drivers/r600/Makefile b/src/gallium/drivers/r600/Makefile
index a690b671e49..436de9c4dbd 100644
--- a/src/gallium/drivers/r600/Makefile
+++ b/src/gallium/drivers/r600/Makefile
@@ -22,7 +22,6 @@ C_SOURCES = \
 	evergreen_state.c \
 	eg_asm.c \
 	r600_translate.c \
-	r600_state_common.c \
-	r600_upload.c
+	r600_state_common.c
 
 include ../../Makefile.template
diff --git a/src/gallium/drivers/r600/SConscript b/src/gallium/drivers/r600/SConscript
index e51f50c5df5..5a5fa6d65fd 100644
--- a/src/gallium/drivers/r600/SConscript
+++ b/src/gallium/drivers/r600/SConscript
@@ -28,7 +28,6 @@ r600 = env.ConvenienceLibrary(
 		'r600_state_common.c',
 		'r600_texture.c',
 		'r600_translate.c',
-		'r600_upload.c',
 		'r700_asm.c',
 		'evergreen_state.c',
 		'eg_asm.c',
diff --git a/src/gallium/drivers/r600/eg_state_inlines.h b/src/gallium/drivers/r600/eg_state_inlines.h
index 5a39d7cdeec..b5fcc7106fe 100644
--- a/src/gallium/drivers/r600/eg_state_inlines.h
+++ b/src/gallium/drivers/r600/eg_state_inlines.h
@@ -253,9 +253,13 @@ static inline unsigned r600_tex_dim(unsigned dim)
 	default:
 	case PIPE_TEXTURE_1D:
 		return V_030000_SQ_TEX_DIM_1D;
+	case PIPE_TEXTURE_1D_ARRAY:
+		return V_030000_SQ_TEX_DIM_1D_ARRAY;
 	case PIPE_TEXTURE_2D:
 	case PIPE_TEXTURE_RECT:
 		return V_030000_SQ_TEX_DIM_2D;
+	case PIPE_TEXTURE_2D_ARRAY:
+		return V_030000_SQ_TEX_DIM_2D_ARRAY;
 	case PIPE_TEXTURE_3D:
 		return V_030000_SQ_TEX_DIM_3D;
 	case PIPE_TEXTURE_CUBE:
@@ -289,10 +293,14 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format)
 {
 	switch (format) {
 	/* 8-bit buffers. */
+	case PIPE_FORMAT_L4A4_UNORM:
+		return V_028C70_SWAP_ALT;
+
 	case PIPE_FORMAT_A8_UNORM:
 		return V_028C70_SWAP_ALT_REV;
 	case PIPE_FORMAT_I8_UNORM:
 	case PIPE_FORMAT_L8_UNORM:
+	case PIPE_FORMAT_L8_SRGB:
 	case PIPE_FORMAT_R8_UNORM:
 	case PIPE_FORMAT_R8_SNORM:
 		return V_028C70_SWAP_STD;
@@ -313,6 +321,7 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format)
 		return V_028C70_SWAP_STD;
 
 	case PIPE_FORMAT_L8A8_UNORM:
+	case PIPE_FORMAT_L8A8_SRGB:
 		return V_028C70_SWAP_ALT;
 	case PIPE_FORMAT_R8G8_UNORM:
 		return V_028C70_SWAP_STD;
@@ -352,9 +361,11 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format)
 
 	case PIPE_FORMAT_R10G10B10A2_UNORM:
 	case PIPE_FORMAT_R10G10B10X2_SNORM:
-	case PIPE_FORMAT_B10G10R10A2_UNORM:
 	case PIPE_FORMAT_R10SG10SB10SA2U_NORM:
-		return V_028C70_SWAP_STD_REV;
+		return V_028C70_SWAP_STD;
+
+	case PIPE_FORMAT_B10G10R10A2_UNORM:
+		return V_028C70_SWAP_ALT;
 
 	case PIPE_FORMAT_R16G16_UNORM:
 		return V_028C70_SWAP_STD;
@@ -362,14 +373,13 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format)
 	/* 64-bit buffers. */
 	case PIPE_FORMAT_R16G16B16A16_UNORM:
 	case PIPE_FORMAT_R16G16B16A16_SNORM:
-		//		return V_028C70_COLOR_16_16_16_16;
 	case PIPE_FORMAT_R16G16B16A16_FLOAT:
-		//		return V_028C70_COLOR_16_16_16_16_FLOAT;
 
 	/* 128-bit buffers. */
 	case PIPE_FORMAT_R32G32B32A32_FLOAT:
-		//		return V_028C70_COLOR_32_32_32_32_FLOAT;
-		return 0;
+	case PIPE_FORMAT_R32G32B32A32_SNORM:
+	case PIPE_FORMAT_R32G32B32A32_UNORM:
+		return V_028C70_SWAP_STD;
 	default:
 		R600_ERR("unsupported colorswap format %d\n", format);
 		return ~0;
@@ -381,9 +391,13 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format)
 {
 	switch (format) {
 	/* 8-bit buffers.
*/ + case PIPE_FORMAT_L4A4_UNORM: + return V_028C70_COLOR_4_4; + case PIPE_FORMAT_A8_UNORM: case PIPE_FORMAT_I8_UNORM: case PIPE_FORMAT_L8_UNORM: + case PIPE_FORMAT_L8_SRGB: case PIPE_FORMAT_R8_UNORM: case PIPE_FORMAT_R8_SNORM: return V_028C70_COLOR_8; @@ -404,6 +418,7 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) return V_028C70_COLOR_16; case PIPE_FORMAT_L8A8_UNORM: + case PIPE_FORMAT_L8A8_SRGB: case PIPE_FORMAT_R8G8_UNORM: return V_028C70_COLOR_8_8; @@ -430,7 +445,7 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) case PIPE_FORMAT_R10G10B10X2_SNORM: case PIPE_FORMAT_B10G10R10A2_UNORM: case PIPE_FORMAT_R10SG10SB10SA2U_NORM: - return V_028C70_COLOR_10_10_10_2; + return V_028C70_COLOR_2_10_10_10; case PIPE_FORMAT_Z24X8_UNORM: case PIPE_FORMAT_Z24_UNORM_S8_USCALED: @@ -471,6 +486,9 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) return V_028C70_COLOR_32_32; /* 128-bit buffers. */ + case PIPE_FORMAT_R32G32B32A32_SNORM: + case PIPE_FORMAT_R32G32B32A32_UNORM: + return V_028C70_COLOR_32_32_32_32; case PIPE_FORMAT_R32G32B32_FLOAT: return V_028C70_COLOR_32_32_32_FLOAT; case PIPE_FORMAT_R32G32B32A32_FLOAT: @@ -501,9 +519,4 @@ static INLINE boolean r600_is_zs_format_supported(enum pipe_format format) return r600_translate_dbformat(format) != ~0; } -static INLINE boolean r600_is_vertex_format_supported(enum pipe_format format) -{ - return r600_translate_colorformat(format) != ~0; -} - #endif diff --git a/src/gallium/drivers/r600/evergreen_state.c b/src/gallium/drivers/r600/evergreen_state.c index 306ca03234f..3efdbaba0c3 100644 --- a/src/gallium/drivers/r600/evergreen_state.c +++ b/src/gallium/drivers/r600/evergreen_state.c @@ -103,7 +103,7 @@ static void *evergreen_create_blend_state(struct pipe_context *ctx, } blend->cb_target_mask = target_mask; r600_pipe_state_add_reg(rstate, R_028808_CB_COLOR_CONTROL, - color_control, 0xFFFFFFFF, NULL); + color_control, 0xFFFFFFFD, NULL); r600_pipe_state_add_reg(rstate, R_028C3C_PA_SC_AA_MASK, 0xFFFFFFFF, 0xFFFFFFFF, NULL); for (int i = 0; i < 8; i++) { @@ -351,7 +351,7 @@ static struct pipe_sampler_view *evergreen_create_sampler_view(struct pipe_conte struct r600_resource *rbuffer; unsigned format; uint32_t word4 = 0, yuv_format = 0, pitch = 0; - unsigned char swizzle[4]; + unsigned char swizzle[4], array_mode = 0, tile_type = 0; struct r600_bo *bo[2]; if (resource == NULL) @@ -380,35 +380,42 @@ static struct pipe_sampler_view *evergreen_create_sampler_view(struct pipe_conte if (desc == NULL) { R600_ERR("unknow format %d\n", state->format); } - tmp = (struct r600_resource_texture*)texture; + tmp = (struct r600_resource_texture *)texture; + if (tmp->depth && !tmp->is_flushing_texture) { + r600_texture_depth_flush(ctx, texture, TRUE); + tmp = tmp->flushed_depth_texture; + } + + if (tmp->force_int_type) { + word4 &= C_030010_NUM_FORMAT_ALL; + word4 |= S_030010_NUM_FORMAT_ALL(V_030010_SQ_NUM_FORMAT_INT); + } + rbuffer = &tmp->resource; bo[0] = rbuffer->bo; bo[1] = rbuffer->bo; - /* FIXME depth texture decompression */ - if (tmp->depth) { - r600_texture_depth_flush(ctx, texture); - tmp = (struct r600_resource_texture*)texture; - rbuffer = &tmp->flushed_depth_texture->resource; - bo[0] = rbuffer->bo; - bo[1] = rbuffer->bo; - } - pitch = align(tmp->pitch_in_pixels[0], 8); + + pitch = align(tmp->pitch_in_blocks[0] * util_format_get_blockwidth(state->format), 8); + array_mode = tmp->array_mode[0]; + tile_type = tmp->tile_type; /* FIXME properly handle first level != 0 */ 
r600_pipe_state_add_reg(rstate, R_030000_RESOURCE0_WORD0, S_030000_DIM(r600_tex_dim(texture->target)) | S_030000_PITCH((pitch / 8) - 1) | + S_030000_NON_DISP_TILING_ORDER(tile_type) | S_030000_TEX_WIDTH(texture->width0 - 1), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_030004_RESOURCE0_WORD1, S_030004_TEX_HEIGHT(texture->height0 - 1) | - S_030004_TEX_DEPTH(texture->depth0 - 1), + S_030004_TEX_DEPTH(texture->depth0 - 1) | + S_030004_ARRAY_MODE(array_mode), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_030008_RESOURCE0_WORD2, (tmp->offset[0] + r600_bo_offset(bo[0])) >> 8, 0xFFFFFFFF, bo[0]); r600_pipe_state_add_reg(rstate, R_03000C_RESOURCE0_WORD3, (tmp->offset[1] + r600_bo_offset(bo[1])) >> 8, 0xFFFFFFFF, bo[1]); r600_pipe_state_add_reg(rstate, R_030010_RESOURCE0_WORD4, - word4 | S_030010_NUM_FORMAT_ALL(V_030010_SQ_NUM_FORMAT_NORM) | + word4 | S_030010_SRF_MODE_ALL(V_030010_SRF_MODE_NO_ZERO) | S_030010_BASE_LEVEL(state->u.tex.first_level), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_030014_RESOURCE0_WORD5, @@ -431,7 +438,8 @@ static void evergreen_set_vs_sampler_view(struct pipe_context *ctx, unsigned cou for (int i = 0; i < count; i++) { if (resource[i]) { - evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, &resource[i]->state, i); + evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, &resource[i]->state, + i + R600_MAX_CONST_BUFFERS); } } } @@ -446,9 +454,11 @@ static void evergreen_set_ps_sampler_view(struct pipe_context *ctx, unsigned cou for (i = 0; i < count; i++) { if (&rctx->ps_samplers.views[i]->base != views[i]) { if (resource[i]) - evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, i); + evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, + i + R600_MAX_CONST_BUFFERS); else - evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i); + evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, + i + R600_MAX_CONST_BUFFERS); pipe_sampler_view_reference( (struct pipe_sampler_view **)&rctx->ps_samplers.views[i], @@ -457,7 +467,8 @@ static void evergreen_set_ps_sampler_view(struct pipe_context *ctx, unsigned cou } for (i = count; i < NUM_TEX_UNITS; i++) { if (rctx->ps_samplers.views[i]) { - evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i); + evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, + i + R600_MAX_CONST_BUFFERS); pipe_sampler_view_reference((struct pipe_sampler_view **)&rctx->ps_samplers.views[i], NULL); } } @@ -638,11 +649,19 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state unsigned color_info; unsigned format, swap, ntype; unsigned offset; + unsigned tile_type; const struct util_format_description *desc; struct r600_bo *bo[3]; + int i; surf = (struct r600_surface *)state->cbufs[cb]; rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture; + + if (rtex->depth && !rtex->is_flushing_texture) { + r600_texture_depth_flush(&rctx->context, state->cbufs[cb]->texture, TRUE); + rtex = rtex->flushed_depth_texture; + } + rbuffer = &rtex->resource; bo[0] = rbuffer->bo; bo[1] = rbuffer->bo; @@ -651,21 +670,43 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state /* XXX quite sure for dx10+ hw don't need any offset hacks */ offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture, level, state->cbufs[cb]->u.tex.first_layer); - pitch = rtex->pitch_in_pixels[level] / 8 - 1; - slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1; + pitch = 
rtex->pitch_in_blocks[level] / 8 - 1; + slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1; ntype = 0; - desc = util_format_description(rtex->resource.base.b.format); + desc = util_format_description(surf->base.format); if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) ntype = V_028C70_NUMBER_SRGB; - format = r600_translate_colorformat(rtex->resource.base.b.format); - swap = r600_translate_colorswap(rtex->resource.base.b.format); + format = r600_translate_colorformat(surf->base.format); + swap = r600_translate_colorswap(surf->base.format); + + /* disable when gallium grows int textures */ + if ((format == FMT_32_32_32_32 || format == FMT_16_16_16_16) && rtex->force_int_type) + ntype = 4; + color_info = S_028C70_FORMAT(format) | S_028C70_COMP_SWAP(swap) | + S_028C70_ARRAY_MODE(rtex->array_mode[level]) | S_028C70_BLEND_CLAMP(1) | S_028C70_NUMBER_TYPE(ntype); - if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) - color_info |= S_028C70_SOURCE_FORMAT(1); + + for (i = 0; i < 4; i++) { + if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) { + break; + } + } + + /* we can only set the export size if any thing is snorm/unorm component is > 11 bits, + if we aren't a float, sint or uint */ + if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS && + desc->channel[i].size < 12 && desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT && + ntype != 4 && ntype != 5) + color_info |= S_028C70_SOURCE_FORMAT(V_028C70_EXPORT_4C_16BPC); + + if (rtex->array_mode[level] > V_028C70_ARRAY_LINEAR_ALIGNED) { + tile_type = rtex->tile_type; + } else /* workaround for linear buffers */ + tile_type = 1; /* FIXME handle enabling of CB beyond BASE8 which has different offset */ r600_pipe_state_add_reg(rstate, @@ -690,7 +731,7 @@ static void evergreen_cb(struct r600_pipe_context *rctx, struct r600_pipe_state 0x00000000, 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_028C74_CB_COLOR0_ATTRIB + cb * 0x3C, - S_028C74_NON_DISP_TILING_ORDER(1), + S_028C74_NON_DISP_TILING_ORDER(tile_type), 0xFFFFFFFF, bo[0]); } @@ -711,17 +752,14 @@ static void evergreen_db(struct r600_pipe_context *rctx, struct r600_pipe_state surf = (struct r600_surface *)state->zsbuf; rtex = (struct r600_resource_texture*)state->zsbuf->texture; - rtex->tiled = 1; - rtex->array_mode[level] = 2; - rtex->tile_type = 1; - rtex->depth = 1; + rbuffer = &rtex->resource; /* XXX quite sure for dx10+ hw don't need any offset hacks */ offset = r600_texture_get_offset((struct r600_resource_texture *)state->zsbuf->texture, level, state->zsbuf->u.tex.first_layer); - pitch = rtex->pitch_in_pixels[level] / 8 - 1; - slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1; + pitch = rtex->pitch_in_blocks[level] / 8 - 1; + slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1; format = r600_translate_dbformat(state->zsbuf->texture->format); stencil_format = r600_translate_stencilformat(state->zsbuf->texture->format); @@ -837,51 +875,6 @@ static void evergreen_set_framebuffer_state(struct pipe_context *ctx, } } -static void evergreen_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index, - struct pipe_resource *buffer) -{ - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - struct r600_resource *rbuffer = (struct r600_resource*)buffer; - uint32_t offset; - - /* Note that the state tracker can unbind constant buffers by - * passing NULL here. 
- */ - if (buffer == NULL) { - return; - } - - r600_upload_const_buffer(rctx, buffer, &offset); - - switch (shader) { - case PIPE_SHADER_VERTEX: - rctx->vs_const_buffer.nregs = 0; - r600_pipe_state_add_reg(&rctx->vs_const_buffer, - R_028180_ALU_CONST_BUFFER_SIZE_VS_0, - ALIGN_DIVUP(buffer->width0 >> 4, 16), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&rctx->vs_const_buffer, - R_028980_ALU_CONST_CACHE_VS_0, - (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo); - r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer); - break; - case PIPE_SHADER_FRAGMENT: - rctx->ps_const_buffer.nregs = 0; - r600_pipe_state_add_reg(&rctx->ps_const_buffer, - R_028140_ALU_CONST_BUFFER_SIZE_PS_0, - ALIGN_DIVUP(buffer->width0 >> 4, 16), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&rctx->ps_const_buffer, - R_028940_ALU_CONST_CACHE_PS_0, - (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo); - r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer); - break; - default: - R600_ERR("unsupported %d\n", shader); - return; - } -} - void evergreen_init_state_functions(struct r600_pipe_context *rctx) { rctx->context.create_blend_state = evergreen_create_blend_state; @@ -909,7 +902,7 @@ void evergreen_init_state_functions(struct r600_pipe_context *rctx) rctx->context.delete_vs_state = r600_delete_vs_shader; rctx->context.set_blend_color = evergreen_set_blend_color; rctx->context.set_clip_state = evergreen_set_clip_state; - rctx->context.set_constant_buffer = evergreen_set_constant_buffer; + rctx->context.set_constant_buffer = r600_set_constant_buffer; rctx->context.set_fragment_sampler_views = evergreen_set_ps_sampler_view; rctx->context.set_framebuffer_state = evergreen_set_framebuffer_state; rctx->context.set_polygon_stipple = evergreen_set_polygon_stipple; @@ -921,6 +914,7 @@ void evergreen_init_state_functions(struct r600_pipe_context *rctx) rctx->context.set_vertex_sampler_views = evergreen_set_vs_sampler_view; rctx->context.set_viewport_state = evergreen_set_viewport_state; rctx->context.sampler_view_destroy = r600_sampler_view_destroy; + rctx->context.redefine_user_buffer = u_default_redefine_user_buffer; } void evergreen_init_config(struct r600_pipe_context *rctx) @@ -1325,216 +1319,6 @@ void evergreen_polygon_offset_update(struct r600_pipe_context *rctx) } } -static void evergreen_spi_update(struct r600_pipe_context *rctx) -{ - struct r600_pipe_shader *shader = rctx->ps_shader; - struct r600_pipe_state rstate; - struct r600_shader *rshader = &shader->shader; - unsigned i, tmp; - - rstate.nregs = 0; - for (i = 0; i < rshader->ninput; i++) { - tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i)); - if (rshader->input[i].name == TGSI_SEMANTIC_COLOR || - rshader->input[i].name == TGSI_SEMANTIC_BCOLOR || - rshader->input[i].name == TGSI_SEMANTIC_POSITION) { - tmp |= S_028644_FLAT_SHADE(rctx->flatshade); - } - if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && - rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) { - tmp |= S_028644_PT_SPRITE_TEX(1); - } - r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL); - } - r600_context_pipe_state_set(&rctx->ctx, &rstate); -} - -void evergreen_vertex_buffer_update(struct r600_pipe_context *rctx) -{ - struct r600_pipe_state *rstate; - struct r600_resource *rbuffer; - struct pipe_vertex_buffer *vertex_buffer; - unsigned i, offset; - - /* we don't update until we know vertex elements */ - if (rctx->vertex_elements == NULL || 
!rctx->nvertex_buffer) - return; - - if (rctx->vertex_elements->incompatible_layout) { - /* translate rebind new vertex elements so - * return once translated - */ - r600_begin_vertex_translate(rctx); - return; - } - - if (rctx->any_user_vbs) { - r600_upload_user_buffers(rctx); - rctx->any_user_vbs = FALSE; - } - - if (rctx->vertex_elements->vbuffer_need_offset) { - /* one resource per vertex elements */ - rctx->nvs_resource = rctx->vertex_elements->count; - } else { - /* bind vertex buffer once */ - rctx->nvs_resource = rctx->nvertex_buffer; - } - - for (i = 0 ; i < rctx->nvs_resource; i++) { - rstate = &rctx->vs_resource[i]; - rstate->id = R600_PIPE_STATE_RESOURCE; - rstate->nregs = 0; - - if (rctx->vertex_elements->vbuffer_need_offset) { - /* one resource per vertex elements */ - unsigned vbuffer_index; - vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index; - vertex_buffer = &rctx->vertex_buffer[vbuffer_index]; - rbuffer = (struct r600_resource*)vertex_buffer->buffer; - offset = rctx->vertex_elements->vbuffer_offset[i]; - } else { - /* bind vertex buffer once */ - vertex_buffer = &rctx->vertex_buffer[i]; - rbuffer = (struct r600_resource*)vertex_buffer->buffer; - offset = 0; - } - if (vertex_buffer == NULL || rbuffer == NULL) - continue; - offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo); - - r600_pipe_state_add_reg(rstate, R_030000_RESOURCE0_WORD0, - offset, 0xFFFFFFFF, rbuffer->bo); - r600_pipe_state_add_reg(rstate, R_030004_RESOURCE0_WORD1, - rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_030008_RESOURCE0_WORD2, - S_030008_STRIDE(vertex_buffer->stride), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_03000C_RESOURCE0_WORD3, - S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) | - S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) | - S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) | - S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_030010_RESOURCE0_WORD4, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_030014_RESOURCE0_WORD5, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_030018_RESOURCE0_WORD6, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_03001C_RESOURCE0_WORD7, - 0xC0000000, 0xFFFFFFFF, NULL); - evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); - } -} - -int r600_conv_pipe_prim(unsigned pprim, unsigned *prim); -void evergreen_draw(struct pipe_context *ctx, const struct pipe_draw_info *info) -{ - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - struct r600_resource *rbuffer; - u32 vgt_dma_index_type, vgt_draw_initiator, mask; - struct r600_draw rdraw; - struct r600_pipe_state vgt; - struct r600_drawl draw; - unsigned prim; - - memset(&draw, 0, sizeof(struct r600_drawl)); - draw.ctx = ctx; - draw.mode = info->mode; - draw.start = info->start; - draw.count = info->count; - if (info->indexed && rctx->index_buffer.buffer) { - draw.start += rctx->index_buffer.offset / rctx->index_buffer.index_size; - draw.min_index = info->min_index; - draw.max_index = info->max_index; - draw.index_bias = info->index_bias; - - r600_translate_index_buffer(rctx, &rctx->index_buffer.buffer, - &rctx->index_buffer.index_size, - &draw.start, - info->count); - - draw.index_size = rctx->index_buffer.index_size; - pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer); - draw.index_buffer_offset = draw.start * draw.index_size; - draw.start = 0; - r600_upload_index_buffer(rctx, &draw); - } else { - 
draw.index_size = 0; - draw.index_buffer = NULL; - draw.min_index = info->min_index; - draw.max_index = info->max_index; - draw.index_bias = info->start; - } - - switch (draw.index_size) { - case 2: - vgt_draw_initiator = 0; - vgt_dma_index_type = 0; - break; - case 4: - vgt_draw_initiator = 0; - vgt_dma_index_type = 1; - break; - case 0: - vgt_draw_initiator = 2; - vgt_dma_index_type = 0; - break; - default: - R600_ERR("unsupported index size %d\n", draw.index_size); - return; - } - if (r600_conv_pipe_prim(draw.mode, &prim)) - return; - if (unlikely(rctx->ps_shader == NULL)) { - R600_ERR("missing vertex shader\n"); - return; - } - if (unlikely(rctx->vs_shader == NULL)) { - R600_ERR("missing vertex shader\n"); - return; - } - /* there should be enough input */ - if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) { - R600_ERR("%d resources provided, expecting %d\n", - rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource); - return; - } - - evergreen_spi_update(rctx); - - mask = 0; - for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) { - mask |= (0xF << (i * 4)); - } - - vgt.id = R600_PIPE_STATE_VGT; - vgt.nregs = 0; - r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw.index_bias, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw.max_index, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw.min_index, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL); - r600_context_pipe_state_set(&rctx->ctx, &vgt); - - rdraw.vgt_num_indices = draw.count; - rdraw.vgt_num_instances = 1; - rdraw.vgt_index_type = vgt_dma_index_type; - rdraw.vgt_draw_initiator = vgt_draw_initiator; - rdraw.indices = NULL; - if (draw.index_buffer) { - rbuffer = (struct r600_resource*)draw.index_buffer; - rdraw.indices = rbuffer->bo; - rdraw.indices_bo_offset = draw.index_buffer_offset; - } - evergreen_context_draw(&rctx->ctx, &rdraw); - - pipe_resource_reference(&draw.index_buffer, NULL); -} - void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader) { struct r600_pipe_state *rstate = &shader->rstate; @@ -1733,3 +1517,31 @@ void *evergreen_create_db_flush_dsa(struct r600_pipe_context *rctx) S_028000_COPY_CENTROID(1), NULL); return rstate; } + +void evergreen_pipe_set_buffer_resource(struct r600_pipe_context *rctx, + struct r600_pipe_state *rstate, + struct r600_resource *rbuffer, + unsigned offset, unsigned stride) +{ + r600_pipe_state_add_reg(rstate, R_030000_RESOURCE0_WORD0, + offset, 0xFFFFFFFF, rbuffer->bo); + r600_pipe_state_add_reg(rstate, R_030004_RESOURCE0_WORD1, + rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_030008_RESOURCE0_WORD2, + S_030008_STRIDE(stride), + 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_03000C_RESOURCE0_WORD3, + S_03000C_DST_SEL_X(V_03000C_SQ_SEL_X) | + S_03000C_DST_SEL_Y(V_03000C_SQ_SEL_Y) | + S_03000C_DST_SEL_Z(V_03000C_SQ_SEL_Z) | + S_03000C_DST_SEL_W(V_03000C_SQ_SEL_W), + 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_030010_RESOURCE0_WORD4, + 0x00000000, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_030014_RESOURCE0_WORD5, + 0x00000000, 0xFFFFFFFF, NULL); + 
r600_pipe_state_add_reg(rstate, R_030018_RESOURCE0_WORD6, + 0x00000000, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_03001C_RESOURCE0_WORD7, + 0xC0000000, 0xFFFFFFFF, NULL); +} diff --git a/src/gallium/drivers/r600/evergreend.h b/src/gallium/drivers/r600/evergreend.h index e09e02ca000..f0a1ee0cd02 100644 --- a/src/gallium/drivers/r600/evergreend.h +++ b/src/gallium/drivers/r600/evergreend.h @@ -327,6 +327,9 @@ #define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24) #define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3) #define C_028C70_SOURCE_FORMAT 0xFCFFFFFF +#define V_028C70_EXPORT_4C_32BPC 0x0 +#define V_028C70_EXPORT_4C_16BPC 0x1 +#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */ #define S_028C70_RAT(x) (((x) & 0x1) << 26) #define G_028C70_RAT(x) (((x) >> 26) & 0x1) #define C_028C70_RAT 0xFBFFFFFF @@ -427,15 +430,6 @@ #define C_028800_STENCILZFAIL_BF 0x1FFFFFFF #define R_028808_CB_COLOR_CONTROL 0x028808 -#define S_028808_FOG_ENABLE(x) (((x) & 0x1) << 0) -#define G_028808_FOG_ENABLE(x) (((x) >> 0) & 0x1) -#define C_028808_FOG_ENABLE 0xFFFFFFFE -#define S_028808_MULTIWRITE_ENABLE(x) (((x) & 0x1) << 1) -#define G_028808_MULTIWRITE_ENABLE(x) (((x) >> 1) & 0x1) -#define C_028808_MULTIWRITE_ENABLE 0xFFFFFFFD -#define S_028808_DITHER_ENABLE(x) (((x) & 0x1) << 2) -#define G_028808_DITHER_ENABLE(x) (((x) >> 2) & 0x1) -#define C_028808_DITHER_ENABLE 0xFFFFFFFB #define S_028808_DEGAMMA_ENABLE(x) (((x) & 0x1) << 3) #define G_028808_DEGAMMA_ENABLE(x) (((x) >> 3) & 0x1) #define C_028808_DEGAMMA_ENABLE 0xFFFFFFF7 @@ -939,6 +933,9 @@ #define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005 #define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006 #define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007 +#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5) +#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1) +#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF #define S_030000_PITCH(x) (((x) & 0xFFF) << 6) #define G_030000_PITCH(x) (((x) >> 6) & 0xFFF) #define C_030000_PITCH 0xFFFC003F diff --git a/src/gallium/drivers/r600/r600.h b/src/gallium/drivers/r600/r600.h index a852bef6156..64c52bca795 100644 --- a/src/gallium/drivers/r600/r600.h +++ b/src/gallium/drivers/r600/r600.h @@ -113,6 +113,7 @@ struct r600_tiling_info { enum radeon_family r600_get_family(struct radeon *rw); enum chip_class r600_get_family_class(struct radeon *radeon); struct r600_tiling_info *r600_get_tiling_info(struct radeon *radeon); +unsigned r600_get_clock_crystal_freq(struct radeon *radeon); /* r600_bo.c */ struct r600_bo; @@ -249,6 +250,7 @@ struct r600_context { struct list_head query_list; unsigned num_query_running; struct list_head fenced_bo; + unsigned max_db; /* for OQ */ }; struct r600_draw { diff --git a/src/gallium/drivers/r600/r600_asm.c b/src/gallium/drivers/r600/r600_asm.c index f4ff2fc3d43..1393df88757 100644 --- a/src/gallium/drivers/r600/r600_asm.c +++ b/src/gallium/drivers/r600/r600_asm.c @@ -50,6 +50,7 @@ static inline unsigned int r600_bc_get_num_operands(struct r600_bc *bc, struct r case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP: return 0; case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD: + case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT: case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE: case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT: case V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE: @@ -97,6 +98,7 @@ static inline unsigned int r600_bc_get_num_operands(struct r600_bc *bc, struct r case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_NOP: return 0; case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD: + case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT: case 
EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLE: case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGT: case EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_KILLGE: @@ -288,6 +290,31 @@ int r600_bc_add_output(struct r600_bc *bc, const struct r600_bc_output *output) { int r; + if (bc->cf_last && bc->cf_last->inst == BC_INST(bc, V_SQ_CF_ALLOC_EXPORT_WORD1_SQ_CF_INST_EXPORT) && + output->type == bc->cf_last->output.type && + output->elem_size == bc->cf_last->output.elem_size && + output->swizzle_x == bc->cf_last->output.swizzle_x && + output->swizzle_y == bc->cf_last->output.swizzle_y && + output->swizzle_z == bc->cf_last->output.swizzle_z && + output->swizzle_w == bc->cf_last->output.swizzle_w && + (output->burst_count + bc->cf_last->output.burst_count) <= 16) { + + if ((output->gpr + output->burst_count) == bc->cf_last->output.gpr && + (output->array_base + output->burst_count) == bc->cf_last->output.array_base) { + + bc->cf_last->output.gpr = output->gpr; + bc->cf_last->output.array_base = output->array_base; + bc->cf_last->output.burst_count += output->burst_count; + return 0; + + } else if (output->gpr == (bc->cf_last->output.gpr + bc->cf_last->output.burst_count) && + output->array_base == (bc->cf_last->output.array_base + bc->cf_last->output.burst_count)) { + + bc->cf_last->output.burst_count += output->burst_count; + return 0; + } + } + r = r600_bc_add_cf(bc); if (r) return r; @@ -418,6 +445,20 @@ static int is_alu_reduction_inst(struct r600_bc *bc, struct r600_bc_alu *alu) } } +static int is_alu_cube_inst(struct r600_bc *bc, struct r600_bc_alu *alu) +{ + switch (bc->chiprev) { + case CHIPREV_R600: + case CHIPREV_R700: + return !alu->is_op3 && + alu->inst == V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE; + case CHIPREV_EVERGREEN: + default: + return !alu->is_op3 && + alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_CUBE; + } +} + static int is_alu_mova_inst(struct r600_bc *bc, struct r600_bc_alu *alu) { switch (bc->chiprev) { @@ -480,9 +521,9 @@ static int is_alu_trans_unit_inst(struct r600_bc *bc, struct r600_bc_alu *alu) case CHIPREV_EVERGREEN: default: if (!alu->is_op3) + /* Note that FLT_TO_INT* instructions are vector instructions + * on Evergreen, despite what the documentation says. 
*/ return alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ASHR_INT || - alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT || - alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT_FLOOR || alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_INT_TO_FLT || alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHL_INT || alu->inst == EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LSHR_INT || @@ -563,7 +604,7 @@ struct alu_bank_swizzle { int hw_cfile_elem[4]; }; -const unsigned cycle_for_bank_swizzle_vec[][3] = { +static const unsigned cycle_for_bank_swizzle_vec[][3] = { [SQ_ALU_VEC_012] = { 0, 1, 2 }, [SQ_ALU_VEC_021] = { 0, 2, 1 }, [SQ_ALU_VEC_120] = { 1, 2, 0 }, @@ -572,7 +613,7 @@ const unsigned cycle_for_bank_swizzle_vec[][3] = { [SQ_ALU_VEC_210] = { 2, 1, 0 } }; -const unsigned cycle_for_bank_swizzle_scl[][3] = { +static const unsigned cycle_for_bank_swizzle_scl[][3] = { [SQ_ALU_SCL_210] = { 2, 1, 0 }, [SQ_ALU_SCL_122] = { 1, 2, 2 }, [SQ_ALU_SCL_212] = { 2, 1, 2 }, @@ -785,7 +826,8 @@ static int replace_gpr_with_pv_ps(struct r600_bc *bc, for (i = 0; i < 5; ++i) { if(prev[i] && prev[i]->dst.write && !prev[i]->dst.rel) { gpr[i] = prev[i]->dst.sel; - if (is_alu_reduction_inst(bc, prev[i])) + /* cube writes more than PV.X */ + if (!is_alu_cube_inst(bc, prev[i]) && is_alu_reduction_inst(bc, prev[i])) chan[i] = 0; else chan[i] = prev[i]->dst.chan; @@ -865,7 +907,7 @@ static int r600_bc_alu_nliterals(struct r600_bc *bc, struct r600_bc_alu *alu, for (i = 0; i < num_src; ++i) { if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) { - uint32_t value = alu->src[i].value[alu->src[i].chan]; + uint32_t value = alu->src[i].value; unsigned found = 0; for (j = 0; j < *nliteral; ++j) { if (literal[j] == value) { @@ -892,7 +934,7 @@ static void r600_bc_alu_adjust_literals(struct r600_bc *bc, for (i = 0; i < num_src; ++i) { if (alu->src[i].sel == V_SQ_ALU_SRC_LITERAL) { - uint32_t value = alu->src[i].value[alu->src[i].chan]; + uint32_t value = alu->src[i].value; for (j = 0; j < nliteral; ++j) { if (literal[j] == value) { alu->src[i].chan = j; @@ -1195,8 +1237,7 @@ int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int bc->ngpr = nalu->src[i].sel + 1; } if (nalu->src[i].sel == V_SQ_ALU_SRC_LITERAL) - r600_bc_special_constants( - nalu->src[i].value[nalu->src[i].chan], + r600_bc_special_constants(nalu->src[i].value, &nalu->src[i].sel, &nalu->src[i].neg); } if (nalu->dst.sel >= bc->ngpr) { @@ -1308,6 +1349,18 @@ int r600_bc_add_tex(struct r600_bc *bc, const struct r600_bc_tex *tex) return -ENOMEM; memcpy(ntex, tex, sizeof(struct r600_bc_tex)); + /* we can't fetch data und use it as texture lookup address in the same TEX clause */ + if (bc->cf_last != NULL && + bc->cf_last->inst == V_SQ_CF_WORD1_SQ_CF_INST_TEX) { + struct r600_bc_tex *ttex; + LIST_FOR_EACH_ENTRY(ttex, &bc->cf_last->tex, list) { + if (ttex->dst_gpr == ntex->src_gpr) { + bc->force_add_cf = 1; + break; + } + } + } + /* cf can contains only alu or only vtx or only tex */ if (bc->cf_last == NULL || bc->cf_last->inst != V_SQ_CF_WORD1_SQ_CF_INST_TEX || @@ -1374,6 +1427,7 @@ static int r600_bc_vtx_build(struct r600_bc *bc, struct r600_bc_vtx *vtx, unsign } } bc->bytecode[id++] = S_SQ_VTX_WORD0_BUFFER_ID(vtx->buffer_id + fetch_resource_start) | + S_SQ_VTX_WORD0_FETCH_TYPE(vtx->fetch_type) | S_SQ_VTX_WORD0_SRC_GPR(vtx->src_gpr) | S_SQ_VTX_WORD0_SRC_SEL_X(vtx->src_sel_x) | S_SQ_VTX_WORD0_MEGA_FETCH_COUNT(vtx->mega_fetch_count); @@ -2674,18 +2728,73 @@ void r600_bc_dump(struct r600_bc *bc) } LIST_FOR_EACH_ENTRY(tex, &cf->tex, list) { - //TODO + 
fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]); + fprintf(stderr, "INST:%d ", tex->inst); + fprintf(stderr, "RESOURCE_ID:%d ", tex->resource_id); + fprintf(stderr, "SRC(GPR:%d ", tex->src_gpr); + fprintf(stderr, "REL:%d)\n", tex->src_rel); + id++; + fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]); + fprintf(stderr, "DST(GPR:%d ", tex->dst_gpr); + fprintf(stderr, "REL:%d ", tex->dst_rel); + fprintf(stderr, "SEL_X:%d ", tex->dst_sel_x); + fprintf(stderr, "SEL_Y:%d ", tex->dst_sel_y); + fprintf(stderr, "SEL_Z:%d ", tex->dst_sel_z); + fprintf(stderr, "SEL_W:%d) ", tex->dst_sel_w); + fprintf(stderr, "LOD_BIAS:%d ", tex->lod_bias); + fprintf(stderr, "COORD_TYPE_X:%d ", tex->coord_type_x); + fprintf(stderr, "COORD_TYPE_Y:%d ", tex->coord_type_y); + fprintf(stderr, "COORD_TYPE_Z:%d ", tex->coord_type_z); + fprintf(stderr, "COORD_TYPE_W:%d\n", tex->coord_type_w); + id++; + fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]); + fprintf(stderr, "OFFSET_X:%d ", tex->offset_x); + fprintf(stderr, "OFFSET_Y:%d ", tex->offset_y); + fprintf(stderr, "OFFSET_Z:%d ", tex->offset_z); + fprintf(stderr, "SAMPLER_ID:%d ", tex->sampler_id); + fprintf(stderr, "SRC(SEL_X:%d ", tex->src_sel_x); + fprintf(stderr, "SEL_Y:%d ", tex->src_sel_y); + fprintf(stderr, "SEL_Z:%d ", tex->src_sel_z); + fprintf(stderr, "SEL_W:%d)\n", tex->src_sel_w); + id++; + fprintf(stderr, "%04d %08X \n", id, bc->bytecode[id]); + id++; } LIST_FOR_EACH_ENTRY(vtx, &cf->vtx, list) { + fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]); + fprintf(stderr, "INST:%d ", vtx->inst); + fprintf(stderr, "FETCH_TYPE:%d ", vtx->fetch_type); + fprintf(stderr, "BUFFER_ID:%d\n", vtx->buffer_id); + id++; + /* This assumes that no semantic fetches exist */ + fprintf(stderr, "%04d %08X ", id, bc->bytecode[id]); + fprintf(stderr, "SRC(GPR:%d ", vtx->src_gpr); + fprintf(stderr, "SEL_X:%d) ", vtx->src_sel_x); + fprintf(stderr, "MEGA_FETCH_COUNT:%d ", vtx->mega_fetch_count); + fprintf(stderr, "DST(GPR:%d ", vtx->dst_gpr); + fprintf(stderr, "SEL_X:%d ", vtx->dst_sel_x); + fprintf(stderr, "SEL_Y:%d ", vtx->dst_sel_y); + fprintf(stderr, "SEL_Z:%d ", vtx->dst_sel_z); + fprintf(stderr, "SEL_W:%d) ", vtx->dst_sel_w); + fprintf(stderr, "USE_CONST_FIELDS:%d ", vtx->use_const_fields); + fprintf(stderr, "DATA_FORMAT:%d ", vtx->data_format); + fprintf(stderr, "NUM_FORMAT_ALL:%d ", vtx->num_format_all); + fprintf(stderr, "FORMAT_COMP_ALL:%d ", vtx->format_comp_all); + fprintf(stderr, "SRF_MODE_ALL:%d\n", vtx->srf_mode_all); + id++; + fprintf(stderr, "%04d %08X \n", id, bc->bytecode[id]); //TODO + id++; + fprintf(stderr, "%04d %08X \n", id, bc->bytecode[id]); + id++; } } fprintf(stderr, "--------------------------------------\n"); } -void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count) +static void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count) { struct r600_pipe_state *rstate; unsigned i = 0; @@ -2721,42 +2830,6 @@ void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count) 0xFFFFFFFF, ve->fetch_shader); } -void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count) -{ - struct r600_pipe_state *rstate; - unsigned i = 0; - - if (count > 8) { - bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1); - bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) | - S_SQ_CF_WORD1_BARRIER(1) | - S_SQ_CF_WORD1_COUNT(8 - 1); - bytecode[i++] = S_SQ_CF_WORD0_ADDR(40 >> 1); - bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) | - S_SQ_CF_WORD1_BARRIER(1) | - 
S_SQ_CF_WORD1_COUNT((count - 8) - 1); - } else { - bytecode[i++] = S_SQ_CF_WORD0_ADDR(8 >> 1); - bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_VTX_TC) | - S_SQ_CF_WORD1_BARRIER(1) | - S_SQ_CF_WORD1_COUNT(count - 1); - } - bytecode[i++] = S_SQ_CF_WORD0_ADDR(0); - bytecode[i++] = S_SQ_CF_WORD1_CF_INST(V_SQ_CF_WORD1_SQ_CF_INST_RETURN) | - S_SQ_CF_WORD1_BARRIER(1); - - rstate = &ve->rstate; - rstate->id = R600_PIPE_STATE_FETCH_SHADER; - rstate->nregs = 0; - r600_pipe_state_add_reg(rstate, R_0288A4_SQ_PGM_RESOURCES_FS, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_0288DC_SQ_PGM_CF_OFFSET_FS, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_028894_SQ_PGM_START_FS, - r600_bo_offset(ve->fetch_shader) >> 8, - 0xFFFFFFFF, ve->fetch_shader); -} - static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format, unsigned *num_format, unsigned *format_comp) { @@ -2780,7 +2853,7 @@ static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format, } switch (desc->channel[i].type) { - /* Half-floats, floats, doubles */ + /* Half-floats, floats, ints */ case UTIL_FORMAT_TYPE_FLOAT: switch (desc->channel[i].size) { case 16: @@ -2792,8 +2865,6 @@ static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format, *format = FMT_16_16_FLOAT; break; case 3: - *format = FMT_16_16_16_FLOAT; - break; case 4: *format = FMT_16_16_16_16_FLOAT; break; @@ -2833,8 +2904,6 @@ static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format, *format = FMT_8_8; break; case 3: - // *format = FMT_8_8_8; /* fails piglit draw-vertices test */ - // break; case 4: *format = FMT_8_8_8_8; break; @@ -2849,8 +2918,6 @@ static void r600_vertex_data_type(enum pipe_format pformat, unsigned *format, *format = FMT_16_16; break; case 3: - // *format = FMT_16_16_16; /* fails piglit draw-vertices test */ - // break; case 4: *format = FMT_16_16_16_16; break; @@ -2938,10 +3005,10 @@ int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, stru for (i = 0; i < ve->count; i++) { unsigned vbuffer_index; - r600_vertex_data_type(ve->hw_format[i], &format, &num_format, &format_comp); - desc = util_format_description(ve->hw_format[i]); + r600_vertex_data_type(ve->elements[i].src_format, &format, &num_format, &format_comp); + desc = util_format_description(ve->elements[i].src_format); if (desc == NULL) { - R600_ERR("unknown format %d\n", ve->hw_format[i]); + R600_ERR("unknown format %d\n", ve->elements[i].src_format); r600_bo_reference(rctx->radeon, &ve->fetch_shader, NULL); return -EINVAL; } diff --git a/src/gallium/drivers/r600/r600_asm.h b/src/gallium/drivers/r600/r600_asm.h index 519245f3af2..453c29790c1 100644 --- a/src/gallium/drivers/r600/r600_asm.h +++ b/src/gallium/drivers/r600/r600_asm.h @@ -34,7 +34,7 @@ struct r600_bc_alu_src { unsigned neg; unsigned abs; unsigned rel; - u32 *value; + uint32_t value; }; struct r600_bc_alu_dst { @@ -201,8 +201,6 @@ int r600_bc_add_cfinst(struct r600_bc *bc, int inst); int r600_bc_add_alu_type(struct r600_bc *bc, const struct r600_bc_alu *alu, int type); void r600_bc_special_constants(u32 value, unsigned *sel, unsigned *neg); void r600_bc_dump(struct r600_bc *bc); -void r600_cf_vtx(struct r600_vertex_element *ve, u32 *bytecode, unsigned count); -void r600_cf_vtx_tc(struct r600_vertex_element *ve, u32 *bytecode, unsigned count); int r600_vertex_elements_build_fetch_shader(struct r600_pipe_context *rctx, struct r600_vertex_element *ve); diff --git a/src/gallium/drivers/r600/r600_blit.c 
b/src/gallium/drivers/r600/r600_blit.c index b9ec9592e35..9865ea17ae5 100644 --- a/src/gallium/drivers/r600/r600_blit.c +++ b/src/gallium/drivers/r600/r600_blit.c @@ -36,6 +36,7 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op { struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; + rctx->blit = true; r600_context_queries_suspend(&rctx->ctx); util_blitter_save_blend(rctx->blitter, rctx->states[R600_PIPE_STATE_BLEND]); @@ -53,9 +54,9 @@ static void r600_blitter_begin(struct pipe_context *ctx, enum r600_blitter_op op if (rctx->states[R600_PIPE_STATE_CLIP]) { util_blitter_save_clip(rctx->blitter, &rctx->clip); } - util_blitter_save_vertex_buffers(rctx->blitter, rctx->nvertex_buffer, rctx->vertex_buffer); - - rctx->vertex_elements = NULL; + util_blitter_save_vertex_buffers(rctx->blitter, + rctx->vbuf_mgr->nr_vertex_buffers, + rctx->vbuf_mgr->vertex_buffer); if (op & (R600_CLEAR_SURFACE | R600_COPY)) util_blitter_save_framebuffer(rctx->blitter, &rctx->framebuffer); @@ -76,6 +77,7 @@ static void r600_blitter_end(struct pipe_context *ctx) { struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; r600_context_queries_resume(&rctx->ctx); + rctx->blit = false; } void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_texture *texture) @@ -84,13 +86,17 @@ void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_t struct pipe_surface *zsurf, *cbsurf, surf_tmpl; int level = 0; float depth = 1.0f; - surf_tmpl.format = texture->resource.base.b.format; + + if (!texture->dirty_db) + return; + + surf_tmpl.format = texture->resource.b.b.b.format; surf_tmpl.u.tex.level = level; surf_tmpl.u.tex.first_layer = 0; surf_tmpl.u.tex.last_layer = 0; surf_tmpl.usage = PIPE_BIND_DEPTH_STENCIL; - zsurf = ctx->create_surface(ctx, &texture->resource.base.b, &surf_tmpl); + zsurf = ctx->create_surface(ctx, &texture->resource.b.b.b, &surf_tmpl); surf_tmpl.format = ((struct pipe_resource*)texture->flushed_depth_texture)->format; surf_tmpl.usage = PIPE_BIND_RENDER_TARGET; @@ -107,6 +113,48 @@ void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_t pipe_surface_reference(&zsurf, NULL); pipe_surface_reference(&cbsurf, NULL); + + texture->dirty_db = FALSE; +} + +void r600_flush_depth_textures(struct r600_pipe_context *rctx) +{ + unsigned int i; + + if (rctx->blit) return; + + /* FIXME: This handles fragment shader textures only. 
*/ + + for (i = 0; i < rctx->ps_samplers.n_views; ++i) { + struct r600_pipe_sampler_view *view; + struct r600_resource_texture *tex; + + view = rctx->ps_samplers.views[i]; + if (!view) continue; + + tex = (struct r600_resource_texture *)view->base.texture; + if (!tex->depth) + continue; + + if (tex->is_flushing_texture) + continue; + + r600_blit_uncompress_depth(&rctx->context, tex); + } + + /* also check CB here */ + for (i = 0; i < rctx->framebuffer.nr_cbufs; i++) { + struct r600_resource_texture *tex; + tex = (struct r600_resource_texture *)rctx->framebuffer.cbufs[i]->texture; + + if (!tex->depth) + continue; + + if (tex->is_flushing_texture) + continue; + + r600_blit_uncompress_depth(&rctx->context, tex); + } } static void r600_clear(struct pipe_context *ctx, unsigned buffers, @@ -171,6 +219,52 @@ static void r600_hw_copy_region(struct pipe_context *ctx, r600_blitter_end(ctx); } +struct texture_orig_info { + unsigned format; + unsigned width0; + unsigned height0; +}; + +static void r600_s3tc_to_blittable(struct pipe_resource *tex, + unsigned level, + struct texture_orig_info *orig) +{ + struct r600_resource_texture *rtex = (struct r600_resource_texture*)tex; + unsigned pixsize = util_format_get_blocksize(tex->format); + int new_format; + int new_height, new_width; + + orig->format = tex->format; + orig->width0 = tex->width0; + orig->height0 = tex->height0; + + if (pixsize == 8) + new_format = PIPE_FORMAT_R16G16B16A16_UNORM; /* 64-bit block */ + else + new_format = PIPE_FORMAT_R32G32B32A32_UNORM; /* 128-bit block */ + + new_width = util_format_get_nblocksx(tex->format, orig->width0); + new_height = util_format_get_nblocksy(tex->format, orig->height0); + + rtex->force_int_type = true; + tex->width0 = new_width; + tex->height0 = new_height; + tex->format = new_format; + +} + +static void r600_reset_blittable_to_s3tc(struct pipe_resource *tex, + unsigned level, + struct texture_orig_info *orig) +{ + struct r600_resource_texture *rtex = (struct r600_resource_texture*)tex; + rtex->force_int_type = false; + + tex->format = orig->format; + tex->width0 = orig->width0; + tex->height0 = orig->height0; +} + static void r600_resource_copy_region(struct pipe_context *ctx, struct pipe_resource *dst, unsigned dst_level, @@ -179,15 +273,36 @@ static void r600_resource_copy_region(struct pipe_context *ctx, unsigned src_level, const struct pipe_box *src_box) { - boolean is_depth; - /* there is something wrong with depth resource copies at the moment so avoid them for now */ - is_depth = util_format_get_component_bits(src->format, UTIL_FORMAT_COLORSPACE_ZS, 0) != 0; - if (is_depth) - util_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, - src, src_level, src_box); - else - r600_hw_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, - src, src_level, src_box); + struct r600_resource_texture *rsrc = (struct r600_resource_texture*)src; + struct texture_orig_info orig_info[2]; + boolean restore_orig[2]; + + if (rsrc->depth && !rsrc->is_flushing_texture) + r600_texture_depth_flush(ctx, src, FALSE); + + restore_orig[0] = restore_orig[1] = FALSE; + + if (util_format_is_s3tc(src->format)) { + r600_s3tc_to_blittable(src, src_level, &orig_info[0]); + restore_orig[0] = TRUE; + } + + if (util_format_is_s3tc(dst->format)) { + r600_s3tc_to_blittable(dst, dst_level, &orig_info[1]); + restore_orig[1] = TRUE; + /* translate the dst box as well */ + dstx = util_format_get_nblocksx(orig_info[1].format, dstx); + dsty = util_format_get_nblocksy(orig_info[1].format, dsty); + } + + r600_hw_copy_region(ctx, dst, 
dst_level, dstx, dsty, dstz, + src, src_level, src_box); + + if (restore_orig[0]) + r600_reset_blittable_to_s3tc(src, src_level, &orig_info[0]); + + if (restore_orig[1]) + r600_reset_blittable_to_s3tc(dst, dst_level, &orig_info[1]); } void r600_init_blit_functions(struct r600_pipe_context *rctx) @@ -197,3 +312,19 @@ void r600_init_blit_functions(struct r600_pipe_context *rctx) rctx->context.clear_depth_stencil = r600_clear_depth_stencil; rctx->context.resource_copy_region = r600_resource_copy_region; } + +void r600_blit_push_depth(struct pipe_context *ctx, struct r600_resource_texture *texture) +{ + struct pipe_box sbox; + + sbox.x = sbox.y = sbox.z = 0; + sbox.width = texture->resource.b.b.b.width0; + sbox.height = texture->resource.b.b.b.height0; + /* XXX that might be wrong */ + sbox.depth = 1; + + r600_hw_copy_region(ctx, (struct pipe_resource *)texture, 0, + 0, 0, 0, + (struct pipe_resource *)texture->flushed_depth_texture, 0, + &sbox); +} diff --git a/src/gallium/drivers/r600/r600_buffer.c b/src/gallium/drivers/r600/r600_buffer.c index 469c8195fe9..0c5d7133c7a 100644 --- a/src/gallium/drivers/r600/r600_buffer.c +++ b/src/gallium/drivers/r600/r600_buffer.c @@ -29,85 +29,58 @@ #include <util/u_math.h> #include <util/u_inlines.h> #include <util/u_memory.h> +#include "util/u_upload_mgr.h" + #include "state_tracker/drm_driver.h" + #include <xf86drm.h> #include "radeon_drm.h" + #include "r600.h" #include "r600_pipe.h" -extern struct u_resource_vtbl r600_buffer_vtbl; - - -struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, - const struct pipe_resource *templ) -{ - struct r600_resource_buffer *rbuffer; - struct r600_bo *bo; - /* XXX We probably want a different alignment for buffers and textures. */ - unsigned alignment = 4096; - - rbuffer = CALLOC_STRUCT(r600_resource_buffer); - if (rbuffer == NULL) - return NULL; - - rbuffer->magic = R600_BUFFER_MAGIC; - rbuffer->user_buffer = NULL; - rbuffer->r.base.b = *templ; - pipe_reference_init(&rbuffer->r.base.b.reference, 1); - rbuffer->r.base.b.screen = screen; - rbuffer->r.base.vtbl = &r600_buffer_vtbl; - rbuffer->r.size = rbuffer->r.base.b.width0; - rbuffer->r.bo_size = rbuffer->r.size; - rbuffer->uploaded = FALSE; - bo = r600_bo((struct radeon*)screen->winsys, rbuffer->r.base.b.width0, alignment, rbuffer->r.base.b.bind, rbuffer->r.base.b.usage); - if (bo == NULL) { - FREE(rbuffer); - return NULL; - } - rbuffer->r.bo = bo; - return &rbuffer->r.base.b; -} - -struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen, - void *ptr, unsigned bytes, - unsigned bind) -{ - struct r600_resource_buffer *rbuffer; - - rbuffer = CALLOC_STRUCT(r600_resource_buffer); - if (rbuffer == NULL) - return NULL; - - rbuffer->magic = R600_BUFFER_MAGIC; - pipe_reference_init(&rbuffer->r.base.b.reference, 1); - rbuffer->r.base.vtbl = &r600_buffer_vtbl; - rbuffer->r.base.b.screen = screen; - rbuffer->r.base.b.target = PIPE_BUFFER; - rbuffer->r.base.b.format = PIPE_FORMAT_R8_UNORM; - rbuffer->r.base.b.usage = PIPE_USAGE_IMMUTABLE; - rbuffer->r.base.b.bind = bind; - rbuffer->r.base.b.width0 = bytes; - rbuffer->r.base.b.height0 = 1; - rbuffer->r.base.b.depth0 = 1; - rbuffer->r.base.b.array_size = 1; - rbuffer->r.base.b.flags = 0; - rbuffer->r.bo = NULL; - rbuffer->r.bo_size = 0; - rbuffer->user_buffer = ptr; - rbuffer->uploaded = FALSE; - return &rbuffer->r.base.b; -} - static void r600_buffer_destroy(struct pipe_screen *screen, struct pipe_resource *buf) { + struct r600_screen *rscreen = (struct r600_screen*)screen; struct 
r600_resource_buffer *rbuffer = r600_buffer(buf); if (rbuffer->r.bo) { r600_bo_reference((struct radeon*)screen->winsys, &rbuffer->r.bo, NULL); } rbuffer->r.bo = NULL; - FREE(rbuffer); + util_slab_free(&rscreen->pool_buffers, rbuffer); +} + +static unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context, + struct pipe_resource *buf, + unsigned level, int layer) +{ + /* FIXME */ + return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE; +} + +static struct pipe_transfer *r600_get_transfer(struct pipe_context *ctx, + struct pipe_resource *resource, + unsigned level, + unsigned usage, + const struct pipe_box *box) +{ + struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx; + struct pipe_transfer *transfer = util_slab_alloc(&rctx->pool_transfers); + + transfer->resource = resource; + transfer->level = level; + transfer->usage = usage; + transfer->box = *box; + transfer->stride = 0; + transfer->layer_stride = 0; + transfer->data = NULL; + + /* Note strides are zero, this is ok for buffers, but not for + * textures 2d & higher at least. + */ + return transfer; } static void *r600_buffer_transfer_map(struct pipe_context *pipe, @@ -117,8 +90,8 @@ static void *r600_buffer_transfer_map(struct pipe_context *pipe, int write = 0; uint8_t *data; - if (rbuffer->user_buffer) - return (uint8_t*)rbuffer->user_buffer + transfer->box.x; + if (rbuffer->r.b.user_ptr) + return (uint8_t*)rbuffer->r.b.user_ptr + transfer->box.x; if (transfer->usage & PIPE_TRANSFER_DONTBLOCK) { /* FIXME */ @@ -138,7 +111,7 @@ static void r600_buffer_transfer_unmap(struct pipe_context *pipe, { struct r600_resource_buffer *rbuffer = r600_buffer(transfer->resource); - if (rbuffer->user_buffer) + if (rbuffer->r.b.user_ptr) return; if (rbuffer->r.bo) @@ -151,128 +124,163 @@ static void r600_buffer_transfer_flush_region(struct pipe_context *pipe, { } -unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context, - struct pipe_resource *buf, - unsigned level, int layer) +static void r600_transfer_destroy(struct pipe_context *ctx, + struct pipe_transfer *transfer) { - /* FIXME */ - return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE; + struct r600_pipe_context *rctx = (struct r600_pipe_context*)ctx; + util_slab_free(&rctx->pool_transfers, transfer); } -struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen, - struct winsys_handle *whandle) +static void r600_buffer_transfer_inline_write(struct pipe_context *pipe, + struct pipe_resource *resource, + unsigned level, + unsigned usage, + const struct pipe_box *box, + const void *data, + unsigned stride, + unsigned layer_stride) { - struct radeon *rw = (struct radeon*)screen->winsys; - struct r600_resource *rbuffer; - struct r600_bo *bo = NULL; + struct radeon *ws = (struct radeon*)pipe->winsys; + struct r600_resource_buffer *rbuffer = r600_buffer(resource); + uint8_t *map = NULL; - bo = r600_bo_handle(rw, whandle->handle, NULL); - if (bo == NULL) { - return NULL; - } + assert(rbuffer->r.b.user_ptr == NULL); - rbuffer = CALLOC_STRUCT(r600_resource); - if (rbuffer == NULL) { - r600_bo_reference(rw, &bo, NULL); - return NULL; - } + map = r600_bo_map(ws, rbuffer->r.bo, + PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD | usage, + pipe); - pipe_reference_init(&rbuffer->base.b.reference, 1); - rbuffer->base.b.target = PIPE_BUFFER; - rbuffer->base.b.screen = screen; - rbuffer->base.vtbl = &r600_buffer_vtbl; - rbuffer->bo = bo; - return &rbuffer->base.b; + memcpy(map + box->x, data, box->width); + + if (rbuffer->r.bo) + r600_bo_unmap(ws, 
rbuffer->r.bo); } -struct u_resource_vtbl r600_buffer_vtbl = +static const struct u_resource_vtbl r600_buffer_vtbl = { u_default_resource_get_handle, /* get_handle */ r600_buffer_destroy, /* resource_destroy */ r600_buffer_is_referenced_by_cs, /* is_buffer_referenced */ - u_default_get_transfer, /* get_transfer */ - u_default_transfer_destroy, /* transfer_destroy */ + r600_get_transfer, /* get_transfer */ + r600_transfer_destroy, /* transfer_destroy */ r600_buffer_transfer_map, /* transfer_map */ r600_buffer_transfer_flush_region, /* transfer_flush_region */ r600_buffer_transfer_unmap, /* transfer_unmap */ - u_default_transfer_inline_write /* transfer_inline_write */ + r600_buffer_transfer_inline_write /* transfer_inline_write */ }; -int r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw) +struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, + const struct pipe_resource *templ) { - if (r600_buffer_is_user_buffer(draw->index_buffer)) { - struct r600_resource_buffer *rbuffer = r600_buffer(draw->index_buffer); - unsigned upload_offset; - int ret = 0; - - ret = r600_upload_buffer(rctx->rupload_vb, - draw->index_buffer_offset, - draw->count * draw->index_size, - rbuffer, - &upload_offset, - &rbuffer->r.bo_size, - &rbuffer->r.bo); - if (ret) - return ret; - rbuffer->uploaded = TRUE; - draw->index_buffer_offset = upload_offset; + struct r600_screen *rscreen = (struct r600_screen*)screen; + struct r600_resource_buffer *rbuffer; + struct r600_bo *bo; + /* XXX We probably want a different alignment for buffers and textures. */ + unsigned alignment = 4096; + + rbuffer = util_slab_alloc(&rscreen->pool_buffers); + + rbuffer->magic = R600_BUFFER_MAGIC; + rbuffer->r.b.b.b = *templ; + pipe_reference_init(&rbuffer->r.b.b.b.reference, 1); + rbuffer->r.b.b.b.screen = screen; + rbuffer->r.b.b.vtbl = &r600_buffer_vtbl; + rbuffer->r.b.user_ptr = NULL; + rbuffer->r.size = rbuffer->r.b.b.b.width0; + rbuffer->r.bo_size = rbuffer->r.size; + + bo = r600_bo((struct radeon*)screen->winsys, + rbuffer->r.b.b.b.width0, + alignment, rbuffer->r.b.b.b.bind, + rbuffer->r.b.b.b.usage); + + if (bo == NULL) { + FREE(rbuffer); + return NULL; } + rbuffer->r.bo = bo; + return &rbuffer->r.b.b.b; +} + +struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen, + void *ptr, unsigned bytes, + unsigned bind) +{ + struct r600_screen *rscreen = (struct r600_screen*)screen; + struct r600_resource_buffer *rbuffer; - return 0; + rbuffer = util_slab_alloc(&rscreen->pool_buffers); + + rbuffer->magic = R600_BUFFER_MAGIC; + pipe_reference_init(&rbuffer->r.b.b.b.reference, 1); + rbuffer->r.b.b.vtbl = &r600_buffer_vtbl; + rbuffer->r.b.b.b.screen = screen; + rbuffer->r.b.b.b.target = PIPE_BUFFER; + rbuffer->r.b.b.b.format = PIPE_FORMAT_R8_UNORM; + rbuffer->r.b.b.b.usage = PIPE_USAGE_IMMUTABLE; + rbuffer->r.b.b.b.bind = bind; + rbuffer->r.b.b.b.width0 = bytes; + rbuffer->r.b.b.b.height0 = 1; + rbuffer->r.b.b.b.depth0 = 1; + rbuffer->r.b.b.b.array_size = 1; + rbuffer->r.b.b.b.flags = 0; + rbuffer->r.b.user_ptr = ptr; + rbuffer->r.bo = NULL; + rbuffer->r.bo_size = 0; + return &rbuffer->r.b.b.b; } -int r600_upload_user_buffers(struct r600_pipe_context *rctx) +struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen, + struct winsys_handle *whandle) { - enum pipe_error ret = PIPE_OK; - int i, nr; - - nr = rctx->vertex_elements->count; - nr = rctx->nvertex_buffer; - - for (i = 0; i < nr; i++) { - struct pipe_vertex_buffer *vb = &rctx->vertex_buffer[i]; - - if 
(r600_buffer_is_user_buffer(vb->buffer)) { - struct r600_resource_buffer *rbuffer = r600_buffer(vb->buffer); - unsigned upload_offset; - - ret = r600_upload_buffer(rctx->rupload_vb, - 0, vb->buffer->width0, - rbuffer, - &upload_offset, - &rbuffer->r.bo_size, - &rbuffer->r.bo); - if (ret) - return ret; - rbuffer->uploaded = TRUE; - vb->buffer_offset = upload_offset; - } + struct radeon *rw = (struct radeon*)screen->winsys; + struct r600_resource *rbuffer; + struct r600_bo *bo = NULL; + + bo = r600_bo_handle(rw, whandle->handle, NULL); + if (bo == NULL) { + return NULL; + } + + rbuffer = CALLOC_STRUCT(r600_resource); + if (rbuffer == NULL) { + r600_bo_reference(rw, &bo, NULL); + return NULL; } - return ret; + + pipe_reference_init(&rbuffer->b.b.b.reference, 1); + rbuffer->b.b.b.target = PIPE_BUFFER; + rbuffer->b.b.b.screen = screen; + rbuffer->b.b.vtbl = &r600_buffer_vtbl; + rbuffer->bo = bo; + return &rbuffer->b.b.b; } +void r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw) +{ + struct r600_resource_buffer *rbuffer = r600_buffer(draw->index_buffer); + boolean flushed; + + u_upload_data(rctx->vbuf_mgr->uploader, 0, + draw->info.count * draw->index_size, + rbuffer->r.b.user_ptr, + &draw->index_buffer_offset, + &draw->index_buffer, &flushed); +} -int r600_upload_const_buffer(struct r600_pipe_context *rctx, struct pipe_resource *cbuffer, +void r600_upload_const_buffer(struct r600_pipe_context *rctx, struct r600_resource_buffer **rbuffer, uint32_t *const_offset) { - if (r600_buffer_is_user_buffer(cbuffer)) { - struct r600_resource_buffer *rbuffer = r600_buffer(cbuffer); - unsigned upload_offset; - int ret = 0; - - ret = r600_upload_buffer(rctx->rupload_const, - 0, cbuffer->width0, - rbuffer, - &upload_offset, - &rbuffer->r.bo_size, - &rbuffer->r.bo); - if (ret) - return ret; - rbuffer->uploaded = TRUE; - *const_offset = upload_offset; - return 0; - } + if ((*rbuffer)->r.b.user_ptr) { + uint8_t *ptr = (*rbuffer)->r.b.user_ptr; + unsigned size = (*rbuffer)->r.b.b.b.width0; + boolean flushed; + + *rbuffer = NULL; - *const_offset = 0; - return 0; + u_upload_data(rctx->vbuf_mgr->uploader, 0, size, ptr, const_offset, + (struct pipe_resource**)rbuffer, &flushed); + } else { + *const_offset = 0; + } } diff --git a/src/gallium/drivers/r600/r600_pipe.c b/src/gallium/drivers/r600/r600_pipe.c index 68b625cc3b4..34094001b75 100644 --- a/src/gallium/drivers/r600/r600_pipe.c +++ b/src/gallium/drivers/r600/r600_pipe.c @@ -30,11 +30,13 @@ #include <tgsi/tgsi_util.h> #include <util/u_blitter.h> #include <util/u_double_list.h> +#include <util/u_format_s3tc.h> #include <util/u_transfer.h> #include <util/u_surface.h> #include <util/u_pack_color.h> #include <util/u_memory.h> #include <util/u_inlines.h> +#include "util/u_upload_mgr.h" #include <pipebuffer/pb_buffer.h> #include "r600.h" #include "r600d.h" @@ -69,8 +71,30 @@ static void r600_flush(struct pipe_context *ctx, unsigned flags, #endif r600_context_flush(&rctx->ctx); - r600_upload_flush(rctx->rupload_vb); - r600_upload_flush(rctx->rupload_const); + /* XXX This shouldn't be really necessary, but removing it breaks some tests. + * Needless buffer reallocations may significantly increase memory consumption, + * so getting rid of this call is important. 
*/ + u_upload_flush(rctx->vbuf_mgr->uploader); +} + +static void r600_update_num_contexts(struct r600_screen *rscreen, + int diff) +{ + pipe_mutex_lock(rscreen->mutex_num_contexts); + if (diff > 0) { + rscreen->num_contexts++; + + if (rscreen->num_contexts > 1) + util_slab_set_thread_safety(&rscreen->pool_buffers, + UTIL_SLAB_MULTITHREADED); + } else { + rscreen->num_contexts--; + + if (rscreen->num_contexts <= 1) + util_slab_set_thread_safety(&rscreen->pool_buffers, + UTIL_SLAB_SINGLETHREADED); + } + pipe_mutex_unlock(rscreen->mutex_num_contexts); } static void r600_destroy_context(struct pipe_context *context) @@ -79,8 +103,6 @@ static void r600_destroy_context(struct pipe_context *context) rctx->context.delete_depth_stencil_alpha_state(&rctx->context, rctx->custom_dsa_flush); - r600_end_vertex_translate(rctx); - r600_context_fini(&rctx->ctx); util_blitter_destroy(rctx->blitter); @@ -89,14 +111,11 @@ static void r600_destroy_context(struct pipe_context *context) free(rctx->states[i]); } - r600_upload_destroy(rctx->rupload_vb); - r600_upload_destroy(rctx->rupload_const); + u_vbuf_mgr_destroy(rctx->vbuf_mgr); + util_slab_destroy(&rctx->pool_transfers); - if (rctx->tran.translate_cache) - translate_cache_destroy(rctx->tran.translate_cache); + r600_update_num_contexts(rctx->screen, -1); - FREE(rctx->ps_resource); - FREE(rctx->vs_resource); FREE(rctx); } @@ -108,6 +127,9 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void if (rctx == NULL) return NULL; + + r600_update_num_contexts(rscreen, 1); + rctx->context.winsys = rscreen->screen.winsys; rctx->context.screen = screen; rctx->context.priv = priv; @@ -123,6 +145,7 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void r600_init_query_functions(rctx); r600_init_context_resource_functions(rctx); r600_init_surface_functions(rctx); + rctx->context.draw_vbo = r600_draw_vbo; switch (r600_get_family(rctx->radeon)) { case CHIP_R600: @@ -137,7 +160,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: - rctx->context.draw_vbo = r600_draw_vbo; r600_init_state_functions(rctx); if (r600_context_init(&rctx->ctx, rctx->radeon)) { r600_destroy_context(&rctx->context); @@ -154,7 +176,6 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: - rctx->context.draw_vbo = evergreen_draw; evergreen_init_state_functions(rctx); if (evergreen_context_init(&rctx->ctx, rctx->radeon)) { r600_destroy_context(&rctx->context); @@ -168,39 +189,23 @@ static struct pipe_context *r600_create_context(struct pipe_screen *screen, void return NULL; } - rctx->rupload_vb = r600_upload_create(rctx, 128 * 1024, 16); - if (rctx->rupload_vb == NULL) { - r600_destroy_context(&rctx->context); - return NULL; - } + util_slab_create(&rctx->pool_transfers, + sizeof(struct pipe_transfer), 64, + UTIL_SLAB_SINGLETHREADED); - rctx->rupload_const = r600_upload_create(rctx, 128 * 1024, 256); - if (rctx->rupload_const == NULL) { + rctx->vbuf_mgr = u_vbuf_mgr_create(&rctx->context, 1024 * 1024, 256, + PIPE_BIND_VERTEX_BUFFER | + PIPE_BIND_INDEX_BUFFER | + PIPE_BIND_CONSTANT_BUFFER, + U_VERTEX_FETCH_DWORD_ALIGNED); + if (!rctx->vbuf_mgr) { r600_destroy_context(&rctx->context); return NULL; } rctx->blitter = util_blitter_create(&rctx->context); if (rctx->blitter == NULL) { - FREE(rctx); - return NULL; - } - - rctx->tran.translate_cache = translate_cache_create(); - if 
(rctx->tran.translate_cache == NULL) { - FREE(rctx); - return NULL; - } - - rctx->vs_resource = CALLOC(R600_RESOURCE_ARRAY_SIZE, sizeof(struct r600_pipe_state)); - if (!rctx->vs_resource) { - FREE(rctx); - return NULL; - } - - rctx->ps_resource = CALLOC(R600_RESOURCE_ARRAY_SIZE, sizeof(struct r600_pipe_state)); - if (!rctx->ps_resource) { - FREE(rctx); + r600_destroy_context(&rctx->context); return NULL; } @@ -284,13 +289,16 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) return 1; /* Unsupported features (boolean caps). */ - case PIPE_CAP_TIMER_QUERY: case PIPE_CAP_STREAM_OUTPUT: case PIPE_CAP_PRIMITIVE_RESTART: case PIPE_CAP_INDEP_BLEND_FUNC: /* FIXME allow this */ case PIPE_CAP_INSTANCED_DRAWING: return 0; + case PIPE_CAP_ARRAY_TEXTURES: + /* fix once the CS checker upstream is fixed */ + return debug_get_bool_option("R600_ARRAY_TEXTURE", FALSE); + /* Texturing. */ case PIPE_CAP_MAX_TEXTURE_2D_LEVELS: case PIPE_CAP_MAX_TEXTURE_3D_LEVELS: @@ -319,6 +327,10 @@ static int r600_get_param(struct pipe_screen* pscreen, enum pipe_cap param) case PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER: return 0; + /* Timer queries, present when the clock frequency is non zero. */ + case PIPE_CAP_TIMER_QUERY: + return r600_get_clock_crystal_freq(rscreen->radeon) != 0; + default: R600_ERR("r600: unknown param %d\n", param); return 0; @@ -385,7 +397,7 @@ static int r600_get_shader_param(struct pipe_screen* pscreen, unsigned shader, e case PIPE_SHADER_CAP_MAX_CONSTS: return 256; //max native parameters case PIPE_SHADER_CAP_MAX_CONST_BUFFERS: - return 1; + return R600_MAX_CONST_BUFFERS; case PIPE_SHADER_CAP_MAX_PREDS: return 0; /* FIXME */ case PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED: @@ -441,9 +453,14 @@ static boolean r600_is_format_supported(struct pipe_screen* screen, retval |= PIPE_BIND_DEPTH_STENCIL; } - if ((usage & PIPE_BIND_VERTEX_BUFFER) && - r600_is_vertex_format_supported(format)) - retval |= PIPE_BIND_VERTEX_BUFFER; + if (usage & PIPE_BIND_VERTEX_BUFFER) { + struct r600_screen *rscreen = (struct r600_screen *)screen; + enum radeon_family family = r600_get_family(rscreen->radeon); + + if (r600_is_vertex_format_supported(format, family)) { + retval |= PIPE_BIND_VERTEX_BUFFER; + } + } if (usage & PIPE_BIND_TRANSFER_READ) retval |= PIPE_BIND_TRANSFER_READ; @@ -462,6 +479,8 @@ static void r600_destroy_screen(struct pipe_screen* pscreen) radeon_decref(rscreen->radeon); + util_slab_destroy(&rscreen->pool_buffers); + pipe_mutex_destroy(rscreen->mutex_num_contexts); FREE(rscreen); } @@ -489,6 +508,13 @@ struct pipe_screen *r600_screen_create(struct radeon *radeon) r600_init_screen_resource_functions(&rscreen->screen); rscreen->tiling_info = r600_get_tiling_info(radeon); + util_format_s3tc_init(); + + util_slab_create(&rscreen->pool_buffers, + sizeof(struct r600_resource_buffer), 64, + UTIL_SLAB_SINGLETHREADED); + + pipe_mutex_init(rscreen->mutex_num_contexts); return &rscreen->screen; } diff --git a/src/gallium/drivers/r600/r600_pipe.h b/src/gallium/drivers/r600/r600_pipe.h index 7f74fda0daf..8dc1f4ad5c3 100644 --- a/src/gallium/drivers/r600/r600_pipe.h +++ b/src/gallium/drivers/r600/r600_pipe.h @@ -30,12 +30,15 @@ #include <pipe/p_screen.h> #include <pipe/p_context.h> #include <util/u_math.h> -#include "translate/translate_cache.h" +#include "util/u_slab.h" +#include "util/u_vbuf_mgr.h" #include "r600.h" #include "r600_public.h" #include "r600_shader.h" #include "r600_resource.h" +#define R600_MAX_CONST_BUFFERS 1 + enum r600_pipe_state_id { R600_PIPE_STATE_BLEND = 0, 
R600_PIPE_STATE_BLEND_COLOR, @@ -62,6 +65,11 @@ struct r600_screen { struct pipe_screen screen; struct radeon *radeon; struct r600_tiling_info *tiling_info; + struct util_slab_mempool pool_buffers; + unsigned num_contexts; + + /* for thread-safe write accessing to num_contexts */ + pipe_mutex mutex_num_contexts; }; struct r600_pipe_sampler_view { @@ -86,9 +94,7 @@ struct r600_vertex_element { unsigned count; struct pipe_vertex_element elements[PIPE_MAX_ATTRIBS]; - enum pipe_format hw_format[PIPE_MAX_ATTRIBS]; - unsigned hw_format_size[PIPE_MAX_ATTRIBS]; - boolean incompatible_layout; + struct u_vbuf_mgr_elements *vmgr_elements; struct r600_bo *fetch_shader; unsigned fs_size; struct r600_pipe_state rstate; @@ -117,22 +123,9 @@ struct r600_textures_info { unsigned n_samplers; }; -/* vertex buffer translation context, used to translate vertex input that - * hw doesn't natively support, so far only FLOAT64 is unsupported. - */ -struct r600_translate_context { - /* Translate cache for incompatible vertex offset/stride/format fallback. */ - struct translate_cache *translate_cache; - /* The vertex buffer slot containing the translated buffer. */ - unsigned vb_slot; - void *new_velems; -}; - #define R600_CONSTANT_ARRAY_SIZE 256 #define R600_RESOURCE_ARRAY_SIZE 160 -struct r600_upload; - struct r600_pipe_context { struct pipe_context context; struct blitter_context *blitter; @@ -143,43 +136,35 @@ struct r600_pipe_context { struct r600_pipe_state *states[R600_PIPE_NSTATES]; struct r600_context ctx; struct r600_vertex_element *vertex_elements; + struct r600_pipe_state fs_resource[PIPE_MAX_ATTRIBS]; struct pipe_framebuffer_state framebuffer; struct pipe_index_buffer index_buffer; - struct pipe_vertex_buffer vertex_buffer[PIPE_MAX_ATTRIBS]; - unsigned nvertex_buffer; unsigned cb_target_mask; /* for saving when using blitter */ struct pipe_stencil_ref stencil_ref; struct pipe_viewport_state viewport; struct pipe_clip_state clip; - unsigned nvs_resource; - struct r600_pipe_state *vs_resource; - struct r600_pipe_state *ps_resource; struct r600_pipe_state config; struct r600_pipe_shader *ps_shader; struct r600_pipe_shader *vs_shader; struct r600_pipe_state vs_const_buffer; + struct r600_pipe_state vs_const_buffer_resource[R600_MAX_CONST_BUFFERS]; struct r600_pipe_state ps_const_buffer; + struct r600_pipe_state ps_const_buffer_resource[R600_MAX_CONST_BUFFERS]; struct r600_pipe_rasterizer *rasterizer; /* shader information */ unsigned sprite_coord_enable; bool flatshade; - struct r600_upload *rupload_vb; - unsigned any_user_vbs; struct r600_textures_info ps_samplers; - unsigned vb_max_index; - struct r600_translate_context tran; - struct r600_upload *rupload_const; + + struct u_vbuf_mgr *vbuf_mgr; + struct util_slab_mempool pool_transfers; + bool blit; }; struct r600_drawl { + struct pipe_draw_info info; struct pipe_context *ctx; - unsigned mode; - unsigned min_index; - unsigned max_index; - unsigned index_bias; - unsigned start; - unsigned count; unsigned index_size; unsigned index_buffer_offset; struct pipe_resource *index_buffer; @@ -188,16 +173,20 @@ struct r600_drawl { /* evergreen_state.c */ void evergreen_init_state_functions(struct r600_pipe_context *rctx); void evergreen_init_config(struct r600_pipe_context *rctx); -void evergreen_draw(struct pipe_context *ctx, const struct pipe_draw_info *info); void evergreen_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shader *shader); void evergreen_pipe_shader_vs(struct pipe_context *ctx, struct r600_pipe_shader *shader); void 
*evergreen_create_db_flush_dsa(struct r600_pipe_context *rctx); void evergreen_polygon_offset_update(struct r600_pipe_context *rctx); -void evergreen_vertex_buffer_update(struct r600_pipe_context *rctx); +void evergreen_pipe_set_buffer_resource(struct r600_pipe_context *rctx, + struct r600_pipe_state *rstate, + struct r600_resource *rbuffer, + unsigned offset, unsigned stride); /* r600_blit.c */ void r600_init_blit_functions(struct r600_pipe_context *rctx); void r600_blit_uncompress_depth(struct pipe_context *ctx, struct r600_resource_texture *texture); +void r600_blit_push_depth(struct pipe_context *ctx, struct r600_resource_texture *texture); +void r600_flush_depth_textures(struct r600_pipe_context *rctx); /* r600_buffer.c */ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, @@ -205,13 +194,9 @@ struct pipe_resource *r600_buffer_create(struct pipe_screen *screen, struct pipe_resource *r600_user_buffer_create(struct pipe_screen *screen, void *ptr, unsigned bytes, unsigned bind); -unsigned r600_buffer_is_referenced_by_cs(struct pipe_context *context, - struct pipe_resource *buf, - unsigned level, int layer); struct pipe_resource *r600_buffer_from_handle(struct pipe_screen *screen, struct winsys_handle *whandle); -int r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw); -int r600_upload_user_buffers(struct r600_pipe_context *rctx); +void r600_upload_index_buffer(struct r600_pipe_context *rctx, struct r600_drawl *draw); /* r600_query.c */ void r600_init_query_functions(struct r600_pipe_context *rctx); @@ -220,7 +205,6 @@ void r600_init_query_functions(struct r600_pipe_context *rctx); void r600_init_context_resource_functions(struct r600_pipe_context *r600); /* r600_shader.c */ -int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader); int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader, const struct tgsi_token *tokens); void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader *shader); int r600_find_vs_semantic_index(struct r600_shader *vs, @@ -228,11 +212,14 @@ int r600_find_vs_semantic_index(struct r600_shader *vs, /* r600_state.c */ void r600_init_state_functions(struct r600_pipe_context *rctx); -void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info); +void r600_spi_update(struct r600_pipe_context *rctx); void r600_init_config(struct r600_pipe_context *rctx); void *r600_create_db_flush_dsa(struct r600_pipe_context *rctx); void r600_polygon_offset_update(struct r600_pipe_context *rctx); -void r600_vertex_buffer_update(struct r600_pipe_context *rctx); +void r600_pipe_set_buffer_resource(struct r600_pipe_context *rctx, + struct r600_pipe_state *rstate, + struct r600_resource *rbuffer, + unsigned offset, unsigned stride); /* r600_helper.h */ int r600_conv_pipe_prim(unsigned pprim, unsigned *prim); @@ -247,8 +234,6 @@ unsigned r600_texture_get_offset(struct r600_resource_texture *rtex, unsigned level, unsigned layer); /* r600_translate.c */ -void r600_begin_vertex_translate(struct r600_pipe_context *rctx); -void r600_end_vertex_translate(struct r600_pipe_context *rctx); void r600_translate_index_buffer(struct r600_pipe_context *r600, struct pipe_resource **index_buffer, unsigned *index_size, @@ -277,6 +262,9 @@ void r600_bind_ps_shader(struct pipe_context *ctx, void *state); void r600_bind_vs_shader(struct pipe_context *ctx, void *state); void r600_delete_ps_shader(struct pipe_context *ctx, void *state); void r600_delete_vs_shader(struct 
pipe_context *ctx, void *state);
+void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index,
+			      struct pipe_resource *buffer);
+void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info);
 
 /*
  * common helpers
diff --git a/src/gallium/drivers/r600/r600_resource.h b/src/gallium/drivers/r600/r600_resource.h
index 28b3e1e5e40..836e7491f1f 100644
--- a/src/gallium/drivers/r600/r600_resource.h
+++ b/src/gallium/drivers/r600/r600_resource.h
@@ -24,6 +24,7 @@
 #define R600_RESOURCE_H
 
 #include "util/u_transfer.h"
+#include "util/u_vbuf_mgr.h"
 
 /* flag to indicate a resource is to be used as a transfer so should not be tiled */
 #define R600_RESOURCE_FLAG_TRANSFER PIPE_RESOURCE_FLAG_DRV_PRIV
@@ -43,7 +44,7 @@ struct r600_transfer {
  * underlying implementations.
  */
 struct r600_resource {
-	struct u_resource		base;
+	struct u_vbuf_resource		b;
 	struct r600_bo			*bo;
 	u32				size;
 	unsigned			bo_size;
@@ -52,26 +53,31 @@ struct r600_resource {
 struct r600_resource_texture {
 	struct r600_resource		resource;
 	unsigned			offset[PIPE_MAX_TEXTURE_LEVELS];
-	unsigned			pitch_in_bytes[PIPE_MAX_TEXTURE_LEVELS];
-	unsigned			pitch_in_pixels[PIPE_MAX_TEXTURE_LEVELS];
+	unsigned			pitch_in_bytes[PIPE_MAX_TEXTURE_LEVELS];  /* transfer */
+	unsigned			pitch_in_blocks[PIPE_MAX_TEXTURE_LEVELS]; /* texture resource */
 	unsigned			layer_size[PIPE_MAX_TEXTURE_LEVELS];
 	unsigned			array_mode[PIPE_MAX_TEXTURE_LEVELS];
 	unsigned			pitch_override;
 	unsigned			size;
-	unsigned			tiled;
 	unsigned			tile_type;
 	unsigned			depth;
-	unsigned			dirty;
+	unsigned			dirty_db;
 	struct r600_resource_texture	*flushed_depth_texture;
+	boolean				is_flushing_texture;
+
+	/* on some cards we have to use integer 64/128-bit types
+	   for s3tc blits, do this until gallium grows int formats */
+	boolean				force_int_type;
 };
 
+#define R600_TEX_IS_TILED(tex, level) ((tex)->array_mode[level] != V_038000_ARRAY_LINEAR_GENERAL && (tex)->array_mode[level] != V_038000_ARRAY_LINEAR_ALIGNED)
+
 #define R600_BUFFER_MAGIC 0xabcd1600
 
+/* XXX this could be removed */
 struct r600_resource_buffer {
 	struct r600_resource		r;
 	uint32_t			magic;
-	void				*user_buffer;
-	bool				uploaded;
 };
 
 struct r600_surface {
@@ -98,14 +104,7 @@ static INLINE struct r600_resource_buffer *r600_buffer(struct pipe_resource *buf
 	return NULL;
 }
 
-static INLINE boolean r600_buffer_is_user_buffer(struct pipe_resource *buffer)
-{
-	if (r600_buffer(buffer)->uploaded)
-		return FALSE;
-	return r600_buffer(buffer)->user_buffer ? TRUE : FALSE;
-}
-
-int r600_texture_depth_flush(struct pipe_context *ctx, struct pipe_resource *texture);
+int r600_texture_depth_flush(struct pipe_context *ctx, struct pipe_resource *texture, boolean just_create);
 
 /* r600_texture.c texture transfer functions. 
*/ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, @@ -121,15 +120,7 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx, struct pipe_transfer* transfer); struct r600_pipe_context; -struct r600_upload *r600_upload_create(struct r600_pipe_context *rctx, - unsigned default_size, - unsigned alignment); -void r600_upload_flush(struct r600_upload *upload); -void r600_upload_destroy(struct r600_upload *upload); -int r600_upload_buffer(struct r600_upload *upload, unsigned offset, - unsigned size, struct r600_resource_buffer *in_buffer, - unsigned *out_offset, unsigned *out_size, - struct r600_bo **out_buffer); - -int r600_upload_const_buffer(struct r600_pipe_context *rctx, struct pipe_resource *cbuffer, uint32_t *offset); + +void r600_upload_const_buffer(struct r600_pipe_context *rctx, struct r600_resource_buffer **rbuffer, uint32_t *offset); + #endif diff --git a/src/gallium/drivers/r600/r600_shader.c b/src/gallium/drivers/r600/r600_shader.c index c982471a04f..240c8f1ffd0 100644 --- a/src/gallium/drivers/r600/r600_shader.c +++ b/src/gallium/drivers/r600/r600_shader.c @@ -28,6 +28,7 @@ #include "r600_pipe.h" #include "r600_asm.h" #include "r600_sq.h" +#include "r600_formats.h" #include "r600_opcodes.h" #include "r600d.h" #include <stdio.h> @@ -175,6 +176,13 @@ static void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shade R_0288CC_SQ_PGM_CF_OFFSET_PS, 0x00000000, 0xFFFFFFFF, NULL); + if (rshader->fs_write_all) { + r600_pipe_state_add_reg(rstate, R_028808_CB_COLOR_CONTROL, + S_028808_MULTIWRITE_ENABLE(1), + S_028808_MULTIWRITE_ENABLE(1), + NULL); + } + if (rshader->uses_kill) { /* only set some bits here, the other bits are set in the dsa state */ r600_pipe_state_add_reg(rstate, @@ -187,7 +195,7 @@ static void r600_pipe_shader_ps(struct pipe_context *ctx, struct r600_pipe_shade 0xFFFFFFFF, NULL); } -int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader) +static int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader) { struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; struct r600_shader *rshader = &shader->shader; @@ -225,12 +233,12 @@ int r600_pipe_shader(struct pipe_context *ctx, struct r600_pipe_shader *shader) return 0; } -int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals); +static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader); + int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *shader, const struct tgsi_token *tokens) { static int dump_shaders = -1; struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - u32 *literals; int r; /* Would like some magic "get_bool_option_once" routine. 
@@ -243,13 +251,12 @@ int r600_pipe_shader_create(struct pipe_context *ctx, struct r600_pipe_shader *s tgsi_dump(tokens, 0); } shader->shader.family = r600_get_family(rctx->radeon); - r = r600_shader_from_tgsi(tokens, &shader->shader, &literals); + r = r600_shader_from_tgsi(tokens, &shader->shader); if (r) { R600_ERR("translation from TGSI failed !\n"); return r; } r = r600_bc_build(&shader->shader.bc); - free(literals); if (r) { R600_ERR("building bytecode failed !\n"); return r; @@ -274,6 +281,15 @@ void r600_pipe_shader_destroy(struct pipe_context *ctx, struct r600_pipe_shader */ struct r600_shader_tgsi_instruction; +struct r600_shader_src { + unsigned sel; + unsigned swizzle[4]; + unsigned neg; + unsigned abs; + unsigned rel; + uint32_t value[4]; +}; + struct r600_shader_ctx { struct tgsi_shader_info info; struct tgsi_parse_context parse; @@ -281,9 +297,11 @@ struct r600_shader_ctx { unsigned type; unsigned file_offset[TGSI_FILE_COUNT]; unsigned temp_reg; + unsigned ar_reg; struct r600_shader_tgsi_instruction *inst_info; struct r600_bc *bc; struct r600_shader *shader; + struct r600_shader_src src[3]; u32 *literals; u32 nliterals; u32 max_driver_temp_used; @@ -492,9 +510,179 @@ static int evergreen_gpr_count(struct r600_shader_ctx *ctx) return ctx->num_interp_gpr; } -int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals) +static void tgsi_src(struct r600_shader_ctx *ctx, + const struct tgsi_full_src_register *tgsi_src, + struct r600_shader_src *r600_src) +{ + memset(r600_src, 0, sizeof(*r600_src)); + r600_src->swizzle[0] = tgsi_src->Register.SwizzleX; + r600_src->swizzle[1] = tgsi_src->Register.SwizzleY; + r600_src->swizzle[2] = tgsi_src->Register.SwizzleZ; + r600_src->swizzle[3] = tgsi_src->Register.SwizzleW; + r600_src->neg = tgsi_src->Register.Negate; + r600_src->abs = tgsi_src->Register.Absolute; + if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) { + int index; + if ((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) && + (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) && + (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) { + + index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX; + r600_bc_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg); + if (r600_src->sel != V_SQ_ALU_SRC_LITERAL) + return; + } + index = tgsi_src->Register.Index; + r600_src->sel = V_SQ_ALU_SRC_LITERAL; + memcpy(r600_src->value, ctx->literals + index * 4, sizeof(r600_src->value)); + } else { + if (tgsi_src->Register.Indirect) + r600_src->rel = V_SQ_REL_RELATIVE; + r600_src->sel = tgsi_src->Register.Index; + r600_src->sel += ctx->file_offset[tgsi_src->Register.File]; + } +} + +static int tgsi_fetch_rel_const(struct r600_shader_ctx *ctx, unsigned int offset, unsigned int dst_reg) +{ + struct r600_bc_vtx vtx; + unsigned int ar_reg; + int r; + + if (offset) { + struct r600_bc_alu alu; + + memset(&alu, 0, sizeof(alu)); + + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD_INT); + alu.src[0].sel = ctx->ar_reg; + + alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; + alu.src[1].value = offset; + + alu.dst.sel = dst_reg; + alu.dst.write = 1; + alu.last = 1; + + if ((r = r600_bc_add_alu(ctx->bc, &alu))) + return r; + + ar_reg = dst_reg; + } else { + ar_reg = ctx->ar_reg; + } + + memset(&vtx, 0, sizeof(vtx)); + vtx.fetch_type = 2; /* VTX_FETCH_NO_INDEX_OFFSET */ + vtx.src_gpr = ar_reg; + vtx.mega_fetch_count = 16; + vtx.dst_gpr = dst_reg; + vtx.dst_sel_x = 0; /* SEL_X */ + vtx.dst_sel_y = 1; /* SEL_Y */ + 
vtx.dst_sel_z = 2; /* SEL_Z */ + vtx.dst_sel_w = 3; /* SEL_W */ + vtx.data_format = FMT_32_32_32_32_FLOAT; + vtx.num_format_all = 2; /* NUM_FORMAT_SCALED */ + vtx.format_comp_all = 1; /* FORMAT_COMP_SIGNED */ + vtx.srf_mode_all = 1; /* SRF_MODE_NO_ZERO */ + + if ((r = r600_bc_add_vtx(ctx->bc, &vtx))) + return r; + + return 0; +} + +static int tgsi_split_constant(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bc_alu alu; + int i, j, k, nconst, r; + + for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) { + if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) { + nconst++; + } + tgsi_src(ctx, &inst->Src[i], &ctx->src[i]); + } + for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) { + if (inst->Src[i].Register.File != TGSI_FILE_CONSTANT) { + continue; + } + + if (ctx->src[i].rel) { + int treg = r600_get_temp(ctx); + if ((r = tgsi_fetch_rel_const(ctx, ctx->src[i].sel - 512, treg))) + return r; + + ctx->src[i].sel = treg; + ctx->src[i].rel = 0; + j--; + } else if (j > 0) { + int treg = r600_get_temp(ctx); + for (k = 0; k < 4; k++) { + memset(&alu, 0, sizeof(struct r600_bc_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); + alu.src[0].sel = ctx->src[i].sel; + alu.src[0].chan = k; + alu.src[0].rel = ctx->src[i].rel; + alu.dst.sel = treg; + alu.dst.chan = k; + alu.dst.write = 1; + if (k == 3) + alu.last = 1; + r = r600_bc_add_alu(ctx->bc, &alu); + if (r) + return r; + } + ctx->src[i].sel = treg; + ctx->src[i].rel =0; + j--; + } + } + return 0; +} + +/* need to move any immediate into a temp - for trig functions which use literal for PI stuff */ +static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx) +{ + struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; + struct r600_bc_alu alu; + int i, j, k, nliteral, r; + + for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) { + if (ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) { + nliteral++; + } + } + for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) { + if (j > 0 && ctx->src[i].sel == V_SQ_ALU_SRC_LITERAL) { + int treg = r600_get_temp(ctx); + for (k = 0; k < 4; k++) { + memset(&alu, 0, sizeof(struct r600_bc_alu)); + alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); + alu.src[0].sel = ctx->src[i].sel; + alu.src[0].chan = k; + alu.src[0].value = ctx->src[i].value[k]; + alu.dst.sel = treg; + alu.dst.chan = k; + alu.dst.write = 1; + if (k == 3) + alu.last = 1; + r = r600_bc_add_alu(ctx->bc, &alu); + if (r) + return r; + } + ctx->src[i].sel = treg; + j--; + } + } + return 0; +} + +static int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader) { struct tgsi_full_immediate *immediate; + struct tgsi_full_property *property; struct r600_shader_ctx ctx; struct r600_bc_output output[32]; unsigned noutput; @@ -558,12 +746,13 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s ctx.file_offset[TGSI_FILE_CONSTANT] = 512; ctx.file_offset[TGSI_FILE_IMMEDIATE] = V_SQ_ALU_SRC_LITERAL; - ctx.temp_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] + + ctx.ar_reg = ctx.file_offset[TGSI_FILE_TEMPORARY] + ctx.info.file_count[TGSI_FILE_TEMPORARY]; + ctx.temp_reg = ctx.ar_reg + 1; ctx.nliterals = 0; ctx.literals = NULL; - + shader->fs_write_all = FALSE; while (!tgsi_parse_end_of_tokens(&ctx.parse)) { tgsi_parse_token(&ctx.parse); switch (ctx.parse.FullToken.Token.Type) { @@ -592,7 +781,12 @@ int r600_shader_from_tgsi(const struct tgsi_token 
*tokens, struct r600_shader *s ctx.max_driver_temp_used = 0; /* reserve first tmp for everyone */ r600_get_temp(&ctx); + opcode = ctx.parse.FullToken.FullInstruction.Instruction.Opcode; + if ((r = tgsi_split_constant(&ctx))) + goto out_err; + if ((r = tgsi_split_literal_constant(&ctx))) + goto out_err; if (ctx.bc->chiprev == CHIPREV_EVERGREEN) ctx.inst_info = &eg_shader_tgsi_instruction[opcode]; else @@ -602,6 +796,11 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s goto out_err; break; case TGSI_TOKEN_TYPE_PROPERTY: + property = &ctx.parse.FullToken.FullProperty; + if (property->Property.PropertyName == TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS) { + if (property->u[0].Data == 1) + shader->fs_write_all = TRUE; + } break; default: R600_ERR("unsupported token type %d\n", ctx.parse.FullToken.Token.Type); @@ -619,6 +818,7 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s output[i].swizzle_y = 1; output[i].swizzle_z = 2; output[i].swizzle_w = 3; + output[i].burst_count = 1; output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; output[i].array_base = i - pos0; switch (ctx.type) { @@ -680,6 +880,7 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s output[i].swizzle_y = 1; output[i].swizzle_z = 2; output[i].swizzle_w = 3; + output[i].burst_count = 1; output[i].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PARAM; output[i].array_base = 0; noutput++; @@ -694,6 +895,7 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s output[0].swizzle_y = 7; output[0].swizzle_z = 7; output[0].swizzle_w = 7; + output[0].burst_count = 1; output[0].type = V_SQ_CF_ALLOC_EXPORT_WORD0_SQ_EXPORT_PIXEL; output[0].array_base = 0; noutput++; @@ -704,7 +906,7 @@ int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *s if (r) goto out_err; } - *literals = ctx.literals; + free(ctx.literals); tgsi_parse_free(&ctx.parse); return 0; out_err: @@ -724,40 +926,22 @@ static int tgsi_end(struct r600_shader_ctx *ctx) return 0; } -static int tgsi_src(struct r600_shader_ctx *ctx, - const struct tgsi_full_src_register *tgsi_src, - struct r600_bc_alu_src *r600_src) +static void r600_bc_src(struct r600_bc_alu_src *bc_src, + const struct r600_shader_src *shader_src, + unsigned chan) { - memset(r600_src, 0, sizeof(struct r600_bc_alu_src)); - r600_src->neg = tgsi_src->Register.Negate; - r600_src->abs = tgsi_src->Register.Absolute; - if (tgsi_src->Register.File == TGSI_FILE_IMMEDIATE) { - int index; - if((tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleY) && - (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleZ) && - (tgsi_src->Register.SwizzleX == tgsi_src->Register.SwizzleW)) { - - index = tgsi_src->Register.Index * 4 + tgsi_src->Register.SwizzleX; - r600_bc_special_constants(ctx->literals[index], &r600_src->sel, &r600_src->neg); - if (r600_src->sel != V_SQ_ALU_SRC_LITERAL) - return 0; - } - index = tgsi_src->Register.Index; - r600_src->sel = V_SQ_ALU_SRC_LITERAL; - r600_src->value = ctx->literals + index * 4; - } else { - if (tgsi_src->Register.Indirect) - r600_src->rel = V_SQ_REL_RELATIVE; - r600_src->sel = tgsi_src->Register.Index; - r600_src->sel += ctx->file_offset[tgsi_src->Register.File]; - } - return 0; + bc_src->sel = shader_src->sel; + bc_src->chan = shader_src->swizzle[chan]; + bc_src->neg = shader_src->neg; + bc_src->abs = shader_src->abs; + bc_src->rel = shader_src->rel; + bc_src->value = shader_src->value[bc_src->chan]; } -static int tgsi_dst(struct 
r600_shader_ctx *ctx, - const struct tgsi_full_dst_register *tgsi_dst, - unsigned swizzle, - struct r600_bc_alu_dst *r600_dst) +static void tgsi_dst(struct r600_shader_ctx *ctx, + const struct tgsi_full_dst_register *tgsi_dst, + unsigned swizzle, + struct r600_bc_alu_dst *r600_dst) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; @@ -770,101 +954,6 @@ static int tgsi_dst(struct r600_shader_ctx *ctx, if (inst->Instruction.Saturate) { r600_dst->clamp = 1; } - return 0; -} - -static unsigned tgsi_chan(const struct tgsi_full_src_register *tgsi_src, unsigned swizzle) -{ - switch (swizzle) { - case 0: - return tgsi_src->Register.SwizzleX; - case 1: - return tgsi_src->Register.SwizzleY; - case 2: - return tgsi_src->Register.SwizzleZ; - case 3: - return tgsi_src->Register.SwizzleW; - default: - return 0; - } -} - -static int tgsi_split_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3]) -{ - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu alu; - int i, j, k, nconst, r; - - for (i = 0, nconst = 0; i < inst->Instruction.NumSrcRegs; i++) { - if (inst->Src[i].Register.File == TGSI_FILE_CONSTANT) { - nconst++; - } - r = tgsi_src(ctx, &inst->Src[i], &r600_src[i]); - if (r) { - return r; - } - } - for (i = 0, j = nconst - 1; i < inst->Instruction.NumSrcRegs; i++) { - if (j > 0 && inst->Src[i].Register.File == TGSI_FILE_CONSTANT) { - int treg = r600_get_temp(ctx); - for (k = 0; k < 4; k++) { - memset(&alu, 0, sizeof(struct r600_bc_alu)); - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - alu.src[0].sel = r600_src[i].sel; - alu.src[0].chan = k; - alu.src[0].rel = r600_src[i].rel; - alu.dst.sel = treg; - alu.dst.chan = k; - alu.dst.write = 1; - if (k == 3) - alu.last = 1; - r = r600_bc_add_alu(ctx->bc, &alu); - if (r) - return r; - } - r600_src[i].sel = treg; - r600_src[i].rel =0; - j--; - } - } - return 0; -} - -/* need to move any immediate into a temp - for trig functions which use literal for PI stuff */ -static int tgsi_split_literal_constant(struct r600_shader_ctx *ctx, struct r600_bc_alu_src r600_src[3]) -{ - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu alu; - int i, j, k, nliteral, r; - - for (i = 0, nliteral = 0; i < inst->Instruction.NumSrcRegs; i++) { - if (r600_src[i].sel == V_SQ_ALU_SRC_LITERAL) { - nliteral++; - } - } - for (i = 0, j = nliteral - 1; i < inst->Instruction.NumSrcRegs; i++) { - if (j > 0 && r600_src[i].sel == V_SQ_ALU_SRC_LITERAL) { - int treg = r600_get_temp(ctx); - for (k = 0; k < 4; k++) { - memset(&alu, 0, sizeof(struct r600_bc_alu)); - alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - alu.src[0].sel = r600_src[i].sel; - alu.src[0].chan = k; - alu.src[0].value = r600_src[i].value; - alu.dst.sel = treg; - alu.dst.chan = k; - alu.dst.write = 1; - if (k == 3) - alu.last = 1; - r = r600_bc_add_alu(ctx->bc, &alu); - if (r) - return r; - } - r600_src[i].sel = treg; - j--; - } - } - return 0; } static int tgsi_last_instruction(unsigned writemask) @@ -882,38 +971,25 @@ static int tgsi_last_instruction(unsigned writemask) static int tgsi_op2_s(struct r600_shader_ctx *ctx, int swap) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int i, j, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if 
(r) - return r; for (i = 0; i < lasti + 1; i++) { if (!(inst->Dst[0].Register.WriteMask & (1 << i))) continue; memset(&alu, 0, sizeof(struct r600_bc_alu)); - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.inst = ctx->inst_info->r600_opcode; if (!swap) { for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { - alu.src[j] = r600_src[j]; - alu.src[j].chan = tgsi_chan(&inst->Src[j], i); + r600_bc_src(&alu.src[j], &ctx->src[j], i); } } else { - alu.src[0] = r600_src[1]; - alu.src[0].chan = tgsi_chan(&inst->Src[1], i); - - alu.src[1] = r600_src[0]; - alu.src[1].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[0], &ctx->src[1], i); + r600_bc_src(&alu.src[1], &ctx->src[0], i); } /* handle some special cases */ switch (ctx->inst_info->tgsi_opcode) { @@ -951,24 +1027,15 @@ static int tgsi_op2_swap(struct r600_shader_ctx *ctx) * r700 - normalize by dividing by 2PI * see fdo bug 27901 */ -static int tgsi_setup_trig(struct r600_shader_ctx *ctx, - struct r600_bc_alu_src r600_src[3]) +static int tgsi_setup_trig(struct r600_shader_ctx *ctx) { static float half_inv_pi = 1.0 /(3.1415926535 * 2); static float double_pi = 3.1415926535 * 2; static float neg_pi = -3.1415926535; - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; int r; struct r600_bc_alu alu; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD); alu.is_op3 = 1; @@ -977,12 +1044,11 @@ static int tgsi_setup_trig(struct r600_shader_ctx *ctx, alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.src[1].sel = V_SQ_ALU_SRC_LITERAL; alu.src[1].chan = 0; - alu.src[1].value = (uint32_t *)&half_inv_pi; + alu.src[1].value = *(uint32_t *)&half_inv_pi; alu.src[2].sel = V_SQ_ALU_SRC_0_5; alu.src[2].chan = 0; alu.last = 1; @@ -1021,8 +1087,8 @@ static int tgsi_setup_trig(struct r600_shader_ctx *ctx, alu.src[2].chan = 0; if (ctx->bc->chiprev == CHIPREV_R600) { - alu.src[1].value = (uint32_t *)&double_pi; - alu.src[2].value = (uint32_t *)&neg_pi; + alu.src[1].value = *(uint32_t *)&double_pi; + alu.src[2].value = *(uint32_t *)&neg_pi; } else { alu.src[1].sel = V_SQ_ALU_SRC_1; alu.src[2].sel = V_SQ_ALU_SRC_0_5; @@ -1039,12 +1105,11 @@ static int tgsi_setup_trig(struct r600_shader_ctx *ctx, static int tgsi_trig(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int i, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - r = tgsi_setup_trig(ctx, r600_src); + r = tgsi_setup_trig(ctx); if (r) return r; @@ -1070,9 +1135,7 @@ static int tgsi_trig(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); alu.src[0].sel = ctx->temp_reg; - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); if (i == lasti) alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); @@ -1085,7 +1148,6 @@ static int tgsi_trig(struct r600_shader_ctx *ctx) static int tgsi_scs(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int r; @@ -1093,7 +1155,7 @@ static 
int tgsi_scs(struct r600_shader_ctx *ctx) * X or Y components of the destination vector. */ if (likely(inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_XY)) { - r = tgsi_setup_trig(ctx, r600_src); + r = tgsi_setup_trig(ctx); if (r) return r; } @@ -1102,9 +1164,7 @@ static int tgsi_scs(struct r600_shader_ctx *ctx) if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_X) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_COS); - r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = 0; @@ -1118,9 +1178,7 @@ static int tgsi_scs(struct r600_shader_ctx *ctx) if (inst->Dst[0].Register.WriteMask & TGSI_WRITEMASK_Y) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_SIN); - r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = 0; @@ -1136,9 +1194,7 @@ static int tgsi_scs(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); alu.src[0].sel = V_SQ_ALU_SRC_0; alu.src[0].chan = 0; @@ -1156,9 +1212,7 @@ static int tgsi_scs(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst); alu.src[0].sel = V_SQ_ALU_SRC_1; alu.src[0].chan = 0; @@ -1175,7 +1229,6 @@ static int tgsi_scs(struct r600_shader_ctx *ctx) static int tgsi_kill(struct r600_shader_ctx *ctx) { - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; int i, r; @@ -1191,10 +1244,7 @@ static int tgsi_kill(struct r600_shader_ctx *ctx) alu.src[1].sel = V_SQ_ALU_SRC_1; alu.src[1].neg = 1; } else { - r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]); - if (r) - return r; - alu.src[1].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[1], &ctx->src[0], i); } if (i == 3) { alu.last = 1; @@ -1214,24 +1264,14 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; - struct r600_bc_alu_src r600_src[3]; int r; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - /* dst.x, <- 1.0 */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); alu.src[0].sel = V_SQ_ALU_SRC_1; /*1.0*/ alu.src[0].chan = 0; - r = tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 0, &alu.dst); alu.dst.write = (inst->Dst[0].Register.WriteMask >> 0) & 1; r = r600_bc_add_alu(ctx->bc, &alu); if (r) @@ -1240,12 +1280,10 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) /* dst.y = max(src.x, 0.0) */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MAX); - alu.src[0] = r600_src[0]; + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.src[1].sel = V_SQ_ALU_SRC_0; /*0.0*/ alu.src[1].chan = 0; - r = tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 1, &alu.dst); alu.dst.write = (inst->Dst[0].Register.WriteMask >> 1) & 1; r = r600_bc_add_alu(ctx->bc, &alu); if (r) @@ -1256,9 +1294,7 @@ static int 
tgsi_lit(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); alu.src[0].sel = V_SQ_ALU_SRC_1; alu.src[0].chan = 0; - r = tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 3, &alu.dst); alu.dst.write = (inst->Dst[0].Register.WriteMask >> 3) & 1; alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); @@ -1273,11 +1309,8 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) /* dst.z = log(src.y) */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_CLAMPED); - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 1); - r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); - if (r) - return r; + r600_bc_src(&alu.src[0], &ctx->src[0], 1); + tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); if (r) @@ -1289,13 +1322,11 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) /* tmp.x = amd MUL_LIT(src.w, dst.z, src.x ) */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MUL_LIT); - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 3); + r600_bc_src(&alu.src[0], &ctx->src[0], 3); alu.src[1].sel = sel; alu.src[1].chan = chan; - alu.src[2] = r600_src[0]; - alu.src[2].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[2], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.chan = 0; alu.dst.write = 1; @@ -1310,9 +1341,7 @@ static int tgsi_lit(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = 0; - r = tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], 2, &alu.dst); alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); if (r) @@ -1336,10 +1365,7 @@ static int tgsi_rsq(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIPSQRT_CLAMPED); for (i = 0; i < inst->Instruction.NumSrcRegs; i++) { - r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]); - if (r) - return r; - alu.src[i].chan = tgsi_chan(&inst->Src[i], 0); + r600_bc_src(&alu.src[i], &ctx->src[i], 0); alu.src[i].abs = 1; } alu.dst.sel = ctx->temp_reg; @@ -1363,9 +1389,7 @@ static int tgsi_helper_tempx_replicate(struct r600_shader_ctx *ctx) alu.src[0].sel = ctx->temp_reg; alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); alu.dst.chan = i; - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1; if (i == 3) alu.last = 1; @@ -1385,10 +1409,7 @@ static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = ctx->inst_info->r600_opcode; for (i = 0; i < inst->Instruction.NumSrcRegs; i++) { - r = tgsi_src(ctx, &inst->Src[i], &alu.src[i]); - if (r) - return r; - alu.src[i].chan = tgsi_chan(&inst->Src[i], 0); + r600_bc_src(&alu.src[i], &ctx->src[i], 0); } alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; @@ -1402,17 +1423,13 @@ static int tgsi_trans_srcx_replicate(struct r600_shader_ctx *ctx) static int tgsi_pow(struct r600_shader_ctx *ctx) { - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; int r; /* LOG2(a) */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 
0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; alu.last = 1; @@ -1422,10 +1439,7 @@ static int tgsi_pow(struct r600_shader_ctx *ctx) /* b * LOG2(a) */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); - r = tgsi_src(ctx, &inst->Src[1], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[1], 0); + r600_bc_src(&alu.src[0], &ctx->src[1], 0); alu.src[1].sel = ctx->temp_reg; alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; @@ -1450,16 +1464,8 @@ static int tgsi_ssg(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; - struct r600_bc_alu_src r600_src[3]; int i, r; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - /* tmp = (src > 0 ? 1 : src) */ for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bc_alu)); @@ -1469,13 +1475,10 @@ static int tgsi_ssg(struct r600_shader_ctx *ctx) alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], i); - + r600_bc_src(&alu.src[0], &ctx->src[0], i); alu.src[1].sel = V_SQ_ALU_SRC_1; + r600_bc_src(&alu.src[2], &ctx->src[0], i); - alu.src[2] = r600_src[0]; - alu.src[2].chan = tgsi_chan(&inst->Src[0], i); if (i == 3) alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); @@ -1488,9 +1491,7 @@ static int tgsi_ssg(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGT); alu.is_op3 = 1; - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = i; @@ -1523,9 +1524,7 @@ static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instru alu.dst.chan = i; } else { alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = i; } @@ -1542,17 +1541,10 @@ static int tgsi_helper_copy(struct r600_shader_ctx *ctx, struct tgsi_full_instru static int tgsi_op3(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int i, j, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; for (i = 0; i < lasti + 1; i++) { if (!(inst->Dst[0].Register.WriteMask & (1 << i))) continue; @@ -1560,14 +1552,10 @@ static int tgsi_op3(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = ctx->inst_info->r600_opcode; for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { - alu.src[j] = r600_src[j]; - alu.src[j].chan = tgsi_chan(&inst->Src[j], i); + r600_bc_src(&alu.src[j], &ctx->src[j], i); } - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; alu.dst.write = 1; alu.is_op3 = 1; @@ -1584,28 +1572,17 @@ static int tgsi_op3(struct r600_shader_ctx *ctx) static int tgsi_dp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; 
int i, j, r; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = ctx->inst_info->r600_opcode; for (j = 0; j < inst->Instruction.NumSrcRegs; j++) { - alu.src[j] = r600_src[j]; - alu.src[j].chan = tgsi_chan(&inst->Src[j], i); + r600_bc_src(&alu.src[j], &ctx->src[j], i); } - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; alu.dst.write = (inst->Dst[0].Register.WriteMask >> i) & 1; /* handle some special cases */ @@ -1661,11 +1638,8 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) /* Add perspective divide */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_RECIP_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; + r600_bc_src(&alu.src[0], &ctx->src[0], 3); - alu.src[0].chan = tgsi_chan(&inst->Src[0], 3); alu.dst.sel = ctx->temp_reg; alu.dst.chan = 3; alu.last = 1; @@ -1679,10 +1653,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = 3; - r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]); - if (r) - return r; - alu.src[1].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[1], &ctx->src[0], i); alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; alu.dst.write = 1; @@ -1735,14 +1706,8 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) src2_chan = 0; break; } - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], src_chan); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[1]); - if (r) - return r; - alu.src[1].chan = tgsi_chan(&inst->Src[0], src2_chan); + r600_bc_src(&alu.src[0], &ctx->src[0], src_chan); + r600_bc_src(&alu.src[1], &ctx->src[0], src2_chan); alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; if (i == 3) @@ -1782,7 +1747,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) alu.src[2].sel = V_SQ_ALU_SRC_LITERAL; alu.src[2].chan = 0; - alu.src[2].value = (u32*)&one_point_five; + alu.src[2].value = *(uint32_t *)&one_point_five; alu.dst.sel = ctx->temp_reg; alu.dst.chan = 0; @@ -1803,7 +1768,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) alu.src[2].sel = V_SQ_ALU_SRC_LITERAL; alu.src[2].chan = 0; - alu.src[2].value = (u32*)&one_point_five; + alu.src[2].value = *(uint32_t *)&one_point_five; alu.dst.sel = ctx->temp_reg; alu.dst.chan = 1; @@ -1822,10 +1787,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOV); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[0], &ctx->src[0], i); alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; if (i == 3) @@ -1846,7 +1808,7 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) memset(&tex, 0, sizeof(struct r600_bc_tex)); tex.inst = opcode; tex.sampler_id = ctx->file_offset[inst->Src[1].Register.File] + inst->Src[1].Register.Index; - tex.resource_id = tex.sampler_id; + tex.resource_id = tex.sampler_id + R600_MAX_CONST_BUFFERS; tex.src_gpr = src_gpr; tex.dst_gpr = ctx->file_offset[inst->Dst[0].Register.File] + inst->Dst[0].Register.Index; tex.dst_sel_x = (inst->Dst[0].Register.WriteMask & 1) ? 
0 : 7; @@ -1872,6 +1834,12 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) tex.coord_type_w = 1; } + if (inst->Texture.Texture == TGSI_TEXTURE_1D_ARRAY) { + tex.coord_type_z = 0; + tex.src_sel_z = 1; + } else if (inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY) + tex.coord_type_z = 0; + if (inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D || inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D) tex.src_sel_w = 2; @@ -1886,36 +1854,23 @@ static int tgsi_tex(struct r600_shader_ctx *ctx) static int tgsi_lrp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); unsigned i; int r; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - /* optimize if it's just an equal balance */ - if(r600_src[0].sel == V_SQ_ALU_SRC_0_5) { + if (ctx->src[0].sel == V_SQ_ALU_SRC_0_5) { for (i = 0; i < lasti + 1; i++) { if (!(inst->Dst[0].Register.WriteMask & (1 << i))) continue; memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD); - alu.src[0] = r600_src[1]; - alu.src[0].chan = tgsi_chan(&inst->Src[1], i); - alu.src[1] = r600_src[2]; - alu.src[1].chan = tgsi_chan(&inst->Src[2], i); + r600_bc_src(&alu.src[0], &ctx->src[1], i); + r600_bc_src(&alu.src[1], &ctx->src[2], i); alu.omod = 3; - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; if (i == lasti) { alu.last = 1; @@ -1936,8 +1891,7 @@ static int tgsi_lrp(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_ADD); alu.src[0].sel = V_SQ_ALU_SRC_1; alu.src[0].chan = 0; - alu.src[1] = r600_src[0]; - alu.src[1].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[1], &ctx->src[0], i); alu.src[1].neg = 1; alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; @@ -1959,8 +1913,7 @@ static int tgsi_lrp(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); alu.src[0].sel = ctx->temp_reg; alu.src[0].chan = i; - alu.src[1] = r600_src[2]; - alu.src[1].chan = tgsi_chan(&inst->Src[2], i); + r600_bc_src(&alu.src[1], &ctx->src[2], i); alu.dst.sel = ctx->temp_reg; alu.dst.chan = i; if (i == lasti) { @@ -1980,17 +1933,12 @@ static int tgsi_lrp(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD); alu.is_op3 = 1; - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], i); - alu.src[1] = r600_src[1]; - alu.src[1].chan = tgsi_chan(&inst->Src[1], i); + r600_bc_src(&alu.src[0], &ctx->src[0], i); + r600_bc_src(&alu.src[1], &ctx->src[1], i); alu.src[2].sel = ctx->temp_reg; alu.src[2].chan = i; - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; if (i == lasti) { alu.last = 1; @@ -2005,37 +1953,20 @@ static int tgsi_lrp(struct r600_shader_ctx *ctx) static int tgsi_cmp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; int i, r; int lasti = tgsi_last_instruction(inst->Dst[0].Register.WriteMask); - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - for (i = 0; i < lasti + 1; 
i++) { if (!(inst->Dst[0].Register.WriteMask & (1 << i))) continue; memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_CNDGE); - alu.src[0] = r600_src[0]; - alu.src[0].chan = tgsi_chan(&inst->Src[0], i); - - alu.src[1] = r600_src[2]; - alu.src[1].chan = tgsi_chan(&inst->Src[2], i); - - alu.src[2] = r600_src[1]; - alu.src[2].chan = tgsi_chan(&inst->Src[1], i); - - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - + r600_bc_src(&alu.src[0], &ctx->src[0], i); + r600_bc_src(&alu.src[1], &ctx->src[2], i); + r600_bc_src(&alu.src[2], &ctx->src[1], i); + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; alu.dst.write = 1; alu.is_op3 = 1; @@ -2051,7 +1982,6 @@ static int tgsi_cmp(struct r600_shader_ctx *ctx) static int tgsi_xpd(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3]; struct r600_bc_alu alu; uint32_t use_temp = 0; int i, r; @@ -2059,43 +1989,34 @@ static int tgsi_xpd(struct r600_shader_ctx *ctx) if (inst->Dst[0].Register.WriteMask != 0xf) use_temp = 1; - r = tgsi_split_constant(ctx, r600_src); - if (r) - return r; - r = tgsi_split_literal_constant(ctx, r600_src); - if (r) - return r; - for (i = 0; i < 4; i++) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); - alu.src[0] = r600_src[0]; switch (i) { case 0: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 2); + r600_bc_src(&alu.src[0], &ctx->src[0], 2); break; case 1: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); break; case 2: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 1); + r600_bc_src(&alu.src[0], &ctx->src[0], 1); break; case 3: alu.src[0].sel = V_SQ_ALU_SRC_0; alu.src[0].chan = i; } - alu.src[1] = r600_src[1]; switch (i) { case 0: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 1); + r600_bc_src(&alu.src[1], &ctx->src[1], 1); break; case 1: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 2); + r600_bc_src(&alu.src[1], &ctx->src[1], 2); break; case 2: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 0); + r600_bc_src(&alu.src[1], &ctx->src[1], 0); break; case 3: alu.src[1].sel = V_SQ_ALU_SRC_0; @@ -2117,32 +2038,30 @@ static int tgsi_xpd(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP3_SQ_OP3_INST_MULADD); - alu.src[0] = r600_src[0]; switch (i) { case 0: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 1); + r600_bc_src(&alu.src[0], &ctx->src[0], 1); break; case 1: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 2); + r600_bc_src(&alu.src[0], &ctx->src[0], 2); break; case 2: - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); break; case 3: alu.src[0].sel = V_SQ_ALU_SRC_0; alu.src[0].chan = i; } - alu.src[1] = r600_src[1]; switch (i) { case 0: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 2); + r600_bc_src(&alu.src[1], &ctx->src[1], 2); break; case 1: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 0); + r600_bc_src(&alu.src[1], &ctx->src[1], 0); break; case 2: - alu.src[1].chan = tgsi_chan(&inst->Src[1], 1); + r600_bc_src(&alu.src[1], &ctx->src[1], 1); break; case 3: alu.src[1].sel = V_SQ_ALU_SRC_0; @@ -2155,11 +2074,8 @@ static int tgsi_xpd(struct r600_shader_ctx *ctx) if (use_temp) alu.dst.sel = ctx->temp_reg; - else { - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; - } + else + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); alu.dst.chan = i; alu.dst.write = 1; 
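Reading aid: the hunks above replace the per-operand tgsi_split_constant()/tgsi_split_literal_constant()/tgsi_src()/tgsi_chan() boilerplate with a single r600_bc_src() helper that copies a prepared source and picks the channel; the emitted ALU sequences keep the same meaning. As a sketch only (plain reference C, not driver code), this is the per-channel math those LRP, CMP and XPD sequences are built to reproduce — LRP via ADD with the halving output modifier when src0 is the inline 0.5 constant and ADD+MUL+MULADD otherwise, CMP via CNDGE with swapped second/third operands, XPD via a MUL with one channel rotation followed by a MULADD with the other:

#include <stdio.h>

/* TGSI LRP: dst = src0 * src1 + (1 - src0) * src2. */
static float ref_lrp(float s0, float s1, float s2)
{
    return s0 * s1 + (1.0f - s0) * s2;
}

/* TGSI CMP: dst = (src0 < 0) ? src1 : src2.
 * CNDGE computes (a >= 0) ? b : c, so the backend feeds it
 * (src0, src2, src1) to get the same result. */
static float ref_cmp(float s0, float s1, float s2)
{
    return (s0 < 0.0f) ? s1 : s2;
}

/* TGSI XPD: cross product on xyz. The fourth channel is fed all-zero
 * sources in both passes above, so it ends up as 0 here as well. */
static void ref_xpd(const float a[3], const float b[3], float dst[4])
{
    dst[0] = a[1] * b[2] - a[2] * b[1];
    dst[1] = a[2] * b[0] - a[0] * b[2];
    dst[2] = a[0] * b[1] - a[1] * b[0];
    dst[3] = 0.0f;
}

int main(void)
{
    const float a[3] = { 1.0f, 0.0f, 0.0f };
    const float b[3] = { 0.0f, 1.0f, 0.0f };
    float x[4];

    ref_xpd(a, b, x);
    printf("lrp=%.1f cmp=%.1f xpd=(%.1f %.1f %.1f %.1f)\n",
           ref_lrp(0.5f, 2.0f, 4.0f), ref_cmp(-1.0f, 7.0f, 9.0f),
           x[0], x[1], x[2], x[3]);
    return 0;
}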
alu.is_op3 = 1; @@ -2177,7 +2093,6 @@ static int tgsi_xpd(struct r600_shader_ctx *ctx) static int tgsi_exp(struct r600_shader_ctx *ctx) { struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; - struct r600_bc_alu_src r600_src[3] = { { 0 } }; struct r600_bc_alu alu; int r; @@ -2186,11 +2101,7 @@ static int tgsi_exp(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.chan = 0; @@ -2218,11 +2129,7 @@ static int tgsi_exp(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FRACT); - alu.src[0] = r600_src[0]; - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; // r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); @@ -2242,10 +2149,7 @@ static int tgsi_exp(struct r600_shader_ctx *ctx) if ((inst->Dst[0].Register.WriteMask >> 2) & 0x1) { memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_EXP_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; @@ -2288,11 +2192,7 @@ static int tgsi_log(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.chan = 0; @@ -2321,11 +2221,7 @@ static int tgsi_log(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.chan = 1; @@ -2385,11 +2281,7 @@ static int tgsi_log(struct r600_shader_ctx *ctx) alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.src[1].sel = ctx->temp_reg; alu.src[1].chan = 1; @@ -2409,11 +2301,7 @@ static int tgsi_log(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_LOG_IEEE); - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.dst.sel = ctx->temp_reg; alu.dst.write = 1; @@ -2451,6 +2339,7 @@ static int tgsi_eg_arl(struct r600_shader_ctx *ctx) struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; int r; + memset(&alu, 0, sizeof(struct r600_bc_alu)); switch (inst->Instruction.Opcode) { @@ -2465,23 +2354,23 @@ static int tgsi_eg_arl(struct r600_shader_ctx *ctx) return -1; } - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = 
tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.last = 1; - alu.dst.chan = 0; - alu.dst.sel = ctx->temp_reg; + alu.dst.sel = ctx->ar_reg; alu.dst.write = 1; r = r600_bc_add_alu(ctx->bc, &alu); if (r) return r; + + /* TODO: Note that the MOVA can be avoided if we never use AR for + * indexing non-CB registers in the current ALU clause. Similarly, we + * need to load AR from ar_reg again if we started a new clause + * between ARL and AR usage. The easy way to do that is to remove + * the MOVA here, and load it for the first AR access after ar_reg + * has been modified in each clause. */ memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = EG_V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT; - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].sel = ctx->temp_reg; + alu.src[0].sel = ctx->ar_reg; alu.src[0].chan = 0; alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); @@ -2495,26 +2384,48 @@ static int tgsi_r600_arl(struct r600_shader_ctx *ctx) struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; int r; - memset(&alu, 0, sizeof(struct r600_bc_alu)); switch (inst->Instruction.Opcode) { case TGSI_OPCODE_ARL: - alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_FLOOR; + memset(&alu, 0, sizeof(alu)); + alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLOOR; + r600_bc_src(&alu.src[0], &ctx->src[0], 0); + alu.dst.sel = ctx->ar_reg; + alu.dst.write = 1; + alu.last = 1; + + if ((r = r600_bc_add_alu(ctx->bc, &alu))) + return r; + + memset(&alu, 0, sizeof(alu)); + alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT; + alu.src[0].sel = ctx->ar_reg; + alu.dst.sel = ctx->ar_reg; + alu.dst.write = 1; + alu.last = 1; + + if ((r = r600_bc_add_alu(ctx->bc, &alu))) + return r; break; case TGSI_OPCODE_ARR: - alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA; + memset(&alu, 0, sizeof(alu)); + alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_FLT_TO_INT; + r600_bc_src(&alu.src[0], &ctx->src[0], 0); + alu.dst.sel = ctx->ar_reg; + alu.dst.write = 1; + alu.last = 1; + + if ((r = r600_bc_add_alu(ctx->bc, &alu))) + return r; break; default: assert(0); return -1; } - - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); - + memset(&alu, 0, sizeof(alu)); + alu.inst = V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MOVA_INT; + alu.src[0].sel = ctx->ar_reg; alu.last = 1; r = r600_bc_add_alu(ctx->bc, &alu); @@ -2534,26 +2445,18 @@ static int tgsi_opdst(struct r600_shader_ctx *ctx) memset(&alu, 0, sizeof(struct r600_bc_alu)); alu.inst = CTX_INST(V_SQ_ALU_WORD1_OP2_SQ_OP2_INST_MUL); - r = tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); - if (r) - return r; + tgsi_dst(ctx, &inst->Dst[0], i, &alu.dst); if (i == 0 || i == 3) { alu.src[0].sel = V_SQ_ALU_SRC_1; } else { - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], i); + r600_bc_src(&alu.src[0], &ctx->src[0], i); } - if (i == 0 || i == 2) { + if (i == 0 || i == 2) { alu.src[1].sel = V_SQ_ALU_SRC_1; } else { - r = tgsi_src(ctx, &inst->Src[1], &alu.src[1]); - if (r) - return r; - alu.src[1].chan = tgsi_chan(&inst->Src[1], i); + r600_bc_src(&alu.src[1], &ctx->src[1], i); } if (i == 3) alu.last = 1; @@ -2566,7 +2469,6 @@ static int tgsi_opdst(struct r600_shader_ctx *ctx) static int emit_logic_pred(struct r600_shader_ctx *ctx, int opcode) { - struct tgsi_full_instruction *inst = &ctx->parse.FullToken.FullInstruction; struct r600_bc_alu alu; int r; @@ -2578,10 +2480,7 @@ static int 
emit_logic_pred(struct r600_shader_ctx *ctx, int opcode) alu.dst.write = 1; alu.dst.chan = 0; - r = tgsi_src(ctx, &inst->Src[0], &alu.src[0]); - if (r) - return r; - alu.src[0].chan = tgsi_chan(&inst->Src[0], 0); + r600_bc_src(&alu.src[0], &ctx->src[0], 0); alu.src[1].sel = V_SQ_ALU_SRC_0; alu.src[1].chan = 0; diff --git a/src/gallium/drivers/r600/r600_shader.h b/src/gallium/drivers/r600/r600_shader.h index 935dd6fe3ab..8f96ce5085c 100644 --- a/src/gallium/drivers/r600/r600_shader.h +++ b/src/gallium/drivers/r600/r600_shader.h @@ -45,8 +45,7 @@ struct r600_shader { struct r600_shader_io output[32]; enum radeon_family family; boolean uses_kill; + boolean fs_write_all; }; -int r600_shader_from_tgsi(const struct tgsi_token *tokens, struct r600_shader *shader, u32 **literals); - #endif diff --git a/src/gallium/drivers/r600/r600_state.c b/src/gallium/drivers/r600/r600_state.c index de2668cee16..576067ae81e 100644 --- a/src/gallium/drivers/r600/r600_state.c +++ b/src/gallium/drivers/r600/r600_state.c @@ -37,6 +37,7 @@ #include <util/u_memory.h> #include <util/u_inlines.h> #include <util/u_framebuffer.h> +#include "util/u_transfer.h" #include <pipebuffer/pb_buffer.h> #include "r600.h" #include "r600d.h" @@ -94,221 +95,6 @@ void r600_polygon_offset_update(struct r600_pipe_context *rctx) } } -/* FIXME optimize away spi update when it's not needed */ -static void r600_spi_update(struct r600_pipe_context *rctx) -{ - struct r600_pipe_shader *shader = rctx->ps_shader; - struct r600_pipe_state rstate; - struct r600_shader *rshader = &shader->shader; - unsigned i, tmp; - - rstate.nregs = 0; - for (i = 0; i < rshader->ninput; i++) { - tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i)); - if (rshader->input[i].centroid) - tmp |= S_028644_SEL_CENTROID(1); - if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) - tmp |= S_028644_SEL_LINEAR(1); - - if (rshader->input[i].name == TGSI_SEMANTIC_COLOR || - rshader->input[i].name == TGSI_SEMANTIC_BCOLOR || - rshader->input[i].name == TGSI_SEMANTIC_POSITION) { - tmp |= S_028644_FLAT_SHADE(rctx->flatshade); - } - if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && - rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) { - tmp |= S_028644_PT_SPRITE_TEX(1); - } - r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL); - } - r600_context_pipe_state_set(&rctx->ctx, &rstate); -} - -void r600_vertex_buffer_update(struct r600_pipe_context *rctx) -{ - struct r600_pipe_state *rstate; - struct r600_resource *rbuffer; - struct pipe_vertex_buffer *vertex_buffer; - unsigned i, offset; - - /* we don't update until we know vertex elements */ - if (rctx->vertex_elements == NULL || !rctx->nvertex_buffer) - return; - - if (rctx->vertex_elements->incompatible_layout) { - /* translate rebind new vertex elements so - * return once translated - */ - r600_begin_vertex_translate(rctx); - return; - } - - if (rctx->any_user_vbs) { - r600_upload_user_buffers(rctx); - rctx->any_user_vbs = FALSE; - } - - if (rctx->vertex_elements->vbuffer_need_offset) { - /* one resource per vertex elements */ - rctx->nvs_resource = rctx->vertex_elements->count; - } else { - /* bind vertex buffer once */ - rctx->nvs_resource = rctx->nvertex_buffer; - } - - for (i = 0 ; i < rctx->nvs_resource; i++) { - rstate = &rctx->vs_resource[i]; - rstate->id = R600_PIPE_STATE_RESOURCE; - rstate->nregs = 0; - - if (rctx->vertex_elements->vbuffer_need_offset) { - /* one resource per vertex elements */ - unsigned vbuffer_index; - 
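Note on the r600_state.c deletions starting here: r600_spi_update() and r600_vertex_buffer_update() (the latter continues just below) are removed from this file and reappear in r600_state_common.c further down, so R600 and Evergreen share one copy. As a rough, simplified sketch of what the SPI update decides per fragment-shader input — flat shading for colors/position, point-sprite replacement for matching generics, centroid/linear selects only on pre-Evergreen parts — the struct and flag values below are invented for illustration and do not match the real TGSI enums or S_028644_* register encodings:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins, not the real definitions. */
enum sem_name { SEM_COLOR, SEM_BCOLOR, SEM_POSITION, SEM_GENERIC, SEM_OTHER };

struct ps_input {
    enum sem_name name;
    unsigned sid;            /* semantic index, checked against sprite_coord_enable */
    bool centroid;
    bool linear_interp;
};

/* Bit layout made up purely to show the decision logic. */
#define FLAG_FLAT_SHADE   (1u << 0)
#define FLAG_PT_SPRITE    (1u << 1)
#define FLAG_SEL_CENTROID (1u << 2)
#define FLAG_SEL_LINEAR   (1u << 3)

uint32_t spi_input_flags(const struct ps_input *in,
                         bool flatshade,
                         uint32_t sprite_coord_enable,
                         bool is_evergreen)
{
    uint32_t flags = 0;

    /* Colors and position follow the flat-shade rasterizer state. */
    if (in->name == SEM_COLOR || in->name == SEM_BCOLOR ||
        in->name == SEM_POSITION) {
        if (flatshade)
            flags |= FLAG_FLAT_SHADE;
    }

    /* Generic varyings can be replaced by point-sprite coordinates. */
    if (in->name == SEM_GENERIC && (sprite_coord_enable & (1u << in->sid)))
        flags |= FLAG_PT_SPRITE;

    /* Centroid/linear selects are written here only before Evergreen
     * in the relocated version of the function. */
    if (!is_evergreen) {
        if (in->centroid)
            flags |= FLAG_SEL_CENTROID;
        if (in->linear_interp)
            flags |= FLAG_SEL_LINEAR;
    }

    return flags;
}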
vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index; - vertex_buffer = &rctx->vertex_buffer[vbuffer_index]; - rbuffer = (struct r600_resource*)vertex_buffer->buffer; - offset = rctx->vertex_elements->vbuffer_offset[i]; - } else { - /* bind vertex buffer once */ - vertex_buffer = &rctx->vertex_buffer[i]; - rbuffer = (struct r600_resource*)vertex_buffer->buffer; - offset = 0; - } - if (vertex_buffer == NULL || rbuffer == NULL) - continue; - offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo); - - r600_pipe_state_add_reg(rstate, R_038000_RESOURCE0_WORD0, - offset, 0xFFFFFFFF, rbuffer->bo); - r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1, - rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2, - S_038008_STRIDE(vertex_buffer->stride), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5, - 0x00000000, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6, - 0xC0000000, 0xFFFFFFFF, NULL); - r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); - } -} - -static void r600_draw_common(struct r600_drawl *draw) -{ - struct r600_pipe_context *rctx = (struct r600_pipe_context *)draw->ctx; - struct r600_resource *rbuffer; - unsigned prim; - u32 vgt_dma_index_type, vgt_draw_initiator, mask; - struct r600_draw rdraw; - struct r600_pipe_state vgt; - - switch (draw->index_size) { - case 2: - vgt_draw_initiator = 0; - vgt_dma_index_type = 0; - break; - case 4: - vgt_draw_initiator = 0; - vgt_dma_index_type = 1; - break; - case 0: - vgt_draw_initiator = 2; - vgt_dma_index_type = 0; - break; - default: - R600_ERR("unsupported index size %d\n", draw->index_size); - return; - } - if (r600_conv_pipe_prim(draw->mode, &prim)) - return; - if (unlikely(rctx->ps_shader == NULL)) { - R600_ERR("missing vertex shader\n"); - return; - } - if (unlikely(rctx->vs_shader == NULL)) { - R600_ERR("missing vertex shader\n"); - return; - } - /* there should be enough input */ - if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) { - R600_ERR("%d resources provided, expecting %d\n", - rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource); - return; - } - - r600_spi_update(rctx); - - mask = 0; - for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) { - mask |= (0xF << (i * 4)); - } - - vgt.id = R600_PIPE_STATE_VGT; - vgt.nregs = 0; - r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw->index_bias, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw->max_index, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw->min_index, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL); - r600_context_pipe_state_set(&rctx->ctx, &vgt); - - rdraw.vgt_num_indices = draw->count; - rdraw.vgt_num_instances = 1; - rdraw.vgt_index_type = vgt_dma_index_type; - rdraw.vgt_draw_initiator = vgt_draw_initiator; - rdraw.indices = NULL; - if (draw->index_buffer) { - rbuffer = (struct 
r600_resource*)draw->index_buffer; - rdraw.indices = rbuffer->bo; - rdraw.indices_bo_offset = draw->index_buffer_offset; - } - r600_context_draw(&rctx->ctx, &rdraw); -} - -void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info) -{ - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - struct r600_drawl draw; - - memset(&draw, 0, sizeof(struct r600_drawl)); - draw.ctx = ctx; - draw.mode = info->mode; - draw.start = info->start; - draw.count = info->count; - if (info->indexed && rctx->index_buffer.buffer) { - draw.start += rctx->index_buffer.offset / rctx->index_buffer.index_size; - draw.min_index = info->min_index; - draw.max_index = info->max_index; - draw.index_bias = info->index_bias; - - r600_translate_index_buffer(rctx, &rctx->index_buffer.buffer, - &rctx->index_buffer.index_size, - &draw.start, - info->count); - - draw.index_size = rctx->index_buffer.index_size; - pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer); - draw.index_buffer_offset = draw.start * draw.index_size; - draw.start = 0; - r600_upload_index_buffer(rctx, &draw); - } else { - draw.index_size = 0; - draw.index_buffer = NULL; - draw.min_index = info->min_index; - draw.max_index = info->max_index; - draw.index_bias = info->start; - } - r600_draw_common(&draw); - - pipe_resource_reference(&draw.index_buffer, NULL); -} - static void r600_set_blend_color(struct pipe_context *ctx, const struct pipe_blend_color *state) { @@ -616,6 +402,7 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c uint32_t word4 = 0, yuv_format = 0, pitch = 0; unsigned char swizzle[4], array_mode = 0, tile_type = 0; struct r600_bo *bo[2]; + unsigned height, depth; if (resource == NULL) return NULL; @@ -643,22 +430,30 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c if (desc == NULL) { R600_ERR("unknow format %d\n", state->format); } - tmp = (struct r600_resource_texture*)texture; + tmp = (struct r600_resource_texture *)texture; + if (tmp->depth && !tmp->is_flushing_texture) { + r600_texture_depth_flush(ctx, texture, TRUE); + tmp = tmp->flushed_depth_texture; + } + + if (tmp->force_int_type) { + word4 &= C_038010_NUM_FORMAT_ALL; + word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT); + } rbuffer = &tmp->resource; bo[0] = rbuffer->bo; bo[1] = rbuffer->bo; - /* FIXME depth texture decompression */ - if (tmp->depth) { - r600_texture_depth_flush(ctx, texture); - tmp = (struct r600_resource_texture*)texture; - rbuffer = &tmp->flushed_depth_texture->resource; - bo[0] = rbuffer->bo; - bo[1] = rbuffer->bo; - } - pitch = align(tmp->pitch_in_pixels[0], 8); - if (tmp->tiled) { - array_mode = tmp->array_mode[0]; - tile_type = tmp->tile_type; + pitch = align(tmp->pitch_in_blocks[0] * util_format_get_blockwidth(state->format), 8); + array_mode = tmp->array_mode[0]; + tile_type = tmp->tile_type; + + height = texture->height0; + depth = texture->depth0; + if (texture->target == PIPE_TEXTURE_1D_ARRAY) { + height = 1; + depth = texture->array_size; + } else if (texture->target == PIPE_TEXTURE_2D_ARRAY) { + depth = texture->array_size; } /* FIXME properly handle first level != 0 */ @@ -669,22 +464,22 @@ static struct pipe_sampler_view *r600_create_sampler_view(struct pipe_context *c S_038000_PITCH((pitch / 8) - 1) | S_038000_TEX_WIDTH(texture->width0 - 1), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1, - S_038004_TEX_HEIGHT(texture->height0 - 1) | - S_038004_TEX_DEPTH(texture->depth0 - 1) | + 
S_038004_TEX_HEIGHT(height - 1) | + S_038004_TEX_DEPTH(depth - 1) | S_038004_DATA_FORMAT(format), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2, (tmp->offset[0] + r600_bo_offset(bo[0])) >> 8, 0xFFFFFFFF, bo[0]); r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3, (tmp->offset[1] + r600_bo_offset(bo[1])) >> 8, 0xFFFFFFFF, bo[1]); r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4, - word4 | S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_NORM) | + word4 | S_038010_SRF_MODE_ALL(V_038010_SRF_MODE_NO_ZERO) | S_038010_REQUEST_SIZE(1) | S_038010_BASE_LEVEL(state->u.tex.first_level), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5, S_038014_LAST_LEVEL(state->u.tex.last_level) | - S_038014_BASE_ARRAY(0) | - S_038014_LAST_ARRAY(0), 0xFFFFFFFF, NULL); + S_038014_BASE_ARRAY(state->u.tex.first_layer) | + S_038014_LAST_ARRAY(state->u.tex.last_layer), 0xFFFFFFFF, NULL); r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6, S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE), 0xFFFFFFFF, NULL); @@ -714,9 +509,11 @@ static void r600_set_ps_sampler_view(struct pipe_context *ctx, unsigned count, for (i = 0; i < count; i++) { if (&rctx->ps_samplers.views[i]->base != views[i]) { if (resource[i]) - r600_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, i); + r600_context_pipe_state_set_ps_resource(&rctx->ctx, &resource[i]->state, + i + R600_MAX_CONST_BUFFERS); else - r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i); + r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, + i + R600_MAX_CONST_BUFFERS); pipe_sampler_view_reference( (struct pipe_sampler_view **)&rctx->ps_samplers.views[i], @@ -726,7 +523,8 @@ static void r600_set_ps_sampler_view(struct pipe_context *ctx, unsigned count, } for (i = count; i < NUM_TEX_UNITS; i++) { if (rctx->ps_samplers.views[i]) { - r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, i); + r600_context_pipe_state_set_ps_resource(&rctx->ctx, NULL, + i + R600_MAX_CONST_BUFFERS); pipe_sampler_view_reference((struct pipe_sampler_view **)&rctx->ps_samplers.views[i], NULL); } } @@ -908,21 +706,28 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta unsigned offset; const struct util_format_description *desc; struct r600_bo *bo[3]; + int i; surf = (struct r600_surface *)state->cbufs[cb]; rtex = (struct r600_resource_texture*)state->cbufs[cb]->texture; + + if (rtex->depth && !rtex->is_flushing_texture) { + r600_texture_depth_flush(&rctx->context, state->cbufs[cb]->texture, TRUE); + rtex = rtex->flushed_depth_texture; + } + rbuffer = &rtex->resource; bo[0] = rbuffer->bo; bo[1] = rbuffer->bo; bo[2] = rbuffer->bo; /* XXX quite sure for dx10+ hw don't need any offset hacks */ - offset = r600_texture_get_offset((struct r600_resource_texture *)state->cbufs[cb]->texture, + offset = r600_texture_get_offset(rtex, level, state->cbufs[cb]->u.tex.first_layer); - pitch = rtex->pitch_in_pixels[level] / 8 - 1; - slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1; + pitch = rtex->pitch_in_blocks[level] / 8 - 1; + slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1; ntype = 0; - desc = util_format_description(rtex->resource.base.b.format); + desc = util_format_description(surf->base.format); if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) ntype = V_0280A0_NUMBER_SRGB; else if (desc->layout == UTIL_FORMAT_LAYOUT_PLAIN) { @@ -937,15 +742,30 @@ static void r600_cb(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta } } - 
format = r600_translate_colorformat(rtex->resource.base.b.format); - swap = r600_translate_colorswap(rtex->resource.base.b.format); + for (i = 0; i < 4; i++) { + if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) { + break; + } + } + + format = r600_translate_colorformat(surf->base.format); + swap = r600_translate_colorswap(surf->base.format); + + /* disable when gallium grows int textures */ + if ((format == FMT_32_32_32_32 || format == FMT_16_16_16_16) && rtex->force_int_type) + ntype = 4; + color_info = S_0280A0_FORMAT(format) | S_0280A0_COMP_SWAP(swap) | S_0280A0_ARRAY_MODE(rtex->array_mode[level]) | S_0280A0_BLEND_CLAMP(1) | S_0280A0_NUMBER_TYPE(ntype); - if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS) - color_info |= S_0280A0_SOURCE_FORMAT(1); + + /* on R600 this can't be set if BLEND_CLAMP isn't set, + if BLEND_FLOAT32 is set of > 11 bits in a UNORM or SNORM */ + if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS && + desc->channel[i].size < 12) + color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM); r600_pipe_state_add_reg(rstate, R_028040_CB_COLOR0_BASE + cb * 4, @@ -989,17 +809,14 @@ static void r600_db(struct r600_pipe_context *rctx, struct r600_pipe_state *rsta surf = (struct r600_surface *)state->zsbuf; rtex = (struct r600_resource_texture*)state->zsbuf->texture; - rtex->tiled = 1; - rtex->array_mode[level] = 2; - rtex->tile_type = 1; - rtex->depth = 1; + rbuffer = &rtex->resource; /* XXX quite sure for dx10+ hw don't need any offset hacks */ offset = r600_texture_get_offset((struct r600_resource_texture *)state->zsbuf->texture, level, state->zsbuf->u.tex.first_layer); - pitch = rtex->pitch_in_pixels[level] / 8 - 1; - slice = rtex->pitch_in_pixels[level] * surf->aligned_height / 64 - 1; + pitch = rtex->pitch_in_blocks[level] / 8 - 1; + slice = rtex->pitch_in_blocks[level] * surf->aligned_height / 64 - 1; format = r600_translate_dbformat(state->zsbuf->texture->format); r600_pipe_state_add_reg(rstate, R_02800C_DB_DEPTH_BASE, @@ -1115,51 +932,6 @@ static void r600_set_framebuffer_state(struct pipe_context *ctx, } } -static void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index, - struct pipe_resource *buffer) -{ - struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - struct r600_resource *rbuffer = (struct r600_resource*)buffer; - uint32_t offset; - - /* Note that the state tracker can unbind constant buffers by - * passing NULL here. 
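Note on the r600_cb() hunks above: the format and swap lookups now use the surface format rather than the resource format (so a depth texture flushed into a color-renderable copy binds correctly), and S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM) is gated on channel width — per the in-tree comment, the normalized export path on R600 is only safe under blend clamping and not for UNORM/SNORM channels wider than 11 bits. A minimal sketch of that predicate, with a simplified channel descriptor standing in for util_format_description():

#include <stdbool.h>

/* Simplified stand-in for the util_format channel description. */
struct chan_desc {
    bool is_depth_stencil;   /* colorspace == UTIL_FORMAT_COLORSPACE_ZS */
    unsigned size_in_bits;   /* width of the first non-void channel */
};

/* Mirrors the condition added in r600_cb(): use the normalized
 * (EXPORT_NORM) color export only for non-depth formats whose channels
 * are at most 11 bits wide; everything else stays on the full export path. */
bool use_export_norm(const struct chan_desc *d)
{
    return !d->is_depth_stencil && d->size_in_bits < 12;
}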
- */ - if (buffer == NULL) { - return; - } - - r600_upload_const_buffer(rctx, buffer, &offset); - - switch (shader) { - case PIPE_SHADER_VERTEX: - rctx->vs_const_buffer.nregs = 0; - r600_pipe_state_add_reg(&rctx->vs_const_buffer, - R_028180_ALU_CONST_BUFFER_SIZE_VS_0, - ALIGN_DIVUP(buffer->width0 >> 4, 16), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&rctx->vs_const_buffer, - R_028980_ALU_CONST_CACHE_VS_0, - (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo); - r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer); - break; - case PIPE_SHADER_FRAGMENT: - rctx->ps_const_buffer.nregs = 0; - r600_pipe_state_add_reg(&rctx->ps_const_buffer, - R_028140_ALU_CONST_BUFFER_SIZE_PS_0, - ALIGN_DIVUP(buffer->width0 >> 4, 16), - 0xFFFFFFFF, NULL); - r600_pipe_state_add_reg(&rctx->ps_const_buffer, - R_028940_ALU_CONST_CACHE_PS_0, - (r600_bo_offset(rbuffer->bo) + offset) >> 8, 0xFFFFFFFF, rbuffer->bo); - r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer); - break; - default: - R600_ERR("unsupported %d\n", shader); - return; - } -} - void r600_init_state_functions(struct r600_pipe_context *rctx) { rctx->context.create_blend_state = r600_create_blend_state; @@ -1199,6 +971,7 @@ void r600_init_state_functions(struct r600_pipe_context *rctx) rctx->context.set_vertex_sampler_views = r600_set_vs_sampler_view; rctx->context.set_viewport_state = r600_set_viewport_state; rctx->context.sampler_view_destroy = r600_sampler_view_destroy; + rctx->context.redefine_user_buffer = u_default_redefine_user_buffer; } void r600_init_config(struct r600_pipe_context *rctx) @@ -1489,3 +1262,25 @@ void *r600_create_db_flush_dsa(struct r600_pipe_context *rctx) S_028D0C_COPY_CENTROID(1), NULL); return rstate; } + +void r600_pipe_set_buffer_resource(struct r600_pipe_context *rctx, + struct r600_pipe_state *rstate, + struct r600_resource *rbuffer, + unsigned offset, unsigned stride) +{ + r600_pipe_state_add_reg(rstate, R_038000_RESOURCE0_WORD0, + offset, 0xFFFFFFFF, rbuffer->bo); + r600_pipe_state_add_reg(rstate, R_038004_RESOURCE0_WORD1, + rbuffer->bo_size - offset - 1, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_038008_RESOURCE0_WORD2, + S_038008_STRIDE(stride), + 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_03800C_RESOURCE0_WORD3, + 0x00000000, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_038010_RESOURCE0_WORD4, + 0x00000000, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_038014_RESOURCE0_WORD5, + 0x00000000, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(rstate, R_038018_RESOURCE0_WORD6, + 0xC0000000, 0xFFFFFFFF, NULL); +} diff --git a/src/gallium/drivers/r600/r600_state_common.c b/src/gallium/drivers/r600/r600_state_common.c index 3603376f738..72707fbd8b8 100644 --- a/src/gallium/drivers/r600/r600_state_common.c +++ b/src/gallium/drivers/r600/r600_state_common.c @@ -27,7 +27,9 @@ #include <util/u_memory.h> #include <util/u_format.h> #include <pipebuffer/pb_buffer.h> +#include "pipe/p_shader_tokens.h" #include "r600_pipe.h" +#include "r600d.h" /* common state between evergreen and r600 */ void r600_bind_blend_state(struct pipe_context *ctx, void *state) @@ -119,24 +121,13 @@ void r600_bind_vertex_elements(struct pipe_context *ctx, void *state) struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; struct r600_vertex_element *v = (struct r600_vertex_element*)state; - /* delete previous translated vertex elements */ - if (rctx->tran.new_velems) { - r600_end_vertex_translate(rctx); - } - rctx->vertex_elements = v; if (v) { + 
u_vbuf_mgr_bind_vertex_elements(rctx->vbuf_mgr, state, + v->vmgr_elements); + rctx->states[v->rstate.id] = &v->rstate; r600_context_pipe_state_set(&rctx->ctx, &v->rstate); - if (rctx->family >= CHIP_CEDAR) { - evergreen_vertex_buffer_update(rctx); - } else { - r600_vertex_buffer_update(rctx); - } - } - - if (v) { -// rctx->vs_rebuild = TRUE; } } @@ -152,6 +143,7 @@ void r600_delete_vertex_element(struct pipe_context *ctx, void *state) rctx->vertex_elements = NULL; r600_bo_reference(rctx->radeon, &v->fetch_shader, NULL); + u_vbuf_mgr_destroy_vertex_elements(rctx->vbuf_mgr, v->vmgr_elements); FREE(state); } @@ -176,88 +168,44 @@ void r600_set_vertex_buffers(struct pipe_context *ctx, unsigned count, const struct pipe_vertex_buffer *buffers) { struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; - struct pipe_vertex_buffer *vbo; - unsigned max_index = (unsigned)-1; + int i; - if (rctx->family >= CHIP_CEDAR) { - for (int i = 0; i < rctx->nvertex_buffer; i++) { - pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL); - evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); + /* Zero states. */ + for (i = 0; i < count; i++) { + if (!buffers[i].buffer) { + if (rctx->family >= CHIP_CEDAR) { + evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); + } else { + r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); + } } - } else { - for (int i = 0; i < rctx->nvertex_buffer; i++) { - pipe_resource_reference(&rctx->vertex_buffer[i].buffer, NULL); + } + for (; i < rctx->vbuf_mgr->nr_real_vertex_buffers; i++) { + if (rctx->family >= CHIP_CEDAR) { + evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); + } else { r600_context_pipe_state_set_fs_resource(&rctx->ctx, NULL, i); } } - memcpy(rctx->vertex_buffer, buffers, sizeof(struct pipe_vertex_buffer) * count); - - for (int i = 0; i < count; i++) { - vbo = (struct pipe_vertex_buffer*)&buffers[i]; - - rctx->vertex_buffer[i].buffer = NULL; - if (buffers[i].buffer == NULL) - continue; - if (r600_buffer_is_user_buffer(buffers[i].buffer)) - rctx->any_user_vbs = TRUE; - pipe_resource_reference(&rctx->vertex_buffer[i].buffer, buffers[i].buffer); - /* The stride of zero means we will be fetching only the first - * vertex, so don't care about max_index. 
*/ - if (!vbo->stride) - continue; - - if (vbo->max_index == ~0) { - vbo->max_index = (vbo->buffer->width0 - vbo->buffer_offset) / vbo->stride; - } - max_index = MIN2(vbo->max_index, max_index); - } - rctx->nvertex_buffer = count; - rctx->vb_max_index = max_index; - if (rctx->family >= CHIP_CEDAR) { - evergreen_vertex_buffer_update(rctx); - } else { - r600_vertex_buffer_update(rctx); - } + u_vbuf_mgr_set_vertex_buffers(rctx->vbuf_mgr, count, buffers); } - -#define FORMAT_REPLACE(what, withwhat) \ - case PIPE_FORMAT_##what: *format = PIPE_FORMAT_##withwhat; break - void *r600_create_vertex_elements(struct pipe_context *ctx, unsigned count, const struct pipe_vertex_element *elements) { struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; struct r600_vertex_element *v = CALLOC_STRUCT(r600_vertex_element); - enum pipe_format *format; - int i; assert(count < 32); if (!v) return NULL; v->count = count; - memcpy(v->elements, elements, count * sizeof(struct pipe_vertex_element)); - - for (i = 0; i < count; i++) { - v->hw_format[i] = v->elements[i].src_format; - format = &v->hw_format[i]; - - switch (*format) { - FORMAT_REPLACE(R64_FLOAT, R32_FLOAT); - FORMAT_REPLACE(R64G64_FLOAT, R32G32_FLOAT); - FORMAT_REPLACE(R64G64B64_FLOAT, R32G32B32_FLOAT); - FORMAT_REPLACE(R64G64B64A64_FLOAT, R32G32B32A32_FLOAT); - default:; - } - v->incompatible_layout = - v->incompatible_layout || - v->elements[i].src_format != v->hw_format[i]; - - v->hw_format_size[i] = align(util_format_get_blocksize(v->hw_format[i]), 4); - } + v->vmgr_elements = + u_vbuf_mgr_create_vertex_elements(rctx->vbuf_mgr, count, + elements, v->elements); if (r600_vertex_elements_build_fetch_shader(rctx, v)) { FREE(v); @@ -327,3 +275,274 @@ void r600_delete_vs_shader(struct pipe_context *ctx, void *state) r600_pipe_shader_destroy(ctx, shader); free(shader); } + +/* FIXME optimize away spi update when it's not needed */ +void r600_spi_update(struct r600_pipe_context *rctx) +{ + struct r600_pipe_shader *shader = rctx->ps_shader; + struct r600_pipe_state rstate; + struct r600_shader *rshader = &shader->shader; + unsigned i, tmp; + + rstate.nregs = 0; + for (i = 0; i < rshader->ninput; i++) { + tmp = S_028644_SEMANTIC(r600_find_vs_semantic_index(&rctx->vs_shader->shader, rshader, i)); + + if (rshader->input[i].name == TGSI_SEMANTIC_COLOR || + rshader->input[i].name == TGSI_SEMANTIC_BCOLOR || + rshader->input[i].name == TGSI_SEMANTIC_POSITION) { + tmp |= S_028644_FLAT_SHADE(rctx->flatshade); + } + + if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && + rctx->sprite_coord_enable & (1 << rshader->input[i].sid)) { + tmp |= S_028644_PT_SPRITE_TEX(1); + } + + if (rctx->family < CHIP_CEDAR) { + if (rshader->input[i].centroid) + tmp |= S_028644_SEL_CENTROID(1); + + if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) + tmp |= S_028644_SEL_LINEAR(1); + } + + r600_pipe_state_add_reg(&rstate, R_028644_SPI_PS_INPUT_CNTL_0 + i * 4, tmp, 0xFFFFFFFF, NULL); + } + r600_context_pipe_state_set(&rctx->ctx, &rstate); +} + +void r600_set_constant_buffer(struct pipe_context *ctx, uint shader, uint index, + struct pipe_resource *buffer) +{ + struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; + struct r600_resource_buffer *rbuffer = r600_buffer(buffer); + struct r600_pipe_state *rstate; + uint32_t offset; + + /* Note that the state tracker can unbind constant buffers by + * passing NULL here. 
+ */ + if (buffer == NULL) { + return; + } + + r600_upload_const_buffer(rctx, &rbuffer, &offset); + offset += r600_bo_offset(rbuffer->r.bo); + + switch (shader) { + case PIPE_SHADER_VERTEX: + rctx->vs_const_buffer.nregs = 0; + r600_pipe_state_add_reg(&rctx->vs_const_buffer, + R_028180_ALU_CONST_BUFFER_SIZE_VS_0, + ALIGN_DIVUP(buffer->width0 >> 4, 16), + 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&rctx->vs_const_buffer, + R_028980_ALU_CONST_CACHE_VS_0, + offset >> 8, 0xFFFFFFFF, rbuffer->r.bo); + r600_context_pipe_state_set(&rctx->ctx, &rctx->vs_const_buffer); + + rstate = &rctx->vs_const_buffer_resource[index]; + rstate->id = R600_PIPE_STATE_RESOURCE; + rstate->nregs = 0; + if (rctx->family >= CHIP_CEDAR) { + evergreen_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16); + evergreen_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index); + } else { + r600_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16); + r600_context_pipe_state_set_vs_resource(&rctx->ctx, rstate, index); + } + break; + case PIPE_SHADER_FRAGMENT: + rctx->ps_const_buffer.nregs = 0; + r600_pipe_state_add_reg(&rctx->ps_const_buffer, + R_028140_ALU_CONST_BUFFER_SIZE_PS_0, + ALIGN_DIVUP(buffer->width0 >> 4, 16), + 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&rctx->ps_const_buffer, + R_028940_ALU_CONST_CACHE_PS_0, + offset >> 8, 0xFFFFFFFF, rbuffer->r.bo); + r600_context_pipe_state_set(&rctx->ctx, &rctx->ps_const_buffer); + + rstate = &rctx->ps_const_buffer_resource[index]; + rstate->id = R600_PIPE_STATE_RESOURCE; + rstate->nregs = 0; + if (rctx->family >= CHIP_CEDAR) { + evergreen_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16); + evergreen_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index); + } else { + r600_pipe_set_buffer_resource(rctx, rstate, &rbuffer->r, offset, 16); + r600_context_pipe_state_set_ps_resource(&rctx->ctx, rstate, index); + } + break; + default: + R600_ERR("unsupported %d\n", shader); + return; + } + + if (buffer != &rbuffer->r.b.b.b) + pipe_resource_reference((struct pipe_resource**)&rbuffer, NULL); +} + +static void r600_vertex_buffer_update(struct r600_pipe_context *rctx) +{ + struct r600_pipe_state *rstate; + struct r600_resource *rbuffer; + struct pipe_vertex_buffer *vertex_buffer; + unsigned i, count, offset; + + if (rctx->vertex_elements->vbuffer_need_offset) { + /* one resource per vertex elements */ + count = rctx->vertex_elements->count; + } else { + /* bind vertex buffer once */ + count = rctx->vbuf_mgr->nr_real_vertex_buffers; + } + + for (i = 0 ; i < count; i++) { + rstate = &rctx->fs_resource[i]; + rstate->id = R600_PIPE_STATE_RESOURCE; + rstate->nregs = 0; + + if (rctx->vertex_elements->vbuffer_need_offset) { + /* one resource per vertex elements */ + unsigned vbuffer_index; + vbuffer_index = rctx->vertex_elements->elements[i].vertex_buffer_index; + vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[vbuffer_index]; + rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[vbuffer_index]; + offset = rctx->vertex_elements->vbuffer_offset[i]; + } else { + /* bind vertex buffer once */ + vertex_buffer = &rctx->vbuf_mgr->vertex_buffer[i]; + rbuffer = (struct r600_resource*)rctx->vbuf_mgr->real_vertex_buffer[i]; + offset = 0; + } + if (vertex_buffer == NULL || rbuffer == NULL) + continue; + offset += vertex_buffer->buffer_offset + r600_bo_offset(rbuffer->bo); + + if (rctx->family >= CHIP_CEDAR) { + evergreen_pipe_set_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride); + 
evergreen_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); + } else { + r600_pipe_set_buffer_resource(rctx, rstate, rbuffer, offset, vertex_buffer->stride); + r600_context_pipe_state_set_fs_resource(&rctx->ctx, rstate, i); + } + } +} + +void r600_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info) +{ + struct r600_pipe_context *rctx = (struct r600_pipe_context *)ctx; + struct r600_resource *rbuffer; + u32 vgt_dma_index_type, vgt_draw_initiator, mask; + struct r600_draw rdraw; + struct r600_pipe_state vgt; + struct r600_drawl draw = {}; + unsigned prim; + + r600_flush_depth_textures(rctx); + u_vbuf_mgr_draw_begin(rctx->vbuf_mgr, info, NULL, NULL); + r600_vertex_buffer_update(rctx); + + draw.info = *info; + draw.ctx = ctx; + if (info->indexed && rctx->index_buffer.buffer) { + draw.info.start += rctx->index_buffer.offset / rctx->index_buffer.index_size; + pipe_resource_reference(&draw.index_buffer, rctx->index_buffer.buffer); + + r600_translate_index_buffer(rctx, &draw.index_buffer, + &rctx->index_buffer.index_size, + &draw.info.start, + info->count); + + draw.index_size = rctx->index_buffer.index_size; + draw.index_buffer_offset = draw.info.start * draw.index_size; + draw.info.start = 0; + + if (u_vbuf_resource(draw.index_buffer)->user_ptr) { + r600_upload_index_buffer(rctx, &draw); + } + } else { + draw.info.index_bias = info->start; + } + + switch (draw.index_size) { + case 2: + vgt_draw_initiator = 0; + vgt_dma_index_type = 0; + break; + case 4: + vgt_draw_initiator = 0; + vgt_dma_index_type = 1; + break; + case 0: + vgt_draw_initiator = 2; + vgt_dma_index_type = 0; + break; + default: + R600_ERR("unsupported index size %d\n", draw.index_size); + return; + } + if (r600_conv_pipe_prim(draw.info.mode, &prim)) + return; + if (unlikely(rctx->ps_shader == NULL)) { + R600_ERR("missing vertex shader\n"); + return; + } + if (unlikely(rctx->vs_shader == NULL)) { + R600_ERR("missing vertex shader\n"); + return; + } + /* there should be enough input */ + if (rctx->vertex_elements->count < rctx->vs_shader->shader.bc.nresource) { + R600_ERR("%d resources provided, expecting %d\n", + rctx->vertex_elements->count, rctx->vs_shader->shader.bc.nresource); + return; + } + + r600_spi_update(rctx); + + mask = 0; + for (int i = 0; i < rctx->framebuffer.nr_cbufs; i++) { + mask |= (0xF << (i * 4)); + } + + vgt.id = R600_PIPE_STATE_VGT; + vgt.nregs = 0; + r600_pipe_state_add_reg(&vgt, R_008958_VGT_PRIMITIVE_TYPE, prim, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_028408_VGT_INDX_OFFSET, draw.info.index_bias, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_028400_VGT_MAX_VTX_INDX, draw.info.max_index, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_028404_VGT_MIN_VTX_INDX, draw.info.min_index, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_028238_CB_TARGET_MASK, rctx->cb_target_mask & mask, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0, 0xFFFFFFFF, NULL); + r600_pipe_state_add_reg(&vgt, R_03CFF4_SQ_VTX_START_INST_LOC, 0, 0xFFFFFFFF, NULL); + r600_context_pipe_state_set(&rctx->ctx, &vgt); + + rdraw.vgt_num_indices = draw.info.count; + rdraw.vgt_num_instances = 1; + rdraw.vgt_index_type = vgt_dma_index_type; + rdraw.vgt_draw_initiator = vgt_draw_initiator; + rdraw.indices = NULL; + if (draw.index_buffer) { + rbuffer = (struct r600_resource*)draw.index_buffer; + rdraw.indices = rbuffer->bo; + rdraw.indices_bo_offset = draw.index_buffer_offset; + } + + if (rctx->family >= CHIP_CEDAR) { + evergreen_context_draw(&rctx->ctx, &rdraw); 
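The switch in the relocated r600_draw_vbo() above maps the index size onto the two VGT fields: 16-bit and 32-bit index buffers both use the DMA draw initiator and differ only in the DMA index type, while index_size == 0 selects the auto-generated-index initiator. A compact standalone restatement of that mapping (values copied from the switch; the struct is just an illustration):

#include <stdbool.h>

struct vgt_draw_setup {
    unsigned draw_initiator;   /* 0 = DMA indices, 2 = auto-generated */
    unsigned dma_index_type;   /* 0 = 16-bit, 1 = 32-bit */
};

/* Same mapping as the switch on draw.index_size in r600_draw_vbo();
 * returns false for index sizes the driver rejects. */
bool vgt_setup_for_index_size(unsigned index_size, struct vgt_draw_setup *out)
{
    switch (index_size) {
    case 2:  out->draw_initiator = 0; out->dma_index_type = 0; return true;
    case 4:  out->draw_initiator = 0; out->dma_index_type = 1; return true;
    case 0:  out->draw_initiator = 2; out->dma_index_type = 0; return true;
    default: return false;   /* unsupported index size */
    }
}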
+ } else { + r600_context_draw(&rctx->ctx, &rdraw); + } + + if (rctx->framebuffer.zsbuf) + { + struct pipe_resource *tex = rctx->framebuffer.zsbuf->texture; + ((struct r600_resource_texture *)tex)->dirty_db = TRUE; + } + + pipe_resource_reference(&draw.index_buffer, NULL); + + u_vbuf_mgr_draw_end(rctx->vbuf_mgr); +} diff --git a/src/gallium/drivers/r600/r600_state_inlines.h b/src/gallium/drivers/r600/r600_state_inlines.h index a0ec493fc85..3dd54f45202 100644 --- a/src/gallium/drivers/r600/r600_state_inlines.h +++ b/src/gallium/drivers/r600/r600_state_inlines.h @@ -253,9 +253,13 @@ static inline unsigned r600_tex_dim(unsigned dim) default: case PIPE_TEXTURE_1D: return V_038000_SQ_TEX_DIM_1D; + case PIPE_TEXTURE_1D_ARRAY: + return V_038000_SQ_TEX_DIM_1D_ARRAY; case PIPE_TEXTURE_2D: case PIPE_TEXTURE_RECT: return V_038000_SQ_TEX_DIM_2D; + case PIPE_TEXTURE_2D_ARRAY: + return V_038000_SQ_TEX_DIM_2D_ARRAY; case PIPE_TEXTURE_3D: return V_038000_SQ_TEX_DIM_3D; case PIPE_TEXTURE_CUBE: @@ -285,10 +289,14 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) return V_0280A0_SWAP_ALT_REV; case PIPE_FORMAT_I8_UNORM: case PIPE_FORMAT_L8_UNORM: + case PIPE_FORMAT_L8_SRGB: case PIPE_FORMAT_R8_UNORM: case PIPE_FORMAT_R8_SNORM: return V_0280A0_SWAP_STD; + case PIPE_FORMAT_L4A4_UNORM: + return V_0280A0_SWAP_ALT; + /* 16-bit buffers. */ case PIPE_FORMAT_B5G6R5_UNORM: return V_0280A0_SWAP_STD_REV; @@ -305,6 +313,7 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) return V_0280A0_SWAP_STD; case PIPE_FORMAT_L8A8_UNORM: + case PIPE_FORMAT_L8A8_SRGB: return V_0280A0_SWAP_ALT; case PIPE_FORMAT_R8G8_UNORM: return V_0280A0_SWAP_STD; @@ -328,6 +337,7 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) case PIPE_FORMAT_X8R8G8B8_UNORM: return V_0280A0_SWAP_ALT_REV; case PIPE_FORMAT_R8G8B8A8_SNORM: + case PIPE_FORMAT_R8G8B8A8_UNORM: case PIPE_FORMAT_R8G8B8X8_UNORM: return V_0280A0_SWAP_STD; @@ -346,9 +356,11 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) case PIPE_FORMAT_R10G10B10A2_UNORM: case PIPE_FORMAT_R10G10B10X2_SNORM: - case PIPE_FORMAT_B10G10R10A2_UNORM: case PIPE_FORMAT_R10SG10SB10SA2U_NORM: - return V_0280A0_SWAP_STD_REV; + return V_0280A0_SWAP_STD; + + case PIPE_FORMAT_B10G10R10A2_UNORM: + return V_0280A0_SWAP_ALT; case PIPE_FORMAT_R16G16_UNORM: return V_0280A0_SWAP_STD; @@ -356,14 +368,13 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) /* 64-bit buffers. */ case PIPE_FORMAT_R16G16B16A16_UNORM: case PIPE_FORMAT_R16G16B16A16_SNORM: - // return FMT_16_16_16_16; case PIPE_FORMAT_R16G16B16A16_FLOAT: - // return FMT_16_16_16_16_FLOAT; /* 128-bit buffers. */ - //case PIPE_FORMAT_R32G32B32A32_FLOAT: - // return FMT_32_32_32_32_FLOAT; - return 0; + case PIPE_FORMAT_R32G32B32A32_FLOAT: + case PIPE_FORMAT_R32G32B32A32_SNORM: + case PIPE_FORMAT_R32G32B32A32_UNORM: + return V_0280A0_SWAP_STD; default: R600_ERR("unsupported colorswap format %d\n", format); return ~0; @@ -374,10 +385,14 @@ static inline uint32_t r600_translate_colorswap(enum pipe_format format) static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) { switch (format) { + case PIPE_FORMAT_L4A4_UNORM: + return V_0280A0_COLOR_4_4; + /* 8-bit buffers. 
*/ case PIPE_FORMAT_A8_UNORM: case PIPE_FORMAT_I8_UNORM: case PIPE_FORMAT_L8_UNORM: + case PIPE_FORMAT_L8_SRGB: case PIPE_FORMAT_R8_UNORM: case PIPE_FORMAT_R8_SNORM: return V_0280A0_COLOR_8; @@ -398,6 +413,7 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) return V_0280A0_COLOR_16; case PIPE_FORMAT_L8A8_UNORM: + case PIPE_FORMAT_L8A8_SRGB: case PIPE_FORMAT_R8G8_UNORM: return V_0280A0_COLOR_8_8; @@ -425,7 +441,7 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) case PIPE_FORMAT_R10G10B10X2_SNORM: case PIPE_FORMAT_B10G10R10A2_UNORM: case PIPE_FORMAT_R10SG10SB10SA2U_NORM: - return V_0280A0_COLOR_10_10_10_2; + return V_0280A0_COLOR_2_10_10_10; case PIPE_FORMAT_Z24X8_UNORM: case PIPE_FORMAT_Z24_UNORM_S8_USCALED: @@ -467,10 +483,13 @@ static INLINE uint32_t r600_translate_colorformat(enum pipe_format format) return V_0280A0_COLOR_32_32; /* 128-bit buffers. */ - //case PIPE_FORMAT_R32G32B32_FLOAT: - // return V_0280A0_COLOR_32_32_32_FLOAT; - //case PIPE_FORMAT_R32G32B32A32_FLOAT: - // return V_0280A0_COLOR_32_32_32_32_FLOAT; + case PIPE_FORMAT_R32G32B32_FLOAT: + return V_0280A0_COLOR_32_32_32_FLOAT; + case PIPE_FORMAT_R32G32B32A32_FLOAT: + return V_0280A0_COLOR_32_32_32_32_FLOAT; + case PIPE_FORMAT_R32G32B32A32_SNORM: + case PIPE_FORMAT_R32G32B32A32_UNORM: + return V_0280A0_COLOR_32_32_32_32; /* YUV buffers. */ case PIPE_FORMAT_UYVY: @@ -497,9 +516,37 @@ static INLINE boolean r600_is_zs_format_supported(enum pipe_format format) return r600_translate_dbformat(format) != ~0; } -static INLINE boolean r600_is_vertex_format_supported(enum pipe_format format) +static INLINE boolean r600_is_vertex_format_supported(enum pipe_format format, + enum radeon_family family) { - return r600_translate_colorformat(format) != ~0; + unsigned i; + const struct util_format_description *desc = util_format_description(format); + if (!desc) + return FALSE; + + /* Find the first non-VOID channel. */ + for (i = 0; i < 4; i++) { + if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) { + break; + } + } + if (i == 4) + return FALSE; + + /* No fixed, no double. */ + if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN || + desc->channel[i].type == UTIL_FORMAT_TYPE_FIXED || + (desc->channel[i].size == 64 && + desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT)) + return FALSE; + + /* No scaled/norm formats with 32 bits per channel. */ + if (desc->channel[i].size == 32 && + (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED || + desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED)) + return FALSE; + + return TRUE; } #endif diff --git a/src/gallium/drivers/r600/r600_texture.c b/src/gallium/drivers/r600/r600_texture.c index 1f4f453c091..03af367401d 100644 --- a/src/gallium/drivers/r600/r600_texture.c +++ b/src/gallium/drivers/r600/r600_texture.c @@ -27,6 +27,7 @@ #include <errno.h> #include <pipe/p_screen.h> #include <util/u_format.h> +#include <util/u_format_s3tc.h> #include <util/u_math.h> #include <util/u_inlines.h> #include <util/u_memory.h> @@ -38,8 +39,6 @@ #include "r600d.h" #include "r600_formats.h" -extern struct u_resource_vtbl r600_texture_vtbl; - /* Copy from a full GPU texture to a transfer's staging one. 
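The rewritten r600_is_vertex_format_supported() above no longer reuses the color-format table; it inspects the util_format description directly and rejects non-plain layouts, fixed-point channels, 64-bit floats, and 32-bit signed/unsigned (scaled or normalized) channels. A simplified standalone version of the same checks, with a hand-rolled descriptor in place of util_format_description():

#include <stdbool.h>

enum chan_type { CHAN_VOID, CHAN_UNSIGNED, CHAN_SIGNED, CHAN_FLOAT, CHAN_FIXED };

struct vtx_format_desc {
    bool plain_layout;          /* UTIL_FORMAT_LAYOUT_PLAIN */
    enum chan_type channel[4];
    unsigned channel_size[4];   /* bits per channel */
};

/* Same decision sequence as the rewritten r600_is_vertex_format_supported(). */
bool vertex_format_supported(const struct vtx_format_desc *d)
{
    unsigned i;

    /* Find the first non-void channel. */
    for (i = 0; i < 4; i++) {
        if (d->channel[i] != CHAN_VOID)
            break;
    }
    if (i == 4)
        return false;

    /* No non-plain layouts, no fixed point, no 64-bit floats. */
    if (!d->plain_layout ||
        d->channel[i] == CHAN_FIXED ||
        (d->channel_size[i] == 64 && d->channel[i] == CHAN_FLOAT))
        return false;

    /* No scaled/normalized formats with 32 bits per channel. */
    if (d->channel_size[i] == 32 &&
        (d->channel[i] == CHAN_SIGNED || d->channel[i] == CHAN_UNSIGNED))
        return false;

    return true;
}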
*/ static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer) { @@ -77,17 +76,15 @@ unsigned r600_texture_get_offset(struct r600_resource_texture *rtex, { unsigned offset = rtex->offset[level]; - switch (rtex->resource.base.b.target) { + switch (rtex->resource.b.b.b.target) { case PIPE_TEXTURE_3D: case PIPE_TEXTURE_CUBE: - return offset + layer * rtex->layer_size[level]; default: - assert(layer == 0); - return offset; + return offset + layer * rtex->layer_size[level]; } } -static unsigned r600_get_pixel_alignment(struct pipe_screen *screen, +static unsigned r600_get_block_alignment(struct pipe_screen *screen, enum pipe_format format, unsigned array_mode) { @@ -105,6 +102,9 @@ static unsigned r600_get_pixel_alignment(struct pipe_screen *screen, (((rscreen->tiling_info->group_bytes / 8 / pixsize)) * rscreen->tiling_info->num_banks)) * 8; break; + case V_038000_ARRAY_LINEAR_ALIGNED: + p_align = MAX2(64, rscreen->tiling_info->group_bytes / pixsize); + break; case V_038000_ARRAY_LINEAR_GENERAL: default: p_align = rscreen->tiling_info->group_bytes / pixsize; @@ -124,8 +124,10 @@ static unsigned r600_get_height_alignment(struct pipe_screen *screen, h_align = rscreen->tiling_info->num_channels * 8; break; case V_038000_ARRAY_1D_TILED_THIN1: + case V_038000_ARRAY_LINEAR_ALIGNED: h_align = 8; break; + case V_038000_ARRAY_LINEAR_GENERAL: default: h_align = 1; break; @@ -139,7 +141,7 @@ static unsigned r600_get_base_alignment(struct pipe_screen *screen, { struct r600_screen* rscreen = (struct r600_screen *)screen; unsigned pixsize = util_format_get_blocksize(format); - int p_align = r600_get_pixel_alignment(screen, format, array_mode); + int p_align = r600_get_block_alignment(screen, format, array_mode); int h_align = r600_get_height_alignment(screen, array_mode); int b_align; @@ -149,6 +151,8 @@ static unsigned r600_get_base_alignment(struct pipe_screen *screen, p_align * pixsize * h_align); break; case V_038000_ARRAY_1D_TILED_THIN1: + case V_038000_ARRAY_LINEAR_ALIGNED: + case V_038000_ARRAY_LINEAR_GENERAL: default: b_align = rscreen->tiling_info->group_bytes; break; @@ -165,55 +169,46 @@ static unsigned mip_minify(unsigned size, unsigned level) return val; } -static unsigned r600_texture_get_stride(struct pipe_screen *screen, - struct r600_resource_texture *rtex, - unsigned level) +static unsigned r600_texture_get_nblocksx(struct pipe_screen *screen, + struct r600_resource_texture *rtex, + unsigned level) { - struct pipe_resource *ptex = &rtex->resource.base.b; - unsigned width, stride, tile_width; + struct pipe_resource *ptex = &rtex->resource.b.b.b; + unsigned nblocksx, block_align, width; + unsigned blocksize = util_format_get_blocksize(ptex->format); if (rtex->pitch_override) - return rtex->pitch_override; + return rtex->pitch_override / blocksize; width = mip_minify(ptex->width0, level); - if (util_format_is_plain(ptex->format)) { - tile_width = r600_get_pixel_alignment(screen, ptex->format, - rtex->array_mode[level]); - width = align(width, tile_width); - } - stride = util_format_get_stride(ptex->format, width); + nblocksx = util_format_get_nblocksx(ptex->format, width); - return stride; + block_align = r600_get_block_alignment(screen, ptex->format, + rtex->array_mode[level]); + nblocksx = align(nblocksx, block_align); + return nblocksx; } static unsigned r600_texture_get_nblocksy(struct pipe_screen *screen, struct r600_resource_texture *rtex, unsigned level) { - struct pipe_resource *ptex = &rtex->resource.base.b; + struct pipe_resource *ptex = 
&rtex->resource.b.b.b; unsigned height, tile_height; height = mip_minify(ptex->height0, level); - if (util_format_is_plain(ptex->format)) { - tile_height = r600_get_height_alignment(screen, - rtex->array_mode[level]); - height = align(height, tile_height); - } - return util_format_get_nblocksy(ptex->format, height); -} - -/* Get a width in pixels from a stride in bytes. */ -static unsigned pitch_to_width(enum pipe_format format, unsigned pitch_in_bytes) -{ - return (pitch_in_bytes / util_format_get_blocksize(format)) * - util_format_get_blockwidth(format); + height = util_format_get_nblocksy(ptex->format, height); + tile_height = r600_get_height_alignment(screen, + rtex->array_mode[level]); + height = align(height, tile_height); + return height; } static void r600_texture_set_array_mode(struct pipe_screen *screen, struct r600_resource_texture *rtex, unsigned level, unsigned array_mode) { - struct pipe_resource *ptex = &rtex->resource.base.b; + struct pipe_resource *ptex = &rtex->resource.b.b.b; switch (array_mode) { case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -227,7 +222,7 @@ static void r600_texture_set_array_mode(struct pipe_screen *screen, unsigned w, h, tile_height, tile_width; tile_height = r600_get_height_alignment(screen, array_mode); - tile_width = r600_get_pixel_alignment(screen, ptex->format, array_mode); + tile_width = r600_get_block_alignment(screen, ptex->format, array_mode); w = mip_minify(ptex->width0, level); h = mip_minify(ptex->height0, level); @@ -244,40 +239,128 @@ static void r600_setup_miptree(struct pipe_screen *screen, struct r600_resource_texture *rtex, unsigned array_mode) { - struct pipe_resource *ptex = &rtex->resource.base.b; + struct pipe_resource *ptex = &rtex->resource.b.b.b; struct radeon *radeon = (struct radeon *)screen->winsys; enum chip_class chipc = r600_get_family_class(radeon); - unsigned pitch, size, layer_size, i, offset; - unsigned nblocksy; + unsigned size, layer_size, i, offset; + unsigned nblocksx, nblocksy; for (i = 0, offset = 0; i <= ptex->last_level; i++) { + unsigned blocksize = util_format_get_blocksize(ptex->format); + r600_texture_set_array_mode(screen, rtex, i, array_mode); - pitch = r600_texture_get_stride(screen, rtex, i); + nblocksx = r600_texture_get_nblocksx(screen, rtex, i); nblocksy = r600_texture_get_nblocksy(screen, rtex, i); - layer_size = pitch * nblocksy; - + layer_size = nblocksx * nblocksy * blocksize; if (ptex->target == PIPE_TEXTURE_CUBE) { if (chipc >= R700) size = layer_size * 8; else size = layer_size * 6; } - else + else if (ptex->target == PIPE_TEXTURE_3D) size = layer_size * u_minify(ptex->depth0, i); + else + size = layer_size * ptex->array_size; + /* align base image and start of miptree */ if ((i == 0) || (i == 1)) offset = align(offset, r600_get_base_alignment(screen, ptex->format, array_mode)); rtex->offset[i] = offset; rtex->layer_size[i] = layer_size; - rtex->pitch_in_bytes[i] = pitch; - rtex->pitch_in_pixels[i] = pitch_to_width(ptex->format, pitch); + rtex->pitch_in_blocks[i] = nblocksx; /* CB talks in elements */ + rtex->pitch_in_bytes[i] = nblocksx * blocksize; + offset += size; } rtex->size = offset; } +/* Figure out whether u_blitter will fallback to a transfer operation. + * If so, don't use a staging resource. 
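The r600_setup_miptree() changes above switch the layout math from pixels to blocks (so compressed formats size correctly) and distinguish cube, 3D and array targets when scaling the per-layer size. A self-contained sketch of the per-level size computation, assuming the block counts have already been aligned as in r600_texture_get_nblocksx()/nblocksy():

#include <stdbool.h>

enum tex_target { TEX_2D, TEX_3D, TEX_CUBE, TEX_1D_ARRAY, TEX_2D_ARRAY };

/* Per-level size in bytes, mirroring the loop body in r600_setup_miptree().
 * nblocksx/nblocksy are the aligned block counts for this level, blocksize
 * the bytes per block, depth the minified depth0 for this level. */
unsigned level_size_bytes(enum tex_target target, bool is_r700_or_later,
                          unsigned nblocksx, unsigned nblocksy,
                          unsigned blocksize, unsigned depth,
                          unsigned array_size)
{
    unsigned layer_size = nblocksx * nblocksy * blocksize;

    if (target == TEX_CUBE)
        return layer_size * (is_r700_or_later ? 8 : 6);  /* R700+ pads to 8 faces */
    if (target == TEX_3D)
        return layer_size * depth;
    return layer_size * array_size;  /* arrays; plain targets have array_size == 1 */
}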
+ */ +static boolean permit_hardware_blit(struct pipe_screen *screen, + const struct pipe_resource *res) +{ + unsigned bind; + + if (util_format_is_depth_or_stencil(res->format)) + bind = PIPE_BIND_DEPTH_STENCIL; + else + bind = PIPE_BIND_RENDER_TARGET; + + /* hackaround for S3TC */ + if (util_format_is_s3tc(res->format)) + return TRUE; + + if (!screen->is_format_supported(screen, + res->format, + res->target, + res->nr_samples, + bind, 0)) + return FALSE; + + if (!screen->is_format_supported(screen, + res->format, + res->target, + res->nr_samples, + PIPE_BIND_SAMPLER_VIEW, 0)) + return FALSE; + + return TRUE; +} + +static boolean r600_texture_get_handle(struct pipe_screen* screen, + struct pipe_resource *ptex, + struct winsys_handle *whandle) +{ + struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex; + struct r600_resource *resource = &rtex->resource; + struct radeon *radeon = (struct radeon *)screen->winsys; + + return r600_bo_get_winsys_handle(radeon, resource->bo, + rtex->pitch_in_bytes[0], whandle); +} + +static void r600_texture_destroy(struct pipe_screen *screen, + struct pipe_resource *ptex) +{ + struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex; + struct r600_resource *resource = &rtex->resource; + struct radeon *radeon = (struct radeon *)screen->winsys; + + if (rtex->flushed_depth_texture) + pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL); + + if (resource->bo) { + r600_bo_reference(radeon, &resource->bo, NULL); + } + FREE(rtex); +} + +static unsigned int r600_texture_is_referenced(struct pipe_context *context, + struct pipe_resource *texture, + unsigned level, int layer) +{ + /* FIXME */ + return PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE; +} + +static const struct u_resource_vtbl r600_texture_vtbl = +{ + r600_texture_get_handle, /* get_handle */ + r600_texture_destroy, /* resource_destroy */ + r600_texture_is_referenced, /* is_resource_referenced */ + r600_texture_get_transfer, /* get_transfer */ + r600_texture_transfer_destroy, /* transfer_destroy */ + r600_texture_transfer_map, /* transfer_map */ + u_default_transfer_flush_region,/* transfer_flush_region */ + r600_texture_transfer_unmap, /* transfer_unmap */ + u_default_transfer_inline_write /* transfer_inline_write */ +}; + static struct r600_resource_texture * r600_texture_create_object(struct pipe_screen *screen, const struct pipe_resource *base, @@ -295,21 +378,22 @@ r600_texture_create_object(struct pipe_screen *screen, return NULL; resource = &rtex->resource; - resource->base.b = *base; - resource->base.vtbl = &r600_texture_vtbl; - pipe_reference_init(&resource->base.b.reference, 1); - resource->base.b.screen = screen; + resource->b.b.b = *base; + resource->b.b.vtbl = &r600_texture_vtbl; + pipe_reference_init(&resource->b.b.b.reference, 1); + resource->b.b.b.screen = screen; resource->bo = bo; rtex->pitch_override = pitch_in_bytes_override; + /* only mark depth textures the HW can hit as depth textures */ + if (util_format_is_depth_or_stencil(base->format) && permit_hardware_blit(screen, base)) + rtex->depth = 1; - if (array_mode) - rtex->tiled = 1; r600_setup_miptree(screen, rtex, array_mode); resource->size = rtex->size; if (!resource->bo) { - struct pipe_resource *ptex = &rtex->resource.base.b; + struct pipe_resource *ptex = &rtex->resource.b.b.b; int base_align = r600_get_base_alignment(screen, ptex->format, array_mode); resource->bo = r600_bo(radeon, rtex->size, base_align, base->bind, base->usage); @@ -321,48 +405,6 @@ 
r600_texture_create_object(struct pipe_screen *screen, return rtex; } -/* Figure out whether u_blitter will fallback to a transfer operation. - * If so, don't use a staging resource. - */ -static boolean permit_hardware_blit(struct pipe_screen *screen, - const struct pipe_resource *res) -{ - unsigned bind; - - if (util_format_is_depth_or_stencil(res->format)) - bind = PIPE_BIND_DEPTH_STENCIL; - else - bind = PIPE_BIND_RENDER_TARGET; - - /* See r600_resource_copy_region: there is something wrong - * with depth resource copies at the moment so avoid them for - * now. - */ - if (util_format_get_component_bits(res->format, - UTIL_FORMAT_COLORSPACE_ZS, - 0) != 0) - return FALSE; - - if (!screen->is_format_supported(screen, - res->format, - res->target, - res->nr_samples, - bind, 0)) - return FALSE; - - if (!screen->is_format_supported(screen, - res->format, - res->target, - res->nr_samples, - PIPE_BIND_SAMPLER_VIEW, 0)) - return FALSE; - - if (res->usage == PIPE_USAGE_STREAM) - return FALSE; - - return TRUE; -} - struct pipe_resource *r600_texture_create(struct pipe_screen *screen, const struct pipe_resource *templ) { @@ -381,46 +423,21 @@ struct pipe_resource *r600_texture_create(struct pipe_screen *screen, } } + if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER) && + util_format_is_s3tc(templ->format)) + array_mode = V_038000_ARRAY_1D_TILED_THIN1; + return (struct pipe_resource *)r600_texture_create_object(screen, templ, array_mode, 0, 0, NULL); } -static void r600_texture_destroy(struct pipe_screen *screen, - struct pipe_resource *ptex) -{ - struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex; - struct r600_resource *resource = &rtex->resource; - struct radeon *radeon = (struct radeon *)screen->winsys; - - if (rtex->flushed_depth_texture) - pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL); - - if (resource->bo) { - r600_bo_reference(radeon, &resource->bo, NULL); - } - FREE(rtex); -} - -static boolean r600_texture_get_handle(struct pipe_screen* screen, - struct pipe_resource *ptex, - struct winsys_handle *whandle) -{ - struct r600_resource_texture *rtex = (struct r600_resource_texture*)ptex; - struct r600_resource *resource = &rtex->resource; - struct radeon *radeon = (struct radeon *)screen->winsys; - - return r600_bo_get_winsys_handle(radeon, resource->bo, - rtex->pitch_in_bytes[0], whandle); -} - static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, struct pipe_resource *texture, const struct pipe_surface *surf_tmpl) { struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture; struct r600_surface *surface = CALLOC_STRUCT(r600_surface); - unsigned tile_height; unsigned level = surf_tmpl->u.tex.level; assert(surf_tmpl->u.tex.first_layer == surf_tmpl->u.tex.last_layer); @@ -440,8 +457,8 @@ static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, surface->base.u.tex.last_layer = surf_tmpl->u.tex.last_layer; surface->base.u.tex.level = level; - tile_height = r600_get_height_alignment(pipe->screen, rtex->array_mode[level]); - surface->aligned_height = align(surface->base.height, tile_height); + surface->aligned_height = r600_texture_get_nblocksy(pipe->screen, + rtex, level); return &surface->base; } @@ -477,16 +494,8 @@ struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen, bo); } -static unsigned int r600_texture_is_referenced(struct pipe_context *context, - struct pipe_resource *texture, - unsigned level, int layer) -{ - /* FIXME */ - return 
PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE; -} - int r600_texture_depth_flush(struct pipe_context *ctx, - struct pipe_resource *texture) + struct pipe_resource *texture, boolean just_create) { struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture; struct pipe_resource resource; @@ -499,7 +508,8 @@ int r600_texture_depth_flush(struct pipe_context *ctx, resource.width0 = texture->width0; resource.height0 = texture->height0; resource.depth0 = 1; - resource.last_level = 0; + resource.array_size = 1; + resource.last_level = texture->last_level; resource.nr_samples = 0; resource.usage = PIPE_USAGE_DYNAMIC; resource.bind = 0; @@ -513,7 +523,11 @@ int r600_texture_depth_flush(struct pipe_context *ctx, return -ENOMEM; } + ((struct r600_resource_texture *)rtex->flushed_depth_texture)->is_flushing_texture = TRUE; out: + if (just_create) + return 0; + /* XXX: only do this if the depth texture has actually changed: */ r600_blit_uncompress_depth(ctx, rtex); @@ -546,7 +560,7 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, * the CPU is much happier reading out of cached system memory * than uncached VRAM. */ - if (rtex->tiled) + if (R600_TEX_IS_TILED(rtex, level)) use_staging_texture = TRUE; if ((usage & PIPE_TRANSFER_READ) && u_box_volume(box) > 1024) @@ -579,13 +593,16 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, */ /* XXX: when discard is true, no need to read back from depth texture */ - r = r600_texture_depth_flush(ctx, texture); + r = r600_texture_depth_flush(ctx, texture, FALSE); if (r < 0) { R600_ERR("failed to create temporary texture to hold untiled copy\n"); pipe_resource_reference(&trans->transfer.resource, NULL); FREE(trans); return NULL; } + trans->transfer.stride = rtex->flushed_depth_texture->pitch_in_bytes[level]; + trans->offset = r600_texture_get_offset(rtex->flushed_depth_texture, level, box->z); + return &trans->transfer; } else if (use_staging_texture) { resource.target = PIPE_TEXTURE_2D; resource.format = texture->format; @@ -627,6 +644,7 @@ struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx, return &trans->transfer; } trans->transfer.stride = rtex->pitch_in_bytes[level]; + trans->transfer.layer_stride = rtex->layer_size[level]; trans->offset = r600_texture_get_offset(rtex, level, box->z); return &trans->transfer; } @@ -635,7 +653,8 @@ void r600_texture_transfer_destroy(struct pipe_context *ctx, struct pipe_transfer *transfer) { struct r600_transfer *rtransfer = (struct r600_transfer*)transfer; - struct r600_resource_texture *rtex = (struct r600_resource_texture*)transfer->resource; + struct pipe_resource *texture = transfer->resource; + struct r600_resource_texture *rtex = (struct r600_resource_texture*)texture; if (rtransfer->staging_texture) { if (transfer->usage & PIPE_TRANSFER_WRITE) { @@ -643,9 +662,12 @@ void r600_texture_transfer_destroy(struct pipe_context *ctx, } pipe_resource_reference(&rtransfer->staging_texture, NULL); } - if (rtex->flushed_depth_texture) { - pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL); + + if (rtex->depth && !rtex->is_flushing_texture) { + if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtex->flushed_depth_texture) + r600_blit_push_depth(ctx, rtex); } + pipe_resource_reference(&transfer->resource, NULL); FREE(transfer); } @@ -727,19 +749,6 @@ void r600_texture_transfer_unmap(struct pipe_context *ctx, r600_bo_unmap(radeon, bo); } -struct u_resource_vtbl r600_texture_vtbl = -{ - 
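r600_texture_get_transfer, shown above, decides whether to map the resource directly or detour through a staging texture. A condensed sketch of that decision follows; the body of the large-read branch is cut off in this hunk and is assumed here to select the staging path as well.

#include <stdbool.h>

/* 'tiled' stands in for R600_TEX_IS_TILED(rtex, level), 'box_volume' for
 * u_box_volume(box), 'reading' for (usage & PIPE_TRANSFER_READ). */
static bool want_staging_texture(bool tiled, bool reading, unsigned box_volume)
{
	/* Per the comment in the hunk: the CPU is much happier reading out of
	 * cached system memory than uncached VRAM, so tiled resources are
	 * detiled into a linear staging texture first. */
	if (tiled)
		return true;

	/* Large readbacks also go through the staging copy (assumed; the
	 * branch body is truncated above). */
	if (reading && box_volume > 1024)
		return true;

	return false;
}

Depth textures take a separate route: r600_texture_depth_flush() decompresses them into rtex->flushed_depth_texture, and the transfer's stride and offset are then taken from that flushed copy, as the hunk shows.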
r600_texture_get_handle, /* get_handle */ - r600_texture_destroy, /* resource_destroy */ - r600_texture_is_referenced, /* is_resource_referenced */ - r600_texture_get_transfer, /* get_transfer */ - r600_texture_transfer_destroy, /* transfer_destroy */ - r600_texture_transfer_map, /* transfer_map */ - u_default_transfer_flush_region,/* transfer_flush_region */ - r600_texture_transfer_unmap, /* transfer_unmap */ - u_default_transfer_inline_write /* transfer_inline_write */ -}; - void r600_init_surface_functions(struct r600_pipe_context *r600) { r600->context.create_surface = r600_create_surface; @@ -802,6 +811,8 @@ uint32_t r600_translate_texformat(enum pipe_format format, uint32_t result = 0, word4 = 0, yuv_format = 0; const struct util_format_description *desc; boolean uniform = TRUE; + static int r600_enable_s3tc = -1; + int i; const uint32_t sign_bit[4] = { S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED), @@ -853,34 +864,55 @@ uint32_t r600_translate_texformat(enum pipe_format format, case UTIL_FORMAT_COLORSPACE_SRGB: word4 |= S_038010_FORCE_DEGAMMA(1); - if (format == PIPE_FORMAT_L8A8_SRGB || format == PIPE_FORMAT_L8_SRGB) - goto out_unknown; /* fails for some reason - TODO */ break; default: break; } - /* S3TC formats. TODO */ - if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) { - static int r600_enable_s3tc = -1; + if (r600_enable_s3tc == -1) + r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE); + + if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) { + if (!r600_enable_s3tc) + goto out_unknown; + + switch (format) { + case PIPE_FORMAT_RGTC1_UNORM: + case PIPE_FORMAT_RGTC1_SNORM: + result = FMT_BC4; + goto out_word4; + case PIPE_FORMAT_RGTC2_UNORM: + case PIPE_FORMAT_RGTC2_SNORM: + result = FMT_BC5; + goto out_word4; + default: + goto out_unknown; + } + } - if (r600_enable_s3tc == -1) - r600_enable_s3tc = - debug_get_bool_option("R600_ENABLE_S3TC", FALSE); + if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) { if (!r600_enable_s3tc) goto out_unknown; + if (!util_format_s3tc_enabled) { + goto out_unknown; + } + switch (format) { case PIPE_FORMAT_DXT1_RGB: case PIPE_FORMAT_DXT1_RGBA: + case PIPE_FORMAT_DXT1_SRGB: + case PIPE_FORMAT_DXT1_SRGBA: result = FMT_BC1; goto out_word4; case PIPE_FORMAT_DXT3_RGBA: + case PIPE_FORMAT_DXT3_SRGBA: result = FMT_BC2; goto out_word4; case PIPE_FORMAT_DXT5_RGBA: + case PIPE_FORMAT_DXT5_SRGBA: result = FMT_BC3; goto out_word4; default: @@ -897,8 +929,6 @@ uint32_t r600_translate_texformat(enum pipe_format format, /* R8G8Bx_SNORM - TODO CxV8U8 */ - /* RGTC - TODO */ - /* See whether the components are of the same size. 
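The compressed-texture support added to r600_translate_texformat above reduces to a small mapping, gated on the R600_ENABLE_S3TC debug option (and additionally on util_format_s3tc_enabled for the DXT formats; both gates are collapsed into a single flag in this sketch). The enums are stand-ins for the PIPE_FORMAT_* and FMT_* values.

enum compressed_fmt { CF_DXT1, CF_DXT3, CF_DXT5, CF_RGTC1, CF_RGTC2 };
enum hw_fmt { HW_FMT_UNKNOWN, HW_FMT_BC1, HW_FMT_BC2, HW_FMT_BC3, HW_FMT_BC4, HW_FMT_BC5 };

/* DXT1 (including its sRGB variants) maps to BC1, DXT3 to BC2, DXT5 to BC3,
 * RGTC1 to BC4 and RGTC2 to BC5; everything is refused when S3TC support is
 * not enabled. */
static enum hw_fmt compressed_hw_format(enum compressed_fmt f, int s3tc_enabled)
{
	if (!s3tc_enabled)
		return HW_FMT_UNKNOWN;

	switch (f) {
	case CF_DXT1:  return HW_FMT_BC1;
	case CF_DXT3:  return HW_FMT_BC2;
	case CF_DXT5:  return HW_FMT_BC3;
	case CF_RGTC1: return HW_FMT_BC4;
	case CF_RGTC2: return HW_FMT_BC5;
	}
	return HW_FMT_UNKNOWN;
}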
*/ for (i = 1; i < desc->nr_channels; i++) { uniform = uniform && desc->channel[0].size == desc->channel[i].size; @@ -927,7 +957,7 @@ uint32_t r600_translate_texformat(enum pipe_format format, desc->channel[1].size == 10 && desc->channel[2].size == 10 && desc->channel[3].size == 2) { - result = FMT_10_10_10_2; + result = FMT_2_10_10_10; goto out_word4; } goto out_unknown; @@ -990,6 +1020,19 @@ uint32_t r600_translate_texformat(enum pipe_format format, result = FMT_16_16_16_16; goto out_word4; } + goto out_unknown; + case 32: + switch (desc->nr_channels) { + case 1: + result = FMT_32; + goto out_word4; + case 2: + result = FMT_32_32; + goto out_word4; + case 4: + result = FMT_32_32_32_32; + goto out_word4; + } } goto out_unknown; diff --git a/src/gallium/drivers/r600/r600_translate.c b/src/gallium/drivers/r600/r600_translate.c index f80fa7af941..7482d15e12f 100644 --- a/src/gallium/drivers/r600/r600_translate.c +++ b/src/gallium/drivers/r600/r600_translate.c @@ -22,178 +22,34 @@ * * Authors: Dave Airlie <[email protected]> */ -#include "translate/translate_cache.h" -#include "translate/translate.h" -#include <pipebuffer/pb_buffer.h> + #include <util/u_index_modify.h> +#include "util/u_inlines.h" +#include "util/u_upload_mgr.h" #include "r600_pipe.h" -void r600_begin_vertex_translate(struct r600_pipe_context *rctx) -{ - struct pipe_context *pipe = &rctx->context; - struct translate_key key = {0}; - struct translate_element *te; - unsigned tr_elem_index[PIPE_MAX_ATTRIBS] = {0}; - struct translate *tr; - struct r600_vertex_element *ve = rctx->vertex_elements; - boolean vb_translated[PIPE_MAX_ATTRIBS] = {0}; - void *vb_map[PIPE_MAX_ATTRIBS] = {0}, *out_map; - struct pipe_transfer *vb_transfer[PIPE_MAX_ATTRIBS] = {0}, *out_transfer; - struct pipe_resource *out_buffer; - unsigned i, num_verts; - struct pipe_vertex_element new_velems[PIPE_MAX_ATTRIBS]; - void *tmp; - - /* Initialize the translate key, i.e. the recipe how vertices should be - * translated. */ - for (i = 0; i < ve->count; i++) { - struct pipe_vertex_buffer *vb = - &rctx->vertex_buffer[ve->elements[i].vertex_buffer_index]; - enum pipe_format output_format = ve->hw_format[i]; - unsigned output_format_size = ve->hw_format_size[i]; - - /* Check for support. */ - if (ve->elements[i].src_format == ve->hw_format[i]) { - continue; - } - - /* Workaround for translate: output floats instead of halfs. */ - switch (output_format) { - case PIPE_FORMAT_R16_FLOAT: - output_format = PIPE_FORMAT_R32_FLOAT; - output_format_size = 4; - break; - case PIPE_FORMAT_R16G16_FLOAT: - output_format = PIPE_FORMAT_R32G32_FLOAT; - output_format_size = 8; - break; - case PIPE_FORMAT_R16G16B16_FLOAT: - output_format = PIPE_FORMAT_R32G32B32_FLOAT; - output_format_size = 12; - break; - case PIPE_FORMAT_R16G16B16A16_FLOAT: - output_format = PIPE_FORMAT_R32G32B32A32_FLOAT; - output_format_size = 16; - break; - default:; - } - - /* Add this vertex element. */ - te = &key.element[key.nr_elements]; - /*te->type; - te->instance_divisor;*/ - te->input_buffer = ve->elements[i].vertex_buffer_index; - te->input_format = ve->elements[i].src_format; - te->input_offset = vb->buffer_offset + ve->elements[i].src_offset; - te->output_format = output_format; - te->output_offset = key.output_stride; - - key.output_stride += output_format_size; - vb_translated[ve->elements[i].vertex_buffer_index] = TRUE; - tr_elem_index[i] = key.nr_elements; - key.nr_elements++; - } - - /* Get a translate object. 
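The removed r600_begin_vertex_translate above contains a workaround worth noting: the translate module was asked to emit 32-bit floats wherever the output would otherwise have been a 16-bit float, widening the element size accordingly. A sketch of that mapping, with stand-in enum values in place of the PIPE_FORMAT_* names:

enum vtx_fmt {
	VF_R16_FLOAT, VF_R16G16_FLOAT, VF_R16G16B16_FLOAT, VF_R16G16B16A16_FLOAT,
	VF_R32_FLOAT, VF_R32G32_FLOAT, VF_R32G32B32_FLOAT, VF_R32G32B32A32_FLOAT
};

struct out_fmt { enum vtx_fmt format; unsigned size; /* bytes per element */ };

static struct out_fmt widen_half_floats(enum vtx_fmt fmt, unsigned size)
{
	struct out_fmt o = { fmt, size };

	switch (fmt) {
	case VF_R16_FLOAT:          o.format = VF_R32_FLOAT;          o.size = 4;  break;
	case VF_R16G16_FLOAT:       o.format = VF_R32G32_FLOAT;       o.size = 8;  break;
	case VF_R16G16B16_FLOAT:    o.format = VF_R32G32B32_FLOAT;    o.size = 12; break;
	case VF_R16G16B16A16_FLOAT: o.format = VF_R32G32B32A32_FLOAT; o.size = 16; break;
	default: break; /* all other formats pass through unchanged */
	}
	return o;
}

The whole translate fallback disappears in this merge, presumably superseded by the shared vertex-buffer manager referenced below in r600_translate_index_buffer (r600->vbuf_mgr).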
*/ - tr = translate_cache_find(rctx->tran.translate_cache, &key); - - /* Map buffers we want to translate. */ - for (i = 0; i < rctx->nvertex_buffer; i++) { - if (vb_translated[i]) { - struct pipe_vertex_buffer *vb = &rctx->vertex_buffer[i]; - - vb_map[i] = pipe_buffer_map(pipe, vb->buffer, - PIPE_TRANSFER_READ, &vb_transfer[i]); - - tr->set_buffer(tr, i, vb_map[i], vb->stride, vb->max_index); - } - } - - /* Create and map the output buffer. */ - num_verts = rctx->vb_max_index + 1; - - out_buffer = pipe_buffer_create(&rctx->screen->screen, - PIPE_BIND_VERTEX_BUFFER, - key.output_stride * num_verts); - - out_map = pipe_buffer_map(pipe, out_buffer, PIPE_TRANSFER_WRITE, - &out_transfer); - - /* Translate. */ - tr->run(tr, 0, num_verts, 0, out_map); - - /* Unmap all buffers. */ - for (i = 0; i < rctx->nvertex_buffer; i++) { - if (vb_translated[i]) { - pipe_buffer_unmap(pipe, vb_transfer[i]); - } - } - - pipe_buffer_unmap(pipe, out_transfer); - - /* Setup the new vertex buffer in the first free slot. */ - for (i = 0; i < PIPE_MAX_ATTRIBS; i++) { - struct pipe_vertex_buffer *vb = &rctx->vertex_buffer[i]; - - if (!vb->buffer) { - pipe_resource_reference(&vb->buffer, out_buffer); - vb->buffer_offset = 0; - vb->max_index = num_verts - 1; - vb->stride = key.output_stride; - rctx->tran.vb_slot = i; - break; - } - } - - /* Save and replace vertex elements. */ - for (i = 0; i < ve->count; i++) { - if (vb_translated[ve->elements[i].vertex_buffer_index]) { - te = &key.element[tr_elem_index[i]]; - new_velems[i].instance_divisor = ve->elements[i].instance_divisor; - new_velems[i].src_format = te->output_format; - new_velems[i].src_offset = te->output_offset; - new_velems[i].vertex_buffer_index = rctx->tran.vb_slot; - } else { - memcpy(&new_velems[i], &ve->elements[i], - sizeof(struct pipe_vertex_element)); - } - } - - tmp = pipe->create_vertex_elements_state(pipe, ve->count, new_velems); - pipe->bind_vertex_elements_state(pipe, tmp); - rctx->tran.new_velems = tmp; - - pipe_resource_reference(&out_buffer, NULL); -} - -void r600_end_vertex_translate(struct r600_pipe_context *rctx) -{ - struct pipe_context *pipe = &rctx->context; - - if (rctx->tran.new_velems == NULL) { - return; - } - /* Restore vertex elements. */ - pipe->delete_vertex_elements_state(pipe, rctx->tran.new_velems); - rctx->tran.new_velems = NULL; - - /* Delete the now-unused VBO. 
*/ - pipe_resource_reference(&rctx->vertex_buffer[rctx->tran.vb_slot].buffer, NULL); -} void r600_translate_index_buffer(struct r600_pipe_context *r600, - struct pipe_resource **index_buffer, - unsigned *index_size, - unsigned *start, unsigned count) + struct pipe_resource **index_buffer, + unsigned *index_size, + unsigned *start, unsigned count) { + struct pipe_resource *out_buffer = NULL; + unsigned out_offset; + void *ptr; + boolean flushed; + switch (*index_size) { case 1: - util_shorten_ubyte_elts(&r600->context, index_buffer, 0, *start, count); + u_upload_alloc(r600->vbuf_mgr->uploader, 0, count * 2, + &out_offset, &out_buffer, &flushed, &ptr); + + util_shorten_ubyte_elts_to_userptr( + &r600->context, *index_buffer, 0, *start, count, ptr); + + pipe_resource_reference(index_buffer, out_buffer); *index_size = 2; - *start = 0; - break; - case 2: - case 4: + *start = out_offset / 2; break; } } diff --git a/src/gallium/drivers/r600/r600_upload.c b/src/gallium/drivers/r600/r600_upload.c deleted file mode 100644 index 44102ff55b6..00000000000 --- a/src/gallium/drivers/r600/r600_upload.c +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2010 Jerome Glisse <[email protected]> - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * on the rights to use, copy, modify, merge, publish, distribute, sub - * license, and/or sell copies of the Software, and to permit persons to whom - * the Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
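The rewritten r600_translate_index_buffer above no longer patches the index buffer in place; it allocates count * 2 bytes from the u_upload_mgr uploader and has util_shorten_ubyte_elts_to_userptr() write widened indices into that space. What that helper effectively does for the 8-bit case is sketched below (simplified; the real function also takes an index bias, which is 0 here).

#include <stdint.h>

/* 'in' is the mapped 8-bit source index buffer, 'out' the CPU pointer that
 * u_upload_alloc() returned for the 16-bit copy. */
static void widen_ubyte_indices(const uint8_t *in, uint16_t *out,
				unsigned start, unsigned count)
{
	for (unsigned i = 0; i < count; i++)
		out[i] = in[start + i];
}

Because the widened indices live out_offset bytes into the upload buffer, the hunk rewrites *index_size to 2 and *start to out_offset / 2, i.e. the element index of the copy within that buffer.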
- * - * Authors: - * Jerome Glisse <[email protected]> - */ -#include <errno.h> -#include "util/u_inlines.h" -#include "util/u_memory.h" -#include "r600.h" -#include "r600_pipe.h" -#include "r600_resource.h" - -struct r600_upload { - struct r600_pipe_context *rctx; - struct r600_bo *buffer; - char *ptr; - unsigned size; - unsigned default_size; - unsigned total_alloc_size; - unsigned offset; - unsigned alignment; -}; - -struct r600_upload *r600_upload_create(struct r600_pipe_context *rctx, - unsigned default_size, - unsigned alignment) -{ - struct r600_upload *upload = CALLOC_STRUCT(r600_upload); - - if (upload == NULL) - return NULL; - - upload->rctx = rctx; - upload->size = 0; - upload->default_size = default_size; - upload->alignment = alignment; - upload->ptr = NULL; - upload->buffer = NULL; - upload->total_alloc_size = 0; - - return upload; -} - -void r600_upload_flush(struct r600_upload *upload) -{ - if (upload->buffer) { - r600_bo_reference(upload->rctx->radeon, &upload->buffer, NULL); - } - upload->default_size = MAX2(upload->total_alloc_size, upload->default_size); - upload->total_alloc_size = 0; - upload->size = 0; - upload->offset = 0; - upload->ptr = NULL; - upload->buffer = NULL; -} - -void r600_upload_destroy(struct r600_upload *upload) -{ - r600_upload_flush(upload); - FREE(upload); -} - -int r600_upload_buffer(struct r600_upload *upload, unsigned offset, - unsigned size, struct r600_resource_buffer *in_buffer, - unsigned *out_offset, unsigned *out_size, - struct r600_bo **out_buffer) -{ - unsigned alloc_size = align(size, upload->alignment); - const void *in_ptr = NULL; - - if (upload->offset + alloc_size > upload->size) { - if (upload->size) { - r600_bo_reference(upload->rctx->radeon, &upload->buffer, NULL); - } - upload->size = align(MAX2(upload->default_size, alloc_size), 4096); - upload->total_alloc_size += upload->size; - upload->offset = 0; - upload->buffer = r600_bo(upload->rctx->radeon, upload->size, 4096, PIPE_BIND_VERTEX_BUFFER, 0); - if (upload->buffer == NULL) { - return -ENOMEM; - } - upload->ptr = r600_bo_map(upload->rctx->radeon, upload->buffer, 0, NULL); - } - - in_ptr = in_buffer->user_buffer; - memcpy(upload->ptr + upload->offset, (uint8_t *) in_ptr + offset, size); - *out_offset = upload->offset; - *out_size = upload->size; - *out_buffer = NULL; - r600_bo_reference(upload->rctx->radeon, out_buffer, upload->buffer); - upload->offset += alloc_size; - - return 0; -} diff --git a/src/gallium/drivers/r600/r600d.h b/src/gallium/drivers/r600/r600d.h index 8c391936db0..e8558c49a7c 100644 --- a/src/gallium/drivers/r600/r600d.h +++ b/src/gallium/drivers/r600/r600d.h @@ -248,6 +248,8 @@ #define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27) #define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1) #define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF +#define V_0280A0_EXPORT_FULL 0 +#define V_0280A0_EXPORT_NORM 1 #define R_028060_CB_COLOR0_SIZE 0x028060 #define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0) #define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF) @@ -2332,31 +2334,6 @@ #define R_0280D4_CB_COLOR5_TILE 0x0280D4 #define R_0280D8_CB_COLOR6_TILE 0x0280D8 #define R_0280DC_CB_COLOR7_TILE 0x0280DC -#define R_028808_CB_COLOR_CONTROL 0x028808 -#define S_028808_FOG_ENABLE(x) (((x) & 0x1) << 0) -#define G_028808_FOG_ENABLE(x) (((x) >> 0) & 0x1) -#define C_028808_FOG_ENABLE 0xFFFFFFFE -#define S_028808_MULTIWRITE_ENABLE(x) (((x) & 0x1) << 1) -#define G_028808_MULTIWRITE_ENABLE(x) (((x) >> 1) & 0x1) -#define C_028808_MULTIWRITE_ENABLE 0xFFFFFFFD -#define S_028808_DITHER_ENABLE(x) 
(((x) & 0x1) << 2)
-#define G_028808_DITHER_ENABLE(x) (((x) >> 2) & 0x1)
-#define C_028808_DITHER_ENABLE 0xFFFFFFFB
-#define S_028808_DEGAMMA_ENABLE(x) (((x) & 0x1) << 3)
-#define G_028808_DEGAMMA_ENABLE(x) (((x) >> 3) & 0x1)
-#define C_028808_DEGAMMA_ENABLE 0xFFFFFFF7
-#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
-#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
-#define C_028808_SPECIAL_OP 0xFFFFFF8F
-#define S_028808_PER_MRT_BLEND(x) (((x) & 0x1) << 7)
-#define G_028808_PER_MRT_BLEND(x) (((x) >> 7) & 0x1)
-#define C_028808_PER_MRT_BLEND 0xFFFFFF7F
-#define S_028808_TARGET_BLEND_ENABLE(x) (((x) & 0xFF) << 8)
-#define G_028808_TARGET_BLEND_ENABLE(x) (((x) >> 8) & 0xFF)
-#define C_028808_TARGET_BLEND_ENABLE 0xFFFF00FF
-#define S_028808_ROP3(x) (((x) & 0xFF) << 16)
-#define G_028808_ROP3(x) (((x) >> 16) & 0xFF)
-#define C_028808_ROP3 0xFF00FFFF
#define R_028614_SPI_VS_OUT_ID_0 0x028614
#define S_028614_SEMANTIC_0(x) (((x) & 0xFF) << 0)
#define G_028614_SEMANTIC_0(x) (((x) >> 0) & 0xFF)
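A note on the register macros that dominate r600d.h, both the ones removed above and the ones that remain: every field gets an S_ macro that shifts a value into place, a G_ macro that extracts it, and a C_ mask that clears it. For the PITCH_TILE_MAX field of CB_COLOR0_SIZE shown earlier in this diff, they combine as sketched below (the C_ value is simply the complement of the field mask; the helper function is illustrative, not part of the driver).

#include <stdint.h>

#define S_028060_PITCH_TILE_MAX(x)   (((x) & 0x3FF) << 0)
#define G_028060_PITCH_TILE_MAX(x)   (((x) >> 0) & 0x3FF)
#define C_028060_PITCH_TILE_MAX      0xFFFFFC00

/* Replace just the PITCH_TILE_MAX field of an already packed register word. */
static uint32_t set_pitch_tile_max(uint32_t reg, uint32_t pitch_tile_max)
{
	return (reg & C_028060_PITCH_TILE_MAX) |
	       S_028060_PITCH_TILE_MAX(pitch_tile_max);
}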