Diffstat (limited to 'src/gallium/drivers/cell/ppu')
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_batch.c       |  73
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_batch.h       |   9
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_clear.c       |   5
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_flush.c       |  13
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_state_emit.c  |  43
-rw-r--r--  src/gallium/drivers/cell/ppu/cell_vbuf.c        |  16
6 files changed, 53 insertions(+), 106 deletions(-)
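
This change moves the Cell PPU batch-buffer interface from 8-byte to 16-byte granularity: cell_batch_append(), cell_batch_alloc() and cell_batch_alloc_aligned() collapse into a single cell_batch_alloc16(), every command now begins with a 16-byte opcode slot written through opcode[0], and STATIC_ASSERT()/ROUNDUP16() enforce the new size rules at each call site. The shared definitions are not part of this diff; the following is a minimal sketch of what they are assumed to look like (exact names and widths may differ in the driver's common headers):

/* Assumed support definitions -- a sketch only; the real declarations live
 * outside this diff, in the Cell driver's shared headers, and may differ.
 */
#include <stdint.h>

typedef uint32_t opcode_t[4];          /* one 16-byte opcode slot: the command
                                        * id goes in opcode[0], the remaining
                                        * three words are padding */

#define ROUNDUP16(x)  (((x) + 15) & ~15)   /* round a byte count up to 16 */

/* Block-scope compile-time assertion, as used throughout the patch. */
#define STATIC_ASSERT(e)  do { (void) sizeof(char [1 - 2 * !(e)]); } while (0)
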
diff --git a/src/gallium/drivers/cell/ppu/cell_batch.c b/src/gallium/drivers/cell/ppu/cell_batch.c
index 962775cd335..fe144f8b849 100644
--- a/src/gallium/drivers/cell/ppu/cell_batch.c
+++ b/src/gallium/drivers/cell/ppu/cell_batch.c
@@ -108,15 +108,16 @@ emit_fence(struct cell_context *cell)
fence->status[i][0] = CELL_FENCE_EMITTED;
}
+ STATIC_ASSERT(sizeof(struct cell_command_fence) % 16 == 0);
+ ASSERT(size % 16 == 0);
ASSERT(size + sizeof(struct cell_command_fence) <= CELL_BUFFER_SIZE);
fence_cmd = (struct cell_command_fence *) (cell->buffer[batch] + size);
- fence_cmd->opcode = CELL_CMD_FENCE;
+ fence_cmd->opcode[0] = CELL_CMD_FENCE;
fence_cmd->fence = fence;
/* update batch buffer size */
cell->buffer_size[batch] = size + sizeof(struct cell_command_fence);
- assert(sizeof(struct cell_command_fence) % 8 == 0);
}
@@ -192,69 +193,18 @@ cell_batch_free_space(const struct cell_context *cell)
/**
- * Append data to the current batch buffer.
- * \param data address of block of bytes to append
- * \param bytes size of block of bytes
- */
-void
-cell_batch_append(struct cell_context *cell, const void *data, uint bytes)
-{
- uint size;
-
- ASSERT(bytes % 8 == 0);
- ASSERT(bytes <= CELL_BUFFER_SIZE);
- ASSERT(cell->cur_batch >= 0);
-
-#ifdef ASSERT
- {
- uint spu;
- for (spu = 0; spu < cell->num_spus; spu++) {
- ASSERT(cell->buffer_status[spu][cell->cur_batch][0]
- == CELL_BUFFER_STATUS_USED);
- }
- }
-#endif
-
- size = cell->buffer_size[cell->cur_batch];
-
- if (bytes > cell_batch_free_space(cell)) {
- cell_batch_flush(cell);
- size = 0;
- }
-
- ASSERT(size + bytes <= CELL_BUFFER_SIZE);
-
- memcpy(cell->buffer[cell->cur_batch] + size, data, bytes);
-
- cell->buffer_size[cell->cur_batch] = size + bytes;
-}
-
-
-/**
* Allocate space in the current batch buffer for 'bytes' space.
+ * Bytes must be a multiple of 16 bytes. Allocation will be 16 byte aligned.
* \return address in batch buffer to put data
*/
void *
-cell_batch_alloc(struct cell_context *cell, uint bytes)
-{
- return cell_batch_alloc_aligned(cell, bytes, 1);
-}
-
-
-/**
- * Same as \sa cell_batch_alloc, but return an address at a particular
- * alignment.
- */
-void *
-cell_batch_alloc_aligned(struct cell_context *cell, uint bytes,
- uint alignment)
+cell_batch_alloc16(struct cell_context *cell, uint bytes)
{
void *pos;
- uint size, padbytes;
+ uint size;
- ASSERT(bytes % 8 == 0);
+ ASSERT(bytes % 16 == 0);
ASSERT(bytes <= CELL_BUFFER_SIZE);
- ASSERT(alignment > 0);
ASSERT(cell->cur_batch >= 0);
#ifdef ASSERT
@@ -269,17 +219,12 @@ cell_batch_alloc_aligned(struct cell_context *cell, uint bytes,
size = cell->buffer_size[cell->cur_batch];
- padbytes = (alignment - (size % alignment)) % alignment;
-
- if (padbytes + bytes > cell_batch_free_space(cell)) {
+ if (bytes > cell_batch_free_space(cell)) {
cell_batch_flush(cell);
size = 0;
}
- else {
- size += padbytes;
- }
- ASSERT(size % alignment == 0);
+ ASSERT(size % 16 == 0);
ASSERT(size + bytes <= CELL_BUFFER_SIZE);
pos = (void *) (cell->buffer[cell->cur_batch] + size);
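
With the append and aligned-alloc variants removed above, every emit path now follows one pattern: assert at compile time that the command struct is a 16-byte multiple, allocate exactly that many bytes with cell_batch_alloc16(), and write the command id into opcode[0]. A hedged illustration with a made-up command (the struct, opcode value and function below are hypothetical, not part of the patch):

struct cell_command_example {        /* hypothetical command, for illustration */
   opcode_t opcode;                  /* bytes  0..15: command id in opcode[0]  */
   uint32_t value;                   /* bytes 16..19: payload                  */
   uint32_t pad[3];                  /* bytes 20..31: pad to a 16-byte multiple */
};

static void
emit_example(struct cell_context *cell, uint32_t value)
{
   STATIC_ASSERT(sizeof(struct cell_command_example) % 16 == 0);
   struct cell_command_example *cmd = (struct cell_command_example *)
      cell_batch_alloc16(cell, sizeof(*cmd));
   cmd->opcode[0] = CELL_CMD_EXAMPLE;   /* hypothetical opcode value */
   cmd->value = value;
}
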
diff --git a/src/gallium/drivers/cell/ppu/cell_batch.h b/src/gallium/drivers/cell/ppu/cell_batch.h
index f74dd600791..290136031a1 100644
--- a/src/gallium/drivers/cell/ppu/cell_batch.h
+++ b/src/gallium/drivers/cell/ppu/cell_batch.h
@@ -44,15 +44,8 @@ cell_batch_flush(struct cell_context *cell);
extern uint
cell_batch_free_space(const struct cell_context *cell);
-extern void
-cell_batch_append(struct cell_context *cell, const void *data, uint bytes);
-
-extern void *
-cell_batch_alloc(struct cell_context *cell, uint bytes);
-
extern void *
-cell_batch_alloc_aligned(struct cell_context *cell, uint bytes,
- uint alignment);
+cell_batch_alloc16(struct cell_context *cell, uint bytes);
extern void
cell_init_batch_buffers(struct cell_context *cell);
diff --git a/src/gallium/drivers/cell/ppu/cell_clear.c b/src/gallium/drivers/cell/ppu/cell_clear.c
index 037635e4660..c2e276988ca 100644
--- a/src/gallium/drivers/cell/ppu/cell_clear.c
+++ b/src/gallium/drivers/cell/ppu/cell_clear.c
@@ -99,10 +99,11 @@ cell_clear_surface(struct pipe_context *pipe, struct pipe_surface *ps,
/* Build a CLEAR command and place it in the current batch buffer */
{
+ STATIC_ASSERT(sizeof(struct cell_command_clear_surface) % 16 == 0);
struct cell_command_clear_surface *clr
= (struct cell_command_clear_surface *)
- cell_batch_alloc(cell, sizeof(*clr));
- clr->opcode = CELL_CMD_CLEAR_SURFACE;
+ cell_batch_alloc16(cell, sizeof(*clr));
+ clr->opcode[0] = CELL_CMD_CLEAR_SURFACE;
clr->surface = surfIndex;
clr->value = clearValue;
}
diff --git a/src/gallium/drivers/cell/ppu/cell_flush.c b/src/gallium/drivers/cell/ppu/cell_flush.c
index a64967b4b9e..8275c9dc9c7 100644
--- a/src/gallium/drivers/cell/ppu/cell_flush.c
+++ b/src/gallium/drivers/cell/ppu/cell_flush.c
@@ -72,8 +72,9 @@ cell_flush_int(struct cell_context *cell, unsigned flags)
flushing = TRUE;
if (flags & CELL_FLUSH_WAIT) {
- uint64_t *cmd = (uint64_t *) cell_batch_alloc(cell, sizeof(uint64_t));
- *cmd = CELL_CMD_FINISH;
+ STATIC_ASSERT(sizeof(opcode_t) % 16 == 0);
+ opcode_t *cmd = (opcode_t*) cell_batch_alloc16(cell, sizeof(opcode_t));
+ *cmd[0] = CELL_CMD_FINISH;
}
cell_batch_flush(cell);
@@ -101,11 +102,11 @@ void
cell_flush_buffer_range(struct cell_context *cell, void *ptr,
unsigned size)
{
- uint64_t batch[1 + (ROUNDUP8(sizeof(struct cell_buffer_range)) / 8)];
- struct cell_buffer_range *br = (struct cell_buffer_range *) & batch[1];
-
+ STATIC_ASSERT((sizeof(opcode_t) + sizeof(struct cell_buffer_range)) % 16 == 0);
+ uint32_t *batch = (uint32_t*)cell_batch_alloc16(cell,
+ sizeof(opcode_t) + sizeof(struct cell_buffer_range));
+ struct cell_buffer_range *br = (struct cell_buffer_range *) &batch[4];
batch[0] = CELL_CMD_FLUSH_BUFFER_RANGE;
br->base = (uintptr_t) ptr;
br->size = size;
- cell_batch_append(cell, batch, sizeof(batch));
}
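
cell_flush_buffer_range() now writes its command straight into the batch buffer instead of staging it on the stack and copying it with cell_batch_append(). Viewed as 32-bit words, the command id lands in batch[0], the rest of the 16-byte opcode slot is padding, and the cell_buffer_range payload begins at &batch[4] (byte offset 16). A small layout check under the same assumed definitions as above (the exact shape of cell_buffer_range is also an assumption; only its base and size members appear in the diff):

#include <assert.h>

struct cell_buffer_range {     /* assumed shape, padded to a 16-byte multiple */
   uint64_t base;
   uint64_t size;
};

static void
check_flush_range_layout(void)
{
   uint32_t batch[8];          /* stand-in for the cell_batch_alloc16() block */
   struct cell_buffer_range *br = (struct cell_buffer_range *) &batch[4];

   assert((char *) br - (char *) batch == 16);           /* payload at byte 16 */
   assert((sizeof(opcode_t) + sizeof(*br)) % 16 == 0);   /* what the STATIC_ASSERT checks */
}
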
diff --git a/src/gallium/drivers/cell/ppu/cell_state_emit.c b/src/gallium/drivers/cell/ppu/cell_state_emit.c
index 0a0af81f53f..39b85faeb86 100644
--- a/src/gallium/drivers/cell/ppu/cell_state_emit.c
+++ b/src/gallium/drivers/cell/ppu/cell_state_emit.c
@@ -133,7 +133,7 @@ lookup_fragment_ops(struct cell_context *cell)
*/
ops = CALLOC_VARIANT_LENGTH_STRUCT(cell_command_fragment_ops, total_code_size);
/* populate the new cell_command_fragment_ops object */
- ops->opcode = CELL_CMD_STATE_FRAGMENT_OPS;
+ ops->opcode[0] = CELL_CMD_STATE_FRAGMENT_OPS;
ops->total_code_size = total_code_size;
ops->front_code_index = 0;
memcpy(ops->code, spe_code_front.store, front_code_size);
@@ -178,10 +178,10 @@ static void
emit_state_cmd(struct cell_context *cell, uint cmd,
const void *state, uint state_size)
{
- uint64_t *dst = (uint64_t *)
- cell_batch_alloc(cell, ROUNDUP8(sizeof(uint64_t) + state_size));
+ uint32_t *dst = (uint32_t *)
+ cell_batch_alloc16(cell, ROUNDUP16(sizeof(opcode_t) + state_size));
*dst = cmd;
- memcpy(dst + 1, state, state_size);
+ memcpy(dst + 4, state, state_size);
}
@@ -195,9 +195,10 @@ cell_emit_state(struct cell_context *cell)
if (cell->dirty & CELL_NEW_FRAMEBUFFER) {
struct pipe_surface *cbuf = cell->framebuffer.cbufs[0];
struct pipe_surface *zbuf = cell->framebuffer.zsbuf;
+ STATIC_ASSERT(sizeof(struct cell_command_framebuffer) % 16 == 0);
struct cell_command_framebuffer *fb
- = cell_batch_alloc(cell, sizeof(*fb));
- fb->opcode = CELL_CMD_STATE_FRAMEBUFFER;
+ = cell_batch_alloc16(cell, sizeof(*fb));
+ fb->opcode[0] = CELL_CMD_STATE_FRAMEBUFFER;
fb->color_start = cell->cbuf_map[0];
fb->color_format = cbuf->format;
fb->depth_start = cell->zsbuf_map;
@@ -211,17 +212,19 @@ cell_emit_state(struct cell_context *cell)
}
if (cell->dirty & (CELL_NEW_RASTERIZER)) {
+ STATIC_ASSERT(sizeof(struct cell_command_rasterizer) % 16 == 0);
struct cell_command_rasterizer *rast =
- cell_batch_alloc(cell, sizeof(*rast));
- rast->opcode = CELL_CMD_STATE_RASTERIZER;
+ cell_batch_alloc16(cell, sizeof(*rast));
+ rast->opcode[0] = CELL_CMD_STATE_RASTERIZER;
rast->rasterizer = *cell->rasterizer;
}
if (cell->dirty & (CELL_NEW_FS)) {
/* Send new fragment program to SPUs */
+ STATIC_ASSERT(sizeof(struct cell_command_fragment_program) % 16 == 0);
struct cell_command_fragment_program *fp
- = cell_batch_alloc(cell, sizeof(*fp));
- fp->opcode = CELL_CMD_STATE_FRAGMENT_PROGRAM;
+ = cell_batch_alloc16(cell, sizeof(*fp));
+ fp->opcode[0] = CELL_CMD_STATE_FRAGMENT_PROGRAM;
fp->num_inst = cell->fs->code.num_inst;
memcpy(&fp->code, cell->fs->code.store,
SPU_MAX_FRAGMENT_PROGRAM_INSTS * SPE_INST_SIZE);
@@ -238,14 +241,14 @@ cell_emit_state(struct cell_context *cell)
const uint shader = PIPE_SHADER_FRAGMENT;
const uint num_const = cell->constants[shader].size / sizeof(float);
uint i, j;
- float *buf = cell_batch_alloc(cell, 16 + num_const * sizeof(float));
- uint64_t *ibuf = (uint64_t *) buf;
+ float *buf = cell_batch_alloc16(cell, ROUNDUP16(32 + num_const * sizeof(float)));
+ uint32_t *ibuf = (uint32_t *) buf;
const float *constants = pipe_buffer_map(cell->pipe.screen,
cell->constants[shader].buffer,
PIPE_BUFFER_USAGE_CPU_READ);
ibuf[0] = CELL_CMD_STATE_FS_CONSTANTS;
- ibuf[1] = num_const;
- j = 4;
+ ibuf[4] = num_const;
+ j = 8;
for (i = 0; i < num_const; i++) {
buf[j++] = constants[i];
}
@@ -258,7 +261,7 @@ cell_emit_state(struct cell_context *cell)
struct cell_command_fragment_ops *fops, *fops_cmd;
/* Note that cell_command_fragment_ops is a variant-sized record */
fops = lookup_fragment_ops(cell);
- fops_cmd = cell_batch_alloc(cell, sizeof(*fops_cmd) + fops->total_code_size);
+ fops_cmd = cell_batch_alloc16(cell, ROUNDUP16(sizeof(*fops_cmd) + fops->total_code_size));
memcpy(fops_cmd, fops, sizeof(*fops) + fops->total_code_size);
}
@@ -267,9 +270,10 @@ cell_emit_state(struct cell_context *cell)
for (i = 0; i < CELL_MAX_SAMPLERS; i++) {
if (cell->dirty_samplers & (1 << i)) {
if (cell->sampler[i]) {
+ STATIC_ASSERT(sizeof(struct cell_command_sampler) % 16 == 0);
struct cell_command_sampler *sampler
- = cell_batch_alloc(cell, sizeof(*sampler));
- sampler->opcode = CELL_CMD_STATE_SAMPLER;
+ = cell_batch_alloc16(cell, sizeof(*sampler));
+ sampler->opcode[0] = CELL_CMD_STATE_SAMPLER;
sampler->unit = i;
sampler->state = *cell->sampler[i];
}
@@ -282,9 +286,10 @@ cell_emit_state(struct cell_context *cell)
uint i;
for (i = 0;i < CELL_MAX_SAMPLERS; i++) {
if (cell->dirty_textures & (1 << i)) {
+ STATIC_ASSERT(sizeof(struct cell_command_texture) % 16 == 0);
struct cell_command_texture *texture
- = cell_batch_alloc(cell, sizeof(*texture));
- texture->opcode = CELL_CMD_STATE_TEXTURE;
+ = (struct cell_command_texture *)cell_batch_alloc16(cell, sizeof(*texture));
+ texture->opcode[0] = CELL_CMD_STATE_TEXTURE;
texture->unit = i;
if (cell->texture[i]) {
uint level;
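
The state-emit paths use the same slotting: emit_state_cmd() writes the command id into the first 32-bit word and copies the state payload at dst + 4, i.e. byte offset 16, rounding the total up with ROUNDUP16(). The fragment-shader constants command spends a second 16-byte slot on the constant count (ibuf[4]), so the float data starts at byte 32 (buf[8]). A worked sizing example under the assumed ROUNDUP16() above:

/* Sizing of the FS-constants command for num_const constants:
 * bytes 0..15 opcode slot, bytes 16..31 constant count, bytes 32.. floats. */
static unsigned
fs_constants_cmd_size(unsigned num_const)
{
   return ROUNDUP16(32 + num_const * sizeof(float));
}

/* e.g. fs_constants_cmd_size(5) == ROUNDUP16(52) == 64: the five floats
 * occupy bytes 32..51 (buf[8]..buf[12]) and the allocation stays a
 * 16-byte multiple. */
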
diff --git a/src/gallium/drivers/cell/ppu/cell_vbuf.c b/src/gallium/drivers/cell/ppu/cell_vbuf.c
index 65ba51b6bb2..ab54e796895 100644
--- a/src/gallium/drivers/cell/ppu/cell_vbuf.c
+++ b/src/gallium/drivers/cell/ppu/cell_vbuf.c
@@ -116,10 +116,11 @@ cell_vbuf_release_vertices(struct vbuf_render *vbr, void *vertices,
/* Tell SPUs they can release the vert buf */
if (cvbr->vertex_buf != ~0U) {
+ STATIC_ASSERT(sizeof(struct cell_command_release_verts) % 16 == 0);
struct cell_command_release_verts *release
= (struct cell_command_release_verts *)
- cell_batch_alloc(cell, sizeof(struct cell_command_release_verts));
- release->opcode = CELL_CMD_RELEASE_VERTS;
+ cell_batch_alloc16(cell, sizeof(struct cell_command_release_verts));
+ release->opcode[0] = CELL_CMD_RELEASE_VERTS;
release->vertex_buf = cvbr->vertex_buf;
}
@@ -210,15 +211,16 @@ cell_vbuf_draw(struct vbuf_render *vbr,
/* build/insert batch RENDER command */
{
- const uint index_bytes = ROUNDUP8(nr_indices * 2);
- const uint vertex_bytes = nr_vertices * 4 * cell->vertex_info.size;
+ const uint index_bytes = ROUNDUP16(nr_indices * 2);
+ const uint vertex_bytes = ROUNDUP16(nr_vertices * 4 * cell->vertex_info.size);
+ STATIC_ASSERT(sizeof(struct cell_command_render) % 16 == 0);
const uint batch_size = sizeof(struct cell_command_render) + index_bytes;
struct cell_command_render *render
= (struct cell_command_render *)
- cell_batch_alloc(cell, batch_size);
+ cell_batch_alloc16(cell, batch_size);
- render->opcode = CELL_CMD_RENDER;
+ render->opcode[0] = CELL_CMD_RENDER;
render->prim_type = cvbr->prim;
render->num_indexes = nr_indices;
@@ -236,7 +238,7 @@ cell_vbuf_draw(struct vbuf_render *vbr,
min_index == 0 &&
vertex_bytes + 16 <= cell_batch_free_space(cell)) {
/* vertex data inlined, after indices, at 16-byte boundary */
- void *dst = cell_batch_alloc_aligned(cell, vertex_bytes, 16);
+ void *dst = cell_batch_alloc16(cell, vertex_bytes);
memcpy(dst, vertices, vertex_bytes);
render->inline_verts = TRUE;
render->vertex_buf = ~0;
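
In the vertex-buffer path the index block and the inlined vertex block are each padded to a 16-byte multiple before going into the batch, so the RENDER command, its indices and the inline vertices all start on 16-byte boundaries, which keeps them friendly to SPU-side DMA. A worked sizing sketch, reusing the assumed ROUNDUP16() above and taking vertex_info.size as the per-vertex size in 32-bit words:

static void
render_sizing_example(void)
{
   const unsigned nr_indices  = 6;   /* e.g. two triangles */
   const unsigned nr_vertices = 4;
   const unsigned vertex_size = 8;   /* words per vertex -> 32 bytes each */

   const unsigned index_bytes  = ROUNDUP16(nr_indices * 2);                /* 12 -> 16 */
   const unsigned vertex_bytes = ROUNDUP16(nr_vertices * 4 * vertex_size); /* 128      */

   /* One allocation holds the RENDER command plus the padded indices;
    * a second cell_batch_alloc16(cell, vertex_bytes) holds the inlined
    * vertices, so both blocks begin on 16-byte boundaries. */
   (void) index_bytes;
   (void) vertex_bytes;
}
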