/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_eu_compact.c
 *
 * Instruction compaction is a feature of gm45 and newer hardware that allows
 * for a smaller instruction encoding.
 *
 * The instruction cache is on the order of 32KB, and many programs generate
 * far more instructions than that.  The instruction cache is built to barely
 * keep up with instruction dispatch ability in cache hit cases -- L1
 * instruction cache misses that still hit in the next level could limit
 * throughput by around 50%.
 *
 * The idea of instruction compaction is that most instructions use a tiny
 * subset of the GPU functionality, so we can encode what would be a 16 byte
 * instruction in 8 bytes using some lookup tables for various fields.
 */
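
/* A rough sketch of how this file is used, as a reader's note rather than a
 * contract (the actual call sites live elsewhere in the driver, so treat the
 * sequence below as an assumption):
 *
 *    brw_init_compaction_tables(brw);              // bind gen6 or gen7 tables
 *    ... emit 16-byte instructions into p->store ...
 *    brw_compact_instructions(p, 0, num_annotations, annotation);
 *
 * brw_compact_instructions() walks the emitted instructions, rewrites each
 * compactable one as an 8-byte brw_compact_inst via
 * brw_try_compact_instruction(), and then patches jump offsets and
 * annotation offsets to account for the instructions that moved.
 */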

#include "brw_context.h"
#include "brw_eu.h"
#include "intel_asm_printer.h"

static const uint32_t gen6_control_index_table[32] = {
   0b00000000000000000, 0b01000000000000000, 0b00110000000000000, 0b00000000100000000,
   0b00010000000000000, 0b00001000100000000, 0b00000000100000010, 0b00000000000000010,
   0b01000000100000000, 0b01010000000000000, 0b10110000000000000, 0b00100000000000000,
   0b11010000000000000, 0b11000000000000000, 0b01001000100000000, 0b01000000000001000,
   0b01000000000000100, 0b00000000000001000, 0b00000000000000100, 0b00111000100000000,
   0b00001000100000010, 0b00110000100000000, 0b00110000000000001, 0b00100000000000001,
   0b00110000000000010, 0b00110000000000101, 0b00110000000001001, 0b00110000000010000,
   0b00110000000000011, 0b00110000000000100, 0b00110000100001000, 0b00100000000001001
};

static const uint32_t gen6_datatype_table[32] = {
   0b001001110000000000, 0b001000110000100000, 0b001001110000000001, 0b001000000001100000,
   0b001010110100101001, 0b001000000110101101, 0b001100011000101100, 0b001011110110101101,
   0b001000000111101100, 0b001000000001100001, 0b001000110010100101, 0b001000000001000001,
   0b001000001000110001, 0b001000001000101001, 0b001000000000100000, 0b001000001000110010,
   0b001010010100101001, 0b001011010010100101, 0b001000000110100101, 0b001100011000101001,
   0b001011011000101100, 0b001011010110100101, 0b001011110110100101, 0b001111011110111101,
   0b001111011110111100, 0b001111011110111101, 0b001111011110011101, 0b001111011110111110,
   0b001000000000100001, 0b001000000000100010, 0b001001111111011101, 0b001000001110111110,
};

static const uint16_t gen6_subreg_table[32] = {
   0b000000000000000, 0b000000000000100, 0b000000110000000, 0b111000000000000,
   0b011110000001000, 0b000010000000000, 0b000000000010000, 0b000110000001100,
   0b001000000000000, 0b000001000000000, 0b000001010010100, 0b000000001010110,
   0b010000000000000, 0b110000000000000, 0b000100000000000, 0b000000010000000,
   0b000000000001000, 0b100000000000000, 0b000001010000000, 0b001010000000000,
   0b001100000000000, 0b000000001010100, 0b101101010010100, 0b010100000000000,
   0b000000010001111, 0b011000000000000, 0b111110000000000, 0b101000000000000,
   0b000000000001111, 0b000100010001111, 0b001000010001111, 0b000110000000000,
};

static const uint16_t gen6_src_index_table[32] = {
   0b000000000000, 0b010110001000, 0b010001101000, 0b001000101000,
   0b011010010000, 0b000100100000, 0b010001101100, 0b010101110000,
   0b011001111000, 0b001100101000, 0b010110001100, 0b001000100000,
   0b010110001010, 0b000000000010, 0b010101010000, 0b010101101000,
   0b111101001100, 0b111100101100, 0b011001110000, 0b010110001001,
   0b010101011000, 0b001101001000, 0b010000101100, 0b010000000000,
   0b001101110000, 0b001100010000, 0b001100000000, 0b010001101010,
   0b001101111000, 0b000001110000, 0b001100100000, 0b001101010000,
};
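
/* Each of these tables (and the gen7 ones below) has exactly 32 entries
 * because the compact encoding stores a 5-bit index in place of the
 * corresponding group of instruction bits.  If an instruction's bits don't
 * match any table entry, the set_*_index() helpers below return false and
 * the instruction simply stays in its full 16-byte form.  As an example of
 * how an entry decomposes, a gen6 control-index value packs instruction bit
 * 31 into bit 16 and instruction bits 23:8 into bits 15:0 (see
 * set_control_index() and set_uncompacted_control()).
 */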

static const uint32_t gen7_control_index_table[32] = {
   0b0000000000000000010, 0b0000100000000000000, 0b0000100000000000001, 0b0000100000000000010,
   0b0000100000000000011, 0b0000100000000000100, 0b0000100000000000101, 0b0000100000000000111,
   0b0000100000000001000, 0b0000100000000001001, 0b0000100000000001101, 0b0000110000000000000,
   0b0000110000000000001, 0b0000110000000000010, 0b0000110000000000011, 0b0000110000000000100,
   0b0000110000000000101, 0b0000110000000000111, 0b0000110000000001001, 0b0000110000000001101,
   0b0000110000000010000, 0b0000110000100000000, 0b0001000000000000000, 0b0001000000000000010,
   0b0001000000000000100, 0b0001000000100000000, 0b0010110000000000000, 0b0010110000000010000,
   0b0011000000000000000, 0b0011000000100000000, 0b0101000000000000000, 0b0101000000100000000
};

static const uint32_t gen7_datatype_table[32] = {
   0b001000000000000001, 0b001000000000100000, 0b001000000000100001, 0b001000000001100001,
   0b001000000010111101, 0b001000001011111101, 0b001000001110100001, 0b001000001110100101,
   0b001000001110111101, 0b001000010000100001, 0b001000110000100000, 0b001000110000100001,
   0b001001010010100101, 0b001001110010100100, 0b001001110010100101, 0b001111001110111101,
   0b001111011110011101, 0b001111011110111100, 0b001111011110111101, 0b001111111110111100,
   0b000000001000001100, 0b001000000000111101, 0b001000000010100101, 0b001000010000100000,
   0b001001010010100100, 0b001001110010000100, 0b001010010100001001, 0b001101111110111101,
   0b001111111110111101, 0b001011110110101100, 0b001010010100101000, 0b001010110100101000
};

static const uint16_t gen7_subreg_table[32] = {
   0b000000000000000, 0b000000000000001, 0b000000000001000, 0b000000000001111,
   0b000000000010000, 0b000000010000000, 0b000000100000000, 0b000000110000000,
   0b000001000000000, 0b000001000010000, 0b000010100000000, 0b001000000000000,
   0b001000000000001, 0b001000010000001, 0b001000010000010, 0b001000010000011,
   0b001000010000100, 0b001000010000111, 0b001000010001000, 0b001000010001110,
   0b001000010001111, 0b001000110000000, 0b001000111101000, 0b010000000000000,
   0b010000110000000, 0b011000000000000, 0b011110010000111, 0b100000000000000,
   0b101000000000000, 0b110000000000000, 0b111000000000000, 0b111000000011100
};

static const uint16_t gen7_src_index_table[32] = {
   0b000000000000, 0b000000000010, 0b000000010000, 0b000000010010,
   0b000000011000, 0b000000100000, 0b000000101000, 0b000001001000,
   0b000001010000, 0b000001110000, 0b000001111000, 0b001100000000,
   0b001100000010, 0b001100001000, 0b001100010000, 0b001100010010,
   0b001100100000, 0b001100101000, 0b001100111000, 0b001101000000,
   0b001101000010, 0b001101001000, 0b001101010000, 0b001101100000,
   0b001101101000, 0b001101110000, 0b001101110001, 0b001101111000,
   0b010001101000, 0b010001101001, 0b010001101010, 0b010110001000
};

static const uint32_t *control_index_table;
static const uint32_t *datatype_table;
static const uint16_t *subreg_table;
static const uint16_t *src_index_table;
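
/* These pointers are bound to the gen6 or gen7 tables by
 * brw_init_compaction_tables() and are left unset for other generations.
 * On the paths in this file that is harmless, since
 * brw_compact_instructions() returns early for gen < 6 and the lookup
 * helpers below are never reached.
 */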

static bool
set_control_index(struct brw_context *brw, brw_compact_inst *dst, brw_inst *src)
{
   uint32_t uncompacted =                  /* 17b/SNB; 19b/IVB+ */
      (brw_inst_bits(src, 31, 31) << 16) | /* 1b */
      (brw_inst_bits(src, 23, 8));         /* 16b */

   /* On gen7, the flag register and subregister numbers are integrated into
    * the control index.
    */
   if (brw->gen >= 7)
      uncompacted |= brw_inst_bits(src, 90, 89) << 17; /* 2b */

   for (int i = 0; i < 32; i++) {
      if (control_index_table[i] == uncompacted) {
         brw_compact_inst_set_control_index(dst, i);
         return true;
      }
   }

   return false;
}

static bool
set_datatype_index(struct brw_context *brw, brw_compact_inst *dst,
                   brw_inst *src)
{
   uint32_t uncompacted =                  /* 18b */
      (brw_inst_bits(src, 63, 61) << 15) | /* 3b */
      (brw_inst_bits(src, 46, 32));        /* 15b */

   for (int i = 0; i < 32; i++) {
      if (datatype_table[i] == uncompacted) {
         brw_compact_inst_set_datatype_index(dst, i);
         return true;
      }
   }

   return false;
}

static bool
set_subreg_index(struct brw_context *brw, brw_compact_inst *dst, brw_inst *src,
                 bool is_immediate)
{
   uint16_t uncompacted =                 /* 15b */
      (brw_inst_bits(src, 52, 48) << 0) | /* 5b */
      (brw_inst_bits(src, 68, 64) << 5);  /* 5b */

   if (!is_immediate)
      uncompacted |= brw_inst_bits(src, 100, 96) << 10; /* 5b */

   for (int i = 0; i < 32; i++) {
      if (subreg_table[i] == uncompacted) {
         brw_compact_inst_set_subreg_index(dst, i);
         return true;
      }
   }

   return false;
}

static bool
get_src_index(uint16_t uncompacted, uint16_t *compacted)
{
   for (int i = 0; i < 32; i++) {
      if (src_index_table[i] == uncompacted) {
         *compacted = i;
         return true;
      }
   }

   return false;
}

static bool
set_src0_index(struct brw_context *brw, brw_compact_inst *dst, brw_inst *src)
{
   uint16_t compacted;
   uint16_t uncompacted = brw_inst_bits(src, 88, 77); /* 12b */

   if (!get_src_index(uncompacted, &compacted))
      return false;

   brw_compact_inst_set_src0_index(dst, compacted);

   return true;
}

static bool
set_src1_index(struct brw_context *brw, brw_compact_inst *dst, brw_inst *src,
               bool is_immediate)
{
   uint16_t compacted;

   if (is_immediate) {
      compacted = (brw_inst_imm_ud(brw, src) >> 8) & 0x1f;
   } else {
      uint16_t uncompacted = brw_inst_bits(src, 120, 109); /* 12b */

      if (!get_src_index(uncompacted, &compacted))
         return false;
   }

   brw_compact_inst_set_src1_index(dst, compacted);

   return true;
}

/* Compacted instructions have 12 bits for immediate sources, and a 13th bit
 * that's replicated through the high 20 bits.
 *
 * Effectively this means we get 12-bit integers, 0.0f, and some limited uses
 * of packed vectors as compactable immediates.
 */
static bool
is_compactable_immediate(unsigned imm)
{
   /* We get the low 12 bits as-is. */
   imm &= ~0xfff;

   /* We get one bit replicated through the top 20 bits. */
   return imm == 0 || imm == 0xfffff000;
}
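
/* A few concrete cases of the rule above, derived from the masks in
 * is_compactable_immediate() rather than from separate documentation:
 *
 *    is_compactable_immediate(0x00000000) -> true   (0, and also 0.0f)
 *    is_compactable_immediate(0x00000fff) -> true   (low 12 bits kept as-is)
 *    is_compactable_immediate(0xfffff800) -> true   (high 20 bits all ones)
 *    is_compactable_immediate(0x00001000) -> false  (bit 12 set, bits 31:13 clear)
 *    is_compactable_immediate(0x3f800000) -> false  (1.0f doesn't fit)
 */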

/**
 * Tries to compact instruction src into dst.
 *
 * It doesn't modify dst unless src is compactable, which is relied on by
 * brw_compact_instructions().
 */
bool
brw_try_compact_instruction(struct brw_context *brw, brw_compact_inst *dst,
                            brw_inst *src)
{
   brw_compact_inst temp;

   if (brw_inst_opcode(brw, src) == BRW_OPCODE_IF ||
       brw_inst_opcode(brw, src) == BRW_OPCODE_ELSE ||
       brw_inst_opcode(brw, src) == BRW_OPCODE_ENDIF ||
       brw_inst_opcode(brw, src) == BRW_OPCODE_HALT ||
       brw_inst_opcode(brw, src) == BRW_OPCODE_DO ||
       brw_inst_opcode(brw, src) == BRW_OPCODE_WHILE) {
      /* FINISHME: The fixup code below, and brw_set_uip_jip and friends, need
       * to be able to handle compacted flow control instructions.
       */
      return false;
   }

   bool is_immediate =
      brw_inst_src0_reg_file(brw, src) == BRW_IMMEDIATE_VALUE ||
      brw_inst_src1_reg_file(brw, src) == BRW_IMMEDIATE_VALUE;
   if (is_immediate &&
       !is_compactable_immediate(brw_inst_imm_ud(brw, src))) {
      return false;
   }

   memset(&temp, 0, sizeof(temp));

   brw_compact_inst_set_opcode(&temp, brw_inst_opcode(brw, src));
   brw_compact_inst_set_debug_control(&temp, brw_inst_debug_control(brw, src));
   if (!set_control_index(brw, &temp, src))
      return false;
   if (!set_datatype_index(brw, &temp, src))
      return false;
   if (!set_subreg_index(brw, &temp, src, is_immediate))
      return false;
   brw_compact_inst_set_acc_wr_control(&temp,
                                       brw_inst_acc_wr_control(brw, src));
   brw_compact_inst_set_cond_modifier(&temp,
                                      brw_inst_cond_modifier(brw, src));
   if (brw->gen <= 6)
      brw_compact_inst_set_flag_subreg_nr(&temp,
                                          brw_inst_flag_subreg_nr(brw, src));
   brw_compact_inst_set_cmpt_control(&temp, true);
   if (!set_src0_index(brw, &temp, src))
      return false;
   if (!set_src1_index(brw, &temp, src, is_immediate))
      return false;
   brw_compact_inst_set_dst_reg_nr(&temp, brw_inst_dst_da_reg_nr(brw, src));
   brw_compact_inst_set_src0_reg_nr(&temp, brw_inst_src0_da_reg_nr(brw, src));

   if (is_immediate) {
      brw_compact_inst_set_src1_reg_nr(&temp, brw_inst_imm_ud(brw, src) & 0xff);
   } else {
      brw_compact_inst_set_src1_reg_nr(&temp,
                                       brw_inst_src1_da_reg_nr(brw, src));
   }

   *dst = temp;

   return true;
}

static void
set_uncompacted_control(struct brw_context *brw, brw_inst *dst,
                        brw_compact_inst *src)
{
   uint32_t uncompacted =
      control_index_table[brw_compact_inst_control_index(src)];

   brw_inst_set_bits(dst, 31, 31, (uncompacted >> 16) & 0x1);
   brw_inst_set_bits(dst, 23, 8, (uncompacted & 0xffff));

   if (brw->gen >= 7)
      brw_inst_set_bits(dst, 90, 89, uncompacted >> 17);
}

static void
set_uncompacted_datatype(struct brw_context *brw, brw_inst *dst,
                         brw_compact_inst *src)
{
   uint32_t uncompacted =
      datatype_table[brw_compact_inst_datatype_index(src)];

   brw_inst_set_bits(dst, 63, 61, (uncompacted >> 15));
   brw_inst_set_bits(dst, 46, 32, (uncompacted & 0x7fff));
}

static void
set_uncompacted_subreg(struct brw_context *brw, brw_inst *dst,
                       brw_compact_inst *src)
{
   uint16_t uncompacted =
      subreg_table[brw_compact_inst_subreg_index(src)];

   brw_inst_set_bits(dst, 100, 96, (uncompacted >> 10));
   brw_inst_set_bits(dst,  68, 64, (uncompacted >> 5) & 0x1f);
   brw_inst_set_bits(dst,  52, 48, (uncompacted >> 0) & 0x1f);
}

static void
set_uncompacted_src0(struct brw_context *brw, brw_inst *dst,
                     brw_compact_inst *src)
{
   uint32_t compacted = brw_compact_inst_src0_index(src);
   uint16_t uncompacted = src_index_table[compacted];

   brw_inst_set_bits(dst, 88, 77, uncompacted);
}

static void
set_uncompacted_src1(struct brw_context *brw, brw_inst *dst,
                     brw_compact_inst *src, bool is_immediate)
{
   if (is_immediate) {
      signed high5 = brw_compact_inst_src1_index(src);
      /* Replicate top bit of src1_index into high 20 bits of the immediate. */
      brw_inst_set_imm_ud(brw, dst, (high5 << 27) >> 19);
   } else {
      uint16_t uncompacted = src_index_table[brw_compact_inst_src1_index(src)];

      brw_inst_set_bits(dst, 120, 109, uncompacted);
   }
}
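
/* Worked example of the immediate path above, assuming the usual arithmetic
 * right-shift behavior for signed ints that the code already relies on:
 * compacting the immediate 0xfffffff0 stores
 * src1_index = (0xfffffff0 >> 8) & 0x1f = 0x1f and src1_reg_nr = 0xf0.
 * Uncompacting computes (0x1f << 27) >> 19 = 0xffffff00 here, and
 * brw_uncompact_instruction() below ORs src1_reg_nr back into the low 8
 * bits, restoring 0xfffffff0.
 */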

void
brw_uncompact_instruction(struct brw_context *brw, brw_inst *dst,
                          brw_compact_inst *src)
{
   memset(dst, 0, sizeof(*dst));

   brw_inst_set_opcode(brw, dst, brw_compact_inst_opcode(src));
   brw_inst_set_debug_control(brw, dst, brw_compact_inst_debug_control(src));

   set_uncompacted_control(brw, dst, src);
   set_uncompacted_datatype(brw, dst, src);

   /* src0/1 register file fields are in the datatype table. */
   bool is_immediate =
      brw_inst_src0_reg_file(brw, dst) == BRW_IMMEDIATE_VALUE ||
      brw_inst_src1_reg_file(brw, dst) == BRW_IMMEDIATE_VALUE;

   set_uncompacted_subreg(brw, dst, src);
   brw_inst_set_acc_wr_control(brw, dst, brw_compact_inst_acc_wr_control(src));
   brw_inst_set_cond_modifier(brw, dst, brw_compact_inst_cond_modifier(src));
   if (brw->gen <= 6)
      brw_inst_set_flag_subreg_nr(brw, dst,
                                  brw_compact_inst_flag_subreg_nr(src));
   set_uncompacted_src0(brw, dst, src);
   set_uncompacted_src1(brw, dst, src, is_immediate);
   brw_inst_set_dst_da_reg_nr(brw, dst, brw_compact_inst_dst_reg_nr(src));
   brw_inst_set_src0_da_reg_nr(brw, dst, brw_compact_inst_src0_reg_nr(src));

   if (is_immediate) {
      brw_inst_set_imm_ud(brw, dst,
                          brw_inst_imm_ud(brw, dst) |
                          brw_compact_inst_src1_reg_nr(src));
   } else {
      brw_inst_set_src1_da_reg_nr(brw, dst, brw_compact_inst_src1_reg_nr(src));
   }
}

void
brw_debug_compact_uncompact(struct brw_context *brw, brw_inst *orig,
                            brw_inst *uncompacted)
{
   fprintf(stderr, "Instruction compact/uncompact changed (gen%d):\n",
           brw->gen);

   fprintf(stderr, "  before: ");
   brw_disassemble_inst(stderr, brw, orig, true);

   fprintf(stderr, "  after:  ");
   brw_disassemble_inst(stderr, brw, uncompacted, false);

   uint32_t *before_bits = (uint32_t *)orig;
   uint32_t *after_bits = (uint32_t *)uncompacted;
   fprintf(stderr, "  changed bits:\n");
   for (int i = 0; i < 128; i++) {
      uint32_t before = before_bits[i / 32] & (1 << (i & 31));
      uint32_t after = after_bits[i / 32] & (1 << (i & 31));

      if (before != after) {
         fprintf(stderr, "  bit %d, %s to %s\n", i,
                 before ? "set" : "unset",
                 after ? "set" : "unset");
      }
   }
}

static int
compacted_between(int old_ip, int old_target_ip, int *compacted_counts)
{
   int this_compacted_count = compacted_counts[old_ip];
   int target_compacted_count = compacted_counts[old_target_ip];
   return target_compacted_count - this_compacted_count;
}

static void
update_uip_jip(struct brw_context *brw, brw_inst *insn,
               int this_old_ip, int *compacted_counts)
{
   int jip = brw_inst_jip(brw, insn);
   jip -= compacted_between(this_old_ip, this_old_ip + jip, compacted_counts);
   brw_inst_set_jip(brw, insn, jip);

   if (brw_inst_opcode(brw, insn) == BRW_OPCODE_ENDIF ||
       brw_inst_opcode(brw, insn) == BRW_OPCODE_WHILE)
      return;

   int uip = brw_inst_uip(brw, insn);
   uip -= compacted_between(this_old_ip, this_old_ip + uip, compacted_counts);
   brw_inst_set_uip(brw, insn, uip);
}
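
/* The fixup above works in the same 8-byte units that old_ip[] and
 * compacted_counts[] in brw_compact_instructions() use: every instruction
 * between a branch and its target that became compacted shortens the
 * distance by one unit (8 bytes).  For example (made-up numbers, for
 * illustration): a BREAK whose UIP points 10 units forward, with 3 compacted
 * instructions in between, ends up with
 * uip = 10 - compacted_between(ip, ip + 10, counts) = 10 - 3 = 7.
 */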

void
brw_init_compaction_tables(struct brw_context *brw)
{
   assert(gen6_control_index_table[ARRAY_SIZE(gen6_control_index_table) - 1] != 0);
   assert(gen6_datatype_table[ARRAY_SIZE(gen6_datatype_table) - 1] != 0);
   assert(gen6_subreg_table[ARRAY_SIZE(gen6_subreg_table) - 1] != 0);
   assert(gen6_src_index_table[ARRAY_SIZE(gen6_src_index_table) - 1] != 0);
   assert(gen7_control_index_table[ARRAY_SIZE(gen7_control_index_table) - 1] != 0);
   assert(gen7_datatype_table[ARRAY_SIZE(gen7_datatype_table) - 1] != 0);
   assert(gen7_subreg_table[ARRAY_SIZE(gen7_subreg_table) - 1] != 0);
   assert(gen7_src_index_table[ARRAY_SIZE(gen7_src_index_table) - 1] != 0);

   switch (brw->gen) {
   case 7:
      control_index_table = gen7_control_index_table;
      datatype_table = gen7_datatype_table;
      subreg_table = gen7_subreg_table;
      src_index_table = gen7_src_index_table;
      break;
   case 6:
      control_index_table = gen6_control_index_table;
      datatype_table = gen6_datatype_table;
      subreg_table = gen6_subreg_table;
      src_index_table = gen6_src_index_table;
      break;
   default:
      return;
   }
}

void
brw_compact_instructions(struct brw_compile *p, int start_offset,
                         int num_annotations, struct annotation *annotation)
{
   struct brw_context *brw = p->brw;
   void *store = p->store + start_offset / 16;
   /* For an instruction at byte offset 8*i before compaction, this is the
    * number of compacted instructions that preceded it.
    */
   int compacted_counts[(p->next_insn_offset - start_offset) / 8];
   /* For an instruction at byte offset 8*i after compaction, this is the
    * 8-byte offset it was at before compaction.
    */
   int old_ip[(p->next_insn_offset - start_offset) / 8];

   if (brw->gen < 6)
      return;

   int src_offset;
   int offset = 0;
   int compacted_count = 0;
   for (src_offset = 0; src_offset < p->next_insn_offset - start_offset;) {
      brw_inst *src = store + src_offset;
      void *dst = store + offset;

      old_ip[offset / 8] = src_offset / 8;
      compacted_counts[src_offset / 8] = compacted_count;

      brw_inst saved = *src;

      if (!brw_inst_cmpt_control(brw, src) &&
          brw_try_compact_instruction(brw, dst, src)) {
         compacted_count++;

         if (INTEL_DEBUG) {
            brw_inst uncompacted;
            brw_uncompact_instruction(brw, &uncompacted, dst);
            if (memcmp(&saved, &uncompacted, sizeof(uncompacted))) {
               brw_debug_compact_uncompact(brw, &saved, &uncompacted);
            }
         }

         offset += 8;
         src_offset += 16;
      } else {
         int size = brw_inst_cmpt_control(brw, src) ? 8 : 16;

         /* It appears that the end-of-thread SEND instruction needs to be
          * aligned, or the GPU hangs.
          */
         if ((brw_inst_opcode(brw, src) == BRW_OPCODE_SEND ||
              brw_inst_opcode(brw, src) == BRW_OPCODE_SENDC) &&
             brw_inst_eot(brw, src) &&
             (offset & 8) != 0) {
            brw_compact_inst *align = store + offset;
            memset(align, 0, sizeof(*align));
            brw_compact_inst_set_opcode(align, BRW_OPCODE_NOP);
            brw_compact_inst_set_cmpt_control(align, true);
            offset += 8;
            old_ip[offset / 8] = src_offset / 8;
            dst = store + offset;
         }

         /* If we didn't compact this instruction, we need to move it down
          * into place.
          */
         if (offset != src_offset) {
            memmove(dst, src, size);
         }
         offset += size;
         src_offset += size;
      }
   }

   /* Fix up control flow offsets. */
   p->next_insn_offset = start_offset + offset;
   for (offset = 0; offset < p->next_insn_offset - start_offset;) {
      brw_inst *insn = store + offset;
      int this_old_ip = old_ip[offset / 8];
      int this_compacted_count = compacted_counts[this_old_ip];
      int target_old_ip, target_compacted_count;

      switch (brw_inst_opcode(brw, insn)) {
      case BRW_OPCODE_BREAK:
      case BRW_OPCODE_CONTINUE:
      case BRW_OPCODE_HALT:
         update_uip_jip(brw, insn, this_old_ip, compacted_counts);
         break;

      case BRW_OPCODE_IF:
      case BRW_OPCODE_ELSE:
      case BRW_OPCODE_ENDIF:
      case BRW_OPCODE_WHILE:
         if (brw->gen == 6) {
            int gen6_jump_count = brw_inst_gen6_jump_count(brw, insn);
            target_old_ip = this_old_ip + gen6_jump_count;
            target_compacted_count = compacted_counts[target_old_ip];
            gen6_jump_count -= (target_compacted_count - this_compacted_count);
            brw_inst_set_gen6_jump_count(brw, insn, gen6_jump_count);
         } else {
            update_uip_jip(brw, insn, this_old_ip, compacted_counts);
         }
         break;
      }

      offset = next_offset(brw, store, offset);
   }

   /* p->nr_insn still counts uncompacted (16-byte) instructions, so divide.
    * We also want a valid instruction in any alignment padding, so that the
    * next compaction pass (for the FS SIMD8/SIMD16 compiles) parses
    * correctly.
    */
   if (p->next_insn_offset & 8) {
      brw_compact_inst *align = store + offset;
      memset(align, 0, sizeof(*align));
      brw_compact_inst_set_opcode(align, BRW_OPCODE_NOP);
      brw_compact_inst_set_cmpt_control(align, true);
      p->next_insn_offset += 8;
   }
   p->nr_insn = p->next_insn_offset / 16;
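
   /* A small worked example of the bookkeeping above (hypothetical program,
    * for illustration only): three 16-byte instructions at old 8-byte ips
    * 0, 2 and 4, where only the first one compacts.  The main loop records
    * compacted_counts[0] = 0, compacted_counts[2] = 1, compacted_counts[4] = 1
    * and old_ip[0] = 0, old_ip[1] = 2, old_ip[3] = 4 (the last instruction
    * now starts at new ip 3).  If that last instruction is a gen7 WHILE
    * jumping back to the first one, its jip of -4 becomes
    * -4 - (compacted_counts[0] - compacted_counts[4]) = -3, which matches
    * the new distance from ip 3 back to ip 0.
    */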

   /* Update the instruction offsets for each annotation. */
   if (annotation) {
      for (int offset = 0, i = 0; i < num_annotations; i++) {
         while (start_offset + old_ip[offset / 8] * 8 != annotation[i].offset) {
            assert(start_offset + old_ip[offset / 8] * 8 <
                   annotation[i].offset);
            offset = next_offset(brw, store, offset);
         }

         annotation[i].offset = start_offset + offset;

         offset = next_offset(brw, store, offset);
      }

      annotation[num_annotations].offset = p->next_insn_offset;
   }
}