author     Kenneth Graunke <[email protected]>    2015-11-30 15:37:44 -0800
committer  Kenneth Graunke <[email protected]>    2015-12-14 14:56:14 -0800
commit     77cc2666b1935442f8a5b779c1d29977d029af01
tree       ca9c7547758a8afc45564539e11b957f88898bd7 /src/mesa/drivers
parent     9f0944d15b9d2cd85f501f80eea7e6b6fc7f3487
i965: Use DIV_ROUND_UP() in gen7_urb.c code.
This is a newer convention, which we prefer over ALIGN(x, n) / n.

Signed-off-by: Kenneth Graunke <[email protected]>
Reviewed-by: Kristian Høgsberg <[email protected]>
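For illustration, a minimal self-contained sketch of the two idioms being
swapped. The macro bodies below are modeled on Mesa's util/macros.h and the
8 kB chunk size is assumed for the example; neither is quoted from this
commit. ALIGN(x, n) rounds x up to a multiple of n (n must be a power of
two), so ALIGN(x, n) / n is a round-up division; DIV_ROUND_UP(a, b) states
that intent directly.

#include <assert.h>

/* Macro bodies modeled on Mesa's util/macros.h (illustrative). */
#define ALIGN(x, n)        (((x) + (n) - 1) & ~((n) - 1))   /* n: power of two */
#define DIV_ROUND_UP(a, b) (((a) + (b) - 1) / (b))

int
main(void)
{
   const unsigned chunk_size_bytes = 8192;   /* assumed Gen7 URB chunk size */

   /* For power-of-two divisors the two idioms agree exactly. */
   for (unsigned bytes = 1; bytes <= 4 * chunk_size_bytes; bytes++)
      assert(ALIGN(bytes, chunk_size_bytes) / chunk_size_bytes ==
             DIV_ROUND_UP(bytes, chunk_size_bytes));
   return 0;
}

Besides reading as what it is (a ceiling division), DIV_ROUND_UP does not
require a power-of-two divisor, whereas the ALIGN form does.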
Diffstat (limited to 'src/mesa/drivers')
-rw-r--r--  src/mesa/drivers/dri/i965/gen7_urb.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/src/mesa/drivers/dri/i965/gen7_urb.c b/src/mesa/drivers/dri/i965/gen7_urb.c
index 99a9d3c6500..421512b2fc8 100644
--- a/src/mesa/drivers/dri/i965/gen7_urb.c
+++ b/src/mesa/drivers/dri/i965/gen7_urb.c
@@ -193,11 +193,11 @@ gen7_upload_urb(struct brw_context *brw)
    /* VS has a lower limit on the number of URB entries */
    unsigned vs_chunks =
-      ALIGN(brw->urb.min_vs_entries * vs_entry_size_bytes, chunk_size_bytes) /
-      chunk_size_bytes;
+      DIV_ROUND_UP(brw->urb.min_vs_entries * vs_entry_size_bytes,
+                   chunk_size_bytes);
    unsigned vs_wants =
-      ALIGN(brw->urb.max_vs_entries * vs_entry_size_bytes,
-            chunk_size_bytes) / chunk_size_bytes - vs_chunks;
+      DIV_ROUND_UP(brw->urb.max_vs_entries * vs_entry_size_bytes,
+                   chunk_size_bytes) - vs_chunks;
 
    unsigned gs_chunks = 0;
    unsigned gs_wants = 0;
@@ -210,11 +210,10 @@ gen7_upload_urb(struct brw_context *brw)
        *
        * (2) We can't allocate less than nr_gs_entries_granularity.
        */
-      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
-                        chunk_size_bytes) / chunk_size_bytes;
-      gs_wants =
-         ALIGN(brw->urb.max_gs_entries * gs_entry_size_bytes,
-               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
+      gs_chunks = DIV_ROUND_UP(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
+                               chunk_size_bytes);
+      gs_wants = DIV_ROUND_UP(brw->urb.max_gs_entries * gs_entry_size_bytes,
+                              chunk_size_bytes) - gs_chunks;
    }
 
    /* There should always be enough URB space to satisfy the minimum