From 494f86bbe5b488f04390126d9988032194f82a2f Mon Sep 17 00:00:00 2001 From: Jonas Pfeil Date: Sat, 29 Jul 2017 21:23:52 +0200 Subject: broadcom/vc4: Port NEON-code to ARM64 Changed all register and instruction names, works the same. v2: Rebase on build system changes (by anholt) v3: Fix build on clang (by anholt, reported by Rob) Signed-off-by: Jonas Pfeil Tested-by: Rob Herring --- src/gallium/drivers/vc4/vc4_tiling_lt.c | 84 +++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) (limited to 'src/gallium/drivers') diff --git a/src/gallium/drivers/vc4/vc4_tiling_lt.c b/src/gallium/drivers/vc4/vc4_tiling_lt.c index f37a92e9390..4a76c0ff721 100644 --- a/src/gallium/drivers/vc4/vc4_tiling_lt.c +++ b/src/gallium/drivers/vc4/vc4_tiling_lt.c @@ -105,6 +105,50 @@ vc4_load_utile(void *cpu, void *gpu, uint32_t cpu_stride, uint32_t cpp) : "r"(gpu), "r"(cpu), "r"(cpu + 8), "r"(cpu_stride) : "q0", "q1", "q2", "q3"); } +#elif defined (PIPE_ARCH_AARCH64) + if (gpu_stride == 8) { + __asm__ volatile ( + /* Load from the GPU in one shot, no interleave, to + * v0-v3. + */ + "ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [%0]\n" + /* Store each 8-byte line to cpu-side destination, + * incrementing it by the stride each time. + */ + "st1 {v0.D}[0], [%1], %2\n" + "st1 {v0.D}[1], [%1], %2\n" + "st1 {v1.D}[0], [%1], %2\n" + "st1 {v1.D}[1], [%1], %2\n" + "st1 {v2.D}[0], [%1], %2\n" + "st1 {v2.D}[1], [%1], %2\n" + "st1 {v3.D}[0], [%1], %2\n" + "st1 {v3.D}[1], [%1]\n" + : + : "r"(gpu), "r"(cpu), "r"(cpu_stride) + : "v0", "v1", "v2", "v3"); + } else { + assert(gpu_stride == 16); + __asm__ volatile ( + /* Load from the GPU in one shot, no interleave, to + * v0-v3. + */ + "ld1 {v0.2d, v1.2d, v2.2d, v3.2d}, [%0]\n" + /* Store each 16-byte line in 2 parts to the cpu-side + * destination. (st1 can only store one d-register + * at a time). 
+ */
+ "st1 {v0.D}[0], [%1], %3\n"
+ "st1 {v0.D}[1], [%2], %3\n"
+ "st1 {v1.D}[0], [%1], %3\n"
+ "st1 {v1.D}[1], [%2], %3\n"
+ "st1 {v2.D}[0], [%1], %3\n"
+ "st1 {v2.D}[1], [%2], %3\n"
+ "st1 {v3.D}[0], [%1]\n"
+ "st1 {v3.D}[1], [%2]\n"
+ :
+ : "r"(gpu), "r"(cpu), "r"(cpu + 8), "r"(cpu_stride)
+ : "v0", "v1", "v2", "v3");
+ } #else for (uint32_t gpu_offset = 0; gpu_offset < 64; gpu_offset += gpu_stride) { memcpy(cpu, gpu + gpu_offset, gpu_stride); @@ -160,6 +204,46 @@ vc4_store_utile(void *gpu, void *cpu, uint32_t cpu_stride, uint32_t cpp) : "r"(gpu), "r"(cpu), "r"(cpu + 8), "r"(cpu_stride) : "q0", "q1", "q2", "q3"); } +#elif defined (PIPE_ARCH_AARCH64) + if (gpu_stride == 8) { + __asm__ volatile ( + /* Load each 8-byte line from cpu-side source, + * incrementing it by the stride each time. + */ + "ld1 {v0.D}[0], [%1], %2\n" + "ld1 {v0.D}[1], [%1], %2\n" + "ld1 {v1.D}[0], [%1], %2\n" + "ld1 {v1.D}[1], [%1], %2\n" + "ld1 {v2.D}[0], [%1], %2\n" + "ld1 {v2.D}[1], [%1], %2\n" + "ld1 {v3.D}[0], [%1], %2\n" + "ld1 {v3.D}[1], [%1]\n" + /* Store to the GPU in one shot, no interleave. */ + "st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [%0]\n" + : + : "r"(gpu), "r"(cpu), "r"(cpu_stride) + : "v0", "v1", "v2", "v3"); + } else { + assert(gpu_stride == 16); + __asm__ volatile ( + /* Load each 16-byte line in 2 parts from the cpu-side + * source. (ld1 can only load one d-register + * at a time). + */ + "ld1 {v0.D}[0], [%1], %3\n" + "ld1 {v0.D}[1], [%2], %3\n" + "ld1 {v1.D}[0], [%1], %3\n" + "ld1 {v1.D}[1], [%2], %3\n" + "ld1 {v2.D}[0], [%1], %3\n" + "ld1 {v2.D}[1], [%2], %3\n" + "ld1 {v3.D}[0], [%1]\n" + "ld1 {v3.D}[1], [%2]\n" + /* Store to the GPU in one shot, no interleave. */ + "st1 {v0.2d, v1.2d, v2.2d, v3.2d}, [%0]\n" + : + : "r"(gpu), "r"(cpu), "r"(cpu + 8), "r"(cpu_stride) + : "v0", "v1", "v2", "v3"); + } #else for (uint32_t gpu_offset = 0; gpu_offset < 64; gpu_offset += gpu_stride) { memcpy(gpu + gpu_offset, cpu, gpu_stride); -- cgit v1.2.3