path: root/src/gallium/drivers/nouveau/nouveau_buffer.h
#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_range.h"
#include "util/u_transfer.h"
#include "util/list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* GPU_READING: the GPU may still have outstanding reads from the buffer
 *  (tracked via resource->fence)
 *
 * GPU_WRITING: the GPU may still have outstanding writes to the buffer
 *  (tracked via resource->fence_wr)
 *
 * DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 *  between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
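
/* Usage sketch (illustrative, not part of the original header): before a
 * CPU read of resource contents, a caller typically checks for outstanding
 * GPU writes; the wait-on-fence_wr step is an assumption about the call
 * site, not something this header mandates.
 *
 *    if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
 *       // wait on res->fence_wr here, then clear the flag
 *       res->status &= ~NOUVEAU_BUFFER_STATUS_GPU_WRITING;
 *    }
 */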

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data; /* resource's contents, if domain == 0, or cached */
   struct nouveau_bo *bo;
   uint32_t offset; /* offset into the data/bo */

   uint8_t status;
   uint8_t domain;

   uint16_t cb_bindings[6]; /* per-shader per-slot bindings */

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
};
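
/* Reading the struct (a sketch, not from the original header): domain
 * selects where the contents live. domain == 0 means only the malloc'd
 * res->data copy exists; otherwise res->bo backs the resource at
 * res->offset, and res->address is its GPU virtual address (nv50+).
 * issue_gpu_copy below is a hypothetical helper for illustration.
 *
 *    if (res->domain == 0)
 *       memcpy(out, res->data + at, n);        // pure CPU-side storage
 *    else
 *       issue_gpu_copy(res->address + at, n);  // hypothetical GPU-side read
 */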

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

void
nouveau_copy_buffer(struct nouveau_context *,
                    struct nv04_resource *dst, unsigned dst_pos,
                    struct nv04_resource *src, unsigned src_pos, unsigned size);

bool
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);

void *
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
                            uint32_t offset, uint32_t flags);

static inline void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
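
/* Usage sketch (assumptions: "nv" is the current nouveau_context, and
 * NOUVEAU_BO_RD from libdrm's nouveau.h is the access flag passed here;
 * both are assumptions about the call site, not this header):
 *
 *    uint32_t *p = nouveau_resource_map_offset(nv, res, 0, NOUVEAU_BO_RD);
 *    if (p) {
 *       // ... read through p ...
 *    }
 *    nouveau_resource_unmap(res);  // no-op, kept for call-site symmetry
 */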

static inline struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* is resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static inline bool
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

bool
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                           unsigned base, unsigned size);

/* Copy data to a scratch buffer and return the GPU virtual address and the
 * bo in which the copy resides.
 * Returns 0 on failure.
 */
uint64_t
nouveau_scratch_data(struct nouveau_context *,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **);
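
/* Usage sketch (hypothetical call site; "nv", "data" and "size" are
 * assumed to come from the caller):
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint64_t addr = nouveau_scratch_data(nv, data, 0, size, &bo);
 *    if (!addr)
 *       return;  // upload failed, keep the CPU path instead
 *    // addr is the GPU virtual address of the copy; bo is the backing
 *    // buffer to add to the validation list
 */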

#endif