#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* GPU_WRITING: buffer was (or will be, after the next flush) written to by
 * the GPU, and resource->data has not been updated to reflect the modified
 * VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 * between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)
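
/* Illustrative sketch (not part of this interface) of how the flags are
 * meant to be used, assuming a valid struct nouveau_context *nv:
 *
 *    // before emitting a GPU command that writes the buffer:
 *    res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
 *
 *    // before a CPU read of VRAM-resident storage:
 *    if (res->domain == NOUVEAU_BO_VRAM &&
 *        (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
 *       nouveau_buffer_download(nv, res, 0, res->base.width0);
 */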
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint8_t *data;    /* system memory storage (or user pointer) */
   struct nouveau_bo *bo;
   uint32_t offset;  /* byte offset of this resource within bo */

   uint8_t status;   /* NOUVEAU_BUFFER_STATUS_* flags */
   uint8_t domain;   /* NOUVEAU_BO_VRAM / NOUVEAU_BO_GART, 0 = not GPU-resident */

   struct nouveau_fence *fence;    /* fence for the last GPU access */
   struct nouveau_fence *fence_wr; /* fence for the last GPU write */

   struct nouveau_mm_allocation *mm; /* sub-allocation handle, if any */
};

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

boolean
nouveau_buffer_download(struct nouveau_context *, struct nv04_resource *,
                        unsigned start, unsigned size);

boolean
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);
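
/* Illustrative sketch: a typical call site migrates a buffer into a
 * GPU-accessible domain before recording commands that reference it,
 * assuming a valid struct nouveau_context *nv:
 *
 *    if (!nouveau_resource_mapped_by_gpu(&res->base))
 *       nouveau_buffer_migrate(nv, res, NOUVEAU_BO_GART);
 */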
/* XXX: wait for fence (atm only using this for vertex push) */
static INLINE void *
nouveau_resource_map_offset(struct nouveau_context *pipe,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   void *map;

   /* If the GPU wrote (or will write) to VRAM storage, bring the
    * modified contents back into res->data first.
    */
   if ((res->domain == NOUVEAU_BO_VRAM) &&
       (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
      nouveau_buffer_download(pipe, res, 0, res->base.width0);

   /* Not in GART (or backed by user memory): use the CPU-side copy. */
   if ((res->domain != NOUVEAU_BO_GART) ||
       (res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   /* Sub-allocated bos are shared; don't stall on other users. */
   if (res->mm)
      flags |= NOUVEAU_BO_NOSYNC;

   if (nouveau_bo_map_range(res->bo, res->offset + offset,
                            res->base.width0, flags))
      return NULL;

   /* The pointer stays usable after unmapping here, which is why
    * nouveau_resource_unmap below is a no-op.
    */
   map = res->bo->map;
   nouveau_bo_unmap(res->bo);
   return map;
}

static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
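
/* Illustrative usage sketch: reading back a range through the CPU,
 * assuming a valid struct nouveau_context *nv and a destination dst:
 *
 *    const uint8_t *src =
 *       nouveau_resource_map_offset(nv, res, start, NOUVEAU_BO_RD);
 *    if (src)
 *       memcpy(dst, src, size);
 *    nouveau_resource_unmap(res);
 */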
static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* Is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}
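
/* Illustrative sketch (emit_inline_data and emit_buffer_relocs are
 * hypothetical helpers): deciding whether vertex data must be pushed
 * inline or can be read by the GPU directly:
 *
 *    if (nouveau_resource_mapped_by_gpu(prsc))
 *       emit_buffer_relocs(nv, nv04_resource(prsc));
 *    else
 *       emit_inline_data(nv, nv04_resource(prsc)->data);
 */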
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

boolean
nouveau_user_buffer_upload(struct nv04_resource *, unsigned base,
                           unsigned size);

#endif