summaryrefslogtreecommitdiffstats
path: root/src/gallium/auxiliary/pipebuffer/pb_slab.h
blob: 78a4bf7c47c9bb337032bfeac7f2225214dbab43 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar.
 */

#ifndef PB_SLAB_H
#define PB_SLAB_H

#include "pb_buffer.h"
#include "util/list.h"
#include "os/os_thread.h"

struct pb_slab;
struct pb_slabs;
struct pb_slab_group;

/* Descriptor of a slab entry.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab_entry
{
   struct list_head head; /* link in pb_slab::free or in pb_slabs::reclaim */
   struct pb_slab *slab; /* the slab that contains this buffer */
   unsigned group_index; /* index into pb_slabs::groups */
};

/* Descriptor of a slab from which many entries are carved out.
 *
 * The user of this utility library is expected to embed this in a larger
 * structure that describes a buffer object.
 */
struct pb_slab
{
   /* List link, managed by the library (NOTE(review): presumably links this
    * slab into a per-(heap, order) pb_slab_group list — confirm in pb_slab.c).
    */
   struct list_head head;

   struct list_head free; /* list of free pb_slab_entry structures */
   unsigned num_free; /* number of entries in free list */
   unsigned num_entries; /* total number of entries */
};

/* Callback function that is called when a new slab needs to be allocated
 * for fulfilling allocation requests of the given size from the given heap.
 *
 * The callback must allocate a pb_slab structure and the desired number
 * of entries. All entries that belong to the slab must be added to the free
 * list. Entries' pb_slab_entry structures must be initialized with the given
 * group_index.
 *
 * The callback may call pb_slab functions.
 *
 * \param priv        the pb_slabs::priv pointer supplied at init time
 * \param heap        opaque heap identifier (meaning defined by the user)
 * \param entry_size  size of each entry to carve out of the new slab
 * \param group_index value to store in each entry's pb_slab_entry::group_index
 * \return the new slab (NOTE(review): presumably NULL on failure — confirm
 *         against pb_slab.c)
 */
typedef struct pb_slab *(slab_alloc_fn)(void *priv,
                                        unsigned heap,
                                        unsigned entry_size,
                                        unsigned group_index);

/* Callback function that is called when all entries of a slab have been freed.
 *
 * The callback must free the slab and all its entries. It must not call any of
 * the pb_slab functions, or a deadlock (recursive mutex lock) may occur --
 * i.e. it is invoked while pb_slabs::mutex is held.
 */
typedef void (slab_free_fn)(void *priv, struct pb_slab *);

/* Callback function to determine whether a given entry can already be reused.
 *
 * Per the file-level contract, an entry may be freed while its buffer region
 * is still in use by the GPU; this callback reports when the region has
 * become safe to hand out again (e.g. by checking a fence maintained by the
 * user of this library).
 */
typedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);

/* Manager of slab allocations. The user of this utility library should embed
 * this in a structure somewhere and call pb_slab_init/deinit at init/shutdown
 * time.
 */
struct pb_slabs
{
   /* Guards the allocator state. NOTE(review): held around the slab_free
    * callback (implied by the recursive-lock warning on slab_free_fn);
    * presumably also protects groups and reclaim — confirm in pb_slab.c.
    */
   mtx_t mutex;

   /* NOTE(review): "order" presumably means a power-of-two size exponent, so
    * entry sizes span [2^min_order, 2^(min_order + num_orders - 1)] — confirm
    * against pb_slabs_init's min_order/max_order parameters and pb_slab.c.
    */
   unsigned min_order;
   unsigned num_orders;
   unsigned num_heaps;

   /* One group per (heap, order) pair. */
   struct pb_slab_group *groups;

   /* List of entries waiting to be reclaimed, i.e. they have been passed to
    * pb_slab_free, but may not be safe for re-use yet. The tail points at
    * the most-recently freed entry.
    */
   struct list_head reclaim;

   /* Opaque user pointer, passed verbatim as the first argument of every
    * callback below.
    */
   void *priv;
   slab_can_reclaim_fn *can_reclaim;
   slab_alloc_fn *slab_alloc;
   slab_free_fn *slab_free;
};

/* Allocate an entry of at least the given size from the given heap.
 *
 * May invoke the slab_alloc callback to create a new slab when no suitable
 * free entry exists (see slab_alloc_fn). NOTE(review): presumably returns
 * NULL on failure — confirm against pb_slab.c.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);

/* Free the given entry.
 *
 * Per the file-level contract, the entry is not immediately reusable: it is
 * held back until the can_reclaim callback reports that it is safe (e.g. the
 * GPU is done with the corresponding buffer region).
 */
void
pb_slab_free(struct pb_slabs *slabs, struct pb_slab_entry *entry);

/* Manually process the reclaim list.
 *
 * NOTE(review): presumably walks pb_slabs::reclaim and returns entries for
 * which can_reclaim succeeds back to their slabs' free lists — confirm
 * against pb_slab.c. Callers would use this to bound the list's growth.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs);

/* Initialize the slabs manager.
 *
 * NOTE(review): min_order/max_order presumably bound the supported entry
 * sizes as power-of-two exponents (cf. pb_slabs::min_order/num_orders) —
 * confirm against pb_slab.c.
 *
 * \param priv         opaque pointer passed to every callback
 * \param can_reclaim  see slab_can_reclaim_fn
 * \param slab_alloc   see slab_alloc_fn
 * \param slab_free    see slab_free_fn
 * \return true on success, false on failure
 */
bool
pb_slabs_init(struct pb_slabs *slabs,
              unsigned min_order, unsigned max_order,
              unsigned num_heaps,
              void *priv,
              slab_can_reclaim_fn *can_reclaim,
              slab_alloc_fn *slab_alloc,
              slab_free_fn *slab_free);

/* Shut down the slabs manager, releasing its resources.
 *
 * NOTE(review): presumably all entries must have been freed (and be
 * reclaimable) before calling this — confirm against pb_slab.c.
 */
void
pb_slabs_deinit(struct pb_slabs *slabs);

#endif