summaryrefslogtreecommitdiff
path: root/src/gallium/winsys/drm/intel/gem/intel_drm_batchbuffer.c
blob: ebd1b607b78f48e79b9a467a12043159832d2c3f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229

#include "intel_drm_winsys.h"
#include "util/u_memory.h"

#include "i915_drm.h"

#define BATCH_RESERVED 16

#define INTEL_DEFAULT_RELOCS 100
#define INTEL_MAX_RELOCS 400

#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS    0x2

#undef INTEL_RUN_SYNC
#undef INTEL_MAP_BATCHBUFFER

/*
 * GEM-backed batchbuffer: extends the generic intel_batchbuffer with the
 * drm_intel_bo that receives the commands and the size it was allocated at.
 */
struct intel_drm_batchbuffer
{
   struct intel_batchbuffer base;

   /* Allocated size of the bo/shadow buffer; base.size is this minus
    * BATCH_RESERVED (see intel_drm_batchbuffer_reset). */
   size_t actual_size;

   /* GEM buffer object the batch is executed from. */
   drm_intel_bo *bo;
};

/* Downcast the generic batchbuffer to the drm/gem implementation. */
static INLINE struct intel_drm_batchbuffer *
intel_drm_batchbuffer(struct intel_batchbuffer *batch)
{
   struct intel_drm_batchbuffer *drm_batch;

   drm_batch = (struct intel_drm_batchbuffer *)batch;
   return drm_batch;
}

/*
 * Drop the current bo (if any), allocate a fresh one and reset the software
 * write pointer and relocation count.  Called once at creation time and
 * again after every flush.
 */
static void
intel_drm_batchbuffer_reset(struct intel_drm_batchbuffer *batch)
{
   struct intel_drm_winsys *idws = intel_drm_winsys(batch->base.iws);

   /* A new bo is allocated each cycle rather than reusing the old one,
    * so the GPU can still be reading the previous batch. */
   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);
   batch->bo = drm_intel_bo_alloc(idws->pools.gem,
                                  "gallium3d_batchbuffer",
                                  batch->actual_size,
                                  4096);

#ifdef INTEL_MAP_BATCHBUFFER
   /* Build commands directly in the mapped bo; in the non-mapped build
    * base.map is a malloc'ed shadow uploaded at flush time. */
   drm_intel_bo_map(batch->bo, TRUE);
   batch->base.map = batch->bo->virtual;
#endif

   /* NOTE(review): drm_intel_bo_alloc/drm_intel_bo_map results are not
    * checked; on failure the memset below dereferences NULL — presumably
    * OOM is treated as fatal here.  TODO confirm/handle. */
   memset(batch->base.map, 0, batch->actual_size);
   batch->base.ptr = batch->base.map;
   /* Reserve tail space for the closing flush/MI_BATCH_BUFFER_END dwords
    * emitted by intel_drm_batchbuffer_flush. */
   batch->base.size = batch->actual_size - BATCH_RESERVED;
   batch->base.relocs = 0;
}

/*
 * Allocate and initialize a new batchbuffer for the given winsys.
 *
 * Returns the generic batchbuffer base pointer, or NULL on allocation
 * failure (previously an OOM here led to a NULL dereference inside
 * intel_drm_batchbuffer_reset).
 */
static struct intel_batchbuffer *
intel_drm_batchbuffer_create(struct intel_winsys *iws)
{
   struct intel_drm_winsys *idws = intel_drm_winsys(iws);
   struct intel_drm_batchbuffer *batch = CALLOC_STRUCT(intel_drm_batchbuffer);

   if (!batch)
      return NULL;

   batch->actual_size = idws->max_batch_size;

#ifdef INTEL_MAP_BATCHBUFFER
   /* map is filled in by reset() from the mapped bo. */
   batch->base.map = NULL;
#else
   /* CPU-side shadow buffer, uploaded to the bo at flush time. */
   batch->base.map = MALLOC(batch->actual_size);
   if (!batch->base.map) {
      FREE(batch);
      return NULL;
   }
#endif
   batch->base.ptr = NULL;
   batch->base.size = 0;

   batch->base.relocs = 0;
   /* Historically bumped from INTEL_DEFAULT_RELOCS (100) to 300; kept
    * as-is since callers may rely on the larger headroom. */
   batch->base.max_relocs = 300;/*INTEL_DEFAULT_RELOCS;*/

   batch->base.iws = iws;

   intel_drm_batchbuffer_reset(batch);

   return &batch->base;
}

/*
 * Record a relocation for `buffer` at the current batch write position and
 * emit the bo's presumed offset (plus pre_add) into the command stream.
 * Returns the libdrm result: 0 on success, negative on failure.
 */
static int
intel_drm_batchbuffer_reloc(struct intel_batchbuffer *ibatch,
                            struct intel_buffer *buffer,
                            enum intel_buffer_usage usage,
                            unsigned pre_add)
{
   struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
   unsigned read_domain;
   unsigned write_domain;
   unsigned offset;
   int ret;

   assert(batch->base.relocs < batch->base.max_relocs);

   /* Map the usage to i915 GEM read/write domains. */
   switch (usage) {
   case INTEL_USAGE_SAMPLER:
      read_domain = I915_GEM_DOMAIN_SAMPLER;
      write_domain = 0;
      break;
   case INTEL_USAGE_RENDER:
   case INTEL_USAGE_2D_TARGET:
      read_domain = I915_GEM_DOMAIN_RENDER;
      write_domain = I915_GEM_DOMAIN_RENDER;
      break;
   case INTEL_USAGE_2D_SOURCE:
      read_domain = I915_GEM_DOMAIN_RENDER;
      write_domain = 0;
      break;
   case INTEL_USAGE_VERTEX:
      read_domain = I915_GEM_DOMAIN_VERTEX;
      write_domain = 0;
      break;
   default:
      assert(0);
      return -1;
   }

   offset = (unsigned)(batch->base.ptr - batch->base.map);

   ret = drm_intel_bo_emit_reloc(batch->bo, offset,
                                 intel_bo(buffer), pre_add,
                                 read_domain,
                                 write_domain);

   /* Write the presumed address into the stream; the kernel patches it at
    * exec time if the bo ends up elsewhere.  The pointer advances even on
    * reloc failure so the stream layout stays consistent. */
   *(uint32_t *)batch->base.ptr = intel_bo(buffer)->offset + pre_add;
   batch->base.ptr += 4;

   if (ret == 0)
      batch->base.relocs++;

   return ret;
}

/*
 * Close the batch (flush + MI_BATCH_BUFFER_END, with noop padding), submit
 * it to the kernel for execution, optionally hand back a fence, and reset
 * the batchbuffer for reuse.
 */
static void
intel_drm_batchbuffer_flush(struct intel_batchbuffer *ibatch,
                            struct pipe_fence_handle **fence)
{
   struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);
   unsigned used = 0;
   int ret = 0;
   int i;

   assert(intel_batchbuffer_space(ibatch) >= 0);

   used = batch->base.ptr - batch->base.map;
   /* The stream is written one dword at a time. */
   assert((used & 3) == 0);

   /* Pad so the final length is 8-byte aligned: if we sit on an odd dword,
    * emit flush + noop + end (3 dwords), otherwise flush + end (2 dwords).
    * BATCH_RESERVED guarantees this tail always fits. */
   if (used & 4) {
      // MI_FLUSH | FLUSH_MAP_CACHE;
      intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x4<<23)|(1<<0));
      // MI_NOOP
      intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x0<<23));
      // MI_BATCH_BUFFER_END;
      intel_batchbuffer_dword(ibatch, (0x0<<29)|(0xA<<23));
   } else {
      //MI_FLUSH | FLUSH_MAP_CACHE;
      intel_batchbuffer_dword(ibatch, (0x0<<29)|(0x4<<23)|(1<<0));
      // MI_BATCH_BUFFER_END;
      intel_batchbuffer_dword(ibatch, (0x0<<29)|(0xA<<23));
   }

   used = batch->base.ptr - batch->base.map;

#ifdef INTEL_MAP_BATCHBUFFER
   /* Commands were written directly into the mapped bo; unmap before exec. */
   drm_intel_bo_unmap(batch->bo);
#else
   /* Commands were built in the malloc'ed shadow; upload them now. */
   drm_intel_bo_subdata(batch->bo, 0, used, batch->base.map);
#endif

   /* Do the sending to HW */
   ret = drm_intel_bo_exec(batch->bo, used, NULL, 0, 0);
   assert(ret == 0);

   if (intel_drm_winsys(ibatch->iws)->dump_cmd) {
      /* Debug: map the submitted bo and dump every dword. */
      unsigned *ptr;
      drm_intel_bo_map(batch->bo, FALSE);
      ptr = (unsigned*)batch->bo->virtual;

      debug_printf("%s:\n", __func__);
      for (i = 0; i < used / 4; i++, ptr++) {
         debug_printf("\t%08x:    %08x\n", i*4, *ptr);
      }

      drm_intel_bo_unmap(batch->bo);
   } else {
#ifdef INTEL_RUN_SYNC
      /* Mapping blocks until the GPU is done with the bo, so a map/unmap
       * pair is a cheap way to wait for batch completion. */
      drm_intel_bo_map(batch->bo, FALSE);
      drm_intel_bo_unmap(batch->bo);
#endif
   }

   if (fence) {
      /* Release whatever fence the caller previously held. */
      ibatch->iws->fence_reference(ibatch->iws, fence, NULL);

#ifdef INTEL_RUN_SYNC
      /* we run synced to GPU so just pass null */
      (*fence) = intel_drm_fence_create(NULL);
#else
      (*fence) = intel_drm_fence_create(batch->bo);
#endif
   }

   intel_drm_batchbuffer_reset(batch);
}

/* Release the bo reference and the CPU-side memory owned by the batch. */
static void
intel_drm_batchbuffer_destroy(struct intel_batchbuffer *ibatch)
{
   struct intel_drm_batchbuffer *batch = intel_drm_batchbuffer(ibatch);

   if (batch->bo)
      drm_intel_bo_unreference(batch->bo);

#ifdef INTEL_MAP_BATCHBUFFER
   /* base.map pointed into the mapped bo; nothing separate to free. */
#else
   FREE(batch->base.map);
#endif
   FREE(batch);
}

void intel_drm_winsys_init_batchbuffer_functions(struct intel_drm_winsys *idws)
{
   idws->base.batchbuffer_create = intel_drm_batchbuffer_create;
   idws->base.batchbuffer_reloc = intel_drm_batchbuffer_reloc;
   idws->base.batchbuffer_flush = intel_drm_batchbuffer_flush;
   idws->base.batchbuffer_destroy = intel_drm_batchbuffer_destroy;
}