Diffstat (limited to 'src/mesa/drivers/dri/intel')
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.c     289
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.h     173
-rw-r--r--  src/mesa/drivers/dri/intel/intel_blit.c            512
-rw-r--r--  src/mesa/drivers/dri/intel/intel_blit.h             73
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffer_objects.c  734
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffer_objects.h   92
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffers.c         325
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffers.h          56
-rw-r--r--  src/mesa/drivers/dri/intel/intel_chipset.h         128
-rw-r--r--  src/mesa/drivers/dri/intel/intel_clear.c           207
-rw-r--r--  src/mesa/drivers/dri/intel/intel_clear.h            38
-rw-r--r--  src/mesa/drivers/dri/intel/intel_context.c         912
-rw-r--r--  src/mesa/drivers/dri/intel/intel_context.h         478
-rw-r--r--  src/mesa/drivers/dri/intel/intel_decode.c         1822
-rw-r--r--  src/mesa/drivers/dri/intel/intel_decode.h           29
-rw-r--r--  src/mesa/drivers/dri/intel/intel_extensions.c      229
-rw-r--r--  src/mesa/drivers/dri/intel/intel_extensions.h       39
-rw-r--r--  src/mesa/drivers/dri/intel/intel_extensions_es2.c   94
-rw-r--r--  src/mesa/drivers/dri/intel/intel_fbo.c             697
-rw-r--r--  src/mesa/drivers/dri/intel/intel_fbo.h             113
-rw-r--r--  src/mesa/drivers/dri/intel/intel_mipmap_tree.c     461
-rw-r--r--  src/mesa/drivers/dri/intel/intel_mipmap_tree.h     217
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel.c           169
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel.h            67
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel_bitmap.c    522
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel_copy.c      214
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel_draw.c      279
-rw-r--r--  src/mesa/drivers/dri/intel/intel_pixel_read.c      207
-rw-r--r--  src/mesa/drivers/dri/intel/intel_reg.h             248
-rw-r--r--  src/mesa/drivers/dri/intel/intel_regions.c         507
-rw-r--r--  src/mesa/drivers/dri/intel/intel_regions.h         160
-rw-r--r--  src/mesa/drivers/dri/intel/intel_screen.c          574
-rw-r--r--  src/mesa/drivers/dri/intel/intel_screen.h           68
-rw-r--r--  src/mesa/drivers/dri/intel/intel_span.c            384
-rw-r--r--  src/mesa/drivers/dri/intel/intel_span.h             42
-rw-r--r--  src/mesa/drivers/dri/intel/intel_state.c           211
-rw-r--r--  src/mesa/drivers/dri/intel/intel_syncobj.c         132
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex.c             228
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex.h              71
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_copy.c        330
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_format.c      228
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_image.c       856
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_layout.c      136
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_layout.h       44
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_obj.h          80
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_subimage.c    277
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_validate.c    304
-rw-r--r--  src/mesa/drivers/dri/intel/server/i830_dri.h        62
-rw-r--r--  src/mesa/drivers/dri/intel/server/intel.h          331
49 files changed, 14449 insertions, 0 deletions
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
new file mode 100644
index 0000000000..698445c526
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -0,0 +1,289 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_decode.h"
+#include "intel_reg.h"
+#include "intel_bufmgr.h"
+#include "intel_buffers.h"
+
+void
+intel_batchbuffer_reset(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+
+ if (batch->buf != NULL) {
+ drm_intel_bo_unreference(batch->buf);
+ batch->buf = NULL;
+ }
+
+ batch->buf = drm_intel_bo_alloc(intel->bufmgr, "batchbuffer",
+ intel->maxBatchSize, 4096);
+ batch->map = batch->buffer;
+ batch->size = intel->maxBatchSize;
+ batch->ptr = batch->map;
+ batch->reserved_space = BATCH_RESERVED;
+ batch->dirty_state = ~0;
+ batch->state_batch_offset = batch->size;
+}
+
+struct intel_batchbuffer *
+intel_batchbuffer_alloc(struct intel_context *intel)
+{
+ struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
+
+ batch->intel = intel;
+ batch->buffer = malloc(intel->maxBatchSize);
+ intel_batchbuffer_reset(batch);
+
+ return batch;
+}
+
+void
+intel_batchbuffer_free(struct intel_batchbuffer *batch)
+{
+ free (batch->buffer);
+ drm_intel_bo_unreference(batch->buf);
+ batch->buf = NULL;
+ free(batch);
+}
+
+
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static void
+do_flush_locked(struct intel_batchbuffer *batch, GLuint used)
+{
+ struct intel_context *intel = batch->intel;
+ int ret = 0;
+ int x_off = 0, y_off = 0;
+
+ drm_intel_bo_subdata(batch->buf, 0, used, batch->buffer);
+ if (batch->state_batch_offset != batch->size) {
+ drm_intel_bo_subdata(batch->buf,
+ batch->state_batch_offset,
+ batch->size - batch->state_batch_offset,
+ batch->buffer + batch->state_batch_offset);
+ }
+
+ batch->ptr = NULL;
+
+ if (!intel->no_hw) {
+ drm_intel_bo_exec(batch->buf, used, NULL, 0,
+ (x_off & 0xffff) | (y_off << 16));
+ }
+
+ if (INTEL_DEBUG & DEBUG_BATCH) {
+ drm_intel_bo_map(batch->buf, GL_FALSE);
+ intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
+ intel->intelScreen->deviceID);
+ drm_intel_bo_unmap(batch->buf);
+
+ if (intel->vtbl.debug_batch != NULL)
+ intel->vtbl.debug_batch(intel);
+ }
+
+ if (ret != 0) {
+ exit(1);
+ }
+ intel->vtbl.new_batch(intel);
+}
+
+void
+_intel_batchbuffer_flush(struct intel_batchbuffer *batch, const char *file,
+ int line)
+{
+ struct intel_context *intel = batch->intel;
+ GLuint used = batch->ptr - batch->map;
+
+ if (intel->first_post_swapbuffers_batch == NULL) {
+ intel->first_post_swapbuffers_batch = intel->batch->buf;
+ drm_intel_bo_reference(intel->first_post_swapbuffers_batch);
+ }
+
+ if (used == 0)
+ return;
+
+ if (INTEL_DEBUG & DEBUG_BATCH)
+ fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
+ used);
+
+ batch->reserved_space = 0;
+
+ if (intel->always_flush_cache) {
+ intel_batchbuffer_emit_mi_flush(batch);
+ used = batch->ptr - batch->map;
+ }
+
+ /* Round batchbuffer usage to 2 DWORDs. */
+
+ if ((used & 4) == 0) {
+ *(GLuint *) (batch->ptr) = 0; /* noop */
+ batch->ptr += 4;
+ used = batch->ptr - batch->map;
+ }
+
+ /* Mark the end of the buffer. */
+ *(GLuint *) (batch->ptr) = MI_BATCH_BUFFER_END;
+ batch->ptr += 4;
+ used = batch->ptr - batch->map;
+ assert (used <= batch->buf->size);
+
+ /* Workaround for recursive batchbuffer flushing: If the window is
+ * moved, we can get into a case where we try to flush during a
+ * flush. What happens is that when we try to grab the lock for
+ * the first flush, we detect that the window moved which then
+ * causes another flush (from the intel_draw_buffer() call in
+ * intelUpdatePageFlipping()). To work around this we reset the
+ * batchbuffer tail pointer before trying to get the lock. This
+ * prevents the nested buffer flush, but a better fix would be to
+ * avoid that in the first place. */
+ batch->ptr = batch->map;
+
+ if (intel->vtbl.finish_batch)
+ intel->vtbl.finish_batch(intel);
+
+ /* Check that we didn't just wrap our batchbuffer at a bad time. */
+ assert(!intel->no_batch_wrap);
+
+ do_flush_locked(batch, used);
+
+ if (INTEL_DEBUG & DEBUG_SYNC) {
+ fprintf(stderr, "waiting for idle\n");
+ drm_intel_bo_map(batch->buf, GL_TRUE);
+ drm_intel_bo_unmap(batch->buf);
+ }
+
+ /* Reset the buffer:
+ */
+ intel_batchbuffer_reset(batch);
+}
+
+
+/* This is the only way buffers get added to the validate list.
+ */
+GLboolean
+intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ assert(delta < buffer->size);
+
+ if (batch->ptr - batch->map > batch->buf->size)
+ printf ("bad relocation ptr %p map %p offset %d size %lu\n",
+ batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
+ ret = drm_intel_bo_emit_reloc(batch->buf, batch->ptr - batch->map,
+ buffer, delta,
+ read_domains, write_domain);
+
+ /*
+ * Using the old buffer offset, write in what the right data would be, in case
+ * the buffer doesn't move and we can short-circuit the relocation processing
+ * in the kernel
+ */
+ intel_batchbuffer_emit_dword (batch, buffer->offset + delta);
+
+ return GL_TRUE;
+}
+
+GLboolean
+intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer,
+ uint32_t read_domains, uint32_t write_domain,
+ uint32_t delta)
+{
+ int ret;
+
+ assert(delta < buffer->size);
+
+ if (batch->ptr - batch->map > batch->buf->size)
+ printf ("bad relocation ptr %p map %p offset %d size %lu\n",
+ batch->ptr, batch->map, batch->ptr - batch->map, batch->buf->size);
+ ret = drm_intel_bo_emit_reloc_fence(batch->buf, batch->ptr - batch->map,
+ buffer, delta,
+ read_domains, write_domain);
+
+ /*
+ * Using the old buffer offset, write in what the right data would
+ * be, in case the buffer doesn't move and we can short-circuit the
+ * relocation processing in the kernel
+ */
+ intel_batchbuffer_emit_dword (batch, buffer->offset + delta);
+
+ return GL_TRUE;
+}
+
+void
+intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ const void *data, GLuint bytes)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(batch, bytes);
+ __memcpy(batch->ptr, data, bytes);
+ batch->ptr += bytes;
+}
+
+/* Emit a pipelined flush to either flush render and texture cache for
+ * reading from an FBO-drawn texture, or flush so that frontbuffer
+ * rendering appears on the screen in DRI1.
+ *
+ * This is also used for the always_flush_cache driconf debug option.
+ */
+void
+intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+
+ if (intel->gen >= 6) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else if (intel->gen >= 4) {
+ BEGIN_BATCH(4);
+ OUT_BATCH(_3DSTATE_PIPE_CONTROL |
+ PIPE_CONTROL_WRITE_FLUSH |
+ PIPE_CONTROL_NO_WRITE);
+ OUT_BATCH(0); /* write address */
+ OUT_BATCH(0); /* write data */
+ OUT_BATCH(0); /* write data */
+ ADVANCE_BATCH();
+ } else {
+ BEGIN_BATCH(1);
+ OUT_BATCH(MI_FLUSH);
+ ADVANCE_BATCH();
+ }
+}
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.h b/src/mesa/drivers/dri/intel/intel_batchbuffer.h
new file mode 100644
index 0000000000..ae53f45511
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.h
@@ -0,0 +1,173 @@
+#ifndef INTEL_BATCHBUFFER_H
+#define INTEL_BATCHBUFFER_H
+
+#include "main/mtypes.h"
+
+#include "intel_context.h"
+#include "intel_bufmgr.h"
+#include "intel_reg.h"
+
+#define BATCH_SZ 16384
+#define BATCH_RESERVED 16
+
+
+struct intel_batchbuffer
+{
+ struct intel_context *intel;
+
+ drm_intel_bo *buf;
+
+ GLubyte *buffer;
+
+ GLubyte *map;
+ GLubyte *ptr;
+
+ GLuint size;
+ uint32_t state_batch_offset;
+
+#ifdef DEBUG
+ /** Tracking of BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH() debugging */
+ struct {
+ GLuint total;
+ GLubyte *start_ptr;
+ } emit;
+#endif
+
+ GLuint dirty_state;
+ GLuint reserved_space;
+};
+
+struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
+ *intel);
+
+void intel_batchbuffer_free(struct intel_batchbuffer *batch);
+
+
+void _intel_batchbuffer_flush(struct intel_batchbuffer *batch,
+ const char *file, int line);
+
+#define intel_batchbuffer_flush(batch) \
+ _intel_batchbuffer_flush(batch, __FILE__, __LINE__)
+
+void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
+
+
+/* Unlike bmBufferData, this currently requires the buffer be mapped.
+ * Consider it a convenience function wrapping multiple
+ * intel_buffer_dword() calls.
+ */
+void intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ const void *data, GLuint bytes);
+
+void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
+ GLuint bytes);
+
+GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer,
+ uint32_t read_domains,
+ uint32_t write_domain,
+ uint32_t offset);
+GLboolean intel_batchbuffer_emit_reloc_fenced(struct intel_batchbuffer *batch,
+ drm_intel_bo *buffer,
+ uint32_t read_domains,
+ uint32_t write_domain,
+ uint32_t offset);
+void intel_batchbuffer_emit_mi_flush(struct intel_batchbuffer *batch);
+
+static INLINE uint32_t float_as_int(float f)
+{
+ union {
+ float f;
+ uint32_t d;
+ } fi;
+
+ fi.f = f;
+ return fi.d;
+}
+
+/* Inline functions - might actually be better off with these
+ * non-inlined. Certainly better off switching all command packets to
+ * be passed as structs rather than dwords, but that's a little bit of
+ * work...
+ */
+static INLINE GLint
+intel_batchbuffer_space(struct intel_batchbuffer *batch)
+{
+ return (batch->state_batch_offset - batch->reserved_space) -
+ (batch->ptr - batch->map);
+}
+
+
+static INLINE void
+intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
+{
+#ifdef DEBUG
+ assert(intel_batchbuffer_space(batch) >= 4);
+#endif
+ *(GLuint *) (batch->ptr) = dword;
+ batch->ptr += 4;
+}
+
+static INLINE void
+intel_batchbuffer_emit_float(struct intel_batchbuffer *batch, float f)
+{
+ intel_batchbuffer_emit_dword(batch, float_as_int(f));
+}
+
+static INLINE void
+intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
+ GLuint sz)
+{
+#ifdef DEBUG
+ assert(sz < batch->size - 8);
+#endif
+ if (intel_batchbuffer_space(batch) < sz)
+ intel_batchbuffer_flush(batch);
+}
+
+static INLINE void
+intel_batchbuffer_begin(struct intel_batchbuffer *batch, int n)
+{
+ intel_batchbuffer_require_space(batch, n * 4);
+#ifdef DEBUG
+ assert(batch->map);
+ assert(batch->emit.start_ptr == NULL);
+ batch->emit.total = n * 4;
+ batch->emit.start_ptr = batch->ptr;
+#endif
+}
+
+static INLINE void
+intel_batchbuffer_advance(struct intel_batchbuffer *batch)
+{
+#ifdef DEBUG
+ unsigned int _n = batch->ptr - batch->emit.start_ptr;
+ assert(batch->emit.start_ptr != NULL);
+ if (_n != batch->emit.total) {
+ fprintf(stderr, "ADVANCE_BATCH: %d of %d dwords emitted\n",
+ _n, batch->emit.total);
+ abort();
+ }
+ batch->emit.start_ptr = NULL;
+#endif
+}
+
+/* Here are the crusty old macros, to be removed:
+ */
+#define BATCH_LOCALS
+
+#define BEGIN_BATCH(n) intel_batchbuffer_begin(intel->batch, n)
+#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
+#define OUT_BATCH_F(f) intel_batchbuffer_emit_float(intel->batch,f)
+#define OUT_RELOC(buf, read_domains, write_domain, delta) do { \
+ intel_batchbuffer_emit_reloc(intel->batch, buf, \
+ read_domains, write_domain, delta); \
+} while (0)
+#define OUT_RELOC_FENCED(buf, read_domains, write_domain, delta) do { \
+ intel_batchbuffer_emit_reloc_fenced(intel->batch, buf, \
+ read_domains, write_domain, delta); \
+} while (0)
+
+#define ADVANCE_BATCH() intel_batchbuffer_advance(intel->batch);
+
+#endif
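
A minimal usage sketch for the batch emission macros defined above (illustrative only, not part of this commit): packets are wrapped in BEGIN_BATCH()/OUT_BATCH()/ADVANCE_BATCH(), mirroring intel_batchbuffer_emit_mi_flush() in intel_batchbuffer.c. The two zero dwords stand in for a real packet, just as the noop padding in _intel_batchbuffer_flush() writes a literal 0.

static void
emit_two_noops(struct intel_context *intel)
{
   BEGIN_BATCH(2);    /* reserve space for 2 dwords, flushing the batch first if needed */
   OUT_BATCH(0);      /* MI_NOOP */
   OUT_BATCH(0);      /* MI_NOOP */
   ADVANCE_BATCH();   /* in DEBUG builds, checks that exactly 2 dwords were emitted */
}
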
diff --git a/src/mesa/drivers/dri/intel/intel_blit.c b/src/mesa/drivers/dri/intel/intel_blit.c
new file mode 100644
index 0000000000..2c85ad3c36
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_blit.c
@@ -0,0 +1,512 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/mtypes.h"
+#include "main/context.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_reg.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+static GLuint translate_raster_op(GLenum logicop)
+{
+ switch(logicop) {
+ case GL_CLEAR: return 0x00;
+ case GL_AND: return 0x88;
+ case GL_AND_REVERSE: return 0x44;
+ case GL_COPY: return 0xCC;
+ case GL_AND_INVERTED: return 0x22;
+ case GL_NOOP: return 0xAA;
+ case GL_XOR: return 0x66;
+ case GL_OR: return 0xEE;
+ case GL_NOR: return 0x11;
+ case GL_EQUIV: return 0x99;
+ case GL_INVERT: return 0x55;
+ case GL_OR_REVERSE: return 0xDD;
+ case GL_COPY_INVERTED: return 0x33;
+ case GL_OR_INVERTED: return 0xBB;
+ case GL_NAND: return 0x77;
+ case GL_SET: return 0xFF;
+ default: return 0;
+ }
+}
+
+static uint32_t
+br13_for_cpp(int cpp)
+{
+ switch (cpp) {
+ case 4:
+ return BR13_8888;
+ break;
+ case 2:
+ return BR13_565;
+ break;
+ case 1:
+ return BR13_8;
+ break;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+/* Copy BitBlt
+ */
+GLboolean
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ drm_intel_bo *src_buffer,
+ GLuint src_offset,
+ uint32_t src_tiling,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort src_x, GLshort src_y,
+ GLshort dst_x, GLshort dst_y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ GLuint CMD, BR13, pass = 0;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ drm_intel_bo *aper_array[3];
+ BATCH_LOCALS;
+
+ /* Blits are in a different ringbuffer so we don't use them. */
+ if (intel->gen >= 6)
+ return GL_FALSE;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return GL_FALSE;
+ if (dst_tiling == I915_TILING_Y)
+ return GL_FALSE;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ if (src_offset & 4095)
+ return GL_FALSE;
+ if (src_tiling == I915_TILING_Y)
+ return GL_FALSE;
+ }
+
+ /* do space check before going any further */
+ do {
+ aper_array[0] = intel->batch->buf;
+ aper_array[1] = dst_buffer;
+ aper_array[2] = src_buffer;
+
+ if (dri_bufmgr_check_aperture_space(aper_array, 3) != 0) {
+ intel_batchbuffer_flush(intel->batch);
+ pass++;
+ } else
+ break;
+ } while (pass < 2);
+
+ if (pass >= 2)
+ return GL_FALSE;
+
+ intel_batchbuffer_require_space(intel->batch, 8 * 4);
+ DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ BR13 = br13_for_cpp(cpp) | translate_raster_op(logic_op) << 16;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ CMD = XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ break;
+ default:
+ return GL_FALSE;
+ }
+
+#ifndef I915
+ if (dst_tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+ if (src_tiling != I915_TILING_NONE) {
+ CMD |= XY_SRC_TILED;
+ src_pitch /= 4;
+ }
+#endif
+
+ if (dst_y2 <= dst_y || dst_x2 <= dst_x) {
+ return GL_TRUE;
+ }
+
+ assert(dst_x < dst_x2);
+ assert(dst_y < dst_y2);
+
+ BEGIN_BATCH(8);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13 | (uint16_t)dst_pitch);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH((uint16_t)src_pitch);
+ OUT_RELOC_FENCED(src_buffer,
+ I915_GEM_DOMAIN_RENDER, 0,
+ src_offset);
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Use blitting to clear the renderbuffers named by 'flags'.
+ * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferIndexes field
+ * since that might include software renderbuffers or renderbuffers
+ * which we're clearing with triangles.
+ * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
+ */
+void
+intelClearWithBlit(GLcontext *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint clear_depth;
+ GLboolean all;
+ GLint cx, cy, cw, ch;
+ BATCH_LOCALS;
+
+ /* Blits are in a different ringbuffer so we don't use them. */
+ assert(intel->gen < 6);
+
+ /*
+ * Compute values for clearing the buffers.
+ */
+ clear_depth = 0;
+ if (mask & BUFFER_BIT_DEPTH) {
+ clear_depth = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
+ }
+ if (mask & BUFFER_BIT_STENCIL) {
+ clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
+ }
+
+ cx = fb->_Xmin;
+ if (fb->Name == 0)
+ cy = ctx->DrawBuffer->Height - fb->_Ymax;
+ else
+ cy = fb->_Ymin;
+ cw = fb->_Xmax - fb->_Xmin;
+ ch = fb->_Ymax - fb->_Ymin;
+
+ if (cw == 0 || ch == 0)
+ return;
+
+ GLuint buf;
+ all = (cw == fb->Width && ch == fb->Height);
+
+ /* Loop over all renderbuffers */
+ for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
+ const GLbitfield bufBit = 1 << buf;
+ struct intel_renderbuffer *irb;
+ drm_intel_bo *write_buffer;
+ int x1, y1, x2, y2;
+ uint32_t clear_val;
+ uint32_t BR13, CMD;
+ int pitch, cpp;
+ drm_intel_bo *aper_array[2];
+
+ if (!(mask & bufBit))
+ continue;
+
+ /* OK, clear this renderbuffer */
+ irb = intel_get_renderbuffer(fb, buf);
+ write_buffer = intel_region_buffer(intel, irb->region,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+ x1 = cx + irb->region->draw_x;
+ y1 = cy + irb->region->draw_y;
+ x2 = cx + cw + irb->region->draw_x;
+ y2 = cy + ch + irb->region->draw_y;
+
+ pitch = irb->region->pitch;
+ cpp = irb->region->cpp;
+
+ DBG("%s dst:buf(%p)/%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ irb->region->buffer, (pitch * cpp),
+ x1, y1, x2 - x1, y2 - y1);
+
+ BR13 = br13_for_cpp(cpp) | 0xf0 << 16;
+ CMD = XY_COLOR_BLT_CMD;
+
+ /* Setup the blit command */
+ if (cpp == 4) {
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ if (mask & BUFFER_BIT_DEPTH)
+ CMD |= XY_BLT_WRITE_RGB;
+ if (mask & BUFFER_BIT_STENCIL)
+ CMD |= XY_BLT_WRITE_ALPHA;
+ } else {
+ /* clearing RGBA */
+ CMD |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ }
+ }
+
+ assert(irb->region->tiling != I915_TILING_Y);
+
+#ifndef I915
+ if (irb->region->tiling != I915_TILING_NONE) {
+ CMD |= XY_DST_TILED;
+ pitch /= 4;
+ }
+#endif
+ BR13 |= (pitch * cpp);
+
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ clear_val = clear_depth;
+ } else {
+ uint8_t clear[4];
+ GLclampf *color = ctx->Color.ClearColor;
+
+ CLAMPED_FLOAT_TO_UBYTE(clear[0], color[0]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[1], color[1]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[2], color[2]);
+ CLAMPED_FLOAT_TO_UBYTE(clear[3], color[3]);
+
+ switch (irb->Base.Format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ clear_val = PACK_COLOR_8888(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_RGB565:
+ clear_val = PACK_COLOR_565(clear[0], clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB4444:
+ clear_val = PACK_COLOR_4444(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_ARGB1555:
+ clear_val = PACK_COLOR_1555(clear[3], clear[0],
+ clear[1], clear[2]);
+ break;
+ case MESA_FORMAT_A8:
+ clear_val = PACK_COLOR_8888(clear[3], clear[3],
+ clear[3], clear[3]);
+ break;
+ default:
+ _mesa_problem(ctx, "Unexpected renderbuffer format: %d\n",
+ irb->Base.Format);
+ clear_val = 0;
+ }
+ }
+
+ assert(x1 < x2);
+ assert(y1 < y2);
+
+ /* do space check before going any further */
+ aper_array[0] = intel->batch->buf;
+ aper_array[1] = write_buffer;
+
+ if (drm_intel_bufmgr_check_aperture_space(aper_array,
+ ARRAY_SIZE(aper_array)) != 0) {
+ intel_batchbuffer_flush(intel->batch);
+ }
+
+ BEGIN_BATCH(6);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y1 << 16) | x1);
+ OUT_BATCH((y2 << 16) | x2);
+ OUT_RELOC_FENCED(write_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ 0);
+ OUT_BATCH(clear_val);
+ ADVANCE_BATCH();
+
+ if (intel->always_flush_cache)
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL)
+ mask &= ~(BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL);
+ else
+ mask &= ~bufBit; /* turn off bit, for faster loop exit */
+ }
+}
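
Caller-side sketch for the clear path above (standard GL; whether the blit path is actually taken is decided by the intelClear() logic in intel_clear.c, which is not part of this hunk):

glClearColor(0.0f, 0.5f, 1.0f, 1.0f);   /* packed per the renderbuffer format switch above */
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
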
+
+GLboolean
+intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLubyte *src_bits, GLuint src_size,
+ GLuint fg_color,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort x, GLshort y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ int dwords = ALIGN(src_size, 8) / 4;
+ uint32_t opcode, br13, blit_cmd;
+
+ /* Blits are in a different ringbuffer so we don't use them. */
+ if (intel->gen >= 6)
+ return GL_FALSE;
+
+ if (dst_tiling != I915_TILING_NONE) {
+ if (dst_offset & 4095)
+ return GL_FALSE;
+ if (dst_tiling == I915_TILING_Y)
+ return GL_FALSE;
+ }
+
+ assert( logic_op - GL_CLEAR >= 0 );
+ assert( logic_op - GL_CLEAR < 0x10 );
+ assert(dst_pitch > 0);
+
+ if (w < 0 || h < 0)
+ return GL_TRUE;
+
+ dst_pitch *= cpp;
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d, %d bytes %d dwords\n",
+ __FUNCTION__,
+ dst_buffer, dst_pitch, dst_offset, x, y, w, h, src_size, dwords);
+
+ intel_batchbuffer_require_space( intel->batch,
+ (8 * 4) +
+ (3 * 4) +
+ dwords * 4 );
+
+ opcode = XY_SETUP_BLT_CMD;
+ if (cpp == 4)
+ opcode |= XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+#ifndef I915
+ if (dst_tiling != I915_TILING_NONE) {
+ opcode |= XY_DST_TILED;
+ dst_pitch /= 4;
+ }
+#endif
+
+ br13 = dst_pitch | (translate_raster_op(logic_op) << 16) | (1 << 29);
+ br13 |= br13_for_cpp(cpp);
+
+ blit_cmd = XY_TEXT_IMMEDIATE_BLIT_CMD | XY_TEXT_BYTE_PACKED; /* packing? */
+ if (dst_tiling != I915_TILING_NONE)
+ blit_cmd |= XY_DST_TILED;
+
+ BEGIN_BATCH(8 + 3);
+ OUT_BATCH(opcode);
+ OUT_BATCH(br13);
+ OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
+ OUT_BATCH((100 << 16) | 100); /* clip x2, y2 */
+ OUT_RELOC_FENCED(dst_buffer,
+ I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
+ dst_offset);
+ OUT_BATCH(0); /* bg */
+ OUT_BATCH(fg_color); /* fg */
+ OUT_BATCH(0); /* pattern base addr */
+
+ OUT_BATCH(blit_cmd | ((3 - 2) + dwords));
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ ADVANCE_BATCH();
+
+ intel_batchbuffer_data( intel->batch,
+ src_bits,
+ dwords * 4 );
+
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+
+ return GL_TRUE;
+}
+
+/* We don't have a memmove-type blit like some other hardware, so we'll do a
+ * rectangular blit covering a large space, then emit a 1-scanline blit at the
+ * end to cover the remainder if needed.
+ */
+void
+intel_emit_linear_blit(struct intel_context *intel,
+ drm_intel_bo *dst_bo,
+ unsigned int dst_offset,
+ drm_intel_bo *src_bo,
+ unsigned int src_offset,
+ unsigned int size)
+{
+ GLuint pitch, height;
+ GLboolean ok;
+
+ /* Blits are in a different ringbuffer so we don't use them. */
+ assert(intel->gen < 6);
+
+ /* The pitch is a signed value. */
+ pitch = MIN2(size, (1 << 15) - 1);
+ height = size / pitch;
+ ok = intelEmitCopyBlit(intel, 1,
+ pitch, src_bo, src_offset, I915_TILING_NONE,
+ pitch, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ pitch, height, /* w, h */
+ GL_COPY);
+ assert(ok);
+
+ src_offset += pitch * height;
+ dst_offset += pitch * height;
+ size -= pitch * height;
+ assert (size < (1 << 15));
+ if (size != 0) {
+ ok = intelEmitCopyBlit(intel, 1,
+ size, src_bo, src_offset, I915_TILING_NONE,
+ size, dst_bo, dst_offset, I915_TILING_NONE,
+ 0, 0, /* src x/y */
+ 0, 0, /* dst x/y */
+ size, 1, /* w, h */
+ GL_COPY);
+ assert(ok);
+ }
+}
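
A worked example of the split intel_emit_linear_blit() performs (illustrative only; the 100000-byte size is hypothetical):

/* size = 100000 bytes:
 *   pitch  = MIN2(100000, (1 << 15) - 1) = 32767
 *   height = 100000 / 32767 = 3, so the rectangular pass copies 3 * 32767 = 98301 bytes
 *   size  -= 98301, leaving 1699 bytes for the final 1699 x 1 scanline blit
 */
intel_emit_linear_blit(intel, dst_bo, 0, src_bo, 0, 100000);
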
diff --git a/src/mesa/drivers/dri/intel/intel_blit.h b/src/mesa/drivers/dri/intel/intel_blit.h
new file mode 100644
index 0000000000..70d277df3c
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_blit.h
@@ -0,0 +1,73 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BLIT_H
+#define INTEL_BLIT_H
+
+#include "intel_context.h"
+
+extern void intelCopyBuffer(const __DRIdrawable * dpriv,
+ const drm_clip_rect_t * rect);
+
+extern void intelClearWithBlit(GLcontext * ctx, GLbitfield mask);
+
+GLboolean
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ drm_intel_bo *src_buffer,
+ GLuint src_offset,
+ uint32_t src_tiling,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort srcx, GLshort srcy,
+ GLshort dstx, GLshort dsty,
+ GLshort w, GLshort h,
+ GLenum logicop );
+
+GLboolean
+intelEmitImmediateColorExpandBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLubyte *src_bits, GLuint src_size,
+ GLuint fg_color,
+ GLshort dst_pitch,
+ drm_intel_bo *dst_buffer,
+ GLuint dst_offset,
+ uint32_t dst_tiling,
+ GLshort x, GLshort y,
+ GLshort w, GLshort h,
+ GLenum logic_op);
+void intel_emit_linear_blit(struct intel_context *intel,
+ drm_intel_bo *dst_bo,
+ unsigned int dst_offset,
+ drm_intel_bo *src_bo,
+ unsigned int src_offset,
+ unsigned int size);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
new file mode 100644
index 0000000000..8ab41f8d27
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -0,0 +1,734 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/imports.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/bufferobj.h"
+
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+
+static GLboolean
+intel_bufferobj_unmap(GLcontext * ctx,
+ GLenum target, struct gl_buffer_object *obj);
+
+/** Allocates a new drm_intel_bo to store the data for the buffer object. */
+static void
+intel_bufferobj_alloc_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
+ intel_obj->Base.Size, 64);
+}
+
+/**
+ * There is some duplication between mesa's bufferobjects and our
+ * bufmgr buffers. Both have an integer handle and a hashtable to
+ * look up an opaque structure. It would be nice if the handles and
+ * internal structure were somehow shared.
+ */
+static struct gl_buffer_object *
+intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
+
+ _mesa_initialize_buffer_object(&obj->Base, name, target);
+
+ obj->buffer = NULL;
+
+ return &obj->Base;
+}
+
+/* Break the COW tie to the region. The region gets to keep the data.
+ */
+void
+intel_bufferobj_release_region(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region->buffer == intel_obj->buffer);
+ intel_obj->region->pbo = NULL;
+ intel_obj->region = NULL;
+
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+}
+
+/* Break the COW tie to the region. Both the pbo and the region end
+ * up with a copy of the data.
+ */
+void
+intel_bufferobj_cow(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region);
+ intel_region_cow(intel, intel_obj->region);
+}
+
+
+/**
+ * Deallocate/free a vertex/pixel buffer object.
+ * Called via glDeleteBuffersARB().
+ */
+static void
+intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* Buffer objects are automatically unmapped when deleting according
+ * to the spec, but Mesa doesn't do UnmapBuffer for us at context destroy
+ * (though it does if you call glDeleteBuffers)
+ */
+ if (obj->Pointer)
+ intel_bufferobj_unmap(ctx, 0, obj);
+
+ free(intel_obj->sys_buffer);
+ if (intel_obj->region) {
+ intel_bufferobj_release_region(intel, intel_obj);
+ }
+ else if (intel_obj->buffer) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ }
+
+ free(intel_obj);
+}
+
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via ctx->Driver.BufferData().
+ * \return GL_TRUE for success, GL_FALSE if out of memory
+ */
+static GLboolean
+intel_bufferobj_data(GLcontext * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ intel_obj->Base.Size = size;
+ intel_obj->Base.Usage = usage;
+
+ assert(!obj->Pointer); /* Mesa should have unmapped it */
+
+ if (intel_obj->region)
+ intel_bufferobj_release_region(intel, intel_obj);
+
+ if (intel_obj->buffer != NULL) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+ }
+ free(intel_obj->sys_buffer);
+ intel_obj->sys_buffer = NULL;
+
+ if (size != 0) {
+#ifdef I915
+ /* On pre-965, stick VBOs in system memory, as we're always doing swtnl
+ * with their contents anyway.
+ */
+ if (target == GL_ARRAY_BUFFER || target == GL_ELEMENT_ARRAY_BUFFER) {
+ intel_obj->sys_buffer = malloc(size);
+ if (intel_obj->sys_buffer != NULL) {
+ if (data != NULL)
+ memcpy(intel_obj->sys_buffer, data, size);
+ return GL_TRUE;
+ }
+ }
+#endif
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ if (!intel_obj->buffer)
+ return GL_FALSE;
+
+ if (data != NULL)
+ drm_intel_bo_subdata(intel_obj->buffer, 0, size, data);
+ }
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Replace data in a subrange of buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+intel_bufferobj_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ if (intel_obj->sys_buffer)
+ memcpy((char *)intel_obj->sys_buffer + offset, data, size);
+ else {
+ /* Flush any existing batchbuffer that might reference this data. */
+ if (drm_intel_bo_busy(intel_obj->buffer) ||
+ drm_intel_bo_references(intel->batch->buf, intel_obj->buffer)) {
+ drm_intel_bo *temp_bo;
+
+ temp_bo = drm_intel_bo_alloc(intel->bufmgr, "subdata temp", size, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, size, data);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, offset,
+ temp_bo, 0,
+ size);
+
+ drm_intel_bo_unreference(temp_bo);
+ } else {
+ drm_intel_bo_subdata(intel_obj->buffer, offset, size, data);
+ }
+ }
+}
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+static void
+intel_bufferobj_get_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ if (intel_obj->sys_buffer)
+ memcpy(data, (char *)intel_obj->sys_buffer + offset, size);
+ else
+ drm_intel_bo_get_subdata(intel_obj->buffer, offset, size, data);
+}
+
+
+
+/**
+ * Called via glMapBufferARB().
+ */
+static void *
+intel_bufferobj_map(GLcontext * ctx,
+ GLenum target,
+ GLenum access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ GLboolean read_only = (access == GL_READ_ONLY_ARB);
+ GLboolean write_only = (access == GL_WRITE_ONLY_ARB);
+
+ assert(intel_obj);
+
+ if (intel_obj->sys_buffer) {
+ obj->Pointer = intel_obj->sys_buffer;
+ obj->Length = obj->Size;
+ obj->Offset = 0;
+ return obj->Pointer;
+ }
+
+ /* Flush any existing batchbuffer that might reference this data. */
+ if (drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
+ intel_flush(ctx);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ if (write_only) {
+ drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+ intel_obj->mapped_gtt = GL_TRUE;
+ } else {
+ drm_intel_bo_map(intel_obj->buffer, !read_only);
+ intel_obj->mapped_gtt = GL_FALSE;
+ }
+
+ obj->Pointer = intel_obj->buffer->virtual;
+ obj->Length = obj->Size;
+ obj->Offset = 0;
+
+ return obj->Pointer;
+}
+
+/**
+ * Called via glMapBufferRange().
+ *
+ * The goal of this extension is to allow apps to accumulate their rendering
+ * at the same time as they accumulate their buffer object. Without it,
+ * you'd end up blocking on execution of rendering every time you mapped
+ * the buffer to put new data in.
+ *
+ * We support it in 3 ways: If unsynchronized, then don't bother
+ * flushing the batchbuffer before mapping the buffer, which can save blocking
+ * in many cases. If we would still block, and they allow the whole buffer
+ * to be invalidated, then just allocate a new buffer to replace the old one.
+ * If not, and we'd block, and they allow the subrange of the buffer to be
+ * invalidated, then we can make a new little BO, let them write into that,
+ * and blit it into the real BO at unmap time.
+ */
+static void *
+intel_bufferobj_map_range(GLcontext * ctx,
+ GLenum target, GLintptr offset, GLsizeiptr length,
+ GLbitfield access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ /* _mesa_MapBufferRange (GL entrypoint) sets these, but the vbo module also
+ * internally uses our functions directly.
+ */
+ obj->Offset = offset;
+ obj->Length = length;
+ obj->AccessFlags = access;
+
+ if (intel_obj->sys_buffer) {
+ obj->Pointer = intel_obj->sys_buffer + offset;
+ return obj->Pointer;
+ }
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ /* If the mapping is synchronized with other GL operations, flush
+ * the batchbuffer so that GEM knows about the buffer access for later
+ * syncing.
+ */
+ if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
+ drm_intel_bo_references(intel->batch->buf, intel_obj->buffer))
+ intel_flush(ctx);
+
+ if (intel_obj->buffer == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ /* If the user doesn't care about existing buffer contents and mapping
+ * would cause us to block, then throw out the old buffer.
+ */
+ if (!(access & GL_MAP_UNSYNCHRONIZED_BIT) &&
+ (access & GL_MAP_INVALIDATE_BUFFER_BIT) &&
+ drm_intel_bo_busy(intel_obj->buffer)) {
+ drm_intel_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = drm_intel_bo_alloc(intel->bufmgr, "bufferobj",
+ intel_obj->Base.Size, 64);
+ }
+
+ /* If the user is mapping a range of an active buffer object but
+ * doesn't require the current contents of that range, make a new
+ * BO, and we'll copy what they put in there out at unmap or
+ * FlushRange time.
+ */
+ if ((access & GL_MAP_INVALIDATE_RANGE_BIT) &&
+ drm_intel_bo_busy(intel_obj->buffer)) {
+ if (access & GL_MAP_FLUSH_EXPLICIT_BIT) {
+ intel_obj->range_map_buffer = malloc(length);
+ obj->Pointer = intel_obj->range_map_buffer;
+ } else {
+ intel_obj->range_map_bo = drm_intel_bo_alloc(intel->bufmgr,
+ "range map",
+ length, 64);
+ if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->range_map_bo);
+ intel_obj->mapped_gtt = GL_TRUE;
+ } else {
+ drm_intel_bo_map(intel_obj->range_map_bo,
+ (access & GL_MAP_WRITE_BIT) != 0);
+ intel_obj->mapped_gtt = GL_FALSE;
+ }
+ obj->Pointer = intel_obj->range_map_bo->virtual;
+ }
+ return obj->Pointer;
+ }
+
+ if (!(access & GL_MAP_READ_BIT)) {
+ drm_intel_gem_bo_map_gtt(intel_obj->buffer);
+ intel_obj->mapped_gtt = GL_TRUE;
+ } else {
+ drm_intel_bo_map(intel_obj->buffer, (access & GL_MAP_WRITE_BIT) != 0);
+ intel_obj->mapped_gtt = GL_FALSE;
+ }
+
+ obj->Pointer = intel_obj->buffer->virtual + offset;
+ return obj->Pointer;
+}
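
For reference, a caller-side sketch of the streaming pattern the range-map path above is built for (standard GL entry points; stream_offset and vertex_data are hypothetical):

/* The BO is busy and GL_MAP_INVALIDATE_RANGE_BIT is set, so the driver hands
 * back a temporary range_map_bo and blits it into place at unmap time instead
 * of stalling on the GPU.
 */
void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, stream_offset, 4096,
                             GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
memcpy(ptr, vertex_data, 4096);
glUnmapBuffer(GL_ARRAY_BUFFER);
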
+
+/* Ideally we'd use a BO to avoid taking up cache space for the temporary
+ * data, but FlushMappedBufferRange may be followed by further writes to
+ * the pointer, so we would have to re-map after emitting our blit, which
+ * would defeat the point.
+ */
+static void
+intel_bufferobj_flush_mapped_range(GLcontext *ctx, GLenum target,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+ drm_intel_bo *temp_bo;
+
+ /* Unless we're in the range map using a temporary system buffer,
+ * there's no work to do.
+ */
+ if (intel_obj->range_map_buffer == NULL)
+ return;
+
+ temp_bo = drm_intel_bo_alloc(intel->bufmgr, "range map flush", length, 64);
+
+ drm_intel_bo_subdata(temp_bo, 0, length, intel_obj->range_map_buffer);
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset + offset,
+ temp_bo, 0,
+ length);
+
+ drm_intel_bo_unreference(temp_bo);
+}
+
+
+/**
+ * Called via glUnmapBuffer().
+ */
+static GLboolean
+intel_bufferobj_unmap(GLcontext * ctx,
+ GLenum target, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ assert(obj->Pointer);
+ if (intel_obj->sys_buffer != NULL) {
+ /* always keep the mapping around. */
+ } else if (intel_obj->range_map_buffer != NULL) {
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+ free(intel_obj->range_map_buffer);
+ intel_obj->range_map_buffer = NULL;
+ } else if (intel_obj->range_map_bo != NULL) {
+ if (intel_obj->mapped_gtt) {
+ drm_intel_gem_bo_unmap_gtt(intel_obj->range_map_bo);
+ } else {
+ drm_intel_bo_unmap(intel_obj->range_map_bo);
+ }
+
+ intel_emit_linear_blit(intel,
+ intel_obj->buffer, obj->Offset,
+ intel_obj->range_map_bo, 0,
+ obj->Length);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+
+ drm_intel_bo_unreference(intel_obj->range_map_bo);
+ intel_obj->range_map_bo = NULL;
+ } else if (intel_obj->buffer != NULL) {
+ if (intel_obj->mapped_gtt) {
+ drm_intel_gem_bo_unmap_gtt(intel_obj->buffer);
+ } else {
+ drm_intel_bo_unmap(intel_obj->buffer);
+ }
+ }
+ obj->Pointer = NULL;
+ obj->Offset = 0;
+ obj->Length = 0;
+
+ return GL_TRUE;
+}
+
+drm_intel_bo *
+intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj, GLuint flag)
+{
+ if (intel_obj->region) {
+ if (flag == INTEL_WRITE_PART)
+ intel_bufferobj_cow(intel, intel_obj);
+ else if (flag == INTEL_WRITE_FULL) {
+ intel_bufferobj_release_region(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ }
+ }
+
+ if (intel_obj->buffer == NULL) {
+ void *sys_buffer = intel_obj->sys_buffer;
+
+ /* only one of buffer and sys_buffer could be non-NULL */
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ intel_obj->sys_buffer = NULL;
+
+ intel_bufferobj_subdata(&intel->ctx,
+ GL_ARRAY_BUFFER_ARB,
+ 0,
+ intel_obj->Base.Size,
+ sys_buffer,
+ &intel_obj->Base);
+ free(sys_buffer);
+ intel_obj->sys_buffer = NULL;
+ }
+
+ return intel_obj->buffer;
+}
+
+static void
+intel_bufferobj_copy_subdata(GLcontext *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr read_offset, GLintptr write_offset,
+ GLsizeiptr size)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_src = intel_buffer_object(src);
+ struct intel_buffer_object *intel_dst = intel_buffer_object(dst);
+ drm_intel_bo *src_bo, *dst_bo;
+
+ if (size == 0)
+ return;
+
+ /* If we're in system memory, just map and memcpy. */
+ if (intel_src->sys_buffer || intel_dst->sys_buffer) {
+ /* The same buffer may be used, but note that regions copied may
+ * not overlap.
+ */
+ if (src == dst) {
+ char *ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
+ GL_READ_WRITE, dst);
+ memcpy(ptr + write_offset, ptr + read_offset, size);
+ intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
+ } else {
+ const char *src_ptr;
+ char *dst_ptr;
+
+ src_ptr = intel_bufferobj_map(ctx, GL_COPY_READ_BUFFER,
+ GL_READ_ONLY, src);
+ dst_ptr = intel_bufferobj_map(ctx, GL_COPY_WRITE_BUFFER,
+ GL_WRITE_ONLY, dst);
+
+ memcpy(dst_ptr + write_offset, src_ptr + read_offset, size);
+
+ intel_bufferobj_unmap(ctx, GL_COPY_READ_BUFFER, src);
+ intel_bufferobj_unmap(ctx, GL_COPY_WRITE_BUFFER, dst);
+ }
+ return;
+ }
+
+ /* Otherwise, we have real BOs, so blit them. */
+
+ dst_bo = intel_bufferobj_buffer(intel, intel_dst, INTEL_WRITE_PART);
+ src_bo = intel_bufferobj_buffer(intel, intel_src, INTEL_READ);
+
+ intel_emit_linear_blit(intel,
+ dst_bo, write_offset,
+ src_bo, read_offset, size);
+
+ /* Since we've emitted some blits to buffers that will (likely) be used
+ * in rendering operations in other cache domains in this batch, emit a
+ * flush. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer.
+ */
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+}
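
The blit path above backs GL_ARB_copy_buffer; a caller-side sketch (standard GL; the buffer names and sizes are hypothetical):

glBindBuffer(GL_COPY_READ_BUFFER, src_vbo);
glBindBuffer(GL_COPY_WRITE_BUFFER, dst_vbo);
/* Copy 1024 bytes from offset 0 of src_vbo to offset 256 of dst_vbo without a
 * CPU map; with real BOs on both sides this reaches intel_emit_linear_blit().
 */
glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0, 256, 1024);
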
+
+#if FEATURE_APPLE_object_purgeable
+static GLenum
+intel_buffer_purgeable(GLcontext * ctx,
+ drm_intel_bo *buffer,
+ GLenum option)
+{
+ int retained = 0;
+
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_DONTNEED);
+
+ return retained ? GL_VOLATILE_APPLE : GL_RELEASED_APPLE;
+}
+
+static GLenum
+intel_buffer_object_purgeable(GLcontext * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ struct intel_buffer_object *intel;
+
+ intel = intel_buffer_object (obj);
+ if (intel->buffer != NULL)
+ return intel_buffer_purgeable (ctx, intel->buffer, option);
+
+ if (option == GL_RELEASED_APPLE) {
+ if (intel->sys_buffer != NULL) {
+ free(intel->sys_buffer);
+ intel->sys_buffer = NULL;
+ }
+
+ return GL_RELEASED_APPLE;
+ } else {
+ /* XXX Create the buffer and madvise(MADV_DONTNEED)? */
+ return intel_buffer_purgeable (ctx,
+ intel_bufferobj_buffer(intel_context(ctx),
+ intel, INTEL_READ),
+ option);
+ }
+}
+
+static GLenum
+intel_texture_object_purgeable(GLcontext * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable (ctx, intel->mt->region->buffer, option);
+}
+
+static GLenum
+intel_render_object_purgeable(GLcontext * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->region == NULL)
+ return GL_RELEASED_APPLE;
+
+ return intel_buffer_purgeable (ctx, intel->region->buffer, option);
+}
+
+static GLenum
+intel_buffer_unpurgeable(GLcontext * ctx,
+ drm_intel_bo *buffer,
+ GLenum option)
+{
+ int retained;
+
+ retained = 0;
+ if (buffer != NULL)
+ retained = drm_intel_bo_madvise (buffer, I915_MADV_WILLNEED);
+
+ return retained ? GL_RETAINED_APPLE : GL_UNDEFINED_APPLE;
+}
+
+static GLenum
+intel_buffer_object_unpurgeable(GLcontext * ctx,
+ struct gl_buffer_object *obj,
+ GLenum option)
+{
+ return intel_buffer_unpurgeable (ctx, intel_buffer_object (obj)->buffer, option);
+}
+
+static GLenum
+intel_texture_object_unpurgeable(GLcontext * ctx,
+ struct gl_texture_object *obj,
+ GLenum option)
+{
+ struct intel_texture_object *intel;
+
+ intel = intel_texture_object(obj);
+ if (intel->mt == NULL || intel->mt->region == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable (ctx, intel->mt->region->buffer, option);
+}
+
+static GLenum
+intel_render_object_unpurgeable(GLcontext * ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option)
+{
+ struct intel_renderbuffer *intel;
+
+ intel = intel_renderbuffer(obj);
+ if (intel->region == NULL)
+ return GL_UNDEFINED_APPLE;
+
+ return intel_buffer_unpurgeable (ctx, intel->region->buffer, option);
+}
+#endif
+
+void
+intelInitBufferObjectFuncs(struct dd_function_table *functions)
+{
+ functions->NewBufferObject = intel_bufferobj_alloc;
+ functions->DeleteBuffer = intel_bufferobj_free;
+ functions->BufferData = intel_bufferobj_data;
+ functions->BufferSubData = intel_bufferobj_subdata;
+ functions->GetBufferSubData = intel_bufferobj_get_subdata;
+ functions->MapBuffer = intel_bufferobj_map;
+ functions->MapBufferRange = intel_bufferobj_map_range;
+ functions->FlushMappedBufferRange = intel_bufferobj_flush_mapped_range;
+ functions->UnmapBuffer = intel_bufferobj_unmap;
+ functions->CopyBufferSubData = intel_bufferobj_copy_subdata;
+
+#if FEATURE_APPLE_object_purgeable
+ functions->BufferObjectPurgeable = intel_buffer_object_purgeable;
+ functions->TextureObjectPurgeable = intel_texture_object_purgeable;
+ functions->RenderObjectPurgeable = intel_render_object_purgeable;
+
+ functions->BufferObjectUnpurgeable = intel_buffer_object_unpurgeable;
+ functions->TextureObjectUnpurgeable = intel_texture_object_unpurgeable;
+ functions->RenderObjectUnpurgeable = intel_render_object_unpurgeable;
+#endif
+}
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.h b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
new file mode 100644
index 0000000000..b15c192106
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
@@ -0,0 +1,92 @@
+/**************************************************************************
+ *
+ * Copyright 2005 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BUFFEROBJ_H
+#define INTEL_BUFFEROBJ_H
+
+#include "main/mtypes.h"
+
+struct intel_context;
+struct intel_region;
+struct gl_buffer_object;
+
+
+/**
+ * Intel vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
+ */
+struct intel_buffer_object
+{
+ struct gl_buffer_object Base;
+ drm_intel_bo *buffer; /* the low-level buffer manager's buffer handle */
+ /** System memory buffer data, if not using a BO to store the data. */
+ void *sys_buffer;
+
+ struct intel_region *region; /* Is there a zero-copy texture
+ associated with this (pixel)
+ buffer object? */
+
+ drm_intel_bo *range_map_bo;
+ void *range_map_buffer;
+ unsigned int range_map_offset;
+ GLsizei range_map_size;
+
+ GLboolean mapped_gtt;
+};
+
+
+/* Get the bm buffer associated with a GL bufferobject:
+ */
+drm_intel_bo *intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *obj,
+ GLuint flag);
+
+/* Hook the bufferobject implementation into mesa:
+ */
+void intelInitBufferObjectFuncs(struct dd_function_table *functions);
+
+
+
+/* Are the obj->Name tests necessary? Unfortunately yes, mesa
+ * allocates a couple of gl_buffer_object structs statically, and
+ * the Name == 0 test is the only way to identify them and avoid
+ * casting them erroneously to our structs.
+ */
+static INLINE struct intel_buffer_object *
+intel_buffer_object(struct gl_buffer_object *obj)
+{
+ return (struct intel_buffer_object *) obj;
+}
+
+/* Helpers for zerocopy image uploads. See also intel_regions.h:
+ */
+void intel_bufferobj_cow(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj);
+void intel_bufferobj_release_region(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj);
+
+
+#endif
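
For illustration only (not part of the patch): a minimal sketch of how the cast
helper above is meant to be used, honoring the Name == 0 caveat from the comment;
intel_bufferobj_example() is a hypothetical function, not driver code.

   static void
   intel_bufferobj_example(GLcontext *ctx, struct gl_buffer_object *obj)
   {
      struct intel_buffer_object *intel_obj = intel_buffer_object(obj);

      if (obj->Name == 0) {
         /* One of Mesa's statically allocated dummy buffer objects;
          * it is not really an intel_buffer_object, so don't touch
          * driver-private fields.
          */
         return;
      }

      /* Safe to use driver-private fields from here on. */
      (void) intel_obj->buffer;
      (void) ctx;
   }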
diff --git a/src/mesa/drivers/dri/intel/intel_buffers.c b/src/mesa/drivers/dri/intel/intel_buffers.c
new file mode 100644
index 0000000000..1bff344a45
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffers.c
@@ -0,0 +1,325 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "main/framebuffer.h"
+
+/**
+ * Return pointer to current color drawing region, or NULL.
+ */
+struct intel_region *
+intel_drawbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0]);
+ if (irbColor)
+ return irbColor->region;
+ else
+ return NULL;
+}
+
+/**
+ * Return pointer to current color reading region, or NULL.
+ */
+struct intel_region *
+intel_readbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+/**
+ * Check if we're about to draw into the front color buffer.
+ * If so, set the intel->front_buffer_dirty field to true.
+ */
+void
+intel_check_front_buffer_rendering(struct intel_context *intel)
+{
+ const struct gl_framebuffer *fb = intel->ctx.DrawBuffer;
+ if (fb->Name == 0) {
+ /* drawing to window system buffer */
+ if (fb->_NumColorDrawBuffers > 0) {
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
+ intel->front_buffer_dirty = GL_TRUE;
+ }
+ }
+ }
+}
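
As an illustrative aside (not part of the patch): the fb->Name == 0 test used
here and throughout this file is Mesa's convention for telling the window-system
framebuffer apart from user-created FBOs, which always get a non-zero name. A
hypothetical helper spelling that out:

   static GLboolean
   is_winsys_framebuffer(const struct gl_framebuffer *fb)
   {
      return fb->Name == 0;
   }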
+
+
+/**
+ * Update the hardware state for drawing into a window or framebuffer object.
+ *
+ * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
+ * places within the driver.
+ *
+ * Basically, this needs to be called any time the current framebuffer
+ * changes, the renderbuffers change, or we need to draw into different
+ * color buffers.
+ */
+void
+intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *colorRegions[MAX_DRAW_BUFFERS], *depthRegion = NULL;
+ struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
+
+ if (!fb) {
+ /* this can happen during the initial context initialization */
+ return;
+ }
+
+ /* Do this here, not core Mesa, since this function is called from
+ * many places within the driver.
+ */
+ if (ctx->NewState & _NEW_BUFFERS) {
+ /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
+ _mesa_update_framebuffer(ctx);
+ /* this updates the DrawBuffer's Width/Height if it's a FBO */
+ _mesa_update_draw_buffer_bounds(ctx);
+ }
+
+ if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
+ /* this may occur when we're called by glBindFramebuffer() during
+ * the process of someone setting up renderbuffers, etc.
+ */
+ /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
+ return;
+ }
+
+ /* How many color buffers are we drawing into?
+ *
+ * If there are zero buffers or the buffer is too big, don't configure any
+ * regions for hardware drawing. We'll fall back to software below. Not
+ * having regions set makes some of the software fallback paths faster.
+ */
+ if ((fb->Width > ctx->Const.MaxRenderbufferSize)
+ || (fb->Height > ctx->Const.MaxRenderbufferSize)
+ || (fb->_NumColorDrawBuffers == 0)) {
+ /* writing to 0 */
+ colorRegions[0] = NULL;
+ }
+ else if (fb->_NumColorDrawBuffers > 1) {
+ int i;
+ struct intel_renderbuffer *irb;
+
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+ colorRegions[i] = irb ? irb->region : NULL;
+ }
+ }
+ else {
+ /* Get the intel_renderbuffer for the single colorbuffer we're drawing
+ * into.
+ */
+ if (fb->Name == 0) {
+ /* drawing to window system buffer */
+ if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT)
+ colorRegions[0] = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
+ else
+ colorRegions[0] = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
+ }
+ else {
+ /* drawing to user-created FBO */
+ struct intel_renderbuffer *irb;
+ irb = intel_renderbuffer(fb->_ColorDrawBuffers[0]);
+ colorRegions[0] = (irb && irb->region) ? irb->region : NULL;
+ }
+ }
+
+ if (!colorRegions[0]) {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ }
+
+ /***
+ *** Get depth buffer region and check if we need a software fallback.
+ *** Note that the depth buffer is usually a DEPTH_STENCIL buffer.
+ ***/
+ if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
+ irbDepth = intel_renderbuffer(fb->_DepthBuffer->Wrapped);
+ if (irbDepth && irbDepth->region) {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = irbDepth->region;
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ depthRegion = NULL;
+ }
+ }
+ else {
+ /* not using depth buffer */
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = NULL;
+ }
+
+ /***
+ *** Stencil buffer
+ *** This can only be hardware accelerated if we're using a
+ *** combined DEPTH_STENCIL buffer.
+ ***/
+ if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
+ irbStencil = intel_renderbuffer(fb->_StencilBuffer->Wrapped);
+ if (irbStencil && irbStencil->region) {
+ ASSERT(irbStencil->Base.Format == MESA_FORMAT_S8_Z24);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ }
+ }
+ else {
+ /* XXX FBO: instead of FALSE, pass ctx->Stencil._Enabled ??? */
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ }
+
+ /* If we have a (packed) stencil buffer attached but no depth buffer,
+ * we still need to set up the shared depth/stencil state so we can use it.
+ */
+ if (depthRegion == NULL && irbStencil && irbStencil->region)
+ depthRegion = irbStencil->region;
+
+ /*
+ * Update depth and stencil test state
+ */
+ if (ctx->Driver.Enable) {
+ ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
+ (ctx->Depth.Test && fb->Visual.depthBits > 0));
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
+ (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
+ }
+ else {
+ /* Mesa's Stencil._Enabled field is updated when
+ * _NEW_BUFFERS | _NEW_STENCIL, but i965 code assumes that the value
+ * only changes with _NEW_STENCIL (which seems sensible). So flag it
+ * here since this is the _NEW_BUFFERS path.
+ */
+ intel->NewGLState |= (_NEW_DEPTH | _NEW_STENCIL);
+ }
+
+ intel->vtbl.set_draw_region(intel, colorRegions, depthRegion,
+ fb->_NumColorDrawBuffers);
+ intel->NewGLState |= _NEW_BUFFERS;
+
+ /* update viewport since it depends on window size */
+#ifdef I915
+ intelCalcViewport(ctx);
+#else
+ intel->NewGLState |= _NEW_VIEWPORT;
+#endif
+ /* Set state we know depends on drawable parameters:
+ */
+ if (ctx->Driver.Scissor)
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+ intel->NewGLState |= _NEW_SCISSOR;
+
+ if (ctx->Driver.DepthRange)
+ ctx->Driver.DepthRange(ctx,
+ ctx->Viewport.Near,
+ ctx->Viewport.Far);
+
+ /* Update culling direction which changes depending on the
+ * orientation of the buffer:
+ */
+ if (ctx->Driver.FrontFace)
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+ else
+ intel->NewGLState |= _NEW_POLYGON;
+}
+
+
+static void
+intelDrawBuffer(GLcontext * ctx, GLenum mode)
+{
+ if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const GLboolean was_front_buffer_rendering =
+ intel->is_front_buffer_rendering;
+
+ intel->is_front_buffer_rendering = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT);
+
+ /* If we weren't front-buffer rendering before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start rendering again.
+ */
+ if (!was_front_buffer_rendering && intel->is_front_buffer_rendering)
+ dri2InvalidateDrawable(intel->driContext->driDrawablePriv);
+ }
+
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+}
+
+
+static void
+intelReadBuffer(GLcontext * ctx, GLenum mode)
+{
+ if ((ctx->DrawBuffer != NULL) && (ctx->DrawBuffer->Name == 0)) {
+ struct intel_context *const intel = intel_context(ctx);
+ const GLboolean was_front_buffer_reading =
+ intel->is_front_buffer_reading;
+
+ intel->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
+ || (mode == GL_FRONT);
+
+ /* If we weren't front-buffer reading before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start reading again.
+ */
+ if (!was_front_buffer_reading && intel->is_front_buffer_reading)
+ dri2InvalidateDrawable(intel->driContext->driReadablePriv);
+ }
+
+ if (ctx->ReadBuffer == ctx->DrawBuffer) {
+ /* This will update FBO completeness status.
+ * A framebuffer will be incomplete if the GL_READ_BUFFER setting
+ * refers to a missing renderbuffer. Calling glReadBuffer can set
+ * that straight and can make the drawing buffer complete.
+ */
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+ }
+ /* Generally, functions which read pixels (glReadPixels, glCopyPixels, etc)
+ * reference ctx->ReadBuffer and do appropriate state checks.
+ */
+}
+
+
+void
+intelInitBufferFuncs(struct dd_function_table *functions)
+{
+ functions->DrawBuffer = intelDrawBuffer;
+ functions->ReadBuffer = intelReadBuffer;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_buffers.h b/src/mesa/drivers/dri/intel/intel_buffers.h
new file mode 100644
index 0000000000..abb86aade6
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffers.h
@@ -0,0 +1,56 @@
+
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_BUFFERS_H
+#define INTEL_BUFFERS_H
+
+#include "dri_util.h"
+#include "drm.h"
+
+struct intel_context;
+struct intel_framebuffer;
+
+extern struct intel_region *intel_readbuf_region(struct intel_context *intel);
+
+extern struct intel_region *intel_drawbuf_region(struct intel_context *intel);
+
+extern void intel_check_front_buffer_rendering(struct intel_context *intel);
+
+extern void intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb);
+
+extern void intelInitBufferFuncs(struct dd_function_table *functions);
+
+void intel_get_cliprects(struct intel_context *intel,
+ struct drm_clip_rect **cliprects,
+ unsigned int *num_cliprects,
+ int *x_off, int *y_off);
+#ifdef I915
+void intelCalcViewport(GLcontext * ctx);
+#endif
+
+#endif /* INTEL_BUFFERS_H */
diff --git a/src/mesa/drivers/dri/intel/intel_chipset.h b/src/mesa/drivers/dri/intel/intel_chipset.h
new file mode 100644
index 0000000000..cd614c59e5
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_chipset.h
@@ -0,0 +1,128 @@
+ /*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#define PCI_CHIP_I810 0x7121
+#define PCI_CHIP_I810_DC100 0x7123
+#define PCI_CHIP_I810_E 0x7125
+#define PCI_CHIP_I815 0x1132
+
+#define PCI_CHIP_I830_M 0x3577
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I865_G 0x2572
+
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_E7221_G 0x258A
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GME 0x27AE
+
+#define PCI_CHIP_Q35_G 0x29B2
+#define PCI_CHIP_G33_G 0x29C2
+#define PCI_CHIP_Q33_G 0x29D2
+
+#define PCI_CHIP_IGD_GM 0xA011
+#define PCI_CHIP_IGD_G 0xA001
+
+#define IS_IGDGM(devid) (devid == PCI_CHIP_IGD_GM)
+#define IS_IGDG(devid) (devid == PCI_CHIP_IGD_G)
+#define IS_IGD(devid) (IS_IGDG(devid) || IS_IGDGM(devid))
+
+#define PCI_CHIP_I965_G 0x29A2
+#define PCI_CHIP_I965_Q 0x2992
+#define PCI_CHIP_I965_G_1 0x2982
+#define PCI_CHIP_I946_GZ 0x2972
+#define PCI_CHIP_I965_GM 0x2A02
+#define PCI_CHIP_I965_GME 0x2A12
+
+#define PCI_CHIP_GM45_GM 0x2A42
+
+#define PCI_CHIP_IGD_E_G 0x2E02
+#define PCI_CHIP_Q45_G 0x2E12
+#define PCI_CHIP_G45_G 0x2E22
+#define PCI_CHIP_G41_G 0x2E32
+#define PCI_CHIP_B43_G 0x2E42
+
+#define PCI_CHIP_ILD_G 0x0042
+#define PCI_CHIP_ILM_G 0x0046
+
+#define PCI_CHIP_SANDYBRIDGE 0x0102
+#define PCI_CHIP_SANDYBRIDGE_M 0x0106
+
+#define IS_MOBILE(devid) (devid == PCI_CHIP_I855_GM || \
+ devid == PCI_CHIP_I915_GM || \
+ devid == PCI_CHIP_I945_GM || \
+ devid == PCI_CHIP_I945_GME || \
+ devid == PCI_CHIP_I965_GM || \
+ devid == PCI_CHIP_I965_GME || \
+ devid == PCI_CHIP_GM45_GM || \
+ IS_IGD(devid) || \
+ devid == PCI_CHIP_ILM_G)
+
+#define IS_G45(devid) (devid == PCI_CHIP_IGD_E_G || \
+ devid == PCI_CHIP_Q45_G || \
+ devid == PCI_CHIP_G45_G || \
+ devid == PCI_CHIP_G41_G || \
+ devid == PCI_CHIP_B43_G)
+#define IS_GM45(devid) (devid == PCI_CHIP_GM45_GM)
+#define IS_G4X(devid) (IS_G45(devid) || IS_GM45(devid))
+
+#define IS_ILD(devid) (devid == PCI_CHIP_ILD_G)
+#define IS_ILM(devid) (devid == PCI_CHIP_ILM_G)
+#define IS_GEN5(devid) (IS_ILD(devid) || IS_ILM(devid))
+
+#define IS_915(devid) (devid == PCI_CHIP_I915_G || \
+ devid == PCI_CHIP_E7221_G || \
+ devid == PCI_CHIP_I915_GM)
+
+#define IS_945(devid) (devid == PCI_CHIP_I945_G || \
+ devid == PCI_CHIP_I945_GM || \
+ devid == PCI_CHIP_I945_GME || \
+ devid == PCI_CHIP_G33_G || \
+ devid == PCI_CHIP_Q33_G || \
+ devid == PCI_CHIP_Q35_G || IS_IGD(devid))
+
+#define IS_GEN4(devid) (devid == PCI_CHIP_I965_G || \
+ devid == PCI_CHIP_I965_Q || \
+ devid == PCI_CHIP_I965_G_1 || \
+ devid == PCI_CHIP_I965_GM || \
+ devid == PCI_CHIP_I965_GME || \
+ devid == PCI_CHIP_I946_GZ || \
+ IS_G4X(devid))
+
+#define IS_GEN6(devid) (devid == PCI_CHIP_SANDYBRIDGE || \
+ devid == PCI_CHIP_SANDYBRIDGE_M)
+
+#define IS_965(devid) (IS_GEN4(devid) || \
+ IS_G4X(devid) || \
+ IS_GEN5(devid) || \
+ IS_GEN6(devid))
+
+#define IS_9XX(devid) (IS_915(devid) || \
+ IS_945(devid) || \
+ IS_965(devid))
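
For illustration (not part of the patch): the macros above are meant to be
checked from newest generation to oldest, because IS_965() also matches G4X,
Ironlake and Sandybridge IDs. This hypothetical helper mirrors the ordering
that intelInitContext() uses further down in this diff.

   static int
   chipset_to_gen(int devid)
   {
      if (IS_GEN6(devid))
         return 6;
      if (IS_GEN5(devid))
         return 5;
      if (IS_965(devid))
         return 4;
      if (IS_9XX(devid))
         return 3;
      return 2;          /* 830M/845G/855GM/865G */
   }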
diff --git a/src/mesa/drivers/dri/intel/intel_clear.c b/src/mesa/drivers/dri/intel/intel_clear.c
new file mode 100644
index 0000000000..3c22118866
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_clear.c
@@ -0,0 +1,207 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * Copyright 2009 Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/mtypes.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_clear.h"
+#include "intel_fbo.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+static const char *buffer_names[] = {
+ [BUFFER_FRONT_LEFT] = "front",
+ [BUFFER_BACK_LEFT] = "back",
+ [BUFFER_FRONT_RIGHT] = "front right",
+ [BUFFER_BACK_RIGHT] = "back right",
+ [BUFFER_DEPTH] = "depth",
+ [BUFFER_STENCIL] = "stencil",
+ [BUFFER_ACCUM] = "accum",
+ [BUFFER_AUX0] = "aux0",
+ [BUFFER_COLOR0] = "color0",
+ [BUFFER_COLOR1] = "color1",
+ [BUFFER_COLOR2] = "color2",
+ [BUFFER_COLOR3] = "color3",
+ [BUFFER_COLOR4] = "color4",
+ [BUFFER_COLOR5] = "color5",
+ [BUFFER_COLOR6] = "color6",
+ [BUFFER_COLOR7] = "color7",
+};
+
+/**
+ * Called by ctx->Driver.Clear.
+ */
+static void
+intelClear(GLcontext *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask[0]);
+ GLbitfield tri_mask = 0;
+ GLbitfield blit_mask = 0;
+ GLbitfield swrast_mask = 0;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint i;
+
+ if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
+ intel->front_buffer_dirty = GL_TRUE;
+ }
+
+ if (0)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ /* HW color buffers (front, back, aux, generic FBO, etc) */
+ if (colorMask == ~0) {
+ /* clear all R,G,B,A */
+ /* XXX FBO: need to check if colorbuffers are software RBOs! */
+ blit_mask |= (mask & BUFFER_BITS_COLOR);
+ }
+ else {
+ /* glColorMask in effect */
+ tri_mask |= (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT));
+ }
+
+ /* Make sure we have up to date buffers before we start looking at
+ * the tiling bits to determine how to clear. */
+ intel_prepare_render(intel);
+
+ /* HW stencil */
+ if (mask & BUFFER_BIT_STENCIL) {
+ const struct intel_region *stencilRegion
+ = intel_get_rb_region(fb, BUFFER_STENCIL);
+ if (stencilRegion) {
+ /* have hw stencil */
+ if (stencilRegion->tiling == I915_TILING_Y ||
+ (ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
+ /* We have to use the 3D engine if we're clearing a partial mask
+ * of the stencil buffer, or if we're on a 965 which has a tiled
+ * depth/stencil buffer in a layout we can't blit to.
+ */
+ tri_mask |= BUFFER_BIT_STENCIL;
+ }
+ else {
+ /* clearing all stencil bits, use blitting */
+ blit_mask |= BUFFER_BIT_STENCIL;
+ }
+ }
+ }
+
+ /* HW depth */
+ if (mask & BUFFER_BIT_DEPTH) {
+ const struct intel_region *irb = intel_get_rb_region(fb, BUFFER_DEPTH);
+
+ /* clear depth with whatever method is used for stencil (see above) */
+ if (irb->tiling == I915_TILING_Y || tri_mask & BUFFER_BIT_STENCIL)
+ tri_mask |= BUFFER_BIT_DEPTH;
+ else
+ blit_mask |= BUFFER_BIT_DEPTH;
+ }
+
+ /* If we're doing a tri pass for depth/stencil, include a likely color
+ * buffer with it.
+ */
+ if (mask & (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL)) {
+ int color_bit = _mesa_ffs(mask & BUFFER_BITS_COLOR);
+ if (color_bit != 0) {
+ tri_mask |= blit_mask & (1 << (color_bit - 1));
+ blit_mask &= ~(1 << (color_bit - 1));
+ }
+ }
+
+ if (intel->gen >= 6) {
+ /* Blits are in a different ringbuffer so we don't use them. */
+ tri_mask |= blit_mask;
+ blit_mask = 0;
+ }
+
+ /* SW fallback clearing */
+ swrast_mask = mask & ~tri_mask & ~blit_mask;
+
+ {
+ /* look for non-Intel renderbuffers (clear them with swrast) */
+ GLbitfield blit_or_tri = blit_mask | tri_mask;
+ while (blit_or_tri) {
+ GLuint i = _mesa_ffs(blit_or_tri) - 1;
+ GLbitfield bufBit = 1 << i;
+ if (!fb->Attachment[i].Renderbuffer->ClassID) {
+ blit_mask &= ~bufBit;
+ tri_mask &= ~bufBit;
+ swrast_mask |= bufBit;
+ }
+ blit_or_tri ^= bufBit;
+ }
+ }
+
+ if (blit_mask) {
+ if (INTEL_DEBUG & DEBUG_BLIT) {
+ DBG("blit clear:");
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ if (blit_mask & (1 << i))
+ DBG(" %s", buffer_names[i]);
+ }
+ DBG("\n");
+ }
+ intelClearWithBlit(ctx, blit_mask);
+ }
+
+ if (tri_mask) {
+ if (INTEL_DEBUG & DEBUG_BLIT) {
+ DBG("tri clear:");
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ if (tri_mask & (1 << i))
+ DBG(" %s", buffer_names[i]);
+ }
+ DBG("\n");
+ }
+
+ _mesa_meta_Clear(&intel->ctx, tri_mask);
+ }
+
+ if (swrast_mask) {
+ if (INTEL_DEBUG & DEBUG_BLIT) {
+ DBG("swrast clear:");
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ if (swrast_mask & (1 << i))
+ DBG(" %s", buffer_names[i]);
+ }
+ DBG("\n");
+ }
+ _swrast_Clear(ctx, swrast_mask);
+ }
+}
+
+
+void
+intelInitClearFuncs(struct dd_function_table *functions)
+{
+ functions->Clear = intelClear;
+}
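
For illustration (not part of the patch): the color_bit logic above relies on
ffs()-style semantics, where the return value is the 1-based index of the lowest
set bit and 0 means no bit is set. A hypothetical, self-contained sketch of the
same move of one color buffer from the blit mask to the tri mask:

   #include <strings.h>

   static void
   move_one_color_bit(unsigned *blit_mask, unsigned *tri_mask,
                      unsigned color_bits)
   {
      int color_bit = ffs(*blit_mask & color_bits);   /* like _mesa_ffs() */

      if (color_bit != 0) {
         *tri_mask |= *blit_mask & (1u << (color_bit - 1));
         *blit_mask &= ~(1u << (color_bit - 1));
      }
   }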
diff --git a/src/mesa/drivers/dri/intel/intel_clear.h b/src/mesa/drivers/dri/intel/intel_clear.h
new file mode 100644
index 0000000000..7fd6b310a9
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_clear.h
@@ -0,0 +1,38 @@
+
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_CLEAR_H
+#define INTEL_CLEAR_H
+
+struct dd_function_table;
+
+extern void
+intelInitClearFuncs(struct dd_function_table *functions);
+
+
+#endif /* INTEL_CLEAR_H */
diff --git a/src/mesa/drivers/dri/intel/intel_context.c b/src/mesa/drivers/dri/intel/intel_context.c
new file mode 100644
index 0000000000..5f2035d79c
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_context.c
@@ -0,0 +1,912 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/extensions.h"
+#include "main/framebuffer.h"
+#include "main/imports.h"
+#include "main/points.h"
+
+#include "swrast/swrast.h"
+#include "swrast_setup/swrast_setup.h"
+#include "tnl/tnl.h"
+#include "drivers/common/driverfuncs.h"
+#include "drivers/common/meta.h"
+
+#include "i830_dri.h"
+
+#include "intel_chipset.h"
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_batchbuffer.h"
+#include "intel_clear.h"
+#include "intel_extensions.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+#include "intel_buffer_objects.h"
+#include "intel_fbo.h"
+#include "intel_bufmgr.h"
+#include "intel_screen.h"
+
+#include "drirenderbuffer.h"
+#include "utils.h"
+
+
+#ifndef INTEL_DEBUG
+int INTEL_DEBUG = (0);
+#endif
+
+
+#define DRIVER_DATE "20100330 DEVELOPMENT"
+#define DRIVER_DATE_GEM "GEM " DRIVER_DATE
+
+
+static const GLubyte *
+intelGetString(GLcontext * ctx, GLenum name)
+{
+ const struct intel_context *const intel = intel_context(ctx);
+ const char *chipset;
+ static char buffer[128];
+
+ switch (name) {
+ case GL_VENDOR:
+ return (GLubyte *) "Tungsten Graphics, Inc";
+ break;
+
+ case GL_RENDERER:
+ switch (intel->intelScreen->deviceID) {
+ case PCI_CHIP_845_G:
+ chipset = "Intel(R) 845G";
+ break;
+ case PCI_CHIP_I830_M:
+ chipset = "Intel(R) 830M";
+ break;
+ case PCI_CHIP_I855_GM:
+ chipset = "Intel(R) 852GM/855GM";
+ break;
+ case PCI_CHIP_I865_G:
+ chipset = "Intel(R) 865G";
+ break;
+ case PCI_CHIP_I915_G:
+ chipset = "Intel(R) 915G";
+ break;
+ case PCI_CHIP_E7221_G:
+ chipset = "Intel (R) E7221G (i915)";
+ break;
+ case PCI_CHIP_I915_GM:
+ chipset = "Intel(R) 915GM";
+ break;
+ case PCI_CHIP_I945_G:
+ chipset = "Intel(R) 945G";
+ break;
+ case PCI_CHIP_I945_GM:
+ chipset = "Intel(R) 945GM";
+ break;
+ case PCI_CHIP_I945_GME:
+ chipset = "Intel(R) 945GME";
+ break;
+ case PCI_CHIP_G33_G:
+ chipset = "Intel(R) G33";
+ break;
+ case PCI_CHIP_Q35_G:
+ chipset = "Intel(R) Q35";
+ break;
+ case PCI_CHIP_Q33_G:
+ chipset = "Intel(R) Q33";
+ break;
+ case PCI_CHIP_IGD_GM:
+ case PCI_CHIP_IGD_G:
+ chipset = "Intel(R) IGD";
+ break;
+ case PCI_CHIP_I965_Q:
+ chipset = "Intel(R) 965Q";
+ break;
+ case PCI_CHIP_I965_G:
+ case PCI_CHIP_I965_G_1:
+ chipset = "Intel(R) 965G";
+ break;
+ case PCI_CHIP_I946_GZ:
+ chipset = "Intel(R) 946GZ";
+ break;
+ case PCI_CHIP_I965_GM:
+ chipset = "Intel(R) 965GM";
+ break;
+ case PCI_CHIP_I965_GME:
+ chipset = "Intel(R) 965GME/GLE";
+ break;
+ case PCI_CHIP_GM45_GM:
+ chipset = "Mobile Intel® GM45 Express Chipset";
+ break;
+ case PCI_CHIP_IGD_E_G:
+ chipset = "Intel(R) Integrated Graphics Device";
+ break;
+ case PCI_CHIP_G45_G:
+ chipset = "Intel(R) G45/G43";
+ break;
+ case PCI_CHIP_Q45_G:
+ chipset = "Intel(R) Q45/Q43";
+ break;
+ case PCI_CHIP_G41_G:
+ chipset = "Intel(R) G41";
+ break;
+ case PCI_CHIP_B43_G:
+ chipset = "Intel(R) B43";
+ break;
+ case PCI_CHIP_ILD_G:
+ chipset = "Intel(R) Ironlake Desktop";
+ break;
+ case PCI_CHIP_ILM_G:
+ chipset = "Intel(R) Ironlake Mobile";
+ break;
+ default:
+ chipset = "Unknown Intel Chipset";
+ break;
+ }
+
+ (void) driGetRendererString(buffer, chipset, DRIVER_DATE_GEM, 0);
+ return (GLubyte *) buffer;
+
+ default:
+ return NULL;
+ }
+}
+
+static void
+intel_flush_front(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+ __DRIscreen *const screen = intel->intelScreen->driScrnPriv;
+
+ if ((ctx->DrawBuffer->Name == 0) && intel->front_buffer_dirty) {
+ if (screen->dri2.loader &&
+ (screen->dri2.loader->base.version >= 2)
+ && (screen->dri2.loader->flushFrontBuffer != NULL) &&
+ driContext->driDrawablePriv &&
+ driContext->driDrawablePriv->loaderPrivate) {
+ (*screen->dri2.loader->flushFrontBuffer)(driContext->driDrawablePriv,
+ driContext->driDrawablePriv->loaderPrivate);
+
+ /* The dirty bit will be set again in intel_prepare_render() if
+ * we're still front-buffer rendering when we get there.
+ */
+ intel->front_buffer_dirty = GL_FALSE;
+ }
+ }
+}
+
+static unsigned
+intel_bits_per_pixel(const struct intel_renderbuffer *rb)
+{
+ return _mesa_get_format_bytes(rb->Base.Format) * 8;
+}
+
+void
+intel_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
+{
+ struct gl_framebuffer *fb = drawable->driverPrivate;
+ struct intel_renderbuffer *rb;
+ struct intel_region *region, *depth_region;
+ struct intel_context *intel = context->driverPrivate;
+ struct intel_renderbuffer *front_rb, *back_rb, *depth_rb, *stencil_rb;
+ __DRIbuffer *buffers = NULL;
+ __DRIscreen *screen;
+ int i, count;
+ unsigned int attachments[10];
+ const char *region_name;
+
+ /* If we're rendering to the fake front buffer, make sure all the
+ * pending drawing has landed on the real front buffer. Otherwise
+ * when we eventually get to DRI2GetBuffersWithFormat the stale
+ * real front buffer contents will get copied to the new fake front
+ * buffer.
+ */
+ if (intel->is_front_buffer_rendering) {
+ intel_flush(&intel->ctx);
+ intel_flush_front(&intel->ctx);
+ }
+
+ /* Set this up front, so that in case our buffers get invalidated
+ * while we're getting new buffers, we don't clobber the stamp and
+ * thus ignore the invalidate. */
+ drawable->lastStamp = drawable->dri2.stamp;
+
+ if (INTEL_DEBUG & DEBUG_DRI)
+ fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
+
+ screen = intel->intelScreen->driScrnPriv;
+
+ if (screen->dri2.loader
+ && (screen->dri2.loader->base.version > 2)
+ && (screen->dri2.loader->getBuffersWithFormat != NULL)) {
+
+ front_rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ back_rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+ depth_rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ stencil_rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+
+ i = 0;
+ if ((intel->is_front_buffer_rendering ||
+ intel->is_front_buffer_reading ||
+ !back_rb) && front_rb) {
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ attachments[i++] = intel_bits_per_pixel(front_rb);
+ }
+
+ if (back_rb) {
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ attachments[i++] = intel_bits_per_pixel(back_rb);
+ }
+
+ if ((depth_rb != NULL) && (stencil_rb != NULL)) {
+ attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
+ attachments[i++] = intel_bits_per_pixel(depth_rb);
+ } else if (depth_rb != NULL) {
+ attachments[i++] = __DRI_BUFFER_DEPTH;
+ attachments[i++] = intel_bits_per_pixel(depth_rb);
+ } else if (stencil_rb != NULL) {
+ attachments[i++] = __DRI_BUFFER_STENCIL;
+ attachments[i++] = intel_bits_per_pixel(stencil_rb);
+ }
+
+ buffers =
+ (*screen->dri2.loader->getBuffersWithFormat)(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i / 2,
+ &count,
+ drawable->loaderPrivate);
+ } else if (screen->dri2.loader) {
+ i = 0;
+ if (intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT))
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
+ if (intel_get_renderbuffer(fb, BUFFER_BACK_LEFT))
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
+ if (intel_get_renderbuffer(fb, BUFFER_DEPTH))
+ attachments[i++] = __DRI_BUFFER_DEPTH;
+ if (intel_get_renderbuffer(fb, BUFFER_STENCIL))
+ attachments[i++] = __DRI_BUFFER_STENCIL;
+
+ buffers = (*screen->dri2.loader->getBuffers)(drawable,
+ &drawable->w,
+ &drawable->h,
+ attachments, i,
+ &count,
+ drawable->loaderPrivate);
+ }
+
+ if (buffers == NULL)
+ return;
+
+ drawable->x = 0;
+ drawable->y = 0;
+ drawable->backX = 0;
+ drawable->backY = 0;
+ drawable->numClipRects = 1;
+ drawable->pClipRects[0].x1 = 0;
+ drawable->pClipRects[0].y1 = 0;
+ drawable->pClipRects[0].x2 = drawable->w;
+ drawable->pClipRects[0].y2 = drawable->h;
+ drawable->numBackClipRects = 1;
+ drawable->pBackClipRects[0].x1 = 0;
+ drawable->pBackClipRects[0].y1 = 0;
+ drawable->pBackClipRects[0].x2 = drawable->w;
+ drawable->pBackClipRects[0].y2 = drawable->h;
+
+ depth_region = NULL;
+ for (i = 0; i < count; i++) {
+ switch (buffers[i].attachment) {
+ case __DRI_BUFFER_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 front buffer";
+ break;
+
+ case __DRI_BUFFER_FAKE_FRONT_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ region_name = "dri2 fake front buffer";
+ break;
+
+ case __DRI_BUFFER_BACK_LEFT:
+ rb = intel_get_renderbuffer(fb, BUFFER_BACK_LEFT);
+ region_name = "dri2 back buffer";
+ break;
+
+ case __DRI_BUFFER_DEPTH:
+ rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ region_name = "dri2 depth buffer";
+ break;
+
+ case __DRI_BUFFER_DEPTH_STENCIL:
+ rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ region_name = "dri2 depth / stencil buffer";
+ break;
+
+ case __DRI_BUFFER_STENCIL:
+ rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ region_name = "dri2 stencil buffer";
+ break;
+
+ case __DRI_BUFFER_ACCUM:
+ default:
+ fprintf(stderr,
+ "unhandled buffer attach event, attacment type %d\n",
+ buffers[i].attachment);
+ return;
+ }
+
+ if (rb == NULL)
+ continue;
+
+ if (rb->region && rb->region->name == buffers[i].name)
+ continue;
+
+ if (INTEL_DEBUG & DEBUG_DRI)
+ fprintf(stderr,
+ "attaching buffer %d, at %d, cpp %d, pitch %d\n",
+ buffers[i].name, buffers[i].attachment,
+ buffers[i].cpp, buffers[i].pitch);
+
+ if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
+ if (INTEL_DEBUG & DEBUG_DRI)
+ fprintf(stderr, "(reusing depth buffer as stencil)\n");
+ intel_region_reference(&region, depth_region);
+ }
+ else
+ region = intel_region_alloc_for_handle(intel, buffers[i].cpp,
+ drawable->w,
+ drawable->h,
+ buffers[i].pitch / buffers[i].cpp,
+ buffers[i].name,
+ region_name);
+
+ if (buffers[i].attachment == __DRI_BUFFER_DEPTH)
+ depth_region = region;
+
+ intel_renderbuffer_set_region(intel, rb, region);
+ intel_region_release(&region);
+
+ if (buffers[i].attachment == __DRI_BUFFER_DEPTH_STENCIL) {
+ rb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ if (rb != NULL) {
+ struct intel_region *stencil_region = NULL;
+
+ if (rb->region && rb->region->name == buffers[i].name)
+ continue;
+
+ intel_region_reference(&stencil_region, region);
+ intel_renderbuffer_set_region(intel, rb, stencil_region);
+ intel_region_release(&stencil_region);
+ }
+ }
+ }
+
+ driUpdateFramebufferSize(&intel->ctx, drawable);
+}
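
For illustration (not part of the patch): in the getBuffersWithFormat() path
above, attachments[] is packed as {attachment token, bits-per-pixel} pairs,
which is why the request count handed to the loader is i / 2. A hypothetical,
self-contained version of that packing:

   static void
   attachments_example(void)
   {
      unsigned attachments[4];
      int i = 0;

      attachments[i++] = __DRI_BUFFER_BACK_LEFT;
      attachments[i++] = 32;   /* e.g. an ARGB8888 back buffer */
      attachments[i++] = __DRI_BUFFER_DEPTH_STENCIL;
      attachments[i++] = 32;   /* 24-bit depth + 8-bit stencil */

      /* i / 2 == 2 buffer requests would be passed to the loader. */
      (void) attachments;
   }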
+
+/**
+ * intel_prepare_render should be called anywhere that current read/drawbuffer
+ * state is required.
+ */
+void
+intel_prepare_render(struct intel_context *intel)
+{
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *drawable;
+
+ drawable = driContext->driDrawablePriv;
+ if (drawable->dri2.stamp != driContext->dri2.draw_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
+ driContext->dri2.draw_stamp = drawable->dri2.stamp;
+ }
+
+ drawable = driContext->driReadablePriv;
+ if (drawable->dri2.stamp != driContext->dri2.read_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ driContext->dri2.read_stamp = drawable->dri2.stamp;
+ }
+
+ /* If we're currently rendering to the front buffer, the rendering
+ * that will happen next will probably dirty the front buffer. So
+ * mark it as dirty here.
+ */
+ if (intel->is_front_buffer_rendering)
+ intel->front_buffer_dirty = GL_TRUE;
+
+ /* Wait for the swapbuffers before the one we just emitted, so we
+ * don't get too many swaps outstanding for apps that are GPU-heavy
+ * but not CPU-heavy.
+ *
+ * We're using intelDRI2Flush (called from the loader before
+ * swapbuffer) and glFlush (for front buffer rendering) as the
+ * indicator that a frame is done and then throttle when we get
+ * here as we prepare to render the next frame. At this point the
+ * round trips for swap/copy and getting new buffers are done and
+ * we'll spend less time waiting on the GPU.
+ *
+ * Unfortunately, we don't have a handle to the batch containing
+ * the swap, and getting our hands on that doesn't seem worth it,
+ * so we just use the first batch we emitted after the last swap.
+ */
+ if (intel->need_throttle && intel->first_post_swapbuffers_batch) {
+ drm_intel_bo_wait_rendering(intel->first_post_swapbuffers_batch);
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+ intel->need_throttle = GL_FALSE;
+ }
+}
+
+static void
+intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
+{
+ struct intel_context *intel = intel_context(ctx);
+ __DRIcontext *driContext = intel->driContext;
+
+ if (intel->saved_viewport)
+ intel->saved_viewport(ctx, x, y, w, h);
+
+ if (!intel->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
+ dri2InvalidateDrawable(driContext->driDrawablePriv);
+ dri2InvalidateDrawable(driContext->driReadablePriv);
+ }
+}
+
+static const struct dri_debug_control debug_control[] = {
+ { "tex", DEBUG_TEXTURE},
+ { "state", DEBUG_STATE},
+ { "ioctl", DEBUG_IOCTL},
+ { "blit", DEBUG_BLIT},
+ { "mip", DEBUG_MIPTREE},
+ { "fall", DEBUG_FALLBACKS},
+ { "verb", DEBUG_VERBOSE},
+ { "bat", DEBUG_BATCH},
+ { "pix", DEBUG_PIXEL},
+ { "buf", DEBUG_BUFMGR},
+ { "reg", DEBUG_REGION},
+ { "fbo", DEBUG_FBO},
+ { "gs", DEBUG_GS},
+ { "sync", DEBUG_SYNC},
+ { "prim", DEBUG_PRIMS },
+ { "vert", DEBUG_VERTS },
+ { "dri", DEBUG_DRI },
+ { "sf", DEBUG_SF },
+ { "san", DEBUG_SANITY },
+ { "sleep", DEBUG_SLEEP },
+ { "stats", DEBUG_STATS },
+ { "tile", DEBUG_TILE },
+ { "sing", DEBUG_SINGLE_THREAD },
+ { "thre", DEBUG_SINGLE_THREAD },
+ { "wm", DEBUG_WM },
+ { "glsl_force", DEBUG_GLSL_FORCE },
+ { "urb", DEBUG_URB },
+ { "vs", DEBUG_VS },
+ { "clip", DEBUG_CLIP },
+ { NULL, 0 }
+};
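
For illustration (not part of the patch): driParseDebugString() matches the
keywords in INTEL_DEBUG against the table above and ORs the corresponding flags
together, so a setting such as INTEL_DEBUG=bat,blit enables
DEBUG_BATCH | DEBUG_BLIT. A rough, hypothetical sketch of that matching (the
dri_debug_control field names are assumed here):

   #include <string.h>

   static unsigned
   parse_debug_flags_sketch(const char *env,
                            const struct dri_debug_control *tbl)
   {
      unsigned flags = 0;

      if (env == NULL)
         return 0;

      for (; tbl->string != NULL; tbl++) {
         if (strstr(env, tbl->string) != NULL)
            flags |= tbl->flag;
      }
      return flags;
   }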
+
+
+static void
+intelInvalidateState(GLcontext * ctx, GLuint new_state)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ _swrast_InvalidateState(ctx, new_state);
+ _swsetup_InvalidateState(ctx, new_state);
+ _vbo_InvalidateState(ctx, new_state);
+ _tnl_InvalidateState(ctx, new_state);
+ _tnl_invalidate_vertex_state(ctx, new_state);
+
+ intel->NewGLState |= new_state;
+
+ if (intel->vtbl.invalidate_state)
+ intel->vtbl.invalidate_state( intel, new_state );
+}
+
+void
+intel_flush(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ if (intel->Fallback)
+ _swrast_flush(ctx);
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+
+ if (intel->batch->map != intel->batch->ptr)
+ intel_batchbuffer_flush(intel->batch);
+}
+
+static void
+intel_glFlush(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
+ intel->need_throttle = GL_TRUE;
+}
+
+void
+intelFinish(GLcontext * ctx)
+{
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ int i;
+
+ intel_flush(ctx);
+ intel_flush_front(ctx);
+
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ struct intel_renderbuffer *irb;
+
+ irb = intel_renderbuffer(fb->_ColorDrawBuffers[i]);
+
+ if (irb && irb->region)
+ drm_intel_bo_wait_rendering(irb->region->buffer);
+ }
+ if (fb->_DepthBuffer) {
+ /* XXX: Wait on buffer idle */
+ }
+}
+
+void
+intelInitDriverFunctions(struct dd_function_table *functions)
+{
+ _mesa_init_driver_functions(functions);
+
+ functions->Flush = intel_glFlush;
+ functions->Finish = intelFinish;
+ functions->GetString = intelGetString;
+ functions->UpdateState = intelInvalidateState;
+
+ intelInitTextureFuncs(functions);
+ intelInitTextureImageFuncs(functions);
+ intelInitTextureSubImageFuncs(functions);
+ intelInitTextureCopyImageFuncs(functions);
+ intelInitStateFuncs(functions);
+ intelInitClearFuncs(functions);
+ intelInitBufferFuncs(functions);
+ intelInitPixelFuncs(functions);
+ intelInitBufferObjectFuncs(functions);
+ intel_init_syncobj_functions(functions);
+}
+
+
+GLboolean
+intelInitContext(struct intel_context *intel,
+ int api,
+ const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions)
+{
+ GLcontext *ctx = &intel->ctx;
+ GLcontext *shareCtx = (GLcontext *) sharedContextPrivate;
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->private;
+ int bo_reuse_mode;
+
+ /* we can't do anything without a connection to the device */
+ if (intelScreen->bufmgr == NULL)
+ return GL_FALSE;
+
+ /* Can't rely on invalidate events, fall back to glViewport hack */
+ if (!driContextPriv->driScreenPriv->dri2.useInvalidate) {
+ intel->saved_viewport = functions->Viewport;
+ functions->Viewport = intel_viewport;
+ }
+
+ if (!_mesa_initialize_context_for_api(&intel->ctx, api, mesaVis, shareCtx,
+ functions, (void *) intel)) {
+ printf("%s: failed to init mesa context\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ driContextPriv->driverPrivate = intel;
+ intel->intelScreen = intelScreen;
+ intel->driContext = driContextPriv;
+ intel->driFd = sPriv->fd;
+
+ intel->has_xrgb_textures = GL_TRUE;
+ if (IS_GEN6(intel->intelScreen->deviceID)) {
+ intel->gen = 6;
+ intel->needs_ff_sync = GL_TRUE;
+ intel->has_luminance_srgb = GL_TRUE;
+ } else if (IS_GEN5(intel->intelScreen->deviceID)) {
+ intel->gen = 5;
+ intel->needs_ff_sync = GL_TRUE;
+ intel->has_luminance_srgb = GL_TRUE;
+ } else if (IS_965(intel->intelScreen->deviceID)) {
+ intel->gen = 4;
+ if (IS_G4X(intel->intelScreen->deviceID)) {
+ intel->has_luminance_srgb = GL_TRUE;
+ intel->is_g4x = GL_TRUE;
+ }
+ } else if (IS_9XX(intel->intelScreen->deviceID)) {
+ intel->gen = 3;
+ if (IS_945(intel->intelScreen->deviceID)) {
+ intel->is_945 = GL_TRUE;
+ }
+ } else {
+ intel->gen = 2;
+ if (intel->intelScreen->deviceID == PCI_CHIP_I830_M ||
+ intel->intelScreen->deviceID == PCI_CHIP_845_G) {
+ intel->has_xrgb_textures = GL_FALSE;
+ }
+ }
+
+ driParseConfigFiles(&intel->optionCache, &intelScreen->optionCache,
+ sPriv->myNum, (intel->gen >= 4) ? "i965" : "i915");
+ if (intelScreen->deviceID == PCI_CHIP_I865_G)
+ intel->maxBatchSize = 4096;
+ else
+ intel->maxBatchSize = BATCH_SZ;
+
+ intel->bufmgr = intelScreen->bufmgr;
+
+ bo_reuse_mode = driQueryOptioni(&intel->optionCache, "bo_reuse");
+ switch (bo_reuse_mode) {
+ case DRI_CONF_BO_REUSE_DISABLED:
+ break;
+ case DRI_CONF_BO_REUSE_ALL:
+ intel_bufmgr_gem_enable_reuse(intel->bufmgr);
+ break;
+ }
+
+ /* This doesn't yet catch all non-conformant rendering, but it's a
+ * start.
+ */
+ if (getenv("INTEL_STRICT_CONFORMANCE")) {
+ unsigned int value = atoi(getenv("INTEL_STRICT_CONFORMANCE"));
+ if (value > 0) {
+ intel->conformance_mode = value;
+ }
+ else {
+ intel->conformance_mode = 1;
+ }
+ }
+
+ if (intel->conformance_mode > 0) {
+ ctx->Const.MinLineWidth = 1.0;
+ ctx->Const.MinLineWidthAA = 1.0;
+ ctx->Const.MaxLineWidth = 1.0;
+ ctx->Const.MaxLineWidthAA = 1.0;
+ ctx->Const.LineWidthGranularity = 1.0;
+ }
+ else {
+ ctx->Const.MinLineWidth = 1.0;
+ ctx->Const.MinLineWidthAA = 1.0;
+ ctx->Const.MaxLineWidth = 5.0;
+ ctx->Const.MaxLineWidthAA = 5.0;
+ ctx->Const.LineWidthGranularity = 0.5;
+ }
+
+ ctx->Const.MinPointSize = 1.0;
+ ctx->Const.MinPointSizeAA = 1.0;
+ ctx->Const.MaxPointSize = 255.0;
+ ctx->Const.MaxPointSizeAA = 3.0;
+ ctx->Const.PointSizeGranularity = 1.0;
+
+ /* reinitialize the context point state.
+ * It depends on constants in __GLcontextRec::Const
+ */
+ _mesa_init_point(ctx);
+
+ meta_init_metaops(ctx, &intel->meta);
+ if (intel->gen >= 4) {
+ if (MAX_WIDTH > 8192)
+ ctx->Const.MaxRenderbufferSize = 8192;
+ } else {
+ if (MAX_WIDTH > 2048)
+ ctx->Const.MaxRenderbufferSize = 2048;
+ }
+
+ /* Initialize the software rasterizer and helper modules. */
+ _swrast_CreateContext(ctx);
+ _vbo_CreateContext(ctx);
+ _tnl_CreateContext(ctx);
+ _swsetup_CreateContext(ctx);
+
+ /* Configure swrast to match hardware characteristics: */
+ _swrast_allow_pixel_fog(ctx, GL_FALSE);
+ _swrast_allow_vertex_fog(ctx, GL_TRUE);
+
+ _mesa_meta_init(ctx);
+
+ intel->hw_stencil = mesaVis->stencilBits && mesaVis->depthBits == 24;
+ intel->hw_stipple = 1;
+
+ /* XXX FBO: this doesn't seem to be used anywhere */
+ switch (mesaVis->depthBits) {
+ case 0: /* what to do in this case? */
+ case 16:
+ intel->polygon_offset_scale = 1.0;
+ break;
+ case 24:
+ intel->polygon_offset_scale = 2.0; /* req'd to pass glean */
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ if (intel->gen >= 4)
+ intel->polygon_offset_scale /= 0xffff;
+
+ intel->RenderIndex = ~0;
+
+ switch (ctx->API) {
+ case API_OPENGL:
+ intelInitExtensions(ctx);
+ break;
+ case API_OPENGLES:
+ break;
+ case API_OPENGLES2:
+ intelInitExtensionsES2(ctx);
+ break;
+ }
+
+ INTEL_DEBUG = driParseDebugString(getenv("INTEL_DEBUG"), debug_control);
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ dri_bufmgr_set_debug(intel->bufmgr, GL_TRUE);
+
+ intel->batch = intel_batchbuffer_alloc(intel);
+
+ intel_fbo_init(intel);
+
+ if (intel->ctx.Mesa_DXTn) {
+ _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
+ _mesa_enable_extension(ctx, "GL_S3_s3tc");
+ }
+ else if (driQueryOptionb(&intel->optionCache, "force_s3tc_enable")) {
+ _mesa_enable_extension(ctx, "GL_EXT_texture_compression_s3tc");
+ }
+ intel->use_texture_tiling = driQueryOptionb(&intel->optionCache,
+ "texture_tiling");
+ intel->use_early_z = driQueryOptionb(&intel->optionCache, "early_z");
+
+ intel->prim.primitive = ~0;
+
+ /* Force all software fallbacks */
+ if (driQueryOptionb(&intel->optionCache, "no_rast")) {
+ fprintf(stderr, "disabling 3D rasterization\n");
+ intel->no_rast = 1;
+ }
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_batch")) {
+ fprintf(stderr, "flushing batchbuffer before/after each draw call\n");
+ intel->always_flush_batch = 1;
+ }
+
+ if (driQueryOptionb(&intel->optionCache, "always_flush_cache")) {
+ fprintf(stderr, "flushing GPU caches before/after each draw call\n");
+ intel->always_flush_cache = 1;
+ }
+
+ /* Disable all hardware rendering (skip emitting batches and fences/waits
+ * to the kernel)
+ */
+ intel->no_hw = getenv("INTEL_NO_HW") != NULL;
+
+ return GL_TRUE;
+}
+
+void
+intelDestroyContext(__DRIcontext * driContextPriv)
+{
+ struct intel_context *intel =
+ (struct intel_context *) driContextPriv->driverPrivate;
+
+ assert(intel); /* should never be null */
+ if (intel) {
+ INTEL_FIREVERTICES(intel);
+
+ _mesa_meta_free(&intel->ctx);
+
+ meta_destroy_metaops(&intel->meta);
+
+ intel->vtbl.destroy(intel);
+
+ _swsetup_DestroyContext(&intel->ctx);
+ _tnl_DestroyContext(&intel->ctx);
+ _vbo_DestroyContext(&intel->ctx);
+
+ _swrast_DestroyContext(&intel->ctx);
+ intel->Fallback = 0x0; /* don't call _swrast_flush later */
+
+ intel_batchbuffer_free(intel->batch);
+ intel->batch = NULL;
+
+ free(intel->prim.vb);
+ intel->prim.vb = NULL;
+ drm_intel_bo_unreference(intel->prim.vb_bo);
+ intel->prim.vb_bo = NULL;
+ drm_intel_bo_unreference(intel->first_post_swapbuffers_batch);
+ intel->first_post_swapbuffers_batch = NULL;
+
+ driDestroyOptionCache(&intel->optionCache);
+
+ /* free the Mesa context */
+ _mesa_free_context_data(&intel->ctx);
+
+ FREE(intel);
+ driContextPriv->driverPrivate = NULL;
+ }
+}
+
+GLboolean
+intelUnbindContext(__DRIcontext * driContextPriv)
+{
+ return GL_TRUE;
+}
+
+GLboolean
+intelMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv)
+{
+ struct intel_context *intel;
+ GET_CURRENT_CONTEXT(curCtx);
+
+ if (driContextPriv)
+ intel = (struct intel_context *) driContextPriv->driverPrivate;
+ else
+ intel = NULL;
+
+ /* According to the glXMakeCurrent() man page: "Pending commands to
+ * the previous context, if any, are flushed before it is released."
+ * But only flush if we're actually changing contexts.
+ */
+ if (intel_context(curCtx) && intel_context(curCtx) != intel) {
+ _mesa_flush(curCtx);
+ }
+
+ if (driContextPriv) {
+ struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
+ struct gl_framebuffer *readFb = driReadPriv->driverPrivate;
+
+ driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
+ driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
+ intel_prepare_render(intel);
+ _mesa_make_current(&intel->ctx, fb, readFb);
+
+ /* We do this in intel_prepare_render() too, but intel->ctx.DrawBuffer
+ * is NULL at that point. We can't call _mesa_make_current()
+ * first, since we need the buffer size for the initial
+ * viewport. So just call intel_draw_buffer() again here. */
+ intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
+ }
+ else {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ return GL_TRUE;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_context.h b/src/mesa/drivers/dri/intel/intel_context.h
new file mode 100644
index 0000000000..c7ac2de01e
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_context.h
@@ -0,0 +1,478 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTELCONTEXT_INC
+#define INTELCONTEXT_INC
+
+
+
+#include "main/mtypes.h"
+#include "main/mm.h"
+#include "texmem.h"
+#include "dri_metaops.h"
+#include "drm.h"
+#include "intel_bufmgr.h"
+
+#include "intel_screen.h"
+#include "intel_tex_obj.h"
+#include "i915_drm.h"
+#include "tnl/t_vertex.h"
+
+#define TAG(x) intel##x
+#include "tnl_dd/t_dd_vertex.h"
+#undef TAG
+
+#define DV_PF_555 (1<<8)
+#define DV_PF_565 (2<<8)
+#define DV_PF_8888 (3<<8)
+#define DV_PF_4444 (8<<8)
+#define DV_PF_1555 (9<<8)
+
+struct intel_region;
+struct intel_context;
+
+typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
+ intelVertex *, intelVertex *);
+typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
+ intelVertex *);
+typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
+
+/**
+ * Bits for intel->Fallback field
+ */
+/*@{*/
+#define INTEL_FALLBACK_DRAW_BUFFER 0x1
+#define INTEL_FALLBACK_READ_BUFFER 0x2
+#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
+#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
+#define INTEL_FALLBACK_USER 0x10
+#define INTEL_FALLBACK_RENDERMODE 0x20
+#define INTEL_FALLBACK_TEXTURE 0x40
+#define INTEL_FALLBACK_DRIVER 0x1000 /**< first for drivers */
+/*@}*/
+
+extern void intelFallback(struct intel_context *intel, GLbitfield bit,
+ GLboolean mode);
+#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
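
For illustration (not part of the patch): intelFallback() itself lives in the
i915/i965 backends; conceptually it just sets or clears the requested bit in
intel->Fallback, and intel_flush() drops to swrast whenever the mask is
non-zero. A hedged sketch of that bookkeeping, not the real implementation:

   static void
   fallback_sketch(struct intel_context *intel, GLbitfield bit, GLboolean mode)
   {
      if (mode)
         intel->Fallback |= bit;     /* start software rasterization */
      else
         intel->Fallback &= ~bit;    /* resume hardware once all bits clear */
   }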
+
+
+#define INTEL_WRITE_PART 0x1
+#define INTEL_WRITE_FULL 0x2
+#define INTEL_READ 0x4
+
+#define INTEL_MAX_FIXUP 64
+
+struct intel_sync_object {
+ struct gl_sync_object Base;
+
+ /** Batch associated with this sync object */
+ drm_intel_bo *bo;
+};
+
+/**
+ * intel_context is derived from Mesa's context class: GLcontext.
+ */
+struct intel_context
+{
+ GLcontext ctx; /**< base class, must be first field */
+
+ struct
+ {
+ void (*destroy) (struct intel_context * intel);
+ void (*emit_state) (struct intel_context * intel);
+ void (*finish_batch) (struct intel_context * intel);
+ void (*new_batch) (struct intel_context * intel);
+ void (*emit_invarient_state) (struct intel_context * intel);
+ void (*update_texture_state) (struct intel_context * intel);
+
+ void (*render_start) (struct intel_context * intel);
+ void (*render_prevalidate) (struct intel_context * intel);
+ void (*set_draw_region) (struct intel_context * intel,
+ struct intel_region * draw_regions[],
+ struct intel_region * depth_region,
+ GLuint num_regions);
+
+ void (*reduced_primitive_state) (struct intel_context * intel,
+ GLenum rprim);
+
+ GLboolean (*check_vertex_size) (struct intel_context * intel,
+ GLuint expected);
+ void (*invalidate_state) (struct intel_context *intel,
+ GLuint new_state);
+
+ void (*assert_not_dirty) (struct intel_context *intel);
+
+ void (*debug_batch)(struct intel_context *intel);
+ } vtbl;
+
+ struct dri_metaops meta;
+
+ GLbitfield Fallback; /**< mask of INTEL_FALLBACK_x bits */
+ GLuint NewGLState;
+
+ dri_bufmgr *bufmgr;
+ unsigned int maxBatchSize;
+
+ /**
+ * Generation number of the hardware: 2 is 8xx, 3 is 9xx pre-965, 4 is 965.
+ */
+ int gen;
+ GLboolean needs_ff_sync;
+ GLboolean is_g4x;
+ GLboolean is_945;
+ GLboolean has_luminance_srgb;
+ GLboolean has_xrgb_textures;
+
+ int urb_size;
+
+ struct intel_batchbuffer *batch;
+ drm_intel_bo *first_post_swapbuffers_batch;
+ GLboolean need_throttle;
+ GLboolean no_batch_wrap;
+
+ struct
+ {
+ GLuint id;
+ uint32_t primitive; /**< Current hardware primitive type */
+ void (*flush) (struct intel_context *);
+ GLubyte *start_ptr; /**< for i8xx */
+ drm_intel_bo *vb_bo;
+ uint8_t *vb;
+ unsigned int start_offset; /**< Byte offset of primitive sequence */
+ unsigned int current_offset; /**< Byte offset of next vertex */
+ unsigned int count; /**< Number of vertices in current primitive */
+ } prim;
+
+ GLuint stats_wm;
+ GLboolean locked;
+ char *prevLockFile;
+ int prevLockLine;
+
+ /* Offsets of fields within the current vertex:
+ */
+ GLuint coloroffset;
+ GLuint specoffset;
+ GLuint wpos_offset;
+ GLuint wpos_size;
+
+ struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
+ GLuint vertex_attr_count;
+
+ GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
+
+ GLboolean hw_stencil;
+ GLboolean hw_stipple;
+ GLboolean depth_buffer_is_float;
+ GLboolean no_rast;
+ GLboolean no_hw;
+ GLboolean always_flush_batch;
+ GLboolean always_flush_cache;
+
+ /* 0 - nonconformant, best performance;
+    * 1 - fall back to sw for known conformance bugs
+    * 2 - always fall back to sw
+ */
+ GLuint conformance_mode;
+
+ /* State for intelvb.c and inteltris.c.
+ */
+ GLuint RenderIndex;
+ GLmatrix ViewportMatrix;
+ GLenum render_primitive;
+ GLenum reduced_primitive;
+ GLuint vertex_size;
+ GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
+
+ /* Fallback rasterization functions
+ */
+ intel_point_func draw_point;
+ intel_line_func draw_line;
+ intel_tri_func draw_tri;
+
+ /**
+    * Set if rendering has occurred to the drawable's front buffer.
+ *
+ * This is used in the DRI2 case to detect that glFlush should also copy
+ * the contents of the fake front buffer to the real front buffer.
+ */
+ GLboolean front_buffer_dirty;
+
+ /**
+ * Track whether front-buffer rendering is currently enabled
+ *
+ * A separate flag is used to track this in order to support MRT more
+ * easily.
+ */
+ GLboolean is_front_buffer_rendering;
+ /**
+ * Track whether front-buffer is the current read target.
+ *
+ * This is closely associated with is_front_buffer_rendering, but may
+ * be set separately. The DRI2 fake front buffer must be referenced
+ * either way.
+ */
+ GLboolean is_front_buffer_reading;
+
+ GLboolean use_texture_tiling;
+ GLboolean use_early_z;
+
+ int driFd;
+
+ __DRIcontext *driContext;
+ struct intel_screen *intelScreen;
+ void (*saved_viewport)(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height);
+
+ /**
+ * Configuration cache
+ */
+ driOptionCache optionCache;
+};
+
+extern char *__progname;
+
+
+#define SUBPIXEL_X 0.125
+#define SUBPIXEL_Y 0.125
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
+#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
+#define ROUND_DOWN_TO(value, alignment) (ALIGN((value) - (alignment) + 1, \
+					alignment))
+#define IS_POWER_OF_TWO(val) (((val) & ((val) - 1)) == 0)
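+
+/* Worked examples (illustrative; alignment must be a power of two):
+ *   ALIGN(13, 8)         = 16,   ALIGN(16, 8) = 16
+ *   ROUND_DOWN_TO(13, 8) = 8
+ *   IS_POWER_OF_TWO(64)  = 1,    IS_POWER_OF_TWO(48) = 0
+ */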
+
+static INLINE uint32_t
+U_FIXED(float value, uint32_t frac_bits)
+{
+ value *= (1 << frac_bits);
+ return value < 0 ? 0 : value;
+}
+
+static INLINE uint32_t
+S_FIXED(float value, uint32_t frac_bits)
+{
+ return value * (1 << frac_bits);
+}
+
+#define INTEL_FIREVERTICES(intel) \
+do { \
+ if ((intel)->prim.flush) \
+ (intel)->prim.flush(intel); \
+} while (0)
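+
+/* Illustrative note: state-changing code invokes INTEL_FIREVERTICES() before
+ * emitting new state so that any vertices queued for the current primitive
+ * are flushed first; prim.flush is expected to be non-NULL only while a
+ * primitive is being accumulated.
+ */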
+
+/* ================================================================
+ * From linux kernel i386 header files, copes with odd sizes better
+ * than COPY_DWORDS would:
+ * XXX Put this in src/mesa/main/imports.h ???
+ */
+#if defined(i386) || defined(__i386__)
+static INLINE void * __memcpy(void * to, const void * from, size_t n)
+{
+ int d0, d1, d2;
+ __asm__ __volatile__(
+ "rep ; movsl\n\t"
+ "testb $2,%b4\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b4\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : "=&c" (d0), "=&D" (d1), "=&S" (d2)
+ :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
+ : "memory");
+ return (to);
+}
+#else
+#define __memcpy(a,b,c) memcpy(a,b,c)
+#endif
+
+
+/* ================================================================
+ * Debugging:
+ */
+extern int INTEL_DEBUG;
+
+#define DEBUG_TEXTURE 0x1
+#define DEBUG_STATE 0x2
+#define DEBUG_IOCTL 0x4
+#define DEBUG_BLIT 0x8
+#define DEBUG_MIPTREE 0x10
+#define DEBUG_FALLBACKS 0x20
+#define DEBUG_VERBOSE 0x40
+#define DEBUG_BATCH 0x80
+#define DEBUG_PIXEL 0x100
+#define DEBUG_BUFMGR 0x200
+#define DEBUG_REGION 0x400
+#define DEBUG_FBO 0x800
+#define DEBUG_GS 0x1000
+#define DEBUG_SYNC 0x2000
+#define DEBUG_PRIMS 0x4000
+#define DEBUG_VERTS 0x8000
+#define DEBUG_DRI 0x10000
+#define DEBUG_SF 0x20000
+#define DEBUG_SANITY 0x40000
+#define DEBUG_SLEEP 0x80000
+#define DEBUG_STATS 0x100000
+#define DEBUG_TILE 0x200000
+#define DEBUG_SINGLE_THREAD 0x400000
+#define DEBUG_WM 0x800000
+#define DEBUG_URB 0x1000000
+#define DEBUG_VS 0x2000000
+#define DEBUG_GLSL_FORCE 0x4000000
+#define DEBUG_CLIP 0x8000000
+
+#define DBG(...) do { \
+ if (INTEL_DEBUG & FILE_DEBUG_FLAG) \
+ printf(__VA_ARGS__); \
+} while(0)
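+
+/* Usage sketch (illustrative; the format arguments here are made up): each
+ * source file defines FILE_DEBUG_FLAG with the DEBUG_* bit relevant to it
+ * before using DBG(), e.g.
+ *
+ *    #define FILE_DEBUG_FLAG DEBUG_BLIT
+ *    ...
+ *    DBG("%s: %dx%d\n", __FUNCTION__, width, height);
+ *
+ * Output appears only when the corresponding bit is set in INTEL_DEBUG.
+ */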
+
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I830_M 0x3577
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I865_G 0x2572
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GME 0x27AE
+#define PCI_CHIP_G33_G 0x29C2
+#define PCI_CHIP_Q35_G 0x29B2
+#define PCI_CHIP_Q33_G 0x29D2
+
+
+/* ================================================================
+ * intel_context.c:
+ */
+
+extern GLboolean intelInitContext(struct intel_context *intel,
+ int api,
+ const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate,
+ struct dd_function_table *functions);
+
+extern void intelFinish(GLcontext * ctx);
+extern void intel_flush(GLcontext * ctx);
+
+extern void intelInitDriverFunctions(struct dd_function_table *functions);
+
+void intel_init_syncobj_functions(struct dd_function_table *functions);
+
+
+/* ================================================================
+ * intel_state.c:
+ */
+extern void intelInitStateFuncs(struct dd_function_table *functions);
+
+#define COMPAREFUNC_ALWAYS 0
+#define COMPAREFUNC_NEVER 0x1
+#define COMPAREFUNC_LESS 0x2
+#define COMPAREFUNC_EQUAL 0x3
+#define COMPAREFUNC_LEQUAL 0x4
+#define COMPAREFUNC_GREATER 0x5
+#define COMPAREFUNC_NOTEQUAL 0x6
+#define COMPAREFUNC_GEQUAL 0x7
+
+#define STENCILOP_KEEP 0
+#define STENCILOP_ZERO 0x1
+#define STENCILOP_REPLACE 0x2
+#define STENCILOP_INCRSAT 0x3
+#define STENCILOP_DECRSAT 0x4
+#define STENCILOP_INCR 0x5
+#define STENCILOP_DECR 0x6
+#define STENCILOP_INVERT 0x7
+
+#define LOGICOP_CLEAR 0
+#define LOGICOP_NOR 0x1
+#define LOGICOP_AND_INV 0x2
+#define LOGICOP_COPY_INV 0x3
+#define LOGICOP_AND_RVRSE 0x4
+#define LOGICOP_INV 0x5
+#define LOGICOP_XOR 0x6
+#define LOGICOP_NAND 0x7
+#define LOGICOP_AND 0x8
+#define LOGICOP_EQUIV 0x9
+#define LOGICOP_NOOP 0xa
+#define LOGICOP_OR_INV 0xb
+#define LOGICOP_COPY 0xc
+#define LOGICOP_OR_RVRSE 0xd
+#define LOGICOP_OR 0xe
+#define LOGICOP_SET 0xf
+
+#define BLENDFACT_ZERO 0x01
+#define BLENDFACT_ONE 0x02
+#define BLENDFACT_SRC_COLR 0x03
+#define BLENDFACT_INV_SRC_COLR 0x04
+#define BLENDFACT_SRC_ALPHA 0x05
+#define BLENDFACT_INV_SRC_ALPHA 0x06
+#define BLENDFACT_DST_ALPHA 0x07
+#define BLENDFACT_INV_DST_ALPHA 0x08
+#define BLENDFACT_DST_COLR 0x09
+#define BLENDFACT_INV_DST_COLR 0x0a
+#define BLENDFACT_SRC_ALPHA_SATURATE 0x0b
+#define BLENDFACT_CONST_COLOR 0x0c
+#define BLENDFACT_INV_CONST_COLOR 0x0d
+#define BLENDFACT_CONST_ALPHA 0x0e
+#define BLENDFACT_INV_CONST_ALPHA 0x0f
+#define BLENDFACT_MASK 0x0f
+
+enum {
+ DRI_CONF_BO_REUSE_DISABLED,
+ DRI_CONF_BO_REUSE_ALL
+};
+
+extern int intel_translate_shadow_compare_func(GLenum func);
+extern int intel_translate_compare_func(GLenum func);
+extern int intel_translate_stencil_op(GLenum op);
+extern int intel_translate_blend_factor(GLenum factor);
+extern int intel_translate_logic_op(GLenum opcode);
+
+void intel_update_renderbuffers(__DRIcontext *context,
+ __DRIdrawable *drawable);
+void intel_prepare_render(struct intel_context *intel);
+
+void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
+ uint32_t buffer_id);
+
+/*======================================================================
+ * Inline conversion functions.
+ * These are better-typed than the macros used previously:
+ */
+static INLINE struct intel_context *
+intel_context(GLcontext * ctx)
+{
+ return (struct intel_context *) ctx;
+}
+
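+/** Note: like the IS_POWER_OF_TWO() macro above, this test reports 0 as a
+ * power of two, since 0 & (0 - 1) == 0.
+ */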
+static INLINE GLboolean
+is_power_of_two(uint32_t value)
+{
+ return (value & (value - 1)) == 0;
+}
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_decode.c b/src/mesa/drivers/dri/intel/intel_decode.c
new file mode 100644
index 0000000000..650010ac9c
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_decode.c
@@ -0,0 +1,1822 @@
+/* -*- c-basic-offset: 4 -*- */
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_decode.c
+ * This file contains code to print out batchbuffer contents in a
+ * human-readable format.
+ *
+ * The current version handles i830 through i965 packets, but only
+ * pretty-prints a subset of them.  The intention is for it to make just a
+ * best attempt to decode, but never crash in the process.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "intel_decode.h"
+#include "intel_chipset.h"
+
+#define BUFFER_FAIL(_count, _len, _name) do { \
+ fprintf(out, "Buffer size too small in %s (%d < %d)\n", \
+ (_name), (_count), (_len)); \
+ (*failures)++; \
+ return count; \
+} while (0)
+
+static FILE *out;
+static uint32_t saved_s2 = 0, saved_s4 = 0;
+static char saved_s2_set = 0, saved_s4_set = 0;
+
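+/* Reinterprets the bits of a 32-bit word as an IEEE-754 float through a
+ * union, e.g. int_as_float(0x3f800000) == 1.0f and int_as_float(0x40490fdb)
+ * is approximately pi (illustrative values).
+ */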
+static float
+int_as_float(uint32_t intval)
+{
+ union intfloat {
+ uint32_t i;
+ float f;
+ } uval;
+
+ uval.i = intval;
+ return uval.f;
+}
+
+static void
+instr_out(uint32_t *data, uint32_t hw_offset, unsigned int index,
+ char *fmt, ...)
+{
+ va_list va;
+
+ fprintf(out, "0x%08x: 0x%08x:%s ", hw_offset + index * 4, data[index],
+ index == 0 ? "" : " ");
+ va_start(va, fmt);
+ vfprintf(out, fmt, va);
+ va_end(va);
+}
+
+
+static int
+decode_mi(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int len_mask;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_mi[] = {
+ { 0x08, 0, 1, 1, "MI_ARB_ON_OFF" },
+ { 0x0a, 0, 1, 1, "MI_BATCH_BUFFER_END" },
+ { 0x31, 0x3f, 2, 2, "MI_BATCH_BUFFER_START" },
+ { 0x14, 0x3f, 3, 3, "MI_DISPLAY_BUFFER_INFO" },
+ { 0x04, 0, 1, 1, "MI_FLUSH" },
+ { 0x22, 0, 3, 3, "MI_LOAD_REGISTER_IMM" },
+ { 0x13, 0x3f, 2, 2, "MI_LOAD_SCAN_LINES_EXCL" },
+ { 0x12, 0x3f, 2, 2, "MI_LOAD_SCAN_LINES_INCL" },
+ { 0x00, 0, 1, 1, "MI_NOOP" },
+ { 0x11, 0x3f, 2, 2, "MI_OVERLAY_FLIP" },
+ { 0x07, 0, 1, 1, "MI_REPORT_HEAD" },
+ { 0x18, 0x3f, 2, 2, "MI_SET_CONTEXT" },
+ { 0x20, 0x3f, 3, 4, "MI_STORE_DATA_IMM" },
+ { 0x21, 0x3f, 3, 4, "MI_STORE_DATA_INDEX" },
+ { 0x24, 0x3f, 3, 3, "MI_STORE_REGISTER_MEM" },
+ { 0x02, 0, 1, 1, "MI_USER_INTERRUPT" },
+ { 0x03, 0, 1, 1, "MI_WAIT_FOR_EVENT" },
+ };
+
+
+ for (opcode = 0; opcode < sizeof(opcodes_mi) / sizeof(opcodes_mi[0]);
+ opcode++) {
+ if ((data[0] & 0x1f800000) >> 23 == opcodes_mi[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_mi[opcode].name);
+ if (opcodes_mi[opcode].max_len > 1) {
+ len = (data[0] & opcodes_mi[opcode].len_mask) + 2;
+ if (len < opcodes_mi[opcode].min_len ||
+ len > opcodes_mi[opcode].max_len)
+ {
+ fprintf(out, "Bad length (%d) in %s, [%d, %d]\n",
+ len, opcodes_mi[opcode].name,
+ opcodes_mi[opcode].min_len,
+ opcodes_mi[opcode].max_len);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_mi[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "MI UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_2d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode, len;
+ char *format = NULL;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_2d[] = {
+ { 0x40, 5, 5, "COLOR_BLT" },
+ { 0x43, 6, 6, "SRC_COPY_BLT" },
+ { 0x01, 8, 8, "XY_SETUP_BLT" },
+ { 0x11, 9, 9, "XY_SETUP_MONO_PATTERN_SL_BLT" },
+ { 0x03, 3, 3, "XY_SETUP_CLIP_BLT" },
+ { 0x24, 2, 2, "XY_PIXEL_BLT" },
+ { 0x25, 3, 3, "XY_SCANLINES_BLT" },
+ { 0x26, 4, 4, "Y_TEXT_BLT" },
+ { 0x31, 5, 134, "XY_TEXT_IMMEDIATE_BLT" },
+ { 0x50, 6, 6, "XY_COLOR_BLT" },
+ { 0x51, 6, 6, "XY_PAT_BLT" },
+ { 0x76, 8, 8, "XY_PAT_CHROMA_BLT" },
+ { 0x72, 7, 135, "XY_PAT_BLT_IMMEDIATE" },
+ { 0x77, 9, 137, "XY_PAT_CHROMA_BLT_IMMEDIATE" },
+ { 0x52, 9, 9, "XY_MONO_PAT_BLT" },
+ { 0x59, 7, 7, "XY_MONO_PAT_FIXED_BLT" },
+ { 0x53, 8, 8, "XY_SRC_COPY_BLT" },
+ { 0x54, 8, 8, "XY_MONO_SRC_COPY_BLT" },
+ { 0x71, 9, 137, "XY_MONO_SRC_COPY_IMMEDIATE_BLT" },
+ { 0x55, 9, 9, "XY_FULL_BLT" },
+ { 0x55, 9, 137, "XY_FULL_IMMEDIATE_PATTERN_BLT" },
+ { 0x56, 9, 9, "XY_FULL_MONO_SRC_BLT" },
+ { 0x75, 10, 138, "XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT" },
+ { 0x57, 12, 12, "XY_FULL_MONO_PATTERN_BLT" },
+ { 0x58, 12, 12, "XY_FULL_MONO_PATTERN_MONO_SRC_BLT" },
+ };
+
+ switch ((data[0] & 0x1fc00000) >> 22) {
+ case 0x50:
+ instr_out(data, hw_offset, 0,
+ "XY_COLOR_BLT (rgb %sabled, alpha %sabled, dst tile %d)\n",
+ (data[0] & (1 << 20)) ? "en" : "dis",
+ (data[0] & (1 << 21)) ? "en" : "dis",
+ (data[0] >> 11) & 1);
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 6)
+ fprintf(out, "Bad count in XY_COLOR_BLT\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "XY_COLOR_BLT");
+
+ switch ((data[1] >> 24) & 0x3) {
+ case 0:
+ format="8";
+ break;
+ case 1:
+ format="565";
+ break;
+ case 2:
+ format="1555";
+ break;
+ case 3:
+ format="8888";
+ break;
+ }
+
+ instr_out(data, hw_offset, 1, "format %s, pitch %d, "
+ "clipping %sabled\n", format,
+ (short)(data[1] & 0xffff),
+ data[1] & (1 << 30) ? "en" : "dis");
+ instr_out(data, hw_offset, 2, "(%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(data, hw_offset, 3, "(%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(data, hw_offset, 4, "offset 0x%08x\n", data[4]);
+ instr_out(data, hw_offset, 5, "color\n");
+ return len;
+ case 0x53:
+ instr_out(data, hw_offset, 0,
+ "XY_SRC_COPY_BLT (rgb %sabled, alpha %sabled, "
+ "src tile %d, dst tile %d)\n",
+ (data[0] & (1 << 20)) ? "en" : "dis",
+ (data[0] & (1 << 21)) ? "en" : "dis",
+ (data[0] >> 15) & 1,
+ (data[0] >> 11) & 1);
+
+ len = (data[0] & 0x000000ff) + 2;
+ if (len != 8)
+ fprintf(out, "Bad count in XY_SRC_COPY_BLT\n");
+ if (count < 8)
+ BUFFER_FAIL(count, len, "XY_SRC_COPY_BLT");
+
+ switch ((data[1] >> 24) & 0x3) {
+ case 0:
+ format="8";
+ break;
+ case 1:
+ format="565";
+ break;
+ case 2:
+ format="1555";
+ break;
+ case 3:
+ format="8888";
+ break;
+ }
+
+ instr_out(data, hw_offset, 1, "format %s, dst pitch %d, "
+ "clipping %sabled\n", format,
+ (short)(data[1] & 0xffff),
+ data[1] & (1 << 30) ? "en" : "dis");
+ instr_out(data, hw_offset, 2, "dst (%d,%d)\n",
+ data[2] & 0xffff, data[2] >> 16);
+ instr_out(data, hw_offset, 3, "dst (%d,%d)\n",
+ data[3] & 0xffff, data[3] >> 16);
+ instr_out(data, hw_offset, 4, "dst offset 0x%08x\n", data[4]);
+ instr_out(data, hw_offset, 5, "src (%d,%d)\n",
+ data[5] & 0xffff, data[5] >> 16);
+ instr_out(data, hw_offset, 6, "src pitch %d\n",
+ (short)(data[6] & 0xffff));
+ instr_out(data, hw_offset, 7, "src offset 0x%08x\n", data[7]);
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_2d) / sizeof(opcodes_2d[0]);
+ opcode++) {
+ if ((data[0] & 0x1fc00000) >> 22 == opcodes_2d[opcode].opcode) {
+ unsigned int i;
+
+ len = 1;
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_2d[opcode].name);
+ if (opcodes_2d[opcode].max_len > 1) {
+ len = (data[0] & 0x000000ff) + 2;
+ if (len < opcodes_2d[opcode].min_len ||
+ len > opcodes_2d[opcode].max_len)
+ {
+ fprintf(out, "Bad count in %s\n", opcodes_2d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_2d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "2D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_3d_1c(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ switch ((data[0] & 0x00f80000) >> 19) {
+ case 0x11:
+ instr_out(data, hw_offset, 0, "3DSTATE_DEPTH_SUBRECTANGLE_DISALBE\n");
+ return 1;
+ case 0x10:
+ instr_out(data, hw_offset, 0, "3DSTATE_SCISSOR_ENABLE\n");
+ return 1;
+ case 0x01:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_COORD_SET_I830\n");
+ return 1;
+ case 0x0a:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_CUBE_I830\n");
+ return 1;
+ case 0x05:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_TEX_STREAM_I830\n");
+ return 1;
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+/** Sets the string dstname to describe the destination of the PS instruction */
+static void
+i915_get_instruction_dst(uint32_t *data, int i, char *dstname, int do_mask)
+{
+ uint32_t a0 = data[i];
+ int dst_nr = (a0 >> 14) & 0xf;
+ char dstmask[8];
+ char *sat;
+
+ if (do_mask) {
+ if (((a0 >> 10) & 0xf) == 0xf) {
+ dstmask[0] = 0;
+ } else {
+ int dstmask_index = 0;
+
+ dstmask[dstmask_index++] = '.';
+ if (a0 & (1 << 10))
+ dstmask[dstmask_index++] = 'x';
+ if (a0 & (1 << 11))
+ dstmask[dstmask_index++] = 'y';
+ if (a0 & (1 << 12))
+ dstmask[dstmask_index++] = 'z';
+ if (a0 & (1 << 13))
+ dstmask[dstmask_index++] = 'w';
+ dstmask[dstmask_index++] = 0;
+ }
+
+ if (a0 & (1 << 22))
+ sat = ".sat";
+ else
+ sat = "";
+ } else {
+ dstmask[0] = 0;
+ sat = "";
+ }
+
+ switch ((a0 >> 19) & 0x7) {
+ case 0:
+ if (dst_nr > 15)
+ fprintf(out, "bad destination reg R%d\n", dst_nr);
+ sprintf(dstname, "R%d%s%s", dst_nr, dstmask, sat);
+ break;
+ case 4:
+ if (dst_nr > 0)
+ fprintf(out, "bad destination reg oC%d\n", dst_nr);
+ sprintf(dstname, "oC%s%s", dstmask, sat);
+ break;
+ case 5:
+ if (dst_nr > 0)
+ fprintf(out, "bad destination reg oD%d\n", dst_nr);
+ sprintf(dstname, "oD%s%s", dstmask, sat);
+ break;
+ case 6:
+ if (dst_nr > 2)
+ fprintf(out, "bad destination reg U%d\n", dst_nr);
+ sprintf(dstname, "U%d%s%s", dst_nr, dstmask, sat);
+ break;
+ default:
+ sprintf(dstname, "RESERVED");
+ break;
+ }
+}
+
+static char *
+i915_get_channel_swizzle(uint32_t select)
+{
+ switch (select & 0x7) {
+ case 0:
+ return (select & 8) ? "-x" : "x";
+ case 1:
+ return (select & 8) ? "-y" : "y";
+ case 2:
+ return (select & 8) ? "-z" : "z";
+ case 3:
+ return (select & 8) ? "-w" : "w";
+ case 4:
+ return (select & 8) ? "-0" : "0";
+ case 5:
+ return (select & 8) ? "-1" : "1";
+ default:
+ return (select & 8) ? "-bad" : "bad";
+ }
+}
+
+static void
+i915_get_instruction_src_name(uint32_t src_type, uint32_t src_nr, char *name)
+{
+ switch (src_type) {
+ case 0:
+ sprintf(name, "R%d", src_nr);
+ if (src_nr > 15)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 1:
+ if (src_nr < 8)
+ sprintf(name, "T%d", src_nr);
+ else if (src_nr == 8)
+ sprintf(name, "DIFFUSE");
+ else if (src_nr == 9)
+ sprintf(name, "SPECULAR");
+ else if (src_nr == 10)
+ sprintf(name, "FOG");
+ else {
+ fprintf(out, "bad src reg T%d\n", src_nr);
+ sprintf(name, "RESERVED");
+ }
+ break;
+ case 2:
+ sprintf(name, "C%d", src_nr);
+ if (src_nr > 31)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 4:
+ sprintf(name, "oC");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oC%d\n", src_nr);
+ break;
+ case 5:
+ sprintf(name, "oD");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oD%d\n", src_nr);
+ break;
+ case 6:
+ sprintf(name, "U%d", src_nr);
+ if (src_nr > 2)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ default:
+ fprintf(out, "bad src reg type %d\n", src_type);
+ sprintf(name, "RESERVED");
+ break;
+ }
+}
+
+static void
+i915_get_instruction_src0(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a0 = data[i];
+ uint32_t a1 = data[i + 1];
+ int src_nr = (a0 >> 2) & 0x1f;
+ char *swizzle_x = i915_get_channel_swizzle((a1 >> 28) & 0xf);
+ char *swizzle_y = i915_get_channel_swizzle((a1 >> 24) & 0xf);
+ char *swizzle_z = i915_get_channel_swizzle((a1 >> 20) & 0xf);
+ char *swizzle_w = i915_get_channel_swizzle((a1 >> 16) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a0 >> 7) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z, swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void
+i915_get_instruction_src1(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a1 = data[i + 1];
+ uint32_t a2 = data[i + 2];
+ int src_nr = (a1 >> 8) & 0x1f;
+ char *swizzle_x = i915_get_channel_swizzle((a1 >> 4) & 0xf);
+ char *swizzle_y = i915_get_channel_swizzle((a1 >> 0) & 0xf);
+ char *swizzle_z = i915_get_channel_swizzle((a2 >> 28) & 0xf);
+ char *swizzle_w = i915_get_channel_swizzle((a2 >> 24) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a1 >> 13) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z, swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void
+i915_get_instruction_src2(uint32_t *data, int i, char *srcname)
+{
+ uint32_t a2 = data[i + 2];
+ int src_nr = (a2 >> 16) & 0x1f;
+ char *swizzle_x = i915_get_channel_swizzle((a2 >> 12) & 0xf);
+ char *swizzle_y = i915_get_channel_swizzle((a2 >> 8) & 0xf);
+ char *swizzle_z = i915_get_channel_swizzle((a2 >> 4) & 0xf);
+ char *swizzle_w = i915_get_channel_swizzle((a2 >> 0) & 0xf);
+ char swizzle[100];
+
+ i915_get_instruction_src_name((a2 >> 21) & 0x7, src_nr, srcname);
+ sprintf(swizzle, ".%s%s%s%s", swizzle_x, swizzle_y, swizzle_z, swizzle_w);
+ if (strcmp(swizzle, ".xyzw") != 0)
+ strcat(srcname, swizzle);
+}
+
+static void
+i915_get_instruction_addr(uint32_t src_type, uint32_t src_nr, char *name)
+{
+ switch (src_type) {
+ case 0:
+ sprintf(name, "R%d", src_nr);
+ if (src_nr > 15)
+ fprintf(out, "bad src reg %s\n", name);
+ break;
+ case 1:
+ if (src_nr < 8)
+ sprintf(name, "T%d", src_nr);
+ else if (src_nr == 8)
+ sprintf(name, "DIFFUSE");
+ else if (src_nr == 9)
+ sprintf(name, "SPECULAR");
+ else if (src_nr == 10)
+ sprintf(name, "FOG");
+ else {
+ fprintf(out, "bad src reg T%d\n", src_nr);
+ sprintf(name, "RESERVED");
+ }
+ break;
+ case 4:
+ sprintf(name, "oC");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oC%d\n", src_nr);
+ break;
+ case 5:
+ sprintf(name, "oD");
+ if (src_nr > 0)
+ fprintf(out, "bad src reg oD%d\n", src_nr);
+ break;
+ default:
+ fprintf(out, "bad src reg type %d\n", src_type);
+ sprintf(name, "RESERVED");
+ break;
+ }
+}
+
+static void
+i915_decode_alu1(uint32_t *data, uint32_t hw_offset,
+ int i, char *instr_prefix, char *op_name)
+{
+ char dst[100], src0[100];
+
+ i915_get_instruction_dst(data, i, dst, 1);
+ i915_get_instruction_src0(data, i, src0);
+
+ instr_out(data, hw_offset, i++, "%s: %s %s, %s\n", instr_prefix,
+ op_name, dst, src0);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_alu2(uint32_t *data, uint32_t hw_offset,
+ int i, char *instr_prefix, char *op_name)
+{
+ char dst[100], src0[100], src1[100];
+
+ i915_get_instruction_dst(data, i, dst, 1);
+ i915_get_instruction_src0(data, i, src0);
+ i915_get_instruction_src1(data, i, src1);
+
+ instr_out(data, hw_offset, i++, "%s: %s %s, %s, %s\n", instr_prefix,
+ op_name, dst, src0, src1);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_alu3(uint32_t *data, uint32_t hw_offset,
+ int i, char *instr_prefix, char *op_name)
+{
+ char dst[100], src0[100], src1[100], src2[100];
+
+ i915_get_instruction_dst(data, i, dst, 1);
+ i915_get_instruction_src0(data, i, src0);
+ i915_get_instruction_src1(data, i, src1);
+ i915_get_instruction_src2(data, i, src2);
+
+ instr_out(data, hw_offset, i++, "%s: %s %s, %s, %s, %s\n", instr_prefix,
+ op_name, dst, src0, src1, src2);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_tex(uint32_t *data, uint32_t hw_offset, int i, char *instr_prefix,
+ char *tex_name)
+{
+ uint32_t t0 = data[i];
+ uint32_t t1 = data[i + 1];
+ char dst_name[100];
+ char addr_name[100];
+ int sampler_nr;
+
+ i915_get_instruction_dst(data, i, dst_name, 0);
+ i915_get_instruction_addr((t1 >> 24) & 0x7,
+ (t1 >> 17) & 0xf,
+ addr_name);
+ sampler_nr = t0 & 0xf;
+
+ instr_out(data, hw_offset, i++, "%s: %s %s, S%d, %s\n", instr_prefix,
+ tex_name, dst_name, sampler_nr, addr_name);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+}
+
+static void
+i915_decode_dcl(uint32_t *data, uint32_t hw_offset, int i, char *instr_prefix)
+{
+ uint32_t d0 = data[i];
+ char *sampletype;
+ int dcl_nr = (d0 >> 14) & 0xf;
+ char *dcl_x = d0 & (1 << 10) ? "x" : "";
+ char *dcl_y = d0 & (1 << 11) ? "y" : "";
+ char *dcl_z = d0 & (1 << 12) ? "z" : "";
+ char *dcl_w = d0 & (1 << 13) ? "w" : "";
+ char dcl_mask[10];
+
+ switch ((d0 >> 19) & 0x3) {
+ case 1:
+ sprintf(dcl_mask, ".%s%s%s%s", dcl_x, dcl_y, dcl_z, dcl_w);
+ if (strcmp(dcl_mask, ".") == 0)
+ fprintf(out, "bad (empty) dcl mask\n");
+
+ if (dcl_nr > 10)
+ fprintf(out, "bad T%d dcl register number\n", dcl_nr);
+ if (dcl_nr < 8) {
+ if (strcmp(dcl_mask, ".x") != 0 &&
+ strcmp(dcl_mask, ".xy") != 0 &&
+ strcmp(dcl_mask, ".xz") != 0 &&
+ strcmp(dcl_mask, ".w") != 0 &&
+ strcmp(dcl_mask, ".xyzw") != 0) {
+ fprintf(out, "bad T%d.%s dcl mask\n", dcl_nr, dcl_mask);
+ }
+ instr_out(data, hw_offset, i++, "%s: DCL T%d%s\n", instr_prefix,
+ dcl_nr, dcl_mask);
+ } else {
+ if (strcmp(dcl_mask, ".xz") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n", dcl_mask);
+ else if (strcmp(dcl_mask, ".xw") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n", dcl_mask);
+ else if (strcmp(dcl_mask, ".xzw") == 0)
+ fprintf(out, "errataed bad dcl mask %s\n", dcl_mask);
+
+ if (dcl_nr == 8) {
+ instr_out(data, hw_offset, i++, "%s: DCL DIFFUSE%s\n", instr_prefix,
+ dcl_mask);
+ } else if (dcl_nr == 9) {
+ instr_out(data, hw_offset, i++, "%s: DCL SPECULAR%s\n", instr_prefix,
+ dcl_mask);
+ } else if (dcl_nr == 10) {
+ instr_out(data, hw_offset, i++, "%s: DCL FOG%s\n", instr_prefix,
+ dcl_mask);
+ }
+ }
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ break;
+ case 3:
+ switch ((d0 >> 22) & 0x3) {
+ case 0:
+ sampletype = "2D";
+ break;
+ case 1:
+ sampletype = "CUBE";
+ break;
+ case 2:
+ sampletype = "3D";
+ break;
+ default:
+ sampletype = "RESERVED";
+ break;
+ }
+ if (dcl_nr > 15)
+ fprintf(out, "bad S%d dcl register number\n", dcl_nr);
+ instr_out(data, hw_offset, i++, "%s: DCL S%d %s\n", instr_prefix,
+ dcl_nr, sampletype);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ break;
+ default:
+ instr_out(data, hw_offset, i++, "%s: DCL RESERVED%d\n", instr_prefix, dcl_nr);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ }
+}
+
+static void
+i915_decode_instruction(uint32_t *data, uint32_t hw_offset,
+ int i, char *instr_prefix)
+{
+ switch ((data[i] >> 24) & 0x1f) {
+ case 0x0:
+ instr_out(data, hw_offset, i++, "%s: NOP\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ break;
+ case 0x01:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "ADD");
+ break;
+ case 0x02:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "MOV");
+ break;
+ case 0x03:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "MUL");
+ break;
+ case 0x04:
+ i915_decode_alu3(data, hw_offset, i, instr_prefix, "MAD");
+ break;
+ case 0x05:
+ i915_decode_alu3(data, hw_offset, i, instr_prefix, "DP2ADD");
+ break;
+ case 0x06:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "DP3");
+ break;
+ case 0x07:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "DP4");
+ break;
+ case 0x08:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "FRC");
+ break;
+ case 0x09:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "RCP");
+ break;
+ case 0x0a:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "RSQ");
+ break;
+ case 0x0b:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "EXP");
+ break;
+ case 0x0c:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "LOG");
+ break;
+ case 0x0d:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "CMP");
+ break;
+ case 0x0e:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "MIN");
+ break;
+ case 0x0f:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "MAX");
+ break;
+ case 0x10:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "FLR");
+ break;
+ case 0x11:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "MOD");
+ break;
+ case 0x12:
+ i915_decode_alu1(data, hw_offset, i, instr_prefix, "TRC");
+ break;
+ case 0x13:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "SGE");
+ break;
+ case 0x14:
+ i915_decode_alu2(data, hw_offset, i, instr_prefix, "SLT");
+ break;
+ case 0x15:
+ i915_decode_tex(data, hw_offset, i, instr_prefix, "TEXLD");
+ break;
+ case 0x16:
+ i915_decode_tex(data, hw_offset, i, instr_prefix, "TEXLDP");
+ break;
+ case 0x17:
+ i915_decode_tex(data, hw_offset, i, instr_prefix, "TEXLDB");
+ break;
+ case 0x19:
+ i915_decode_dcl(data, hw_offset, i, instr_prefix);
+ break;
+ default:
+ instr_out(data, hw_offset, i++, "%s: unknown\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ instr_out(data, hw_offset, i++, "%s\n", instr_prefix);
+ break;
+ }
+}
+
+static int
+decode_3d_1d(uint32_t *data, int count, uint32_t hw_offset, int *failures, int i830)
+{
+ unsigned int len, i, c, opcode, word, map, sampler, instr;
+ char *format;
+
+ struct {
+ uint32_t opcode;
+ int i830_only;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d_1d[] = {
+ { 0x8e, 0, 3, 3, "3DSTATE_BUFFER_INFO" },
+ { 0x86, 0, 4, 4, "3DSTATE_CHROMA_KEY" },
+ { 0x9c, 0, 1, 1, "3DSTATE_CLEAR_PARAMETERS" },
+ { 0x88, 0, 2, 2, "3DSTATE_CONSTANT_BLEND_COLOR" },
+ { 0x99, 0, 2, 2, "3DSTATE_DEFAULT_DIFFUSE" },
+ { 0x9a, 0, 2, 2, "3DSTATE_DEFAULT_SPECULAR" },
+ { 0x98, 0, 2, 2, "3DSTATE_DEFAULT_Z" },
+ { 0x97, 0, 2, 2, "3DSTATE_DEPTH_OFFSET_SCALE" },
+ { 0x85, 0, 2, 2, "3DSTATE_DEST_BUFFER_VARIABLES" },
+ { 0x80, 0, 5, 5, "3DSTATE_DRAWING_RECTANGLE" },
+ { 0x8e, 0, 3, 3, "3DSTATE_BUFFER_INFO" },
+ { 0x9d, 0, 65, 65, "3DSTATE_FILTER_COEFFICIENTS_4X4" },
+ { 0x9e, 0, 4, 4, "3DSTATE_MONO_FILTER" },
+ { 0x89, 0, 4, 4, "3DSTATE_FOG_MODE" },
+ { 0x8f, 0, 2, 16, "3DSTATE_MAP_PALLETE_LOAD_32" },
+ { 0x81, 0, 3, 3, "3DSTATE_SCISSOR_RECTANGLE" },
+ { 0x83, 0, 2, 2, "3DSTATE_SPAN_STIPPLE" },
+ { 0x8c, 1, 2, 2, "3DSTATE_MAP_COORD_TRANSFORM_I830" },
+ { 0x8b, 1, 2, 2, "3DSTATE_MAP_VERTEX_TRANSFORM_I830" },
+ { 0x8d, 1, 3, 3, "3DSTATE_W_STATE_I830" },
+ { 0x01, 1, 2, 2, "3DSTATE_COLOR_FACTOR_I830" },
+ { 0x02, 1, 2, 2, "3DSTATE_MAP_COORD_SETBIND_I830" },
+ };
+
+ switch ((data[0] & 0x00ff0000) >> 16) {
+ case 0x07:
+	/* This instruction is unusual: a length of 0 means just 1 DWORD instead
+	 * of 2.  The documentation calls a 0 length unsupported in one place but
+	 * required in another, and 0-length LOAD_INDIRECTs appear to cause no
+	 * harm in practice.
+ */
+ instr_out(data, hw_offset, 0, "3DSTATE_LOAD_INDIRECT\n");
+ len = (data[0] & 0x000000ff) + 1;
+ i = 1;
+ if (data[0] & (0x01 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "SIS.0\n");
+ instr_out(data, hw_offset, i++, "SIS.1\n");
+ }
+ if (data[0] & (0x02 << 8)) {
+ if (i + 1 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "DIS.0\n");
+ }
+ if (data[0] & (0x04 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "SSB.0\n");
+ instr_out(data, hw_offset, i++, "SSB.1\n");
+ }
+ if (data[0] & (0x08 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "MSB.0\n");
+ instr_out(data, hw_offset, i++, "MSB.1\n");
+ }
+ if (data[0] & (0x10 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "PSP.0\n");
+ instr_out(data, hw_offset, i++, "PSP.1\n");
+ }
+ if (data[0] & (0x20 << 8)) {
+ if (i + 2 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_INDIRECT");
+ instr_out(data, hw_offset, i++, "PSC.0\n");
+ instr_out(data, hw_offset, i++, "PSC.1\n");
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_LOAD_INDIRECT\n");
+ (*failures)++;
+ return len;
+ }
+ return len;
+ case 0x04:
+ instr_out(data, hw_offset, 0, "3DSTATE_LOAD_STATE_IMMEDIATE_1\n");
+ len = (data[0] & 0x0000000f) + 2;
+ i = 1;
+ for (word = 0; word <= 7; word++) {
+ if (data[0] & (1 << (4 + word))) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_LOAD_STATE_IMMEDIATE_1");
+
+ /* save vertex state for decode */
+ if (word == 2) {
+ saved_s2_set = 1;
+ saved_s2 = data[i];
+ }
+ if (word == 4) {
+ saved_s4_set = 1;
+ saved_s4 = data[i];
+ }
+
+ instr_out(data, hw_offset, i++, "S%d\n", word);
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_LOAD_INDIRECT\n");
+ (*failures)++;
+ }
+ return len;
+ case 0x00:
+ instr_out(data, hw_offset, 0, "3DSTATE_MAP_STATE\n");
+ len = (data[0] & 0x0000003f) + 2;
+ instr_out(data, hw_offset, 1, "mask\n");
+
+ i = 2;
+ for (map = 0; map <= 15; map++) {
+ if (data[1] & (1 << map)) {
+ if (i + 3 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_MAP_STATE");
+ instr_out(data, hw_offset, i++, "map %d MS2\n", map);
+ instr_out(data, hw_offset, i++, "map %d MS3\n", map);
+ instr_out(data, hw_offset, i++, "map %d MS4\n", map);
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_MAP_STATE\n");
+ (*failures)++;
+ return len;
+ }
+ return len;
+ case 0x06:
+ instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ len = (data[0] & 0x000000ff) + 2;
+
+ i = 2;
+ for (c = 0; c <= 31; c++) {
+ if (data[1] & (1 << c)) {
+ if (i + 4 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_CONSTANTS");
+ instr_out(data, hw_offset, i, "C%d.X = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ instr_out(data, hw_offset, i, "C%d.Y = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ instr_out(data, hw_offset, i, "C%d.Z = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ instr_out(data, hw_offset, i, "C%d.W = %f\n",
+ c, int_as_float(data[i]));
+ i++;
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_PIXEL_SHADER_CONSTANTS\n");
+ (*failures)++;
+ }
+ return len;
+ case 0x05:
+ instr_out(data, hw_offset, 0, "3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ len = (data[0] & 0x000000ff) + 2;
+ if ((len - 1) % 3 != 0 || len > 370) {
+ fprintf(out, "Bad count in 3DSTATE_PIXEL_SHADER_PROGRAM\n");
+ (*failures)++;
+ }
+ i = 1;
+ for (instr = 0; instr < (len - 1) / 3; instr++) {
+ char instr_prefix[10];
+
+ if (i + 3 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_PIXEL_SHADER_PROGRAM");
+ sprintf(instr_prefix, "PS%03d", instr);
+ i915_decode_instruction(data, hw_offset, i, instr_prefix);
+ i += 3;
+ }
+ return len;
+ case 0x01:
+ if (i830)
+ break;
+ instr_out(data, hw_offset, 0, "3DSTATE_SAMPLER_STATE\n");
+ instr_out(data, hw_offset, 1, "mask\n");
+ len = (data[0] & 0x0000003f) + 2;
+ i = 2;
+ for (sampler = 0; sampler <= 15; sampler++) {
+ if (data[1] & (1 << sampler)) {
+ if (i + 3 >= count)
+ BUFFER_FAIL(count, len, "3DSTATE_SAMPLER_STATE");
+ instr_out(data, hw_offset, i++, "sampler %d SS2\n",
+ sampler);
+ instr_out(data, hw_offset, i++, "sampler %d SS3\n",
+ sampler);
+ instr_out(data, hw_offset, i++, "sampler %d SS4\n",
+ sampler);
+ }
+ }
+ if (len != i) {
+ fprintf(out, "Bad count in 3DSTATE_SAMPLER_STATE\n");
+ (*failures)++;
+ }
+ return len;
+ case 0x85:
+ len = (data[0] & 0x0000000f) + 2;
+
+ if (len != 2)
+ fprintf(out, "Bad count in 3DSTATE_DEST_BUFFER_VARIABLES\n");
+ if (count < 2)
+ BUFFER_FAIL(count, len, "3DSTATE_DEST_BUFFER_VARIABLES");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_DEST_BUFFER_VARIABLES\n");
+
+ switch ((data[1] >> 8) & 0xf) {
+ case 0x0: format = "g8"; break;
+ case 0x1: format = "x1r5g5b5"; break;
+ case 0x2: format = "r5g6b5"; break;
+ case 0x3: format = "a8r8g8b8"; break;
+ case 0x4: format = "ycrcb_swapy"; break;
+ case 0x5: format = "ycrcb_normal"; break;
+ case 0x6: format = "ycrcb_swapuv"; break;
+ case 0x7: format = "ycrcb_swapuvy"; break;
+ case 0x8: format = "a4r4g4b4"; break;
+ case 0x9: format = "a1r5g5b5"; break;
+ case 0xa: format = "a2r10g10b10"; break;
+ default: format = "BAD"; break;
+ }
+ instr_out(data, hw_offset, 1, "%s format, early Z %sabled\n",
+ format,
+ (data[1] & (1 << 31)) ? "en" : "dis");
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d_1d) / sizeof(opcodes_3d_1d[0]);
+ opcode++)
+ {
+ if (opcodes_3d_1d[opcode].i830_only && !i830)
+ continue;
+
+ if (((data[0] & 0x00ff0000) >> 16) == opcodes_3d_1d[opcode].opcode) {
+ len = 1;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d_1d[opcode].name);
+ if (opcodes_3d_1d[opcode].max_len > 1) {
+ len = (data[0] & 0x0000ffff) + 2;
+ if (len < opcodes_3d_1d[opcode].min_len ||
+ len > opcodes_3d_1d[opcode].max_len)
+ {
+ fprintf(out, "Bad count in %s\n",
+ opcodes_3d_1d[opcode].name);
+ (*failures)++;
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d_1d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_3d_primitive(uint32_t *data, int count, uint32_t hw_offset,
+ int *failures)
+{
+ char immediate = (data[0] & (1 << 23)) == 0;
+ unsigned int len, i;
+ char *primtype;
+
+ switch ((data[0] >> 18) & 0xf) {
+ case 0x0: primtype = "TRILIST"; break;
+ case 0x1: primtype = "TRISTRIP"; break;
+ case 0x2: primtype = "TRISTRIP_REVERSE"; break;
+ case 0x3: primtype = "TRIFAN"; break;
+ case 0x4: primtype = "POLYGON"; break;
+ case 0x5: primtype = "LINELIST"; break;
+ case 0x6: primtype = "LINESTRIP"; break;
+ case 0x7: primtype = "RECTLIST"; break;
+ case 0x8: primtype = "POINTLIST"; break;
+ case 0x9: primtype = "DIB"; break;
+ case 0xa: primtype = "CLEAR_RECT"; break;
+ default: primtype = "unknown"; break;
+ }
+
+ /* XXX: 3DPRIM_DIB not supported */
+ if (immediate) {
+ len = (data[0] & 0x0003ffff) + 2;
+ instr_out(data, hw_offset, 0, "3DPRIMITIVE inline %s\n", primtype);
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DPRIMITIVE inline");
+ if (!saved_s2_set || !saved_s4_set) {
+ fprintf(out, "unknown vertex format\n");
+ for (i = 1; i < len; i++) {
+ instr_out(data, hw_offset, i,
+ " vertex data (%f float)\n",
+ int_as_float(data[i]));
+ }
+ } else {
+ unsigned int vertex = 0;
+ for (i = 1; i < len;) {
+ unsigned int tc;
+
+#define VERTEX_OUT(fmt, ...) do { \
+ if (i < len) \
+ instr_out(data, hw_offset, i, " V%d."fmt"\n", vertex, __VA_ARGS__); \
+ else \
+ fprintf(out, " missing data in V%d\n", vertex); \
+ i++; \
+} while (0)
+
+ VERTEX_OUT("X = %f", int_as_float(data[i]));
+ VERTEX_OUT("Y = %f", int_as_float(data[i]));
+		switch ((saved_s4 >> 6) & 0x7) {
+ case 0x1:
+ VERTEX_OUT("Z = %f", int_as_float(data[i]));
+ break;
+ case 0x2:
+ VERTEX_OUT("Z = %f", int_as_float(data[i]));
+ VERTEX_OUT("W = %f", int_as_float(data[i]));
+ break;
+ case 0x3:
+ break;
+ case 0x4:
+ VERTEX_OUT("W = %f", int_as_float(data[i]));
+ break;
+ default:
+ fprintf(out, "bad S4 position mask\n");
+ }
+
+ if (saved_s4 & (1 << 10)) {
+ VERTEX_OUT("color = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)",
+ data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 11)) {
+ VERTEX_OUT("spec = (A=0x%02x, R=0x%02x, G=0x%02x, "
+ "B=0x%02x)",
+ data[i] >> 24,
+ (data[i] >> 16) & 0xff,
+ (data[i] >> 8) & 0xff,
+ data[i] & 0xff);
+ }
+ if (saved_s4 & (1 << 12))
+ VERTEX_OUT("width = 0x%08x)", data[i]);
+
+ for (tc = 0; tc <= 7; tc++) {
+ switch ((saved_s2 >> (tc * 4)) & 0xf) {
+ case 0x0:
+ VERTEX_OUT("T%d.X = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.Y = %f", tc, int_as_float(data[i]));
+ break;
+ case 0x1:
+ VERTEX_OUT("T%d.X = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.Y = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.Z = %f", tc, int_as_float(data[i]));
+ break;
+ case 0x2:
+ VERTEX_OUT("T%d.X = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.Y = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.Z = %f", tc, int_as_float(data[i]));
+ VERTEX_OUT("T%d.W = %f", tc, int_as_float(data[i]));
+ break;
+ case 0x3:
+ VERTEX_OUT("T%d.X = %f", tc, int_as_float(data[i]));
+ break;
+ case 0x4:
+ VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
+ break;
+ case 0x5:
+ VERTEX_OUT("T%d.XY = 0x%08x half-float", tc, data[i]);
+ VERTEX_OUT("T%d.ZW = 0x%08x half-float", tc, data[i]);
+ break;
+ case 0xf:
+ break;
+ default:
+ fprintf(out, "bad S2.T%d format\n", tc);
+ }
+ }
+ vertex++;
+ }
+ }
+ } else {
+ /* indirect vertices */
+ len = data[0] & 0x0000ffff; /* index count */
+ if (data[0] & (1 << 17)) {
+ /* random vertex access */
+ if (count < (len + 1) / 2 + 1) {
+ BUFFER_FAIL(count, (len + 1) / 2 + 1,
+ "3DPRIMITIVE random indirect");
+ }
+ instr_out(data, hw_offset, 0,
+ "3DPRIMITIVE random indirect %s (%d)\n", primtype, len);
+ if (len == 0) {
+ /* vertex indices continue until 0xffff is found */
+ for (i = 1; i < count; i++) {
+ if ((data[i] & 0xffff) == 0xffff) {
+ instr_out(data, hw_offset, i,
+ " indices: (terminator)\n");
+ return i;
+ } else if ((data[i] >> 16) == 0xffff) {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, "
+ "(terminator)\n",
+ data[i] & 0xffff);
+ return i;
+ } else {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, 0x%04x\n",
+ data[i] & 0xffff, data[i] >> 16);
+ }
+ }
+ fprintf(out,
+ "3DPRIMITIVE: no terminator found in index buffer\n");
+ (*failures)++;
+ return count;
+ } else {
+ /* fixed size vertex index buffer */
+ for (i = 0; i < len; i += 2) {
+ if (i * 2 == len - 1) {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x\n",
+ data[i] & 0xffff);
+ } else {
+ instr_out(data, hw_offset, i,
+ " indices: 0x%04x, 0x%04x\n",
+ data[i] & 0xffff, data[i] >> 16);
+ }
+ }
+ }
+ return (len + 1) / 2 + 1;
+ } else {
+ /* sequential vertex access */
+ if (count < 2)
+ BUFFER_FAIL(count, 2, "3DPRIMITIVE seq indirect");
+ instr_out(data, hw_offset, 0,
+ "3DPRIMITIVE sequential indirect %s, %d starting from "
+ "%d\n", primtype, len, data[1] & 0xffff);
+ instr_out(data, hw_offset, 1, " start\n");
+ return 2;
+ }
+ }
+
+ return len;
+}
+
+static int
+decode_3d(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x06, 1, 1, "3DSTATE_ANTI_ALIASING" },
+ { 0x08, 1, 1, "3DSTATE_BACKFACE_STENCIL_OPS" },
+ { 0x09, 1, 1, "3DSTATE_BACKFACE_STENCIL_MASKS" },
+ { 0x16, 1, 1, "3DSTATE_COORD_SET_BINDINGS" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x0b, 1, 1, "3DSTATE_INDEPENDENT_ALPHA_BLEND" },
+ { 0x0d, 1, 1, "3DSTATE_MODES_4" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
+ };
+
+ switch ((data[0] & 0x1f000000) >> 24) {
+ case 0x1f:
+ return decode_3d_primitive(data, count, hw_offset, failures);
+ case 0x1d:
+ return decode_3d_1d(data, count, hw_offset, failures, 0);
+ case 0x1c:
+ return decode_3d_1c(data, count, hw_offset, failures);
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ fprintf(out, "Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static const char *
+get_965_surfacetype(unsigned int surfacetype)
+{
+ switch (surfacetype) {
+ case 0: return "1D";
+ case 1: return "2D";
+ case 2: return "3D";
+ case 3: return "CUBE";
+ case 4: return "BUFFER";
+ case 7: return "NULL";
+ default: return "unknown";
+ }
+}
+
+static const char *
+get_965_depthformat(unsigned int depthformat)
+{
+ switch (depthformat) {
+ case 0: return "s8_z24float";
+ case 1: return "z32float";
+ case 2: return "z24s8";
+ case 5: return "z16";
+ default: return "unknown";
+ }
+}
+
+static const char *
+get_965_element_component(uint32_t data, int component)
+{
+ uint32_t component_control = (data >> (16 + (3 - component) * 4)) & 0x7;
+
+ switch (component_control) {
+ case 0:
+ return "nostore";
+ case 1:
+ switch (component) {
+ case 0: return "X";
+ case 1: return "Y";
+ case 2: return "Z";
+ case 3: return "W";
+ default: return "fail";
+ }
+ case 2:
+ return "0.0";
+ case 3:
+ return "1.0";
+ case 4:
+ return "0x1";
+ case 5:
+ return "VID";
+ default:
+ return "fail";
+ }
+}
+
+static const char *
+get_965_prim_type(uint32_t data)
+{
+ uint32_t primtype = (data >> 10) & 0x1f;
+
+ switch (primtype) {
+ case 0x01: return "point list";
+ case 0x02: return "line list";
+ case 0x03: return "line strip";
+ case 0x04: return "tri list";
+ case 0x05: return "tri strip";
+ case 0x06: return "tri fan";
+ case 0x07: return "quad list";
+ case 0x08: return "quad strip";
+ case 0x09: return "line list adj";
+ case 0x0a: return "line strip adj";
+ case 0x0b: return "tri list adj";
+ case 0x0c: return "tri strip adj";
+ case 0x0d: return "tri strip reverse";
+ case 0x0e: return "polygon";
+ case 0x0f: return "rect list";
+ case 0x10: return "line loop";
+ case 0x11: return "point list bf";
+ case 0x12: return "line strip cont";
+ case 0x13: return "line strip bf";
+ case 0x14: return "line strip cont bf";
+ case 0x15: return "tri fan no stipple";
+ default: return "fail";
+ }
+}
+
+static int
+decode_3d_965(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode, len;
+ int i;
+ char *desc1;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x6000, 3, 3, "URB_FENCE" },
+ { 0x6001, 2, 2, "CS_URB_STATE" },
+ { 0x6002, 2, 2, "CONSTANT_BUFFER" },
+ { 0x6101, 6, 6, "STATE_BASE_ADDRESS" },
+ { 0x6102, 2, 2 , "STATE_SIP" },
+ { 0x6104, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x680b, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x6904, 1, 1, "3DSTATE_PIPELINE_SELECT" },
+ { 0x7800, 7, 7, "3DSTATE_PIPELINED_POINTERS" },
+ { 0x7801, 6, 6, "3DSTATE_BINDING_TABLE_POINTERS" },
+ { 0x780b, 1, 1, "3DSTATE_VF_STATISTICS" },
+ { 0x7808, 5, 257, "3DSTATE_VERTEX_BUFFERS" },
+ { 0x7809, 3, 256, "3DSTATE_VERTEX_ELEMENTS" },
+ { 0x780a, 3, 3, "3DSTATE_INDEX_BUFFER" },
+ { 0x7900, 4, 4, "3DSTATE_DRAWING_RECTANGLE" },
+ { 0x7901, 5, 5, "3DSTATE_CONSTANT_COLOR" },
+ { 0x7905, 5, 7, "3DSTATE_DEPTH_BUFFER" },
+ { 0x7906, 2, 2, "3DSTATE_POLY_STIPPLE_OFFSET" },
+ { 0x7907, 33, 33, "3DSTATE_POLY_STIPPLE_PATTERN" },
+ { 0x7908, 3, 3, "3DSTATE_LINE_STIPPLE" },
+ { 0x7909, 2, 2, "3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP" },
+ { 0x790a, 3, 3, "3DSTATE_AA_LINE_PARAMETERS" },
+ { 0x7b00, 6, 6, "3DPRIMITIVE" },
+ { 0x780e, 4, 4, "3DSTATE_CC_STATE_POINTERS" },
+ { 0x7810, 6, 6, "3DSTATE_VS_STATE" },
+ { 0x7811, 6, 6, "3DSTATE_GS_STATE" },
+ { 0x7812, 4, 4, "3DSTATE_CLIP_STATE" },
+ { 0x7815, 5, 5, "3DSTATE_CONSTANT_VS_STATE" },
+ { 0x7816, 5, 5, "3DSTATE_CONSTANT_GS_STATE" },
+ };
+
+ len = (data[0] & 0x0000ffff) + 2;
+
+ switch ((data[0] & 0xffff0000) >> 16) {
+ case 0x6101:
+ if (len != 6)
+ fprintf(out, "Bad count in STATE_BASE_ADDRESS\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "STATE_BASE_ADDRESS");
+
+ instr_out(data, hw_offset, 0,
+ "STATE_BASE_ADDRESS\n");
+
+ if (data[1] & 1) {
+ instr_out(data, hw_offset, 1, "General state at 0x%08x\n",
+ data[1] & ~1);
+ } else
+ instr_out(data, hw_offset, 1, "General state not updated\n");
+
+ if (data[2] & 1) {
+ instr_out(data, hw_offset, 2, "Surface state at 0x%08x\n",
+ data[2] & ~1);
+ } else
+ instr_out(data, hw_offset, 2, "Surface state not updated\n");
+
+ if (data[3] & 1) {
+ instr_out(data, hw_offset, 3, "Indirect state at 0x%08x\n",
+ data[3] & ~1);
+ } else
+ instr_out(data, hw_offset, 3, "Indirect state not updated\n");
+
+ if (data[4] & 1) {
+ instr_out(data, hw_offset, 4, "General state upper bound 0x%08x\n",
+ data[4] & ~1);
+ } else
+ instr_out(data, hw_offset, 4, "General state not updated\n");
+
+ if (data[5] & 1) {
+ instr_out(data, hw_offset, 5, "Indirect state upper bound 0x%08x\n",
+ data[5] & ~1);
+ } else
+ instr_out(data, hw_offset, 5, "Indirect state not updated\n");
+
+ return len;
+ case 0x7800:
+ if (len != 7)
+ fprintf(out, "Bad count in 3DSTATE_PIPELINED_POINTERS\n");
+ if (count < 7)
+ BUFFER_FAIL(count, len, "3DSTATE_PIPELINED_POINTERS");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_PIPELINED_POINTERS\n");
+ instr_out(data, hw_offset, 1, "VS state\n");
+ instr_out(data, hw_offset, 2, "GS state\n");
+ instr_out(data, hw_offset, 3, "Clip state\n");
+ instr_out(data, hw_offset, 4, "SF state\n");
+ instr_out(data, hw_offset, 5, "WM state\n");
+ instr_out(data, hw_offset, 6, "CC state\n");
+ return len;
+ case 0x7801:
+ if (len != 6)
+ fprintf(out, "Bad count in 3DSTATE_BINDING_TABLE_POINTERS\n");
+ if (count < 6)
+ BUFFER_FAIL(count, len, "3DSTATE_BINDING_TABLE_POINTERS");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_BINDING_TABLE_POINTERS\n");
+ instr_out(data, hw_offset, 1, "VS binding table\n");
+ instr_out(data, hw_offset, 2, "GS binding table\n");
+ instr_out(data, hw_offset, 3, "Clip binding table\n");
+ instr_out(data, hw_offset, 4, "SF binding table\n");
+ instr_out(data, hw_offset, 5, "WM binding table\n");
+
+ return len;
+
+ case 0x7808:
+ len = (data[0] & 0xff) + 2;
+ if ((len - 1) % 4 != 0)
+ fprintf(out, "Bad count in 3DSTATE_VERTEX_BUFFERS\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DSTATE_VERTEX_BUFFERS");
+ instr_out(data, hw_offset, 0, "3DSTATE_VERTEX_BUFFERS\n");
+
+ for (i = 1; i < len;) {
+ instr_out(data, hw_offset, i, "buffer %d: %s, pitch %db\n",
+ data[i] >> 27,
+ data[i] & (1 << 26) ? "random" : "sequential",
+ data[i] & 0x07ff);
+ i++;
+ instr_out(data, hw_offset, i++, "buffer address\n");
+ instr_out(data, hw_offset, i++, "max index\n");
+ instr_out(data, hw_offset, i++, "mbz\n");
+ }
+ return len;
+
+ case 0x7809:
+ len = (data[0] & 0xff) + 2;
+ if ((len + 1) % 2 != 0)
+ fprintf(out, "Bad count in 3DSTATE_VERTEX_ELEMENTS\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DSTATE_VERTEX_ELEMENTS");
+ instr_out(data, hw_offset, 0, "3DSTATE_VERTEX_ELEMENTS\n");
+
+ for (i = 1; i < len;) {
+ instr_out(data, hw_offset, i, "buffer %d: %svalid, type 0x%04x, "
+ "src offset 0x%04x bytes\n",
+ data[i] >> 27,
+ data[i] & (1 << 26) ? "" : "in",
+ (data[i] >> 16) & 0x1ff,
+ data[i] & 0x07ff);
+ i++;
+ instr_out(data, hw_offset, i, "(%s, %s, %s, %s), "
+ "dst offset 0x%02x bytes\n",
+ get_965_element_component(data[i], 0),
+ get_965_element_component(data[i], 1),
+ get_965_element_component(data[i], 2),
+ get_965_element_component(data[i], 3),
+ (data[i] & 0xff) * 4);
+ i++;
+ }
+ return len;
+
+ case 0x780a:
+ len = (data[0] & 0xff) + 2;
+ if (len != 3)
+ fprintf(out, "Bad count in 3DSTATE_INDEX_BUFFER\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DSTATE_INDEX_BUFFER");
+ instr_out(data, hw_offset, 0, "3DSTATE_INDEX_BUFFER\n");
+ instr_out(data, hw_offset, 1, "beginning buffer address\n");
+ instr_out(data, hw_offset, 2, "ending buffer address\n");
+ return len;
+
+ case 0x7900:
+ if (len != 4)
+ fprintf(out, "Bad count in 3DSTATE_DRAWING_RECTANGLE\n");
+ if (count < 4)
+ BUFFER_FAIL(count, len, "3DSTATE_DRAWING_RECTANGLE");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_DRAWING_RECTANGLE\n");
+ instr_out(data, hw_offset, 1, "top left: %d,%d\n",
+ data[1] & 0xffff,
+ (data[1] >> 16) & 0xffff);
+ instr_out(data, hw_offset, 2, "bottom right: %d,%d\n",
+ data[2] & 0xffff,
+ (data[2] >> 16) & 0xffff);
+ instr_out(data, hw_offset, 3, "origin: %d,%d\n",
+ (int)data[3] & 0xffff,
+ ((int)data[3] >> 16) & 0xffff);
+
+ return len;
+
+ case 0x7905:
+ if (len < 5 || len > 7)
+ fprintf(out, "Bad count in 3DSTATE_DEPTH_BUFFER\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DSTATE_DEPTH_BUFFER");
+
+ instr_out(data, hw_offset, 0,
+ "3DSTATE_DEPTH_BUFFER\n");
+ instr_out(data, hw_offset, 1, "%s, %s, pitch = %d bytes, %stiled\n",
+ get_965_surfacetype(data[1] >> 29),
+ get_965_depthformat((data[1] >> 18) & 0x7),
+ (data[1] & 0x0001ffff) + 1,
+ data[1] & (1 << 27) ? "" : "not ");
+ instr_out(data, hw_offset, 2, "depth offset\n");
+ instr_out(data, hw_offset, 3, "%dx%d\n",
+ ((data[3] & 0x0007ffc0) >> 6) + 1,
+ ((data[3] & 0xfff80000) >> 19) + 1);
+ instr_out(data, hw_offset, 4, "volume depth\n");
+ if (len == 6)
+ instr_out(data, hw_offset, 5, "\n");
+ if (len == 7)
+ instr_out(data, hw_offset, 6, "render target view extent\n");
+
+ return len;
+
+ case 0x7a00:
+ len = (data[0] & 0xff) + 2;
+ if (len != 4)
+ fprintf(out, "Bad count in PIPE_CONTROL\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "PIPE_CONTROL");
+
+ switch ((data[0] >> 14) & 0x3) {
+ case 0: desc1 = "no write"; break;
+ case 1: desc1 = "qword write"; break;
+ case 2: desc1 = "PS_DEPTH_COUNT write"; break;
+ case 3: desc1 = "TIMESTAMP write"; break;
+ }
+ instr_out(data, hw_offset, 0,
+ "PIPE_CONTROL: %s, %sdepth stall, %sRC write flush, "
+ "%sinst flush, %stexture flush\n",
+ desc1,
+ data[0] & (1 << 13) ? "" : "no ",
+ data[0] & (1 << 12) ? "" : "no ",
+ data[0] & (1 << 11) ? "" : "no ",
+ data[0] & (1 << 9) ? "" : "no ");
+ instr_out(data, hw_offset, 1, "destination address\n");
+ instr_out(data, hw_offset, 2, "immediate dword low\n");
+ instr_out(data, hw_offset, 3, "immediate dword high\n");
+ return len;
+
+ case 0x7b00:
+ len = (data[0] & 0xff) + 2;
+ if (len != 6)
+ fprintf(out, "Bad count in 3DPRIMITIVE\n");
+ if (count < len)
+ BUFFER_FAIL(count, len, "3DPRIMITIVE");
+
+ instr_out(data, hw_offset, 0,
+ "3DPRIMITIVE: %s %s\n",
+ get_965_prim_type(data[0]),
+ (data[0] & (1 << 15)) ? "random" : "sequential");
+ instr_out(data, hw_offset, 1, "vertex count\n");
+ instr_out(data, hw_offset, 2, "start vertex\n");
+ instr_out(data, hw_offset, 3, "instance count\n");
+ instr_out(data, hw_offset, 4, "start instance\n");
+ instr_out(data, hw_offset, 5, "index bias\n");
+ return len;
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0xffff0000) >> 16 == opcodes_3d[opcode].opcode) {
+ unsigned int i;
+ len = 1;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ fprintf(out, "Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+static int
+decode_3d_i830(uint32_t *data, int count, uint32_t hw_offset, int *failures)
+{
+ unsigned int opcode;
+
+ struct {
+ uint32_t opcode;
+ int min_len;
+ int max_len;
+ char *name;
+ } opcodes_3d[] = {
+ { 0x02, 1, 1, "3DSTATE_MODES_3" },
+ { 0x03, 1, 1, "3DSTATE_ENABLES_1"},
+ { 0x04, 1, 1, "3DSTATE_ENABLES_2"},
+ { 0x05, 1, 1, "3DSTATE_VFT0"},
+ { 0x06, 1, 1, "3DSTATE_AA"},
+ { 0x07, 1, 1, "3DSTATE_RASTERIZATION_RULES" },
+ { 0x08, 1, 1, "3DSTATE_MODES_1" },
+ { 0x09, 1, 1, "3DSTATE_STENCIL_TEST" },
+ { 0x0a, 1, 1, "3DSTATE_VFT1"},
+ { 0x0b, 1, 1, "3DSTATE_INDPT_ALPHA_BLEND" },
+ { 0x0c, 1, 1, "3DSTATE_MODES_5" },
+ { 0x0d, 1, 1, "3DSTATE_MAP_BLEND_OP" },
+ { 0x0e, 1, 1, "3DSTATE_MAP_BLEND_ARG" },
+ { 0x0f, 1, 1, "3DSTATE_MODES_2" },
+ { 0x15, 1, 1, "3DSTATE_FOG_COLOR" },
+ { 0x16, 1, 1, "3DSTATE_MODES_4" },
+ };
+
+ switch ((data[0] & 0x1f000000) >> 24) {
+ case 0x1f:
+ return decode_3d_primitive(data, count, hw_offset, failures);
+ case 0x1d:
+ return decode_3d_1d(data, count, hw_offset, failures, 1);
+ case 0x1c:
+ return decode_3d_1c(data, count, hw_offset, failures);
+ }
+
+ for (opcode = 0; opcode < sizeof(opcodes_3d) / sizeof(opcodes_3d[0]);
+ opcode++) {
+ if ((data[0] & 0x1f000000) >> 24 == opcodes_3d[opcode].opcode) {
+ unsigned int len = 1, i;
+
+ instr_out(data, hw_offset, 0, "%s\n", opcodes_3d[opcode].name);
+ if (opcodes_3d[opcode].max_len > 1) {
+ len = (data[0] & 0xff) + 2;
+ if (len < opcodes_3d[opcode].min_len ||
+ len > opcodes_3d[opcode].max_len)
+ {
+ fprintf(out, "Bad count in %s\n", opcodes_3d[opcode].name);
+ }
+ }
+
+ for (i = 1; i < len; i++) {
+ if (i >= count)
+ BUFFER_FAIL(count, len, opcodes_3d[opcode].name);
+ instr_out(data, hw_offset, i, "dword %d\n", i);
+ }
+ return len;
+ }
+ }
+
+ instr_out(data, hw_offset, 0, "3D UNKNOWN\n");
+ (*failures)++;
+ return 1;
+}
+
+/**
+ * Decodes an Intel batch buffer (i830 through i965), writing the output
+ * to stderr.
+ *
+ * \param data batch buffer contents
+ * \param count number of DWORDs to decode in the batch buffer
+ * \param hw_offset hardware address for the buffer
+ * \param devid PCI device id, used to select the per-generation decoder
+ *
+ * \return number of decode failures encountered
+ */
+int
+intel_decode(uint32_t *data, int count, uint32_t hw_offset, uint32_t devid)
+{
+ int index = 0;
+ int failures = 0;
+
+ out = stderr;
+
+ while (index < count) {
+ switch ((data[index] & 0xe0000000) >> 29) {
+ case 0x0:
+ index += decode_mi(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ break;
+ case 0x2:
+ index += decode_2d(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ break;
+ case 0x3:
+ if (IS_965(devid)) {
+ index += decode_3d_965(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ } else if (IS_9XX(devid)) {
+ index += decode_3d(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ } else {
+ index += decode_3d_i830(data + index, count - index,
+ hw_offset + index * 4, &failures);
+ }
+ break;
+ default:
+ instr_out(data, hw_offset, index, "UNKNOWN\n");
+ failures++;
+ index++;
+ break;
+ }
+ fflush(out);
+ }
+
+ return failures;
+}
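+
+/*
+ * Illustrative usage sketch (not part of the driver): decoding a tiny
+ * hand-built batch.  The device id below (0x2a02, a 965GM) is only an
+ * example value; any id recognized by intel_chipset.h works.
+ *
+ *    uint32_t batch[] = {
+ *       0x02000000,   // MI_FLUSH
+ *       0x05000000,   // MI_BATCH_BUFFER_END
+ *    };
+ *    int failures = intel_decode(batch, 2, 0, 0x2a02);
+ */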
+
+void intel_decode_context_reset(void)
+{
+ saved_s2_set = 0;
+ saved_s4_set = 1;
+}
+
diff --git a/src/mesa/drivers/dri/intel/intel_decode.h b/src/mesa/drivers/dri/intel/intel_decode.h
new file mode 100644
index 0000000000..c50644a46b
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_decode.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+int intel_decode(uint32_t *data, int count, uint32_t hw_offset, uint32_t devid);
+void intel_decode_context_reset(void);
diff --git a/src/mesa/drivers/dri/intel/intel_extensions.c b/src/mesa/drivers/dri/intel/intel_extensions.c
new file mode 100644
index 0000000000..edba1fc2f2
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_extensions.c
@@ -0,0 +1,229 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_chipset.h"
+#include "intel_context.h"
+#include "intel_extensions.h"
+#include "utils.h"
+
+
+#define need_GL_ARB_copy_buffer
+#define need_GL_ARB_draw_elements_base_vertex
+#define need_GL_ARB_framebuffer_object
+#define need_GL_ARB_map_buffer_range
+#define need_GL_ARB_occlusion_query
+#define need_GL_ARB_point_parameters
+#define need_GL_ARB_shader_objects
+#define need_GL_ARB_sync
+#define need_GL_ARB_vertex_array_object
+#define need_GL_ARB_vertex_program
+#define need_GL_ARB_vertex_shader
+#define need_GL_ARB_window_pos
+#define need_GL_EXT_blend_color
+#define need_GL_EXT_blend_equation_separate
+#define need_GL_EXT_blend_func_separate
+#define need_GL_EXT_blend_minmax
+#define need_GL_EXT_cull_vertex
+#define need_GL_EXT_draw_buffers2
+#define need_GL_EXT_fog_coord
+#define need_GL_EXT_framebuffer_object
+#define need_GL_EXT_framebuffer_blit
+#define need_GL_EXT_gpu_program_parameters
+#define need_GL_EXT_point_parameters
+#define need_GL_EXT_provoking_vertex
+#define need_GL_EXT_secondary_color
+#define need_GL_EXT_stencil_two_side
+#define need_GL_EXT_timer_query
+#define need_GL_APPLE_vertex_array_object
+#define need_GL_APPLE_object_purgeable
+#define need_GL_ATI_separate_stencil
+#define need_GL_ATI_envmap_bumpmap
+#define need_GL_NV_point_sprite
+#define need_GL_NV_vertex_program
+#define need_GL_OES_EGL_image
+#define need_GL_VERSION_2_0
+#define need_GL_VERSION_2_1
+
+#include "main/remap_helper.h"
+
+
+/**
+ * Extension strings exported by the intel driver.
+ *
+ * Extensions supported by all chips supported by i830_dri, i915_dri, or
+ * i965_dri.
+ */
+static const struct dri_extension card_extensions[] = {
+ { "GL_ARB_copy_buffer", GL_ARB_copy_buffer_functions },
+ { "GL_ARB_draw_elements_base_vertex", GL_ARB_draw_elements_base_vertex_functions },
+ { "GL_ARB_half_float_pixel", NULL },
+ { "GL_ARB_map_buffer_range", GL_ARB_map_buffer_range_functions },
+ { "GL_ARB_multitexture", NULL },
+ { "GL_ARB_pixel_buffer_object", NULL },
+ { "GL_ARB_point_parameters", GL_ARB_point_parameters_functions },
+ { "GL_ARB_point_sprite", NULL },
+ { "GL_ARB_shader_objects", GL_ARB_shader_objects_functions },
+ { "GL_ARB_shading_language_100", GL_VERSION_2_0_functions },
+ { "GL_ARB_shading_language_120", GL_VERSION_2_1_functions },
+ { "GL_ARB_sync", GL_ARB_sync_functions },
+ { "GL_ARB_texture_border_clamp", NULL },
+ { "GL_ARB_texture_cube_map", NULL },
+ { "GL_ARB_texture_env_add", NULL },
+ { "GL_ARB_texture_env_combine", NULL },
+ { "GL_ARB_texture_env_crossbar", NULL },
+ { "GL_ARB_texture_env_dot3", NULL },
+ { "GL_ARB_texture_mirrored_repeat", NULL },
+ { "GL_ARB_texture_rectangle", NULL },
+ { "GL_ARB_vertex_array_object", GL_ARB_vertex_array_object_functions},
+ { "GL_ARB_vertex_program", GL_ARB_vertex_program_functions },
+ { "GL_ARB_vertex_shader", GL_ARB_vertex_shader_functions },
+ { "GL_ARB_window_pos", GL_ARB_window_pos_functions },
+ { "GL_EXT_blend_color", GL_EXT_blend_color_functions },
+ { "GL_EXT_blend_equation_separate", GL_EXT_blend_equation_separate_functions },
+ { "GL_EXT_blend_func_separate", GL_EXT_blend_func_separate_functions },
+ { "GL_EXT_blend_minmax", GL_EXT_blend_minmax_functions },
+ { "GL_EXT_blend_logic_op", NULL },
+ { "GL_EXT_blend_subtract", NULL },
+ { "GL_EXT_cull_vertex", GL_EXT_cull_vertex_functions },
+ { "GL_EXT_framebuffer_blit", GL_EXT_framebuffer_blit_functions },
+ { "GL_EXT_framebuffer_object", GL_EXT_framebuffer_object_functions },
+ { "GL_EXT_fog_coord", GL_EXT_fog_coord_functions },
+ { "GL_EXT_gpu_program_parameters", GL_EXT_gpu_program_parameters_functions },
+ { "GL_EXT_packed_depth_stencil", NULL },
+ { "GL_EXT_provoking_vertex", GL_EXT_provoking_vertex_functions },
+ { "GL_EXT_secondary_color", GL_EXT_secondary_color_functions },
+ { "GL_EXT_stencil_wrap", NULL },
+ { "GL_EXT_texture_edge_clamp", NULL },
+ { "GL_EXT_texture_env_combine", NULL },
+ { "GL_EXT_texture_env_dot3", NULL },
+ { "GL_EXT_texture_filter_anisotropic", NULL },
+ { "GL_EXT_texture_lod_bias", NULL },
+ { "GL_3DFX_texture_compression_FXT1", NULL },
+ { "GL_APPLE_client_storage", NULL },
+ { "GL_APPLE_object_purgeable", GL_APPLE_object_purgeable_functions },
+ { "GL_APPLE_vertex_array_object", GL_APPLE_vertex_array_object_functions},
+ { "GL_MESA_pack_invert", NULL },
+ { "GL_MESA_ycbcr_texture", NULL },
+ { "GL_NV_blend_square", NULL },
+ { "GL_NV_vertex_program", GL_NV_vertex_program_functions },
+ { "GL_NV_vertex_program1_1", NULL },
+ { "GL_SGIS_generate_mipmap", NULL },
+#if FEATURE_OES_EGL_image
+ { "GL_OES_EGL_image", GL_OES_EGL_image_functions },
+#endif
+ { NULL, NULL }
+};
+
+
+/** i915 / i945-only extensions */
+static const struct dri_extension i915_extensions[] = {
+ { "GL_ARB_depth_texture", NULL },
+ { "GL_ARB_fragment_program", NULL },
+ { "GL_ARB_shadow", NULL },
+ { "GL_ARB_texture_non_power_of_two", NULL },
+ { "GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions },
+ { "GL_ATI_texture_env_combine3", NULL },
+ { "GL_EXT_shadow_funcs", NULL },
+ { "GL_EXT_stencil_two_side", GL_EXT_stencil_two_side_functions },
+ { "GL_NV_texture_env_combine4", NULL },
+ { NULL, NULL }
+};
+
+
+/** i965-only extensions */
+static const struct dri_extension brw_extensions[] = {
+ { "GL_ARB_depth_clamp", NULL },
+ { "GL_ARB_depth_texture", NULL },
+ { "GL_ARB_fragment_coord_conventions", NULL },
+ { "GL_ARB_fragment_program", NULL },
+ { "GL_ARB_fragment_program_shadow", NULL },
+ { "GL_ARB_fragment_shader", NULL },
+ { "GL_ARB_framebuffer_object", GL_ARB_framebuffer_object_functions},
+ { "GL_ARB_half_float_vertex", NULL },
+ { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions },
+ { "GL_ARB_point_sprite", NULL },
+ { "GL_ARB_seamless_cube_map", NULL },
+ { "GL_ARB_shadow", NULL },
+ { "GL_MESA_texture_signed_rgba", NULL },
+ { "GL_ARB_texture_non_power_of_two", NULL },
+ { "GL_EXT_draw_buffers2", GL_EXT_draw_buffers2_functions },
+ { "GL_EXT_shadow_funcs", NULL },
+ { "GL_EXT_stencil_two_side", GL_EXT_stencil_two_side_functions },
+ { "GL_EXT_texture_sRGB", NULL },
+ { "GL_EXT_texture_swizzle", NULL },
+ { "GL_EXT_vertex_array_bgra", NULL },
+ { "GL_ATI_envmap_bumpmap", GL_ATI_envmap_bumpmap_functions },
+ { "GL_ATI_separate_stencil", GL_ATI_separate_stencil_functions },
+ { "GL_ATI_texture_env_combine3", NULL },
+ { "GL_NV_texture_env_combine4", NULL },
+ { NULL, NULL }
+};
+
+static const struct dri_extension ironlake_extensions[] = {
+ { "GL_EXT_timer_query", GL_EXT_timer_query_functions },
+   { NULL, NULL }
+};
+
+static const struct dri_extension arb_oq_extensions[] = {
+ { "GL_ARB_occlusion_query", GL_ARB_occlusion_query_functions },
+ { NULL, NULL }
+};
+
+
+static const struct dri_extension fragment_shader_extensions[] = {
+ { "GL_ARB_fragment_shader", NULL },
+ { NULL, NULL }
+};
+
+/**
+ * Initializes the potential list of extensions if ctx == NULL, or actually
+ * enables the extensions for a context.
+ */
+void
+intelInitExtensions(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+
+ /* Disable imaging extension until convolution is working in teximage paths.
+ */
+ driInitExtensions(ctx, card_extensions, GL_FALSE);
+
+ if (intel->gen >= 5)
+ driInitExtensions(ctx, ironlake_extensions, GL_FALSE);
+
+ if (intel->gen >= 4)
+ driInitExtensions(ctx, brw_extensions, GL_FALSE);
+
+ if (intel->gen == 3) {
+ driInitExtensions(ctx, i915_extensions, GL_FALSE);
+
+ if (driQueryOptionb(&intel->optionCache, "fragment_shader"))
+ driInitExtensions(ctx, fragment_shader_extensions, GL_FALSE);
+
+ if (driQueryOptionb(&intel->optionCache, "stub_occlusion_query"))
+ driInitExtensions(ctx, arb_oq_extensions, GL_FALSE);
+ }
+}
diff --git a/src/mesa/drivers/dri/intel/intel_extensions.h b/src/mesa/drivers/dri/intel/intel_extensions.h
new file mode 100644
index 0000000000..236442a4d6
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_extensions.h
@@ -0,0 +1,39 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_EXTENSIONS_H
+#define INTEL_EXTENSIONS_H
+
+
+extern void
+intelInitExtensions(GLcontext *ctx);
+
+extern void
+intelInitExtensionsES2(GLcontext *ctx);
+
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_extensions_es2.c b/src/mesa/drivers/dri/intel/intel_extensions_es2.c
new file mode 100644
index 0000000000..baf8e13001
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_extensions_es2.c
@@ -0,0 +1,94 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/extensions.h"
+
+#include "intel_extensions.h"
+#include "utils.h"
+
+static const char *es2_extensions[] = {
+ /* Used by mesa internally (cf all_mesa_extensions in ../common/utils.c) */
+ "GL_ARB_draw_buffers",
+ "GL_ARB_multisample",
+ "GL_ARB_texture_compression",
+ "GL_ARB_transpose_matrix",
+ "GL_ARB_vertex_buffer_object",
+ "GL_ARB_window_pos",
+ "GL_EXT_blend_func_separate",
+ "GL_EXT_compiled_vertex_array",
+ "GL_EXT_multi_draw_arrays",
+ "GL_EXT_polygon_offset",
+ "GL_EXT_texture_object",
+ "GL_EXT_vertex_array",
+ "GL_IBM_multimode_draw_arrays",
+ "GL_MESA_window_pos",
+ "GL_NV_vertex_program",
+
+ /* Required by GLES2 */
+ "GL_ARB_fragment_program",
+ "GL_ARB_fragment_shader",
+ "GL_ARB_multitexture",
+ "GL_ARB_shader_objects",
+ "GL_ARB_texture_cube_map",
+ "GL_ARB_texture_mirrored_repeat",
+ "GL_ARB_texture_non_power_of_two",
+ "GL_ARB_vertex_shader",
+ "GL_EXT_blend_color",
+ "GL_EXT_blend_equation_separate",
+ "GL_EXT_blend_minmax",
+ "GL_EXT_blend_subtract",
+ "GL_EXT_stencil_wrap",
+
+ /* Optional GLES2 */
+ "GL_ARB_framebuffer_object",
+ "GL_EXT_texture_filter_anisotropic",
+ "GL_ARB_depth_texture",
+ "GL_EXT_packed_depth_stencil",
+ "GL_EXT_framebuffer_object",
+
+#if FEATURE_OES_EGL_image
+ "GL_OES_EGL_image",
+#endif
+
+ NULL,
+};
+
+/**
+ * Enables the GLES2 extension set for a context.
+ */
+void
+intelInitExtensionsES2(GLcontext *ctx)
+{
+ int i;
+
+ /* Can't use driInitExtensions() since it uses extensions from
+ * main/remap_helper.h when called the first time. */
+
+ for (i = 0; es2_extensions[i]; i++)
+ _mesa_enable_extension(ctx, es2_extensions[i]);
+}
diff --git a/src/mesa/drivers/dri/intel/intel_fbo.c b/src/mesa/drivers/dri/intel/intel_fbo.c
new file mode 100644
index 0000000000..4a83886fc1
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_fbo.c
@@ -0,0 +1,697 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/imports.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "main/fbobject.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/context.h"
+#include "main/texrender.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_FBO
+
+
+/**
+ * Create a new framebuffer object.
+ */
+static struct gl_framebuffer *
+intel_new_framebuffer(GLcontext * ctx, GLuint name)
+{
+ /* Only drawable state in intel_framebuffer at this time, just use Mesa's
+ * class
+ */
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+
+/** Called by gl_renderbuffer::Delete() */
+static void
+intel_delete_renderbuffer(struct gl_renderbuffer *rb)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ ASSERT(irb);
+
+ if (intel && irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ free(irb);
+}
+
+
+/**
+ * Return a pointer to a specific pixel in a renderbuffer.
+ */
+static void *
+intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLint x, GLint y)
+{
+ /* By returning NULL we force all software rendering to go through
+ * the span routines.
+ */
+ return NULL;
+}
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ int cpp;
+
+ ASSERT(rb->Name != 0);
+
+ switch (internalFormat) {
+ case GL_R3_G3_B2:
+ case GL_RGB4:
+ case GL_RGB5:
+ rb->Format = MESA_FORMAT_RGB565;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ break;
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ rb->Format = MESA_FORMAT_XRGB8888;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ break;
+ case GL_RGBA:
+ case GL_RGBA2:
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ rb->Format = MESA_FORMAT_ARGB8888;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ break;
+ case GL_ALPHA:
+ case GL_ALPHA8:
+ rb->Format = MESA_FORMAT_A8;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* alloc a depth+stencil buffer */
+ rb->Format = MESA_FORMAT_S8_Z24;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ rb->Format = MESA_FORMAT_Z16;
+ rb->DataType = GL_UNSIGNED_SHORT;
+ break;
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ rb->Format = MESA_FORMAT_S8_Z24;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ rb->Format = MESA_FORMAT_S8_Z24;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ break;
+ default:
+ _mesa_problem(ctx,
+ "Unexpected format in intel_alloc_renderbuffer_storage");
+ return GL_FALSE;
+ }
+
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
+ cpp = _mesa_get_format_bytes(rb->Format);
+
+ intel_flush(ctx);
+
+ /* free old region */
+ if (irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ /* allocate new memory region/renderbuffer */
+
+ /* alloc hardware renderbuffer */
+ DBG("Allocating %d x %d Intel RBO\n", width, height);
+
+ irb->region = intel_region_alloc(intel, I915_TILING_NONE, cpp,
+ width, height, GL_TRUE);
+ if (!irb->region)
+ return GL_FALSE; /* out of memory? */
+
+ ASSERT(irb->region->buffer);
+
+ rb->Width = width;
+ rb->Height = height;
+
+ return GL_TRUE;
+}
+
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_renderbuffer_storage(GLcontext *ctx,
+ struct gl_renderbuffer *rb,
+ void *image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb;
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(intel->driContext, image_handle,
+ intel->driContext->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ irb = intel_renderbuffer(rb);
+ if (irb->region)
+ intel_region_release(&irb->region);
+ intel_region_reference(&irb->region, image->region);
+
+ rb->InternalFormat = image->internal_format;
+ rb->Width = image->region->width;
+ rb->Height = image->region->height;
+ rb->Format = image->format;
+ rb->DataType = image->data_type;
+ rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
+ image->internal_format);
+}
+#endif
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->InternalFormat = internalFormat;
+
+ return GL_TRUE;
+}
+
+
+static void
+intel_resize_buffers(GLcontext *ctx, struct gl_framebuffer *fb,
+ GLuint width, GLuint height)
+{
+ int i;
+
+ _mesa_resize_framebuffer(ctx, fb, width, height);
+
+ fb->Initialized = GL_TRUE; /* XXX remove someday */
+
+ if (fb->Name != 0) {
+ return;
+ }
+
+
+ /* Make sure all window system renderbuffers are up to date */
+ for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
+ struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
+
+ /* only resize if size is changing */
+ if (rb && (rb->Width != width || rb->Height != height)) {
+ rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
+ }
+ }
+}
+
+
+/** Dummy function for gl_renderbuffer::AllocStorage() */
+static GLboolean
+intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
+ return GL_FALSE;
+}
+
+
+void
+intel_renderbuffer_set_region(struct intel_context *intel,
+ struct intel_renderbuffer *rb,
+ struct intel_region *region)
+{
+ struct intel_region *old;
+
+ old = rb->region;
+ rb->region = NULL;
+ intel_region_reference(&rb->region, region);
+ intel_region_release(&old);
+}
+
+
+/**
+ * Create a new intel_renderbuffer which corresponds to an on-screen window,
+ * not a user-created renderbuffer.
+ */
+struct intel_renderbuffer *
+intel_create_renderbuffer(gl_format format)
+{
+ GET_CURRENT_CONTEXT(ctx);
+
+ struct intel_renderbuffer *irb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, 0);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ switch (format) {
+ case MESA_FORMAT_RGB565:
+ irb->Base._BaseFormat = GL_RGB;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ break;
+ case MESA_FORMAT_XRGB8888:
+ irb->Base._BaseFormat = GL_RGB;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ break;
+ case MESA_FORMAT_ARGB8888:
+ irb->Base._BaseFormat = GL_RGBA;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ break;
+ case MESA_FORMAT_Z16:
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DataType = GL_UNSIGNED_SHORT;
+ break;
+ case MESA_FORMAT_X8_Z24:
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DataType = GL_UNSIGNED_INT;
+ break;
+ case MESA_FORMAT_S8_Z24:
+ irb->Base._BaseFormat = GL_DEPTH_STENCIL;
+ irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
+ break;
+ case MESA_FORMAT_A8:
+ irb->Base._BaseFormat = GL_ALPHA;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ break;
+ default:
+ _mesa_problem(NULL,
+ "Unexpected intFormat in intel_create_renderbuffer");
+ free(irb);
+ return NULL;
+ }
+
+ irb->Base.Format = format;
+ irb->Base.InternalFormat = irb->Base._BaseFormat;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_window_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+
+ return irb;
+}
+
+
+/**
+ * Create a new renderbuffer object.
+ * Typically called via glBindRenderbufferEXT().
+ */
+static struct gl_renderbuffer *
+intel_new_renderbuffer(GLcontext * ctx, GLuint name)
+{
+ /*struct intel_context *intel = intel_context(ctx); */
+ struct intel_renderbuffer *irb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+ /* span routines set in alloc_storage function */
+
+ return &irb->Base;
+}
+
+
+/**
+ * Called via glBindFramebufferEXT().
+ */
+static void
+intel_bind_framebuffer(GLcontext * ctx, GLenum target,
+ struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
+{
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ intel_draw_buffer(ctx, fb);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+
+/**
+ * Called via glFramebufferRenderbufferEXT().
+ */
+static void
+intel_framebuffer_renderbuffer(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+ DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
+
+ intel_flush(ctx);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ intel_draw_buffer(ctx, fb);
+}
+
+
+static GLboolean
+intel_update_wrapper(GLcontext *ctx, struct intel_renderbuffer *irb,
+ struct gl_texture_image *texImage)
+{
+ if (texImage->TexFormat == MESA_FORMAT_ARGB8888) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to RGBA8 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_XRGB8888) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to XGBA8 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_RGB565) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to RGB5 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_ARGB1555) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to ARGB1555 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_ARGB4444) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to ARGB4444 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_A8) {
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ DBG("Render to A8 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_Z16) {
+ irb->Base.DataType = GL_UNSIGNED_SHORT;
+ DBG("Render to DEPTH16 texture OK\n");
+ }
+ else if (texImage->TexFormat == MESA_FORMAT_S8_Z24) {
+ irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
+ DBG("Render to DEPTH_STENCIL texture OK\n");
+ }
+ else {
+ DBG("Render to texture BAD FORMAT %s\n",
+ _mesa_get_format_name(texImage->TexFormat));
+ return GL_FALSE;
+ }
+
+ irb->Base.Format = texImage->TexFormat;
+
+ irb->Base.InternalFormat = texImage->InternalFormat;
+ irb->Base._BaseFormat = _mesa_base_fbo_format(ctx, irb->Base.InternalFormat);
+ irb->Base.Width = texImage->Width;
+ irb->Base.Height = texImage->Height;
+
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_nop_alloc_storage;
+
+ return GL_TRUE;
+}
+
+
+/**
+ * When glFramebufferTexture[123]D is called this function sets up the
+ * gl_renderbuffer wrapper around the texture image.
+ * This will have the region info needed for hardware rendering.
+ */
+static struct intel_renderbuffer *
+intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ const GLuint name = ~0; /* not significant, but distinct for debugging */
+ struct intel_renderbuffer *irb;
+
+ /* make an intel_renderbuffer to wrap the texture image */
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ if (!intel_update_wrapper(ctx, irb, texImage)) {
+ free(irb);
+ return NULL;
+ }
+
+ return irb;
+}
+
+
+/**
+ * Called by glFramebufferTexture[123]DEXT() (and other places) to
+ * prepare for rendering into texture memory. This might be called
+ * many times to choose different texture levels, cube faces, etc
+ * before intel_finish_render_texture() is ever called.
+ */
+static void
+intel_render_texture(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct gl_texture_image *newImage
+ = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct intel_texture_image *intel_image;
+ GLuint dst_x, dst_y;
+
+ (void) fb;
+
+ ASSERT(newImage);
+
+ intel_image = intel_texture_image(newImage);
+ if (!intel_image->mt) {
+ /* Fallback on drawing to a texture that doesn't have a miptree
+ * (has a border, width/height 0, etc.)
+ */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _mesa_render_texture(ctx, fb, att);
+ return;
+ }
+ else if (!irb) {
+ irb = intel_wrap_texture(ctx, newImage);
+ if (irb) {
+ /* bind the wrapper to the attachment point */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
+ }
+ else {
+ /* fallback to software rendering */
+ _mesa_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ if (!intel_update_wrapper(ctx, irb, newImage)) {
+ _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ _mesa_render_texture(ctx, fb, att);
+ return;
+ }
+
+ DBG("Begin render texture tid %lx tex=%u w=%d h=%d refcount=%d\n",
+ _glthread_GetID(),
+ att->Texture->Name, newImage->Width, newImage->Height,
+ irb->Base.RefCount);
+
+   /* point the renderbuffer's region to the texture image region */
+ if (irb->region != intel_image->mt->region) {
+ if (irb->region)
+ intel_region_release(&irb->region);
+ intel_region_reference(&irb->region, intel_image->mt->region);
+ }
+
+ /* compute offset of the particular 2D image within the texture region */
+ intel_miptree_get_image_offset(intel_image->mt,
+ att->TextureLevel,
+ att->CubeMapFace,
+ att->Zoffset,
+ &dst_x, &dst_y);
+
+ intel_image->mt->region->draw_offset = (dst_y * intel_image->mt->region->pitch +
+ dst_x) * intel_image->mt->cpp;
+ intel_image->mt->region->draw_x = dst_x;
+ intel_image->mt->region->draw_y = dst_y;
+ intel_image->used_as_render_target = GL_TRUE;
+
+ /* update drawing region, etc */
+ intel_draw_buffer(ctx, fb);
+}
+
+
+/**
+ * Called by Mesa when rendering to a texture is done.
+ */
+static void
+intel_finish_render_texture(GLcontext * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_texture_object *tex_obj = att->Texture;
+ struct gl_texture_image *image =
+ tex_obj->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+
+ /* Flag that this image may now be validated into the object's miptree. */
+ intel_image->used_as_render_target = GL_FALSE;
+
+ /* Since we've (probably) rendered to the texture and will (likely) use
+ * it in the texture domain later on in this batchbuffer, flush the
+ * batch. Once again, we wish for a domain tracker in libdrm to cover
+ * usage inside of a batchbuffer like GEM does in the kernel.
+ */
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+}
+
+/**
+ * Do additional "completeness" testing of a framebuffer object.
+ */
+static void
+intel_validate_framebuffer(GLcontext *ctx, struct gl_framebuffer *fb)
+{
+ const struct intel_renderbuffer *depthRb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ const struct intel_renderbuffer *stencilRb =
+ intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ int i;
+
+ if (depthRb && stencilRb && stencilRb != depthRb) {
+ if (ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE &&
+ ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE &&
+ (ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Texture->Name ==
+ ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Texture->Name)) {
+ /* OK */
+ } else {
+ /* we only support combined depth/stencil buffers, not separate
+ * stencil buffers.
+ */
+ DBG("Only supports combined depth/stencil (found %s, %s)\n",
+ depthRb ? _mesa_get_format_name(depthRb->Base.Format): "NULL",
+ stencilRb ? _mesa_get_format_name(stencilRb->Base.Format): "NULL");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ }
+
+ for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
+ struct gl_renderbuffer *rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ if (rb == NULL)
+ continue;
+
+ if (irb == NULL) {
+ DBG("software rendering renderbuffer\n");
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ continue;
+ }
+
+ switch (irb->Base.Format) {
+ case MESA_FORMAT_ARGB8888:
+ case MESA_FORMAT_XRGB8888:
+ case MESA_FORMAT_RGB565:
+ case MESA_FORMAT_ARGB1555:
+ case MESA_FORMAT_ARGB4444:
+ case MESA_FORMAT_A8:
+ break;
+ default:
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ }
+ }
+}
+
+
+/**
+ * Do one-time context initializations related to GL_EXT_framebuffer_object.
+ * Hook in device driver functions.
+ */
+void
+intel_fbo_init(struct intel_context *intel)
+{
+ intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
+ intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
+ intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
+ intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
+ intel->ctx.Driver.RenderTexture = intel_render_texture;
+ intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
+ intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
+ intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
+ intel->ctx.Driver.BlitFramebuffer = _mesa_meta_BlitFramebuffer;
+
+#if FEATURE_OES_EGL_image
+ intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
+ intel_image_target_renderbuffer_storage;
+#endif
+}
diff --git a/src/mesa/drivers/dri/intel/intel_fbo.h b/src/mesa/drivers/dri/intel/intel_fbo.h
new file mode 100644
index 0000000000..028f657d12
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_fbo.h
@@ -0,0 +1,113 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_FBO_H
+#define INTEL_FBO_H
+
+#include "main/formats.h"
+#include "intel_screen.h"
+
+struct intel_context;
+
+/**
+ * Intel renderbuffer, derived from gl_renderbuffer.
+ */
+struct intel_renderbuffer
+{
+ struct gl_renderbuffer Base;
+ struct intel_region *region;
+};
+
+
+/**
+ * gl_renderbuffer is a base class which we subclass. The Class field
+ * is used for simple run-time type checking.
+ */
+#define INTEL_RB_CLASS 0x12345678
+
+
+/**
+ * Return a gl_renderbuffer ptr casted to intel_renderbuffer.
+ * NULL will be returned if the rb isn't really an intel_renderbuffer.
+ * This is determined by checking the ClassID.
+ */
+static INLINE struct intel_renderbuffer *
+intel_renderbuffer(struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
+ if (irb && irb->Base.ClassID == INTEL_RB_CLASS) {
+ /*_mesa_warning(NULL, "Returning non-intel Rb\n");*/
+ return irb;
+ }
+ else
+ return NULL;
+}
+
+
+/**
+ * Return a framebuffer's renderbuffer, named by a BUFFER_x index.
+ */
+static INLINE struct intel_renderbuffer *
+intel_get_renderbuffer(struct gl_framebuffer *fb, int attIndex)
+{
+ if (attIndex >= 0)
+ return intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
+ else
+ return NULL;
+}
+
+
+extern void
+intel_renderbuffer_set_region(struct intel_context *intel,
+ struct intel_renderbuffer *irb,
+ struct intel_region *region);
+
+
+extern struct intel_renderbuffer *
+intel_create_renderbuffer(gl_format format);
+
+
+extern void
+intel_fbo_init(struct intel_context *intel);
+
+
+extern void
+intel_flip_renderbuffers(struct gl_framebuffer *fb);
+
+
+static INLINE struct intel_region *
+intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+
+#endif /* INTEL_FBO_H */
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
new file mode 100644
index 0000000000..39ac0205fa
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
@@ -0,0 +1,461 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_tex_layout.h"
+#include "main/enums.h"
+
+#define FILE_DEBUG_FLAG DEBUG_MIPTREE
+
+
+static GLenum
+target_to_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return GL_TEXTURE_CUBE_MAP_ARB;
+ default:
+ return target;
+ }
+}
+
+
+static struct intel_mipmap_tree *
+intel_miptree_create_internal(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0, GLuint cpp, GLuint compress_byte,
+ uint32_t tiling)
+{
+ GLboolean ok;
+ struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
+
+ DBG("%s target %s format %s level %d..%d <-- %p\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ _mesa_lookup_enum_by_nr(internal_format),
+ first_level, last_level, mt);
+
+ mt->target = target_to_target(target);
+ mt->internal_format = internal_format;
+ mt->first_level = first_level;
+ mt->last_level = last_level;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->depth0 = depth0;
+ mt->cpp = compress_byte ? compress_byte : cpp;
+ mt->compressed = compress_byte ? 1 : 0;
+ mt->refcount = 1;
+
+#ifdef I915
+ if (intel->is_945)
+ ok = i945_miptree_layout(intel, mt, tiling);
+ else
+ ok = i915_miptree_layout(intel, mt, tiling);
+#else
+ ok = brw_miptree_layout(intel, mt, tiling);
+#endif
+
+ if (!ok) {
+ free(mt);
+ DBG("%s not okay - returning NULL\n", __FUNCTION__);
+ return NULL;
+ }
+
+ return mt;
+}
+
+
+struct intel_mipmap_tree *
+intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ GLenum base_format,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0, GLuint cpp, GLuint compress_byte,
+ GLboolean expect_accelerated_upload)
+{
+ struct intel_mipmap_tree *mt;
+ uint32_t tiling = I915_TILING_NONE;
+
+ if (intel->use_texture_tiling && compress_byte == 0) {
+ if (intel->gen >= 4 &&
+ (base_format == GL_DEPTH_COMPONENT ||
+ base_format == GL_DEPTH_STENCIL_EXT))
+ tiling = I915_TILING_Y;
+ else if (width0 >= 64)
+ tiling = I915_TILING_X;
+ }
+
+ mt = intel_miptree_create_internal(intel, target, internal_format,
+ first_level, last_level, width0,
+ height0, depth0, cpp, compress_byte,
+ tiling);
+ /*
+ * pitch == 0 || height == 0 indicates the null texture
+ */
+ if (!mt || !mt->total_height) {
+ free(mt);
+ return NULL;
+ }
+
+ mt->region = intel_region_alloc(intel,
+ tiling,
+ mt->cpp,
+ mt->total_width,
+ mt->total_height,
+ expect_accelerated_upload);
+
+ if (!mt->region) {
+ free(mt);
+ return NULL;
+ }
+
+ return mt;
+}
+
+
+struct intel_mipmap_tree *
+intel_miptree_create_for_region(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ struct intel_region *region,
+ GLuint depth0,
+ GLuint compress_byte)
+{
+ struct intel_mipmap_tree *mt;
+
+ mt = intel_miptree_create_internal(intel, target, internal_format,
+ first_level, last_level,
+ region->width, region->height, 1,
+ region->cpp, compress_byte,
+ I915_TILING_NONE);
+ if (!mt)
+ return mt;
+
+ intel_region_reference(&mt->region, region);
+
+ return mt;
+}
+
+void
+intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src)
+{
+ src->refcount++;
+ *dst = src;
+ DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
+}
+
+
+void
+intel_miptree_release(struct intel_context *intel,
+ struct intel_mipmap_tree **mt)
+{
+ if (!*mt)
+ return;
+
+ DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
+ if (--(*mt)->refcount <= 0) {
+ GLuint i;
+
+ DBG("%s deleting %p\n", __FUNCTION__, *mt);
+
+ intel_region_release(&((*mt)->region));
+
+ for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
+ free((*mt)->level[i].x_offset);
+ free((*mt)->level[i].y_offset);
+ }
+
+ free(*mt);
+ }
+ *mt = NULL;
+}
+
+
+/**
+ * Can the image be pulled into a unified mipmap tree? This mirrors
+ * the completeness test in a lot of ways.
+ *
+ * Not sure whether I want to pass gl_texture_image here.
+ */
+GLboolean
+intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image)
+{
+ GLboolean isCompressed = _mesa_is_format_compressed(image->TexFormat);
+ struct intel_texture_image *intelImage = intel_texture_image(image);
+ GLuint level = intelImage->level;
+
+ /* Images with borders are never pulled into mipmap trees. */
+ if (image->Border)
+ return GL_FALSE;
+
+ if (image->InternalFormat != mt->internal_format ||
+ isCompressed != mt->compressed)
+ return GL_FALSE;
+
+ if (!isCompressed &&
+ !mt->compressed &&
+ _mesa_get_format_bytes(image->TexFormat) != mt->cpp)
+ return GL_FALSE;
+
+ /* Test image dimensions against the base level image adjusted for
+ * minification. This will also catch images not present in the
+ * tree, changed targets, etc.
+ */
+ if (image->Width != mt->level[level].width ||
+ image->Height != mt->level[level].height ||
+ image->Depth != mt->level[level].depth)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+
+void
+intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint nr_images,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h, GLuint d)
+{
+ mt->level[level].width = w;
+ mt->level[level].height = h;
+ mt->level[level].depth = d;
+ mt->level[level].level_x = x;
+ mt->level[level].level_y = y;
+ mt->level[level].nr_images = nr_images;
+
+ DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __FUNCTION__,
+ level, w, h, d, x, y);
+
+ assert(nr_images);
+ assert(!mt->level[level].x_offset);
+
+ mt->level[level].x_offset = malloc(nr_images * sizeof(GLuint));
+ mt->level[level].x_offset[0] = mt->level[level].level_x;
+ mt->level[level].y_offset = malloc(nr_images * sizeof(GLuint));
+ mt->level[level].y_offset[0] = mt->level[level].level_y;
+}
+
+
+void
+intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint img,
+ GLuint x, GLuint y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < mt->level[level].nr_images);
+
+ mt->level[level].x_offset[img] = mt->level[level].level_x + x;
+ mt->level[level].y_offset[img] = mt->level[level].level_y + y;
+
+ DBG("%s level %d img %d pos %d,%d\n",
+ __FUNCTION__, level, img,
+ mt->level[level].x_offset[img], mt->level[level].y_offset[img]);
+}
+
+
+void
+intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint face, GLuint depth,
+ GLuint *x, GLuint *y)
+{
+ if (mt->target == GL_TEXTURE_CUBE_MAP_ARB) {
+ *x = mt->level[level].x_offset[face];
+ *y = mt->level[level].y_offset[face];
+ } else if (mt->target == GL_TEXTURE_3D) {
+ *x = mt->level[level].x_offset[depth];
+ *y = mt->level[level].y_offset[depth];
+ } else {
+ *x = mt->level[level].x_offset[0];
+ *y = mt->level[level].y_offset[0];
+ }
+}
+
+/**
+ * Map a teximage in a mipmap tree.
+ * \param row_stride returns row stride in bytes
+ * \param image_offsets caller-provided array that gets filled with the pixel
+ *        offset from the returned pointer to each depth image
+ * \return address of mapping
+ */
+GLubyte *
+intel_miptree_image_map(struct intel_context * intel,
+ struct intel_mipmap_tree * mt,
+ GLuint face,
+ GLuint level,
+ GLuint * row_stride, GLuint * image_offsets)
+{
+ GLuint x, y;
+ DBG("%s \n", __FUNCTION__);
+
+ if (row_stride)
+ *row_stride = mt->region->pitch * mt->cpp;
+
+ if (mt->target == GL_TEXTURE_3D) {
+ int i;
+
+ for (i = 0; i < mt->level[level].depth; i++) {
+
+ intel_miptree_get_image_offset(mt, level, face, i,
+ &x, &y);
+ image_offsets[i] = x + y * mt->region->pitch;
+ }
+
+ return intel_region_map(intel, mt->region);
+ } else {
+ assert(mt->level[level].depth == 1);
+ intel_miptree_get_image_offset(mt, level, face, 0,
+ &x, &y);
+ image_offsets[0] = 0;
+
+ return intel_region_map(intel, mt->region) +
+ (x + y * mt->region->pitch) * mt->cpp;
+ }
+}
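+
+/*
+ * Illustrative sketch (assumption: "mt" is a valid non-3D miptree): the
+ * returned pointer already points at the requested image, image_offsets[0]
+ * is 0, and row_stride is in bytes, so pixel (px, py) of a given face and
+ * level can be addressed as:
+ *
+ *    GLuint row_stride, image_offsets[1];
+ *    GLubyte *map = intel_miptree_image_map(intel, mt, face, level,
+ *                                           &row_stride, image_offsets);
+ *    GLubyte *pixel = map + image_offsets[0] * mt->cpp
+ *                     + py * row_stride + px * mt->cpp;
+ *    ...
+ *    intel_miptree_image_unmap(intel, mt);
+ */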
+
+
+void
+intel_miptree_image_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ DBG("%s\n", __FUNCTION__);
+ intel_region_unmap(intel, mt->region);
+}
+
+
+/**
+ * Upload data for a particular image.
+ */
+void
+intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ void *src,
+ GLuint src_row_pitch,
+ GLuint src_image_pitch)
+{
+ const GLuint depth = dst->level[level].depth;
+ GLuint i;
+
+ DBG("%s: %d/%d\n", __FUNCTION__, face, level);
+ for (i = 0; i < depth; i++) {
+ GLuint dst_x, dst_y, height;
+
+ intel_miptree_get_image_offset(dst, level, face, i, &dst_x, &dst_y);
+
+ height = dst->level[level].height;
+      if (dst->compressed)
+ height = (height + 3) / 4;
+
+ intel_region_data(intel,
+ dst->region, 0, dst_x, dst_y,
+ src,
+ src_row_pitch,
+ 0, 0, /* source x, y */
+ dst->level[level].width, height); /* width, height */
+
+ src = (char *)src + src_image_pitch * dst->cpp;
+ }
+}
+
+
+/**
+ * Copy mipmap image between trees
+ */
+void
+intel_miptree_image_copy(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face, GLuint level,
+ struct intel_mipmap_tree *src)
+{
+ GLuint width = src->level[level].width;
+ GLuint height = src->level[level].height;
+ GLuint depth = src->level[level].depth;
+ GLuint src_x, src_y, dst_x, dst_y;
+ GLuint i;
+ GLboolean success;
+
+ if (dst->compressed) {
+ GLuint align_w, align_h;
+
+ intel_get_texture_alignment_unit(dst->internal_format,
+ &align_w, &align_h);
+ height = (height + 3) / 4;
+ width = ALIGN(width, align_w);
+ }
+
+ intel_prepare_render(intel);
+
+ for (i = 0; i < depth; i++) {
+ intel_miptree_get_image_offset(src, level, face, i, &src_x, &src_y);
+ intel_miptree_get_image_offset(dst, level, face, i, &dst_x, &dst_y);
+ success = intel_region_copy(intel,
+ dst->region, 0, dst_x, dst_y,
+ src->region, 0, src_x, src_y,
+ width, height, GL_FALSE,
+ GL_COPY);
+ if (!success) {
+ GLubyte *src_ptr, *dst_ptr;
+
+ src_ptr = intel_region_map(intel, src->region);
+ dst_ptr = intel_region_map(intel, dst->region);
+
+ _mesa_copy_rect(dst_ptr,
+ dst->cpp,
+ dst->region->pitch,
+ dst_x, dst_y, width, height,
+ src_ptr,
+ src->region->pitch,
+ src_x, src_y);
+ intel_region_unmap(intel, src->region);
+ intel_region_unmap(intel, dst->region);
+ }
+ }
+}
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.h b/src/mesa/drivers/dri/intel/intel_mipmap_tree.h
new file mode 100644
index 0000000000..21db2f4d3b
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.h
@@ -0,0 +1,217 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_MIPMAP_TREE_H
+#define INTEL_MIPMAP_TREE_H
+
+#include "intel_regions.h"
+
+/* A layer on top of the intel_regions code which adds:
+ *
+ * - Code to size and layout a region to hold a set of mipmaps.
+ * - Query to determine if a new image fits in an existing tree.
+ * - More refcounting
+ * - maybe able to remove refcounting from intel_region?
+ * - ?
+ *
+ * The fixed mipmap layout of intel hardware where one offset
+ * specifies the position of all images in a mipmap hierarchy
+ * complicates the implementation of GL texture image commands,
+ * compared to hardware where each image is specified with an
+ * independent offset.
+ *
+ * In an ideal world, each texture object would be associated with a
+ * single bufmgr buffer or 2d intel_region, and all the images within
+ * the texture object would slot into the tree as they arrive. The
+ * reality can be a little messier, as images can arrive from the user
+ * with sizes that don't fit in the existing tree, or in an order
+ * where the tree layout cannot be guessed immediately.
+ *
+ * This structure encodes an idealized mipmap tree. The GL image
+ * commands build these where possible, otherwise store the images in
+ * temporary system buffers.
+ */
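+
+/* A minimal sketch of how this layout is consumed, assuming an untiled
+ * region whose pitch is counted in pixels (both assumptions, purely for
+ * illustration): the byte offset of texel (x, y) within one level/face
+ * image can be derived from intel_miptree_get_image_offset(), declared
+ * below:
+ *
+ *    GLuint ix, iy;
+ *    intel_miptree_get_image_offset(mt, level, face, 0, &ix, &iy);
+ *    offset = ((iy + y) * mt->region->pitch + (ix + x)) * mt->cpp;
+ *
+ * The (ix, iy) pair is an absolute position within the region, so callers
+ * do not need to combine level_[xy] and the per-image offset tables
+ * themselves.
+ */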
+
+
+/**
+ * Describes the location of each texture image within a texture region.
+ */
+struct intel_mipmap_level
+{
+ /** Offset to this miptree level, used in computing x_offset. */
+ GLuint level_x;
+ /** Offset to this miptree level, used in computing y_offset. */
+ GLuint level_y;
+ GLuint width;
+ GLuint height;
+ /** Depth of the mipmap at this level: 1 for 1D/2D/CUBE, n for 3D. */
+ GLuint depth;
+ /** Number of images at this level: 1 for 1D/2D, 6 for CUBE, depth for 3D */
+ GLuint nr_images;
+
+ /** @{
+ * offsets from level_[xy] to the image for each cube face or depth
+ * level.
+ *
+ * Pretty much have to accept that hardware formats
+ * are going to be so diverse that there is no unified way to
+ * compute the offsets of depth/cube images within a mipmap level,
+ * so have to store them as a lookup table.
+ */
+ GLuint *x_offset, *y_offset;
+ /** @} */
+};
+
+struct intel_mipmap_tree
+{
+ /* Effectively the key:
+ */
+ GLenum target;
+ GLenum internal_format;
+
+ GLuint first_level;
+ GLuint last_level;
+
+ GLuint width0, height0, depth0; /**< Level zero image dimensions */
+ GLuint cpp;
+ GLboolean compressed;
+
+ /* Derived from the above:
+ */
+ GLuint total_width;
+ GLuint total_height;
+
+ /* Includes image offset tables:
+ */
+ struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];
+
+ /* The data is held here:
+ */
+ struct intel_region *region;
+
+ /* These are also refcounted:
+ */
+ GLuint refcount;
+};
+
+
+
+struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ GLenum base_format,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0,
+ GLuint cpp,
+ GLuint compress_byte,
+ GLboolean expect_accelerated_upload);
+
+struct intel_mipmap_tree *
+intel_miptree_create_for_region(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ struct intel_region *region,
+ GLuint depth0,
+ GLuint compress_byte);
+
+int intel_miptree_pitch_align (struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling,
+ int pitch);
+
+void intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src);
+
+void intel_miptree_release(struct intel_context *intel,
+ struct intel_mipmap_tree **mt);
+
+/* Check if an image fits an existing mipmap tree layout
+ */
+GLboolean intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image);
+
+/* Return a pointer to an image within a tree. Return image stride as
+ * well.
+ */
+GLubyte *intel_miptree_image_map(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ GLuint face,
+ GLuint level,
+ GLuint * row_stride, GLuint * image_stride);
+
+void intel_miptree_image_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt);
+
+void
+intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint face, GLuint depth,
+ GLuint *x, GLuint *y);
+
+void intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint nr_images,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h, GLuint d);
+
+void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint img, GLuint x, GLuint y);
+
+/* Upload an image into a tree
+ */
+void intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ void *src,
+ GLuint src_row_pitch, GLuint src_image_pitch);
+
+/* Copy an image between two trees
+ */
+void intel_miptree_image_copy(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face, GLuint level,
+ struct intel_mipmap_tree *src);
+
+/* i915_mipmap_tree.c:
+ */
+GLboolean i915_miptree_layout(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling);
+GLboolean i945_miptree_layout(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling);
+GLboolean brw_miptree_layout(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_pixel.c b/src/mesa/drivers/dri/intel/intel_pixel.c
new file mode 100644
index 0000000000..cb088e4032
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel.c
@@ -0,0 +1,169 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/enums.h"
+#include "main/state.h"
+#include "main/bufferobj.h"
+#include "main/context.h"
+#include "swrast/swrast.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+static GLenum
+effective_func(GLenum func, GLboolean src_alpha_is_one)
+{
+ if (src_alpha_is_one) {
+ if (func == GL_SRC_ALPHA)
+ return GL_ONE;
+ if (func == GL_ONE_MINUS_SRC_ALPHA)
+ return GL_ZERO;
+ }
+
+ return func;
+}
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glDraw/CopyPixels.
+ */
+GLboolean
+intel_check_blit_fragment_ops(GLcontext * ctx, GLboolean src_alpha_is_one)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ if (ctx->FragmentProgram._Enabled) {
+ DBG("fallback due to fragment program\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Color.BlendEnabled &&
+ (effective_func(ctx->Color.BlendSrcRGB, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.BlendDstRGB, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.BlendEquationRGB != GL_FUNC_ADD ||
+ effective_func(ctx->Color.BlendSrcA, src_alpha_is_one) != GL_ONE ||
+ effective_func(ctx->Color.BlendDstA, src_alpha_is_one) != GL_ZERO ||
+ ctx->Color.BlendEquationA != GL_FUNC_ADD)) {
+ DBG("fallback due to blend\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Texture._EnabledUnits) {
+ DBG("fallback due to texturing\n");
+ return GL_FALSE;
+ }
+
+ if (!(ctx->Color.ColorMask[0][0] &&
+ ctx->Color.ColorMask[0][1] &&
+ ctx->Color.ColorMask[0][2] &&
+ ctx->Color.ColorMask[0][3])) {
+ DBG("fallback due to color masking\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Color.AlphaEnabled) {
+ DBG("fallback due to alpha\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Depth.Test) {
+ DBG("fallback due to depth test\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Fog.Enabled) {
+ DBG("fallback due to fog\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->_ImageTransferState) {
+ DBG("fallback due to image transfer\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Stencil._Enabled) {
+ DBG("fallback due to image stencil\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->RenderMode != GL_RENDER) {
+ DBG("fallback due to render mode\n");
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+/* The intel_region struct doesn't really do enough to capture the
+ * format of the pixels in the region. For now this code assumes that
+ * the region is a display surface and hence is either ARGB8888 or
+ * RGB565.
+ * XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
+ * know the buffer's pixel format.
+ *
+ * \param format as given to glDraw/ReadPixels
+ * \param type as given to glDraw/ReadPixels
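+ *
+ * Concretely, the pairs accepted below are GL_BGRA with GL_UNSIGNED_BYTE or
+ * GL_UNSIGNED_INT_8_8_8_8_REV for 4-byte (ARGB8888) regions, and GL_BGR
+ * with GL_UNSIGNED_SHORT_5_6_5_REV for 2-byte (RGB565) regions.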
+ */
+GLboolean
+intel_check_blit_format(struct intel_region * region,
+ GLenum format, GLenum type)
+{
+ if (region->cpp == 4 &&
+ (type == GL_UNSIGNED_INT_8_8_8_8_REV ||
+ type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
+ return GL_TRUE;
+ }
+
+ if (region->cpp == 2 &&
+ type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
+ return GL_TRUE;
+ }
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s: bad format for blit (cpp %d, type %s format %s)\n",
+ __FUNCTION__, region->cpp,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+
+ return GL_FALSE;
+}
+
+void
+intelInitPixelFuncs(struct dd_function_table *functions)
+{
+ functions->Accum = _swrast_Accum;
+ if (!getenv("INTEL_NO_BLIT")) {
+ functions->Bitmap = intelBitmap;
+ functions->CopyPixels = intelCopyPixels;
+ functions->DrawPixels = intelDrawPixels;
+ }
+ functions->ReadPixels = intelReadPixels;
+}
+
diff --git a/src/mesa/drivers/dri/intel/intel_pixel.h b/src/mesa/drivers/dri/intel/intel_pixel.h
new file mode 100644
index 0000000000..743b6497c5
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel.h
@@ -0,0 +1,67 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_PIXEL_H
+#define INTEL_PIXEL_H
+
+#include "main/mtypes.h"
+
+void intelInitPixelFuncs(struct dd_function_table *functions);
+GLboolean intel_check_blit_fragment_ops(GLcontext * ctx,
+ GLboolean src_alpha_is_one);
+
+GLboolean intel_check_blit_format(struct intel_region *region,
+ GLenum format, GLenum type);
+
+
+void intelReadPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack,
+ GLvoid * pixels);
+
+void intelDrawPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels);
+
+void intelCopyPixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type);
+
+void intelBitmap(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte * pixels);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_pixel_bitmap.c b/src/mesa/drivers/dri/intel/intel_pixel_bitmap.c
new file mode 100644
index 0000000000..076fee89bd
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel_bitmap.c
@@ -0,0 +1,522 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/colormac.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/bufferobj.h"
+#include "main/polygon.h"
+#include "main/pixelstore.h"
+#include "main/polygon.h"
+#include "main/state.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+#include "main/texstate.h"
+#include "main/texparam.h"
+#include "main/varray.h"
+#include "main/attrib.h"
+#include "main/enable.h"
+#include "main/viewport.h"
+#include "shader/arbprogram.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_regions.h"
+#include "intel_buffers.h"
+#include "intel_pixel.h"
+#include "intel_reg.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+
+/* Unlike the other intel_pixel_* functions, the expectation here is
+ * that the incoming data is not in a PBO. With the XY_TEXT blit
+ * method, there's no benefit having it in a PBO, but we could
+ * implement a path based on XY_MONO_SRC_COPY_BLIT which might benefit
+ * PBO bitmaps. I think they are probably pretty rare though - I
+ * wonder if Xgl uses them?
+ */
+static const GLubyte *map_pbo( GLcontext *ctx,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ GLubyte *buf;
+
+ if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
+ GL_COLOR_INDEX, GL_BITMAP,
+ (GLvoid *) bitmap)) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,"glBitmap(invalid PBO access)");
+ return NULL;
+ }
+
+ buf = (GLubyte *) ctx->Driver.MapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT,
+ GL_READ_ONLY_ARB,
+ unpack->BufferObj);
+ if (!buf) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "glBitmap(PBO is mapped)");
+ return NULL;
+ }
+
+ return ADD_POINTERS(buf, bitmap);
+}
+
+static GLboolean test_bit( const GLubyte *src, GLuint bit )
+{
+ return (src[bit/8] & (1<<(bit % 8))) ? 1 : 0;
+}
+
+static void set_bit( GLubyte *dest, GLuint bit )
+{
+ dest[bit/8] |= 1 << (bit % 8);
+}
+
+/* Extract a rectangle's worth of data from the bitmap. Called
+ * per chunk of HW-sized bitmap.
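+ *
+ * The output written to 'dest' is a packed, row-major bitmap: bit n of the
+ * rectangle lands in byte n/8, and the XOR with 7 below stores bits
+ * MSB-first within each byte.  'row_align' is in bits; do_blit_bitmap()
+ * passes 8 so each output row starts on a byte boundary.  The return value
+ * is the number of set bits, letting the caller skip all-zero chunks.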
+ */
+static GLuint get_bitmap_rect(GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap,
+ GLuint x, GLuint y,
+ GLuint w, GLuint h,
+ GLubyte *dest,
+ GLuint row_align,
+ GLboolean invert)
+{
+ GLuint src_offset = (x + unpack->SkipPixels) & 0x7;
+ GLuint mask = unpack->LsbFirst ? 0 : 7;
+ GLuint bit = 0;
+ GLint row, col;
+ GLint first, last;
+ GLint incr;
+ GLuint count = 0;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
+ __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
+
+ if (invert) {
+ first = h-1;
+ last = 0;
+ incr = -1;
+ }
+ else {
+ first = 0;
+ last = h-1;
+ incr = 1;
+ }
+
+ /* Require that dest be pre-zero'd.
+ */
+ for (row = first; row != (last+incr); row += incr) {
+ const GLubyte *rowsrc = _mesa_image_address2d(unpack, bitmap,
+ width, height,
+ GL_COLOR_INDEX, GL_BITMAP,
+ y + row, x);
+
+ for (col = 0; col < w; col++, bit++) {
+ if (test_bit(rowsrc, (col + src_offset) ^ mask)) {
+ set_bit(dest, bit ^ 7);
+ count++;
+ }
+ }
+
+ if (row_align)
+ bit = ALIGN(bit, row_align);
+ }
+
+ return count;
+}
+
+/**
+ * Returns the low Y value of the vertical range given, flipped for window
+ * system framebuffers (which are y-inverted) and left as-is for user FBOs.
+ */
+static INLINE int
+y_flip(struct gl_framebuffer *fb, int y, int height)
+{
+ if (fb->Name != 0)
+ return y;
+ else
+ return fb->Height - y - height;
+}
+
+/*
+ * Render a bitmap.
+ */
+static GLboolean
+do_blit_bitmap( GLcontext *ctx,
+ GLint dstx, GLint dsty,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap )
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst = intel_drawbuf_region(intel);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLfloat tmpColor[4];
+ GLubyte ubcolor[4];
+ GLuint color;
+ GLsizei bitmap_width = width;
+ GLsizei bitmap_height = height;
+ GLint px, py;
+ GLuint stipple[32];
+ GLint orig_dstx = dstx;
+ GLint orig_dsty = dsty;
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ if (ctx->Depth.Test) {
+ /* The blit path produces incorrect results when depth testing is on.
+ * It seems the blit Z coord is always 1.0 (the far plane) so fragments
+ * will likely be obscured by other, closer geometry.
+ */
+ return GL_FALSE;
+ }
+
+ if (!dst)
+ return GL_FALSE;
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ bitmap = map_pbo(ctx, width, height, unpack, bitmap);
+ if (bitmap == NULL)
+ return GL_TRUE; /* even though this is an error, we're done */
+ }
+
+ COPY_4V(tmpColor, ctx->Current.RasterColor);
+
+ if (NEED_SECONDARY_COLOR(ctx)) {
+ ADD_3V(tmpColor, tmpColor, ctx->Current.RasterSecondaryColor);
+ }
+
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[0], tmpColor[0]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[1], tmpColor[1]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[2], tmpColor[2]);
+ UNCLAMPED_FLOAT_TO_UBYTE(ubcolor[3], tmpColor[3]);
+
+ if (dst->cpp == 2)
+ color = PACK_COLOR_565(ubcolor[0], ubcolor[1], ubcolor[2]);
+ else
+ color = PACK_COLOR_8888(ubcolor[3], ubcolor[0], ubcolor[1], ubcolor[2]);
+
+ if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
+ return GL_FALSE;
+
+ intel_prepare_render(intel);
+
+ /* Clip to buffer bounds and scissor. */
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+
+ dsty = y_flip(fb, dsty, height);
+
+#define DY 32
+#define DX 32
+
+ /* Chop it all into chunks that can be digested by hardware: */
+ for (py = 0; py < height; py += DY) {
+ for (px = 0; px < width; px += DX) {
+ int h = MIN2(DY, height - py);
+ int w = MIN2(DX, width - px);
+ GLuint sz = ALIGN(ALIGN(w,8) * h, 64)/8;
+ GLenum logic_op = ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY;
+
+ assert(sz <= sizeof(stipple));
+ memset(stipple, 0, sz);
+
+ /* May need to adjust this when padding has been introduced in
+ * sz above:
+ *
+ * Have to translate destination coordinates back into source
+ * coordinates.
+ */
+ if (get_bitmap_rect(bitmap_width, bitmap_height, unpack,
+ bitmap,
+ -orig_dstx + (dstx + px),
+ -orig_dsty + y_flip(fb, dsty + py, h),
+ w, h,
+ (GLubyte *)stipple,
+ 8,
+ fb->Name == 0 ? GL_TRUE : GL_FALSE) == 0)
+ continue;
+
+ if (!intelEmitImmediateColorExpandBlit(intel,
+ dst->cpp,
+ (GLubyte *)stipple,
+ sz,
+ color,
+ dst->pitch,
+ dst->buffer,
+ 0,
+ dst->tiling,
+ dstx + px,
+ dsty + py,
+ w, h,
+ logic_op)) {
+ return GL_FALSE;
+ }
+ }
+ }
+out:
+
+ if (INTEL_DEBUG & DEBUG_SYNC)
+ intel_batchbuffer_flush(intel->batch);
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ /* done with PBO so unmap it now */
+ ctx->Driver.UnmapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT,
+ unpack->BufferObj);
+ }
+
+ intel_check_front_buffer_rendering(intel);
+
+ return GL_TRUE;
+}
+
+static GLboolean
+intel_texture_bitmap(GLcontext * ctx,
+ GLint dst_x, GLint dst_y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap)
+{
+ struct intel_context *intel = intel_context(ctx);
+ static const char *fp =
+ "!!ARBfp1.0\n"
+ "TEMP val;\n"
+ "PARAM color=program.local[0];\n"
+ "TEX val, fragment.texcoord[0], texture[0], 2D;\n"
+ "ADD val, val.wwww, {-.5, -.5, -.5, -.5};\n"
+ "KIL val;\n"
+ "MOV result.color, color;\n"
+ "END\n";
+ GLuint texname;
+ GLfloat vertices[4][4];
+ GLint old_active_texture;
+ GLubyte *a8_bitmap;
+ GLfloat dst_z;
+
+ /* We need a fragment program for the KIL effect */
+ if (!ctx->Extensions.ARB_fragment_program ||
+ !ctx->Extensions.ARB_vertex_program) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr,
+ "glBitmap fallback: No fragment/vertex program support\n");
+ return GL_FALSE;
+ }
+
+ /* We're going to mess with texturing with no regard to existing texture
+ * state, so if there is some set up we have to bail.
+ */
+ if (ctx->Texture._EnabledUnits != 0) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glBitmap fallback: texturing enabled\n");
+ return GL_FALSE;
+ }
+
+ /* Can't do textured DrawPixels with a fragment program, unless we were
+ * to generate a new program that sampled our texture and put the results
+ * in the fragment color before the user's program started.
+ */
+ if (ctx->FragmentProgram.Enabled) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glBitmap fallback: fragment program enabled\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->VertexProgram.Enabled) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glBitmap fallback: vertex program enabled\n");
+ return GL_FALSE;
+ }
+
+ if (!ctx->Extensions.ARB_texture_non_power_of_two &&
+ (!is_power_of_two(width) || !is_power_of_two(height))) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr,
+ "glBitmap() fallback: NPOT texture\n");
+ return GL_FALSE;
+ }
+
+ if (ctx->Fog.Enabled) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glBitmap() fallback: fog\n");
+ return GL_FALSE;
+ }
+
+ /* Check that we can load in a texture this big. */
+ if (width > (1 << (ctx->Const.MaxTextureLevels - 1)) ||
+ height > (1 << (ctx->Const.MaxTextureLevels - 1))) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glBitmap fallback: bitmap too large (%dx%d)\n",
+ width, height);
+ return GL_FALSE;
+ }
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ bitmap = map_pbo(ctx, width, height, unpack, bitmap);
+ if (bitmap == NULL)
+ return GL_TRUE; /* even though this is an error, we're done */
+ }
+
+ /* Convert the A1 bitmap to an A8 format suitable for glTexImage */
+ a8_bitmap = calloc(1, width * height);
+ _mesa_expand_bitmap(width, height, unpack, bitmap, a8_bitmap, width, 0xff);
+
+ if (_mesa_is_bufferobj(unpack->BufferObj)) {
+ /* done with PBO so unmap it now */
+ ctx->Driver.UnmapBuffer(ctx, GL_PIXEL_UNPACK_BUFFER_EXT,
+ unpack->BufferObj);
+ }
+
+ /* Save GL state before we start setting up our drawing */
+ _mesa_PushAttrib(GL_ENABLE_BIT | GL_CURRENT_BIT | GL_POLYGON_BIT |
+ GL_TEXTURE_BIT | GL_VIEWPORT_BIT);
+ _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT |
+ GL_CLIENT_PIXEL_STORE_BIT);
+ old_active_texture = ctx->Texture.CurrentUnit;
+
+ _mesa_Disable(GL_POLYGON_STIPPLE);
+ _mesa_PolygonMode(GL_FRONT_AND_BACK, GL_FILL);
+
+ /* Upload our bitmap data to an alpha texture */
+ _mesa_ActiveTextureARB(GL_TEXTURE0_ARB);
+ _mesa_Enable(GL_TEXTURE_2D);
+ _mesa_GenTextures(1, &texname);
+ _mesa_BindTexture(GL_TEXTURE_2D, texname);
+ _mesa_TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ _mesa_TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+
+ _mesa_PixelStorei(GL_UNPACK_SWAP_BYTES, GL_FALSE);
+ _mesa_PixelStorei(GL_UNPACK_LSB_FIRST, GL_FALSE);
+ _mesa_PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ _mesa_PixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
+ _mesa_PixelStorei(GL_UNPACK_SKIP_ROWS, 0);
+ _mesa_PixelStorei(GL_UNPACK_ALIGNMENT, 1);
+ _mesa_TexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, width, height, 0,
+ GL_ALPHA, GL_UNSIGNED_BYTE, a8_bitmap);
+ free(a8_bitmap);
+
+ meta_set_fragment_program(&intel->meta, &intel->meta.bitmap_fp, fp);
+ _mesa_ProgramLocalParameter4fvARB(GL_FRAGMENT_PROGRAM_ARB, 0,
+ ctx->Current.RasterColor);
+ meta_set_passthrough_vertex_program(&intel->meta);
+ meta_set_passthrough_transform(&intel->meta);
+
+ /* convert rasterpos Z from [0,1] to NDC coord in [-1,1] */
+ dst_z = -1.0 + 2.0 * ctx->Current.RasterPos[2];
+
+ /* RasterPos[2] already takes into account the DepthRange mapping. */
+ _mesa_DepthRange(0.0, 1.0);
+
+ vertices[0][0] = dst_x;
+ vertices[0][1] = dst_y;
+ vertices[0][2] = dst_z;
+ vertices[0][3] = 1.0;
+ vertices[1][0] = dst_x + width;
+ vertices[1][1] = dst_y;
+ vertices[1][2] = dst_z;
+ vertices[1][3] = 1.0;
+ vertices[2][0] = dst_x + width;
+ vertices[2][1] = dst_y + height;
+ vertices[2][2] = dst_z;
+ vertices[2][3] = 1.0;
+ vertices[3][0] = dst_x;
+ vertices[3][1] = dst_y + height;
+ vertices[3][2] = dst_z;
+ vertices[3][3] = 1.0;
+
+ _mesa_VertexPointer(4, GL_FLOAT, 4 * sizeof(GLfloat), &vertices);
+ _mesa_Enable(GL_VERTEX_ARRAY);
+ meta_set_default_texrect(&intel->meta);
+ _mesa_DrawArrays(GL_TRIANGLE_FAN, 0, 4);
+
+ meta_restore_texcoords(&intel->meta);
+ meta_restore_transform(&intel->meta);
+ meta_restore_fragment_program(&intel->meta);
+ meta_restore_vertex_program(&intel->meta);
+
+ _mesa_ActiveTextureARB(GL_TEXTURE0_ARB + old_active_texture);
+ _mesa_PopClientAttrib();
+ _mesa_PopAttrib();
+
+ _mesa_DeleteTextures(1, &texname);
+
+ return GL_TRUE;
+}
+
+/* There are a large number of possible ways to implement bitmap on
+ * this hardware, most of them have some sort of drawback. Here are a
+ * few that spring to mind:
+ *
+ * Blit:
+ * - XY_MONO_SRC_BLT_CMD
+ * - use XY_SETUP_CLIP_BLT for cliprect clipping.
+ * - XY_TEXT_BLT
+ * - XY_TEXT_IMMEDIATE_BLT
+ * - blit per cliprect, subject to maximum immediate data size.
+ * - XY_COLOR_BLT
+ * - per pixel or run of pixels
+ * - XY_PIXEL_BLT
+ * - good for sparse bitmaps
+ *
+ * 3D engine:
+ * - Point per pixel
+ * - Translate bitmap to an alpha texture and render as a quad
+ * - Chop bitmap up into 32x32 squares and render w/polygon stipple.
+ */
+void
+intelBitmap(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte * pixels)
+{
+ if (do_blit_bitmap(ctx, x, y, width, height,
+ unpack, pixels))
+ return;
+
+ if (intel_texture_bitmap(ctx, x, y, width, height,
+ unpack, pixels))
+ return;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s: fallback to swrast\n", __FUNCTION__);
+
+ _swrast_Bitmap(ctx, x, y, width, height, unpack, pixels);
+}
diff --git a/src/mesa/drivers/dri/intel/intel_pixel_copy.c b/src/mesa/drivers/dri/intel/intel_pixel_copy.c
new file mode 100644
index 0000000000..2008a4c2be
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel_copy.c
@@ -0,0 +1,214 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/image.h"
+#include "main/state.h"
+#include "main/mtypes.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_fbo.h"
+
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
+static struct intel_region *
+copypix_src_region(struct intel_context *intel, GLenum type)
+{
+ struct intel_renderbuffer *depth;
+
+ depth = (struct intel_renderbuffer *)
+ &intel->ctx.DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer;
+
+ switch (type) {
+ case GL_COLOR:
+ return intel_readbuf_region(intel);
+ case GL_DEPTH:
+ /* Don't think this is really possible except at 16bpp, when we
+ * have no stencil. */
+ if (depth && depth->region->cpp == 2)
+ return depth->region;
+ case GL_STENCIL:
+ /* Don't think this is really possible. */
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ /* Does it matter whether it is stencil/depth or depth/stencil?
+ */
+ return depth->region;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Check if any fragment operations are in effect which might affect
+ * glCopyPixels. Differs from intel_check_blit_fragment_ops in that
+ * we allow Scissor.
+ */
+static GLboolean
+intel_check_copypixel_blit_fragment_ops(GLcontext * ctx)
+{
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ /* Could do logicop with the blitter:
+ */
+ return !(ctx->_ImageTransferState ||
+ ctx->Color.AlphaEnabled ||
+ ctx->Depth.Test ||
+ ctx->Fog.Enabled ||
+ ctx->Stencil._Enabled ||
+ !ctx->Color.ColorMask[0][0] ||
+ !ctx->Color.ColorMask[0][1] ||
+ !ctx->Color.ColorMask[0][2] ||
+ !ctx->Color.ColorMask[0][3] ||
+ ctx->Texture._EnabledUnits ||
+ ctx->FragmentProgram._Enabled ||
+ ctx->Color.BlendEnabled);
+}
+
+
+/**
+ * CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
+ */
+static GLboolean
+do_blit_copypixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *dst;
+ struct intel_region *src;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct gl_framebuffer *read_fb = ctx->ReadBuffer;
+ GLint orig_dstx;
+ GLint orig_dsty;
+ GLint orig_srcx;
+ GLint orig_srcy;
+ GLboolean flip = GL_FALSE;
+
+ if (type == GL_DEPTH || type == GL_STENCIL) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glCopyPixels() fallback: GL_DEPTH || GL_STENCIL\n");
+ return GL_FALSE;
+ }
+
+ /* Update draw buffer bounds */
+ _mesa_update_state(ctx);
+
+ /* Copypixels can be more than a straight copy. Ensure all the
+ * extra operations are disabled:
+ */
+ if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
+ ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
+ return GL_FALSE;
+
+ intel_prepare_render(intel);
+
+ dst = intel_drawbuf_region(intel);
+ src = copypix_src_region(intel, type);
+
+ if (!src || !dst)
+ return GL_FALSE;
+
+ intel_flush(&intel->ctx);
+
+ /* Clip to destination buffer. */
+ orig_dstx = dstx;
+ orig_dsty = dsty;
+ if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
+ fb->_Xmax, fb->_Ymax,
+ &dstx, &dsty, &width, &height))
+ goto out;
+ /* Adjust src coords for our post-clipped destination origin */
+ srcx += dstx - orig_dstx;
+ srcy += dsty - orig_dsty;
+
+ /* Clip to source buffer. */
+ orig_srcx = srcx;
+ orig_srcy = srcy;
+ if (!_mesa_clip_to_region(0, 0,
+ read_fb->Width, read_fb->Height,
+ &srcx, &srcy, &width, &height))
+ goto out;
+ /* Adjust dst coords for our post-clipped source origin */
+ dstx += srcx - orig_srcx;
+ dsty += srcy - orig_srcy;
+
+ /* Flip dest Y if it's a window system framebuffer. */
+ if (fb->Name == 0) {
+ /* copypixels to a window system framebuffer */
+ dsty = fb->Height - dsty - height;
+ flip = !flip;
+ }
+
+ /* Flip source Y if it's a window system framebuffer. */
+ if (read_fb->Name == 0) {
+ srcy = read_fb->Height - srcy - height;
+ flip = !flip;
+ }
+
+ if (!intel_region_copy(intel,
+ dst, 0, dstx, dsty,
+ src, 0, srcx, srcy,
+ width, height, flip,
+ ctx->Color.ColorLogicOpEnabled ?
+ ctx->Color.LogicOp : GL_COPY)) {
+ DBG("%s: blit failure\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+out:
+ intel_check_front_buffer_rendering(intel);
+
+ DBG("%s: success\n", __FUNCTION__);
+ return GL_TRUE;
+}
+
+
+void
+intelCopyPixels(GLcontext * ctx,
+ GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint destx, GLint desty, GLenum type)
+{
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
+ return;
+
+ /* this will use swrast if needed */
+ _mesa_meta_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
+}
diff --git a/src/mesa/drivers/dri/intel/intel_pixel_draw.c b/src/mesa/drivers/dri/intel/intel_pixel_draw.c
new file mode 100644
index 0000000000..a40b232fff
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel_draw.c
@@ -0,0 +1,279 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/mtypes.h"
+#include "main/teximage.h"
+#include "main/texenv.h"
+#include "main/texobj.h"
+#include "main/texstate.h"
+#include "main/texparam.h"
+#include "main/varray.h"
+#include "main/attrib.h"
+#include "main/enable.h"
+#include "main/buffers.h"
+#include "main/fbobject.h"
+#include "main/depth.h"
+#include "main/hash.h"
+#include "main/blend.h"
+#include "swrast/swrast.h"
+#include "drivers/common/meta.h"
+
+#include "intel_context.h"
+#include "intel_pixel.h"
+#include "intel_fbo.h"
+
+
+/** XXX compare perf of this vs. _mesa_meta_DrawPixels(STENCIL) */
+static GLboolean
+intel_stencil_drawpixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid *pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint texname, rb_name, fb_name, old_fb_name;
+ GLfloat vertices[4][2];
+ struct intel_renderbuffer *irb;
+ struct intel_renderbuffer *depth_irb;
+ struct gl_pixelstore_attrib old_unpack;
+ GLstencil *stencil_pixels;
+ int row, y1, y2;
+ GLint old_active_texture;
+ GLboolean rendering_to_fbo = ctx->DrawBuffer->Name != 0;
+
+ if (format != GL_STENCIL_INDEX)
+ return GL_FALSE;
+
+ /* If there's nothing to write, we're done. */
+ if (ctx->Stencil.WriteMask[0] == 0)
+ return GL_TRUE;
+
+ /* Can't do a per-bit writemask while treating stencil as rgba data. */
+ if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glDrawPixels(STENCIL_INDEX) fallback: "
+ "stencil mask enabled\n");
+ return GL_FALSE;
+ }
+
+ /* We don't support stencil testing/ops here */
+ if (ctx->Stencil._Enabled)
+ return GL_FALSE;
+
+ /* We use FBOs for our wrapping of the depthbuffer into a color
+ * destination.
+ */
+ if (!ctx->Extensions.EXT_framebuffer_object)
+ return GL_FALSE;
+
+ /* We're going to mess with texturing with no regard to existing texture
+ * state, so if there is some set up we have to bail.
+ */
+ if (ctx->Texture._EnabledUnits != 0) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glDrawPixels(STENCIL_INDEX) fallback: "
+ "texturing enabled\n");
+ return GL_FALSE;
+ }
+
+ /* Can't do textured DrawPixels with a fragment program, unless we were
+ * to generate a new program that sampled our texture and put the results
+ * in the fragment color before the user's program started.
+ */
+ if (ctx->FragmentProgram.Enabled) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glDrawPixels(STENCIL_INDEX) fallback: "
+ "fragment program enabled\n");
+ return GL_FALSE;
+ }
+
+ /* Check that we can load in a texture this big. */
+ if (width > (1 << (ctx->Const.MaxTextureLevels - 1)) ||
+ height > (1 << (ctx->Const.MaxTextureLevels - 1))) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "glDrawPixels(STENCIL_INDEX) fallback: "
+ "bitmap too large (%dx%d)\n",
+ width, height);
+ return GL_FALSE;
+ }
+
+ if (!ctx->Extensions.ARB_texture_non_power_of_two &&
+ (!is_power_of_two(width) || !is_power_of_two(height))) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr,
+ "glDrawPixels(GL_STENCIL_INDEX) fallback: NPOT texture\n");
+ return GL_FALSE;
+ }
+
+ _mesa_PushAttrib(GL_ENABLE_BIT | GL_TEXTURE_BIT |
+ GL_CURRENT_BIT | GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ _mesa_PushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
+ old_fb_name = ctx->DrawBuffer->Name;
+ old_active_texture = ctx->Texture.CurrentUnit;
+
+ _mesa_Disable(GL_POLYGON_STIPPLE);
+ _mesa_Disable(GL_DEPTH_TEST);
+ _mesa_Disable(GL_STENCIL_TEST);
+
+ /* Unpack the supplied stencil values into a ubyte buffer. */
+ assert(sizeof(GLstencil) == sizeof(GLubyte));
+ stencil_pixels = malloc(width * height * sizeof(GLstencil));
+ for (row = 0; row < height; row++) {
+ GLvoid *source = _mesa_image_address2d(unpack, pixels,
+ width, height,
+ GL_COLOR_INDEX, type,
+ row, 0);
+ _mesa_unpack_stencil_span(ctx, width, GL_UNSIGNED_BYTE,
+ stencil_pixels +
+ row * width * sizeof(GLstencil),
+ type, source, unpack, ctx->_ImageTransferState);
+ }
+
+ /* Take the current depth/stencil renderbuffer, and make a new one wrapping
+ * it which will be treated as GL_RGBA8 so we can render to it as a color
+ * buffer.
+ */
+ depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
+ irb = intel_create_renderbuffer(MESA_FORMAT_ARGB8888);
+ irb->Base.Width = depth_irb->Base.Width;
+ irb->Base.Height = depth_irb->Base.Height;
+ intel_renderbuffer_set_region(intel, irb, depth_irb->region);
+
+ /* Create a name for our renderbuffer, which lets us use other mesa
+ * rb functions for convenience.
+ */
+ _mesa_GenRenderbuffersEXT(1, &rb_name);
+ irb->Base.RefCount++;
+ _mesa_HashInsert(ctx->Shared->RenderBuffers, rb_name, &irb->Base);
+
+ /* Bind the new renderbuffer to the color attachment point. */
+ _mesa_GenFramebuffersEXT(1, &fb_name);
+ _mesa_BindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_name);
+ _mesa_FramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT,
+ GL_COLOR_ATTACHMENT0_EXT,
+ GL_RENDERBUFFER_EXT,
+ rb_name);
+ /* Choose to render to the color attachment. */
+ _mesa_DrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
+
+ _mesa_DepthMask(GL_FALSE);
+ _mesa_ColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_TRUE);
+
+ _mesa_ActiveTextureARB(GL_TEXTURE0_ARB);
+ _mesa_Enable(GL_TEXTURE_2D);
+ _mesa_GenTextures(1, &texname);
+ _mesa_BindTexture(GL_TEXTURE_2D, texname);
+ _mesa_TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ _mesa_TexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ _mesa_TexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
+ old_unpack = ctx->Unpack;
+ ctx->Unpack = ctx->DefaultPacking;
+ _mesa_TexImage2D(GL_TEXTURE_2D, 0, GL_INTENSITY, width, height, 0,
+ GL_RED, GL_UNSIGNED_BYTE, stencil_pixels);
+ ctx->Unpack = old_unpack;
+ free(stencil_pixels);
+
+ meta_set_passthrough_transform(&intel->meta);
+
+ /* Since we're rendering to the framebuffer as if it was an FBO,
+ * if it's the window system we have to flip the coordinates.
+ */
+ if (rendering_to_fbo) {
+ y1 = y;
+ y2 = y + height * ctx->Pixel.ZoomY;
+ } else {
+ y1 = irb->Base.Height - (y + height * ctx->Pixel.ZoomY);
+ y2 = irb->Base.Height - y;
+ }
+ vertices[0][0] = x;
+ vertices[0][1] = y1;
+ vertices[1][0] = x + width * ctx->Pixel.ZoomX;
+ vertices[1][1] = y1;
+ vertices[2][0] = x + width * ctx->Pixel.ZoomX;
+ vertices[2][1] = y2;
+ vertices[3][0] = x;
+ vertices[3][1] = y2;
+
+ _mesa_VertexPointer(2, GL_FLOAT, 2 * sizeof(GLfloat), &vertices);
+ _mesa_Enable(GL_VERTEX_ARRAY);
+ meta_set_default_texrect(&intel->meta);
+
+ _mesa_DrawArrays(GL_TRIANGLE_FAN, 0, 4);
+
+ meta_restore_texcoords(&intel->meta);
+ meta_restore_transform(&intel->meta);
+
+ _mesa_ActiveTextureARB(GL_TEXTURE0_ARB + old_active_texture);
+ _mesa_BindFramebufferEXT(GL_FRAMEBUFFER_EXT, old_fb_name);
+
+ _mesa_PopClientAttrib();
+ _mesa_PopAttrib();
+
+ _mesa_DeleteTextures(1, &texname);
+ _mesa_DeleteFramebuffersEXT(1, &fb_name);
+ _mesa_DeleteRenderbuffersEXT(1, &rb_name);
+
+ return GL_TRUE;
+}
+
+void
+intelDrawPixels(GLcontext * ctx,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format,
+ GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid * pixels)
+{
+#if 0
+ /* XXX this function doesn't seem to work reliably even when all
+ * the pre-requisite conditions are met.
+ * Note that this function is never hit with conform.
+ * Fall back to swrast because even the _mesa_meta_DrawPixels() approach
+ * isn't working because of an apparent stencil bug.
+ */
+ if (intel_stencil_drawpixels(ctx, x, y, width, height, format, type,
+ unpack, pixels))
+ return;
+#else
+ (void) intel_stencil_drawpixels; /* silence warning */
+ if (format == GL_STENCIL_INDEX) {
+ _swrast_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+ return;
+ }
+#endif
+
+ _mesa_meta_DrawPixels(ctx, x, y, width, height, format, type,
+ unpack, pixels);
+}
diff --git a/src/mesa/drivers/dri/intel/intel_pixel_read.c b/src/mesa/drivers/dri/intel/intel_pixel_read.c
new file mode 100644
index 0000000000..21d2a7a93e
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_pixel_read.c
@@ -0,0 +1,207 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/enums.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/image.h"
+#include "main/bufferobj.h"
+#include "main/state.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_regions.h"
+#include "intel_pixel.h"
+#include "intel_buffer_objects.h"
+
+/* For many applications, the new ability to pull the source buffers
+ * back out of the GTT and then do the packing/conversion operations
+ * in software will be as much of an improvement as trying to get the
+ * blitter and/or texture engine to do the work.
+ *
+ * This step is gated on private backbuffers.
+ *
+ * Obviously the frontbuffer can't be pulled back, so that is either
+ * an argument for blit/texture readpixels, or for blitting to a
+ * temporary and then pulling that back.
+ *
+ * When the destination is a pbo, however, it's not clear if it is
+ * ever going to be pulled to main memory (though the access param
+ * will be a good hint). So it sounds like we do want to be able to
+ * choose between blit/texture implementation on the gpu and pullback
+ * and cpu-based copying.
+ *
+ * Unless you can magically turn client memory into a PBO for the
+ * duration of this call, there will be a cpu-based copying step in
+ * any case.
+ */
+
+static GLboolean
+do_blit_readpixels(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *src = intel_readbuf_region(intel);
+ struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
+ GLuint dst_offset;
+ GLuint rowLength;
+ drm_intel_bo *dst_buffer;
+ GLboolean all;
+ GLint dst_x, dst_y;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s\n", __FUNCTION__);
+
+ if (!src)
+ return GL_FALSE;
+
+ if (!_mesa_is_bufferobj(pack->BufferObj)) {
+ /* PBO only for now:
+ */
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s - not PBO\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+
+ if (ctx->_ImageTransferState ||
+ !intel_check_blit_format(src, format, type)) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s - bad format for blit\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s: bad packing params\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ if (pack->RowLength > 0)
+ rowLength = pack->RowLength;
+ else
+ rowLength = width;
+
+ if (pack->Invert) {
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+ else {
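+      /* A window system read buffer is y-inverted relative to the user's
+       * view; requesting a negative destination pitch here (the source y is
+       * also flipped further down) makes the copy land in the PBO in GL's
+       * bottom-up row order.
+       */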
+ if (ctx->ReadBuffer->Name == 0)
+ rowLength = -rowLength;
+ }
+
+ dst_offset = (GLintptr) _mesa_image_address(2, pack, pixels, width, height,
+ format, type, 0, 0, 0);
+
+ if (!_mesa_clip_copytexsubimage(ctx,
+ &dst_x, &dst_y,
+ &x, &y,
+ &width, &height)) {
+ return GL_TRUE;
+ }
+
+ intel_prepare_render(intel);
+
+ all = (width * height * src->cpp == dst->Base.Size &&
+ x == 0 && dst_offset == 0);
+
+ dst_x = 0;
+ dst_y = 0;
+
+ dst_buffer = intel_bufferobj_buffer(intel, dst,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+
+ if (ctx->ReadBuffer->Name == 0)
+ y = ctx->ReadBuffer->Height - (y + height);
+
+ if (!intelEmitCopyBlit(intel,
+ src->cpp,
+ src->pitch, src->buffer, 0, src->tiling,
+ rowLength, dst_buffer, dst_offset, GL_FALSE,
+ x, y,
+ dst_x, dst_y,
+ width, height,
+ GL_COPY)) {
+ return GL_FALSE;
+ }
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s - DONE\n", __FUNCTION__);
+
+ return GL_TRUE;
+}
+
+void
+intelReadPixels(GLcontext * ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLboolean dirty;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ intel_flush(ctx);
+
+ /* glReadPixels() won't dirty the front buffer, so reset the dirty
+ * flag after calling intel_prepare_render(). */
+ dirty = intel->front_buffer_dirty;
+ intel_prepare_render(intel);
+ intel->front_buffer_dirty = dirty;
+
+ if (do_blit_readpixels
+ (ctx, x, y, width, height, format, type, pack, pixels))
+ return;
+
+ if (INTEL_DEBUG & DEBUG_PIXEL)
+ printf("%s: fallback to swrast\n", __FUNCTION__);
+
+ /* Update Mesa state before calling down into _swrast_ReadPixels, as
+ * the spans code requires the computed buffer states to be up to date,
+ * but _swrast_ReadPixels only updates Mesa state after setting up
+ * the spans code.
+ */
+
+ if (ctx->NewState)
+ _mesa_update_state(ctx);
+
+ _swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
+
+ /* There's an intel_prepare_render() call in intelSpanRenderStart(). */
+ intel->front_buffer_dirty = dirty;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_reg.h b/src/mesa/drivers/dri/intel/intel_reg.h
new file mode 100644
index 0000000000..c1a281f261
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_reg.h
@@ -0,0 +1,248 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#define CMD_MI (0x0 << 29)
+#define CMD_2D (0x2 << 29)
+#define CMD_3D (0x3 << 29)
+
+#define MI_NOOP (CMD_MI | 0)
+
+#define MI_BATCH_BUFFER_END (CMD_MI | 0xA << 23)
+
+#define MI_FLUSH (CMD_MI | (4 << 23))
+#define FLUSH_MAP_CACHE (1 << 0)
+#define INHIBIT_FLUSH_RENDER_CACHE (1 << 2)
+
+/* Stalls command execution waiting for the given events to have occurred. */
+#define MI_WAIT_FOR_EVENT (CMD_MI | (0x3 << 23))
+#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
+
+/* p189 */
+#define _3DSTATE_LOAD_STATE_IMMEDIATE_1 (CMD_3D | (0x1d<<24) | (0x04<<16))
+#define I1_LOAD_S(n) (1<<(4+n))
+
+#define _3DSTATE_DRAWRECT_INFO (CMD_3D | (0x1d<<24) | (0x80<<16) | 0x3)
+#define _3DSTATE_DRAWRECT_INFO_I965 (CMD_3D | (3 << 27) | (1 << 24) | 0x2)
+
+/** @{
+ *
+ * PIPE_CONTROL operation, a combined MI_FLUSH and register write with
+ * additional flushing control.
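+ *
+ * A hypothetical emit sequence (a sketch only, assuming the OUT_BATCH()
+ * macro from intel_batchbuffer.h and a four-dword packet: command, flags,
+ * write address, write data):
+ *
+ *    OUT_BATCH(_3DSTATE_PIPE_CONTROL);
+ *    OUT_BATCH(PIPE_CONTROL_INSTRUCTION_FLUSH |
+ *              PIPE_CONTROL_WRITE_FLUSH |
+ *              PIPE_CONTROL_NO_WRITE);
+ *    OUT_BATCH(0);              (unused write address)
+ *    OUT_BATCH(0);              (unused write data)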
+ */
+#define _3DSTATE_PIPE_CONTROL (CMD_3D | (3 << 27) | (2 << 24) | 2)
+#define PIPE_CONTROL_NO_WRITE (0 << 14)
+#define PIPE_CONTROL_WRITE_IMMEDIATE (1 << 14)
+#define PIPE_CONTROL_WRITE_DEPTH_COUNT (2 << 14)
+#define PIPE_CONTROL_WRITE_TIMESTAMP (3 << 14)
+#define PIPE_CONTROL_DEPTH_STALL (1 << 13)
+#define PIPE_CONTROL_WRITE_FLUSH (1 << 12)
+#define PIPE_CONTROL_INSTRUCTION_FLUSH (1 << 11)
+#define PIPE_CONTROL_INTERRUPT_ENABLE (1 << 8)
+#define PIPE_CONTROL_PPGTT_WRITE (0 << 2)
+#define PIPE_CONTROL_GLOBAL_GTT_WRITE (1 << 2)
+
+/** @} */
+
+/** @{
+ * 915 definitions
+ *
+ * 915 documents say that bits 31:28 and 1 are "undefined, must be zero."
+ */
+#define S0_VB_OFFSET_MASK 0x0ffffffc
+#define S0_AUTO_CACHE_INV_DISABLE (1<<0)
+/** @} */
+
+/** @{
+ * 830 definitions
+ */
+#define S0_VB_OFFSET_MASK_830 0xffffff80
+#define S0_VB_PITCH_SHIFT_830 1
+#define S0_VB_ENABLE_830 (1<<0)
+/** @} */
+
+#define S1_VERTEX_WIDTH_SHIFT 24
+#define S1_VERTEX_WIDTH_MASK (0x3f<<24)
+#define S1_VERTEX_PITCH_SHIFT 16
+#define S1_VERTEX_PITCH_MASK (0x3f<<16)
+
+#define TEXCOORDFMT_2D 0x0
+#define TEXCOORDFMT_3D 0x1
+#define TEXCOORDFMT_4D 0x2
+#define TEXCOORDFMT_1D 0x3
+#define TEXCOORDFMT_2D_16 0x4
+#define TEXCOORDFMT_4D_16 0x5
+#define TEXCOORDFMT_NOT_PRESENT 0xf
+#define S2_TEXCOORD_FMT0_MASK 0xf
+#define S2_TEXCOORD_FMT1_SHIFT 4
+#define S2_TEXCOORD_FMT(unit, type) ((type)<<(unit*4))
+#define S2_TEXCOORD_NONE (~0)
+#define S2_TEX_COUNT_SHIFT_830 12
+#define S2_VERTEX_1_WIDTH_SHIFT_830 0
+#define S2_VERTEX_0_WIDTH_SHIFT_830 6
+/* S3 not interesting */
+
+#define S4_POINT_WIDTH_SHIFT 23
+#define S4_POINT_WIDTH_MASK (0x1ff<<23)
+#define S4_LINE_WIDTH_SHIFT 19
+#define S4_LINE_WIDTH_ONE (0x2<<19)
+#define S4_LINE_WIDTH_MASK (0xf<<19)
+#define S4_FLATSHADE_ALPHA (1<<18)
+#define S4_FLATSHADE_FOG (1<<17)
+#define S4_FLATSHADE_SPECULAR (1<<16)
+#define S4_FLATSHADE_COLOR (1<<15)
+#define S4_CULLMODE_BOTH (0<<13)
+#define S4_CULLMODE_NONE (1<<13)
+#define S4_CULLMODE_CW (2<<13)
+#define S4_CULLMODE_CCW (3<<13)
+#define S4_CULLMODE_MASK (3<<13)
+#define S4_VFMT_POINT_WIDTH (1<<12)
+#define S4_VFMT_SPEC_FOG (1<<11)
+#define S4_VFMT_COLOR (1<<10)
+#define S4_VFMT_DEPTH_OFFSET (1<<9)
+#define S4_VFMT_XYZ (1<<6)
+#define S4_VFMT_XYZW (2<<6)
+#define S4_VFMT_XY (3<<6)
+#define S4_VFMT_XYW (4<<6)
+#define S4_VFMT_XYZW_MASK (7<<6)
+#define S4_FORCE_DEFAULT_DIFFUSE (1<<5)
+#define S4_FORCE_DEFAULT_SPECULAR (1<<4)
+#define S4_LOCAL_DEPTH_OFFSET_ENABLE (1<<3)
+#define S4_VFMT_FOG_PARAM (1<<2)
+#define S4_SPRITE_POINT_ENABLE (1<<1)
+#define S4_LINE_ANTIALIAS_ENABLE (1<<0)
+
+#define S4_VFMT_MASK (S4_VFMT_POINT_WIDTH | \
+ S4_VFMT_SPEC_FOG | \
+ S4_VFMT_COLOR | \
+ S4_VFMT_DEPTH_OFFSET | \
+ S4_VFMT_XYZW_MASK | \
+ S4_VFMT_FOG_PARAM)
+
+
+#define S5_WRITEDISABLE_ALPHA (1<<31)
+#define S5_WRITEDISABLE_RED (1<<30)
+#define S5_WRITEDISABLE_GREEN (1<<29)
+#define S5_WRITEDISABLE_BLUE (1<<28)
+#define S5_WRITEDISABLE_MASK (0xf<<28)
+#define S5_FORCE_DEFAULT_POINT_SIZE (1<<27)
+#define S5_LAST_PIXEL_ENABLE (1<<26)
+#define S5_GLOBAL_DEPTH_OFFSET_ENABLE (1<<25)
+#define S5_FOG_ENABLE (1<<24)
+#define S5_STENCIL_REF_SHIFT 16
+#define S5_STENCIL_REF_MASK (0xff<<16)
+#define S5_STENCIL_TEST_FUNC_SHIFT 13
+#define S5_STENCIL_TEST_FUNC_MASK (0x7<<13)
+#define S5_STENCIL_FAIL_SHIFT 10
+#define S5_STENCIL_FAIL_MASK (0x7<<10)
+#define S5_STENCIL_PASS_Z_FAIL_SHIFT 7
+#define S5_STENCIL_PASS_Z_FAIL_MASK (0x7<<7)
+#define S5_STENCIL_PASS_Z_PASS_SHIFT 4
+#define S5_STENCIL_PASS_Z_PASS_MASK (0x7<<4)
+#define S5_STENCIL_WRITE_ENABLE (1<<3)
+#define S5_STENCIL_TEST_ENABLE (1<<2)
+#define S5_COLOR_DITHER_ENABLE (1<<1)
+#define S5_LOGICOP_ENABLE (1<<0)
+
+
+#define S6_ALPHA_TEST_ENABLE (1<<31)
+#define S6_ALPHA_TEST_FUNC_SHIFT 28
+#define S6_ALPHA_TEST_FUNC_MASK (0x7<<28)
+#define S6_ALPHA_REF_SHIFT 20
+#define S6_ALPHA_REF_MASK (0xff<<20)
+#define S6_DEPTH_TEST_ENABLE (1<<19)
+#define S6_DEPTH_TEST_FUNC_SHIFT 16
+#define S6_DEPTH_TEST_FUNC_MASK (0x7<<16)
+#define S6_CBUF_BLEND_ENABLE (1<<15)
+#define S6_CBUF_BLEND_FUNC_SHIFT 12
+#define S6_CBUF_BLEND_FUNC_MASK (0x7<<12)
+#define S6_CBUF_SRC_BLEND_FACT_SHIFT 8
+#define S6_CBUF_SRC_BLEND_FACT_MASK (0xf<<8)
+#define S6_CBUF_DST_BLEND_FACT_SHIFT 4
+#define S6_CBUF_DST_BLEND_FACT_MASK (0xf<<4)
+#define S6_DEPTH_WRITE_ENABLE (1<<3)
+#define S6_COLOR_WRITE_ENABLE (1<<2)
+#define S6_TRISTRIP_PV_SHIFT 0
+#define S6_TRISTRIP_PV_MASK (0x3<<0)
+
+#define S7_DEPTH_OFFSET_CONST_MASK ~0
+
+/* p143 */
+#define _3DSTATE_BUF_INFO_CMD (CMD_3D | (0x1d<<24) | (0x8e<<16) | 1)
+/* Dword 1 */
+#define BUF_3D_ID_COLOR_BACK (0x3<<24)
+#define BUF_3D_ID_DEPTH (0x7<<24)
+#define BUF_3D_USE_FENCE (1<<23)
+#define BUF_3D_TILED_SURFACE (1<<22)
+#define BUF_3D_TILE_WALK_X 0
+#define BUF_3D_TILE_WALK_Y (1<<21)
+#define BUF_3D_PITCH(x) (((x)/4)<<2)
+/* Dword 2 */
+#define BUF_3D_ADDR(x) ((x) & ~0x3)
+
+/* Primitive dispatch on 830-945 */
+#define _3DPRIMITIVE (CMD_3D | (0x1f << 24))
+#define PRIM_INDIRECT (1<<23)
+#define PRIM_INLINE (0<<23)
+#define PRIM_INDIRECT_SEQUENTIAL (0<<17)
+#define PRIM_INDIRECT_ELTS (1<<17)
+
+#define PRIM3D_TRILIST (0x0<<18)
+#define PRIM3D_TRISTRIP (0x1<<18)
+#define PRIM3D_TRISTRIP_RVRSE (0x2<<18)
+#define PRIM3D_TRIFAN (0x3<<18)
+#define PRIM3D_POLY (0x4<<18)
+#define PRIM3D_LINELIST (0x5<<18)
+#define PRIM3D_LINESTRIP (0x6<<18)
+#define PRIM3D_RECTLIST (0x7<<18)
+#define PRIM3D_POINTLIST (0x8<<18)
+#define PRIM3D_DIB (0x9<<18)
+#define PRIM3D_MASK (0x1f<<18)
+
+#define XY_SETUP_BLT_CMD (CMD_2D | (0x01 << 22) | 6)
+
+#define XY_COLOR_BLT_CMD (CMD_2D | (0x50 << 22) | 4)
+
+#define XY_SRC_COPY_BLT_CMD (CMD_2D | (0x53 << 22) | 6)
+
+#define XY_TEXT_IMMEDIATE_BLIT_CMD (CMD_2D | (0x31 << 22))
+# define XY_TEXT_BYTE_PACKED (1 << 16)
+
+/* BR00 */
+#define XY_BLT_WRITE_ALPHA (1 << 21)
+#define XY_BLT_WRITE_RGB (1 << 20)
+#define XY_SRC_TILED (1 << 15)
+#define XY_DST_TILED (1 << 11)
+
+/* BR13 */
+#define BR13_8 (0x0 << 24)
+#define BR13_565 (0x1 << 24)
+#define BR13_8888 (0x3 << 24)
+
+#define FENCE_LINEAR 0
+#define FENCE_XMAJOR 1
+#define FENCE_YMAJOR 2
diff --git a/src/mesa/drivers/dri/intel/intel_regions.c b/src/mesa/drivers/dri/intel/intel_regions.c
new file mode 100644
index 0000000000..fe4de18960
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_regions.c
@@ -0,0 +1,507 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Provide additional functionality on top of bufmgr buffers:
+ * - 2d semantics and blit operations
+ * - refcounting of buffers for multiple images in a buffer.
+ * - refcounting of buffer mappings.
+ * - some logic for moving the buffers to the best memory pools for
+ * given operations.
+ *
+ * Most of this is to make it easier to implement the fixed-layout
+ * mipmap tree required by intel hardware in the face of GL's
+ * programming interface where each image can be specified in random
+ * order and it isn't clear what layout the tree should have until the
+ * last moment.
+ */
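+
+/* Typical lifecycle, as a rough illustrative sketch (not taken from any one
+ * caller; a cpp of 4 assumes a 32-bit-per-pixel format):
+ *
+ *    struct intel_region *r =
+ *       intel_region_alloc(intel, I915_TILING_NONE, 4, width, height,
+ *                          GL_FALSE);
+ *    GLubyte *ptr = intel_region_map(intel, r);   // refcounted mapping
+ *    ... CPU access through ptr ...
+ *    intel_region_unmap(intel, r);
+ *    intel_region_release(&r);                    // drops the refcount
+ */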
+
+#include <sys/ioctl.h>
+#include <errno.h>
+
+#include "main/hash.h"
+#include "intel_context.h"
+#include "intel_regions.h"
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "intel_bufmgr.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_REGION
+
+/* This should be set to the maximum backtrace size desired.
+ * Set it to 0 to disable backtrace debugging.
+ */
+#define DEBUG_BACKTRACE_SIZE 0
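+/* For example, "#define DEBUG_BACKTRACE_SIZE 16" (an illustrative value)
+ * makes each _DBG() message include a backtrace of up to 16 frames.
+ */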
+
+#if DEBUG_BACKTRACE_SIZE == 0
+/* Use the standard debug output */
+#define _DBG(...) DBG(__VA_ARGS__)
+#else
+/* Use backtracing debug output */
+#define _DBG(...) {debug_backtrace(); DBG(__VA_ARGS__);}
+
+/* Backtracing debug support */
+#include <execinfo.h>
+
+static void
+debug_backtrace(void)
+{
+ void *trace[DEBUG_BACKTRACE_SIZE];
+ char **strings = NULL;
+ int traceSize;
+ register int i;
+
+ traceSize = backtrace(trace, DEBUG_BACKTRACE_SIZE);
+ strings = backtrace_symbols(trace, traceSize);
+ if (strings == NULL) {
+ DBG("no backtrace:");
+ return;
+ }
+
+ /* Spit out all the strings with a colon separator. Ignore
+ * the first, since we don't really care about the call
+ * to debug_backtrace() itself. Skip until the final "/" in
+ * the trace to avoid really long lines.
+ */
+ for (i = 1; i < traceSize; i++) {
+ char *p = strings[i], *slash = strings[i];
+ while (*p) {
+ if (*p++ == '/') {
+ slash = p;
+ }
+ }
+
+ DBG("%s:", slash);
+ }
+
+ /* Free up the memory, and we're done */
+ free(strings);
+}
+
+#endif
+
+
+
+/* XXX: Thread safety?
+ */
+GLubyte *
+intel_region_map(struct intel_context *intel, struct intel_region *region)
+{
+ intel_flush(&intel->ctx);
+
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!region->map_refcount++) {
+ if (region->pbo)
+ intel_region_cow(intel, region);
+
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_map_gtt(region->buffer);
+ else
+ drm_intel_bo_map(region->buffer, GL_TRUE);
+ region->map = region->buffer->virtual;
+ }
+
+ return region->map;
+}
+
+void
+intel_region_unmap(struct intel_context *intel, struct intel_region *region)
+{
+ _DBG("%s %p\n", __FUNCTION__, region);
+ if (!--region->map_refcount) {
+ if (region->tiling != I915_TILING_NONE)
+ drm_intel_gem_bo_unmap_gtt(region->buffer);
+ else
+ drm_intel_bo_unmap(region->buffer);
+ region->map = NULL;
+ }
+}
+
+static struct intel_region *
+intel_region_alloc_internal(struct intel_context *intel,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ drm_intel_bo *buffer)
+{
+ struct intel_region *region;
+
+ if (buffer == NULL) {
+ _DBG("%s <-- NULL\n", __FUNCTION__);
+ return NULL;
+ }
+
+ region = calloc(sizeof(*region), 1);
+ region->cpp = cpp;
+ region->width = width;
+ region->height = height;
+ region->pitch = pitch;
+ region->refcount = 1;
+ region->buffer = buffer;
+
+ /* Default to no tiling */
+ region->tiling = I915_TILING_NONE;
+
+ _DBG("%s <-- %p\n", __FUNCTION__, region);
+ return region;
+}
+
+struct intel_region *
+intel_region_alloc(struct intel_context *intel,
+ uint32_t tiling,
+ GLuint cpp, GLuint width, GLuint height,
+ GLboolean expect_accelerated_upload)
+{
+ drm_intel_bo *buffer;
+ struct intel_region *region;
+ unsigned long flags = 0;
+ unsigned long aligned_pitch;
+
+ if (expect_accelerated_upload)
+ flags |= BO_ALLOC_FOR_RENDER;
+
+ buffer = drm_intel_bo_alloc_tiled(intel->bufmgr, "region",
+ width, height, cpp,
+ &tiling, &aligned_pitch, flags);
+
+ region = intel_region_alloc_internal(intel, cpp, width, height,
+ aligned_pitch / cpp, buffer);
+ region->tiling = tiling;
+
+ return region;
+}
+
+struct intel_region *
+intel_region_alloc_for_handle(struct intel_context *intel,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ GLuint handle, const char *name)
+{
+ struct intel_region *region, *dummy;
+ drm_intel_bo *buffer;
+ int ret;
+ uint32_t bit_6_swizzle;
+
+ region = _mesa_HashLookup(intel->intelScreen->named_regions, handle);
+ if (region != NULL) {
+ dummy = NULL;
+ if (region->width != width || region->height != height ||
+ region->cpp != cpp || region->pitch != pitch) {
+ fprintf(stderr,
+ "Region for name %d already exists but is not compatible\n",
+ handle);
+ return NULL;
+ }
+ intel_region_reference(&dummy, region);
+ return dummy;
+ }
+
+ buffer = intel_bo_gem_create_from_name(intel->bufmgr, name, handle);
+
+ region = intel_region_alloc_internal(intel, cpp,
+ width, height, pitch, buffer);
+ if (region == NULL)
+ return region;
+
+ ret = drm_intel_bo_get_tiling(region->buffer, &region->tiling,
+ &bit_6_swizzle);
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't get tiling of buffer %d (%s): %s\n",
+ handle, name, strerror(-ret));
+ intel_region_release(&region);
+ return NULL;
+ }
+
+ region->name = handle;
+ region->screen = intel->intelScreen;
+ _mesa_HashInsert(intel->intelScreen->named_regions, handle, region);
+
+ return region;
+}
+
+void
+intel_region_reference(struct intel_region **dst, struct intel_region *src)
+{
+ if (src)
+ _DBG("%s %p %d\n", __FUNCTION__, src, src->refcount);
+
+ assert(*dst == NULL);
+ if (src) {
+ src->refcount++;
+ *dst = src;
+ }
+}
+
+void
+intel_region_release(struct intel_region **region_handle)
+{
+ struct intel_region *region = *region_handle;
+
+ if (region == NULL) {
+ _DBG("%s NULL\n", __FUNCTION__);
+ return;
+ }
+
+ _DBG("%s %p %d\n", __FUNCTION__, region, region->refcount - 1);
+
+ ASSERT(region->refcount > 0);
+ region->refcount--;
+
+ if (region->refcount == 0) {
+ assert(region->map_refcount == 0);
+
+ if (region->pbo)
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ drm_intel_bo_unreference(region->buffer);
+
+ if (region->name > 0)
+ _mesa_HashRemove(region->screen->named_regions, region->name);
+
+ free(region);
+ }
+ *region_handle = NULL;
+}
+
+/*
+ * XXX Move this into core Mesa?
+ */
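+/* Note that dst_pitch and src_pitch are given in pixels; they are scaled
+ * by cpp below before being used as byte strides.
+ */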
+void
+_mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ const GLubyte * src,
+ GLuint src_pitch, GLuint src_x, GLuint src_y)
+{
+ GLuint i;
+
+ dst_pitch *= cpp;
+ src_pitch *= cpp;
+ dst += dst_x * cpp;
+ src += src_x * cpp;
+ dst += dst_y * dst_pitch;
+ src += src_y * src_pitch;
+ width *= cpp;
+
+ if (width == dst_pitch && width == src_pitch)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_pitch;
+ src += src_pitch;
+ }
+ }
+}
+
+
+/* Upload data to a rectangular sub-region. Lots of choices for how to do this:
+ *
+ * - memcpy by span to current destination
+ * - upload data as new buffer and blit
+ *
+ * Currently always memcpy.
+ */
+void
+intel_region_data(struct intel_context *intel,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ const void *src, GLuint src_pitch,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height)
+{
+ _DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intel, dst);
+ else
+ intel_region_cow(intel, dst);
+ }
+
+ intel_prepare_render(intel);
+
+ _mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
+ dst->cpp,
+ dst->pitch,
+ dstx, dsty, width, height, src, src_pitch, srcx, srcy);
+
+ intel_region_unmap(intel, dst);
+}
+
+/* Copy rectangular sub-regions. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+GLboolean
+intel_region_copy(struct intel_context *intel,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height,
+ GLboolean flip,
+ GLenum logicop)
+{
+ uint32_t src_pitch = src->pitch;
+
+ _DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return GL_FALSE;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intel, dst);
+ else
+ intel_region_cow(intel, dst);
+ }
+
+ assert(src->cpp == dst->cpp);
+
+ if (flip)
+ src_pitch = -src_pitch;
+
+ return intelEmitCopyBlit(intel,
+ dst->cpp,
+ src_pitch, src->buffer, src_offset, src->tiling,
+ dst->pitch, dst->buffer, dst_offset, dst->tiling,
+ srcx, srcy, dstx, dsty, width, height,
+ logicop);
+}
+
+/* Attach to a pbo, discarding our data. Effectively zero-copy upload
+ * the pbo's data.
+ */
+void
+intel_region_attach_pbo(struct intel_context *intel,
+ struct intel_region *region,
+ struct intel_buffer_object *pbo)
+{
+ drm_intel_bo *buffer;
+
+ if (region->pbo == pbo)
+ return;
+
+ _DBG("%s %p %p\n", __FUNCTION__, region, pbo);
+
+ /* If there is already a pbo attached, break the cow tie now.
+ * Don't call intel_region_release_pbo() as that would
+ * unnecessarily allocate a new buffer we would have to immediately
+ * discard.
+ */
+ if (region->pbo) {
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ }
+
+ if (region->buffer) {
+ drm_intel_bo_unreference(region->buffer);
+ region->buffer = NULL;
+ }
+
+ /* make sure pbo has a buffer of its own */
+ buffer = intel_bufferobj_buffer(intel, pbo, INTEL_WRITE_FULL);
+
+ region->pbo = pbo;
+ region->pbo->region = region;
+ drm_intel_bo_reference(buffer);
+ region->buffer = buffer;
+ region->tiling = I915_TILING_NONE;
+}
+
+
+/* Break the COW tie to the pbo and allocate a new buffer.
+ * The pbo gets to keep the data.
+ */
+void
+intel_region_release_pbo(struct intel_context *intel,
+ struct intel_region *region)
+{
+ _DBG("%s %p\n", __FUNCTION__, region);
+ assert(region->buffer == region->pbo->buffer);
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ drm_intel_bo_unreference(region->buffer);
+ region->buffer = NULL;
+
+ region->buffer = drm_intel_bo_alloc(intel->bufmgr, "region",
+ region->pitch * region->cpp *
+ region->height,
+ 64);
+}
+
+/* Break the COW tie to the pbo. Both the pbo and the region end up
+ * with a copy of the data.
+ */
+void
+intel_region_cow(struct intel_context *intel, struct intel_region *region)
+{
+ struct intel_buffer_object *pbo = region->pbo;
+ GLboolean ok;
+
+ intel_region_release_pbo(intel, region);
+
+ assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
+
+ _DBG("%s %p (%d bytes)\n", __FUNCTION__, region, pbo->Base.Size);
+
+ /* Now blit from the texture buffer to the new buffer:
+ */
+
+ intel_prepare_render(intel);
+ ok = intelEmitCopyBlit(intel,
+ region->cpp,
+ region->pitch, pbo->buffer, 0, region->tiling,
+ region->pitch, region->buffer, 0, region->tiling,
+ 0, 0, 0, 0,
+ region->pitch, region->height,
+ GL_COPY);
+ assert(ok);
+}
+
+drm_intel_bo *
+intel_region_buffer(struct intel_context *intel,
+ struct intel_region *region, GLuint flag)
+{
+ if (region->pbo) {
+ if (flag == INTEL_WRITE_PART)
+ intel_region_cow(intel, region);
+ else if (flag == INTEL_WRITE_FULL)
+ intel_region_release_pbo(intel, region);
+ }
+
+ return region->buffer;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_regions.h b/src/mesa/drivers/dri/intel/intel_regions.h
new file mode 100644
index 0000000000..6bbed32f2a
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_regions.h
@@ -0,0 +1,160 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTEL_REGIONS_H
+#define INTEL_REGIONS_H
+
+/** @file intel_regions.h
+ *
+ * Structure definitions and prototypes for intel_region handling,
+ * which is the basic structure for rectangular collections of pixels
+ * stored in a drm_intel_bo.
+ */
+
+#include <xf86drm.h>
+
+#include "main/mtypes.h"
+#include "intel_bufmgr.h"
+
+struct intel_context;
+struct intel_buffer_object;
+
+/**
+ * A layer on top of the bufmgr buffers that adds a few useful things:
+ *
+ * - Refcounting for local buffer references.
+ * - Refcounting for buffer maps
+ * - Buffer dimensions - pitch and height.
+ * - Blitter commands for copying 2D regions between buffers. (really???)
+ */
+struct intel_region
+{
+ drm_intel_bo *buffer; /**< buffer manager's buffer */
+ GLuint refcount; /**< Reference count for region */
+ GLuint cpp; /**< bytes per pixel */
+ GLuint width; /**< in pixels */
+ GLuint height; /**< in pixels */
+ GLuint pitch; /**< in pixels */
+ GLubyte *map; /**< only non-NULL when region is actually mapped */
+ GLuint map_refcount; /**< Reference count for mapping */
+
+ GLuint draw_offset; /**< Offset of drawing address within the region */
+ GLuint draw_x, draw_y; /**< Offset of drawing within the region */
+
+ uint32_t tiling; /**< Which tiling mode the region is in */
+ struct intel_buffer_object *pbo; /* zero-copy uploads */
+
+ uint32_t name; /**< Global name for the bo */
+ struct intel_screen *screen;
+};
+
+
+/* Allocate a refcounted region. Pointers to regions should only be
+ * copied by calling intel_region_reference().
+ */
+struct intel_region *intel_region_alloc(struct intel_context *intel,
+ uint32_t tiling,
+ GLuint cpp, GLuint width,
+ GLuint height,
+ GLboolean expect_accelerated_upload);
+
+struct intel_region *
+intel_region_alloc_for_handle(struct intel_context *intel,
+ GLuint cpp,
+ GLuint width, GLuint height, GLuint pitch,
+ unsigned int handle, const char *name);
+
+void intel_region_reference(struct intel_region **dst,
+ struct intel_region *src);
+
+void intel_region_release(struct intel_region **ib);
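+
+/* For example (an illustrative sketch of the refcounting convention):
+ *
+ *    struct intel_region *copy = NULL;       // destination must start NULL
+ *    intel_region_reference(&copy, region);  // bumps region->refcount
+ *    ...
+ *    intel_region_release(&copy);            // drops it and NULLs the pointer
+ */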
+
+void intel_recreate_static_regions(struct intel_context *intel);
+
+/* Map/unmap regions. This is refcounted also:
+ */
+GLubyte *intel_region_map(struct intel_context *intel,
+ struct intel_region *ib);
+
+void intel_region_unmap(struct intel_context *intel, struct intel_region *ib);
+
+
+/* Upload data to a rectangular sub-region
+ */
+void intel_region_data(struct intel_context *intel,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ const void *src, GLuint src_stride,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height);
+
+/* Copy rectangular sub-regions
+ */
+GLboolean
+intel_region_copy(struct intel_context *intel,
+ struct intel_region *dest,
+ GLuint dest_offset,
+ GLuint destx, GLuint desty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height,
+ GLboolean flip,
+ GLenum logicop);
+
+/* Helpers for zero-copy uploads, particularly texture image uploads:
+ */
+void intel_region_attach_pbo(struct intel_context *intel,
+ struct intel_region *region,
+ struct intel_buffer_object *pbo);
+void intel_region_release_pbo(struct intel_context *intel,
+ struct intel_region *region);
+void intel_region_cow(struct intel_context *intel,
+ struct intel_region *region);
+
+drm_intel_bo *intel_region_buffer(struct intel_context *intel,
+ struct intel_region *region,
+ GLuint flag);
+
+void _mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ const GLubyte * src,
+ GLuint src_pitch, GLuint src_x, GLuint src_y);
+
+struct __DRIimageRec {
+ struct intel_region *region;
+ GLenum internal_format;
+ GLuint format;
+ GLenum data_type;
+ void *data;
+};
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_screen.c b/src/mesa/drivers/dri/intel/intel_screen.c
new file mode 100644
index 0000000000..15a465c640
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_screen.c
@@ -0,0 +1,574 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/framebuffer.h"
+#include "main/renderbuffer.h"
+#include "main/hash.h"
+#include "main/fbobject.h"
+
+#include "utils.h"
+#include "xmlpool.h"
+
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_bufmgr.h"
+#include "intel_chipset.h"
+#include "intel_fbo.h"
+#include "intel_screen.h"
+#include "intel_tex.h"
+#include "intel_regions.h"
+
+#include "i915_drm.h"
+
+#define DRI_CONF_TEXTURE_TILING(def) \
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN
+ DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_ALWAYS_SYNC)
+ /* Options correspond to DRI_CONF_BO_REUSE_DISABLED,
+ * DRI_CONF_BO_REUSE_ALL
+ */
+ DRI_CONF_OPT_BEGIN_V(bo_reuse, enum, 1, "0:1")
+ DRI_CONF_DESC_BEGIN(en, "Buffer object reuse")
+ DRI_CONF_ENUM(0, "Disable buffer object reuse")
+ DRI_CONF_ENUM(1, "Enable reuse of all sizes of buffer objects")
+ DRI_CONF_DESC_END
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(texture_tiling, bool, true)
+ DRI_CONF_DESC(en, "Enable texture tiling")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(early_z, bool, false)
+ DRI_CONF_DESC(en, "Enable early Z in classic mode (unstable, 945-only).")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_OPT_BEGIN(fragment_shader, bool, false)
+ DRI_CONF_DESC(en, "Enable limited ARB_fragment_shader support on 915/945.")
+ DRI_CONF_OPT_END
+
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_QUALITY
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(2)
+ DRI_CONF_SECTION_END
+ DRI_CONF_SECTION_DEBUG
+ DRI_CONF_NO_RAST(false)
+ DRI_CONF_ALWAYS_FLUSH_BATCH(false)
+ DRI_CONF_ALWAYS_FLUSH_CACHE(false)
+
+ DRI_CONF_OPT_BEGIN(stub_occlusion_query, bool, false)
+ DRI_CONF_DESC(en, "Enable stub ARB_occlusion_query support on 915/945.")
+ DRI_CONF_OPT_END
+ DRI_CONF_SECTION_END
+DRI_CONF_END;
+
+const GLuint __driNConfigOptions = 11;
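+/* Note: this count must match the number of options declared above
+ * (vblank_mode, bo_reuse, texture_tiling, early_z, fragment_shader,
+ * force_s3tc_enable, allow_large_textures, no_rast, always_flush_batch,
+ * always_flush_cache and stub_occlusion_query: 11 in total).
+ */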
+
+#ifdef USE_NEW_INTERFACE
+static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
+#endif /*USE_NEW_INTERFACE */
+
+static const __DRItexBufferExtension intelTexBufferExtension = {
+ { __DRI_TEX_BUFFER, __DRI_TEX_BUFFER_VERSION },
+ intelSetTexBuffer,
+ intelSetTexBuffer2,
+};
+
+static void
+intelDRI2Flush(__DRIdrawable *drawable)
+{
+ struct intel_context *intel = drawable->driContextPriv->driverPrivate;
+
+ if (intel->gen < 4)
+ INTEL_FIREVERTICES(intel);
+
+ intel->need_throttle = GL_TRUE;
+
+ if (intel->batch->map != intel->batch->ptr)
+ intel_batchbuffer_flush(intel->batch);
+}
+
+static const struct __DRI2flushExtensionRec intelFlushExtension = {
+ { __DRI2_FLUSH, __DRI2_FLUSH_VERSION },
+ intelDRI2Flush,
+ dri2InvalidateDrawable,
+};
+
+static __DRIimage *
+intel_create_image_from_name(__DRIcontext *context,
+ int width, int height, int format,
+ int name, int pitch, void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_context *intel = context->driverPrivate;
+ int cpp;
+
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ switch (format) {
+ case __DRI_IMAGE_FORMAT_RGB565:
+ image->format = MESA_FORMAT_RGB565;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_XRGB8888:
+ image->format = MESA_FORMAT_XRGB8888;
+ image->internal_format = GL_RGB;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ case __DRI_IMAGE_FORMAT_ARGB8888:
+ image->format = MESA_FORMAT_ARGB8888;
+ image->internal_format = GL_RGBA;
+ image->data_type = GL_UNSIGNED_BYTE;
+ break;
+ default:
+ free(image);
+ return NULL;
+ }
+
+ image->data = loaderPrivate;
+ cpp = _mesa_get_format_bytes(image->format);
+
+ image->region = intel_region_alloc_for_handle(intel, cpp, width, height,
+ pitch, name, "image");
+ if (image->region == NULL) {
+ FREE(image);
+ return NULL;
+ }
+
+ return image;
+}
+
+static __DRIimage *
+intel_create_image_from_renderbuffer(__DRIcontext *context,
+ int renderbuffer, void *loaderPrivate)
+{
+ __DRIimage *image;
+ struct intel_context *intel = context->driverPrivate;
+ struct gl_renderbuffer *rb;
+ struct intel_renderbuffer *irb;
+
+ rb = _mesa_lookup_renderbuffer(&intel->ctx, renderbuffer);
+ if (!rb) {
+ _mesa_error(&intel->ctx,
+ GL_INVALID_OPERATION, "glRenderbufferExternalMESA");
+ return NULL;
+ }
+
+ irb = intel_renderbuffer(rb);
+ image = CALLOC(sizeof *image);
+ if (image == NULL)
+ return NULL;
+
+ image->internal_format = rb->InternalFormat;
+ image->format = rb->Format;
+ image->data_type = rb->DataType;
+ image->data = loaderPrivate;
+ intel_region_reference(&image->region, irb->region);
+
+ return image;
+}
+
+static void
+intel_destroy_image(__DRIimage *image)
+{
+ intel_region_release(&image->region);
+ FREE(image);
+}
+
+static struct __DRIimageExtensionRec intelImageExtension = {
+ { __DRI_IMAGE, __DRI_IMAGE_VERSION },
+ intel_create_image_from_name,
+ intel_create_image_from_renderbuffer,
+ intel_destroy_image,
+};
+
+static const __DRIextension *intelScreenExtensions[] = {
+ &driReadDrawableExtension,
+ &intelTexBufferExtension.base,
+ &intelFlushExtension.base,
+ &intelImageExtension.base,
+ &dri2ConfigQueryExtension.base,
+ NULL
+};
+
+static GLboolean
+intel_get_param(__DRIscreen *psp, int param, int *value)
+{
+ int ret;
+ struct drm_i915_getparam gp;
+
+ gp.param = param;
+ gp.value = value;
+
+ ret = drmCommandWriteRead(psp->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
+ if (ret) {
+ _mesa_warning(NULL, "drm_i915_getparam: %d", ret);
+ return GL_FALSE;
+ }
+
+ return GL_TRUE;
+}
+
+static void
+nop_callback(GLuint key, void *data, void *userData)
+{
+}
+
+static void
+intelDestroyScreen(__DRIscreen * sPriv)
+{
+ struct intel_screen *intelScreen = sPriv->private;
+
+ dri_bufmgr_destroy(intelScreen->bufmgr);
+ driDestroyOptionInfo(&intelScreen->optionCache);
+
+ /* Some regions may still have references to them at this point, so
+ * flush the hash table to prevent _mesa_DeleteHashTable() from
+ * complaining about the hash not being empty. */
+ _mesa_HashDeleteAll(intelScreen->named_regions, nop_callback, NULL);
+ _mesa_DeleteHashTable(intelScreen->named_regions);
+
+ FREE(intelScreen);
+ sPriv->private = NULL;
+}
+
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+static GLboolean
+intelCreateBuffer(__DRIscreen * driScrnPriv,
+ __DRIdrawable * driDrawPriv,
+ const __GLcontextModes * mesaVis, GLboolean isPixmap)
+{
+ struct intel_renderbuffer *rb;
+
+ if (isPixmap) {
+ return GL_FALSE; /* not implemented */
+ }
+ else {
+ GLboolean swStencil = (mesaVis->stencilBits > 0 &&
+ mesaVis->depthBits != 24);
+ gl_format rgbFormat;
+
+ struct gl_framebuffer *fb = CALLOC_STRUCT(gl_framebuffer);
+
+ if (!fb)
+ return GL_FALSE;
+
+ _mesa_initialize_window_framebuffer(fb, mesaVis);
+
+ if (mesaVis->redBits == 5)
+ rgbFormat = MESA_FORMAT_RGB565;
+ else if (mesaVis->alphaBits == 0)
+ rgbFormat = MESA_FORMAT_XRGB8888;
+ else
+ rgbFormat = MESA_FORMAT_ARGB8888;
+
+ /* setup the hardware-based renderbuffers */
+ rb = intel_create_renderbuffer(rgbFormat);
+ _mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &rb->Base);
+
+ if (mesaVis->doubleBufferMode) {
+ rb = intel_create_renderbuffer(rgbFormat);
+ _mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &rb->Base);
+ }
+
+ if (mesaVis->depthBits == 24) {
+ assert(mesaVis->stencilBits == 8);
+ /* combined depth/stencil buffer */
+ struct intel_renderbuffer *depthStencilRb
+ = intel_create_renderbuffer(MESA_FORMAT_S8_Z24);
+ /* note: bind RB to two attachment points */
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthStencilRb->Base);
+ _mesa_add_renderbuffer(fb, BUFFER_STENCIL, &depthStencilRb->Base);
+ }
+ else if (mesaVis->depthBits == 16) {
+ /* just 16-bit depth buffer, no hw stencil */
+ struct intel_renderbuffer *depthRb
+ = intel_create_renderbuffer(MESA_FORMAT_Z16);
+ _mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
+ }
+
+ /* now add any/all software-based renderbuffers we may need */
+ _mesa_add_soft_renderbuffers(fb,
+ GL_FALSE, /* never sw color */
+ GL_FALSE, /* never sw depth */
+ swStencil, mesaVis->accumRedBits > 0,
+ GL_FALSE, /* never sw alpha */
+ GL_FALSE /* never sw aux */ );
+ driDrawPriv->driverPrivate = fb;
+
+ return GL_TRUE;
+ }
+}
+
+static void
+intelDestroyBuffer(__DRIdrawable * driDrawPriv)
+{
+ struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
+
+ _mesa_reference_framebuffer(&fb, NULL);
+}
+
+/* There are probably better ways to do this, such as an
+ * init-designated function to register chipids and createcontext
+ * functions.
+ */
+extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate);
+
+extern GLboolean i915CreateContext(int api,
+ const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate);
+extern GLboolean brwCreateContext(int api,
+ const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate);
+
+static GLboolean
+intelCreateContext(gl_api api,
+ const __GLcontextModes * mesaVis,
+ __DRIcontext * driContextPriv,
+ void *sharedContextPrivate)
+{
+ __DRIscreen *sPriv = driContextPriv->driScreenPriv;
+ struct intel_screen *intelScreen = sPriv->private;
+
+#ifdef I915
+ if (IS_9XX(intelScreen->deviceID)) {
+ if (!IS_965(intelScreen->deviceID)) {
+ return i915CreateContext(api, mesaVis, driContextPriv,
+ sharedContextPrivate);
+ }
+ } else {
+ intelScreen->no_vbo = GL_TRUE;
+ return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
+ }
+#else
+ if (IS_965(intelScreen->deviceID))
+ return brwCreateContext(api, mesaVis,
+ driContextPriv, sharedContextPrivate);
+#endif
+ fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
+ return GL_FALSE;
+}
+
+static GLboolean
+intel_init_bufmgr(struct intel_screen *intelScreen)
+{
+ __DRIscreen *spriv = intelScreen->driScrnPriv;
+ int num_fences = 0;
+
+ intelScreen->no_hw = getenv("INTEL_NO_HW") != NULL;
+
+ intelScreen->bufmgr = intel_bufmgr_gem_init(spriv->fd, BATCH_SZ);
+ /* There is no classic fallback; failing to create the GEM buffer manager is fatal. */
+ if (intelScreen->bufmgr == NULL) {
+ fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
+ __func__, __LINE__);
+ return GL_FALSE;
+ }
+
+ if (!intel_get_param(spriv, I915_PARAM_NUM_FENCES_AVAIL, &num_fences) ||
+ num_fences == 0) {
+ fprintf(stderr, "[%s: %u] Kernel 2.6.29 required.\n", __func__, __LINE__);
+ return GL_FALSE;
+ }
+
+ drm_intel_bufmgr_gem_enable_fenced_relocs(intelScreen->bufmgr);
+
+ intelScreen->named_regions = _mesa_NewHashTable();
+
+ return GL_TRUE;
+}
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ * Called when using DRI2.
+ *
+ * \return the __DRIconfig configs supported by this driver
+ */
+static const
+__DRIconfig **intelInitScreen2(__DRIscreen *psp)
+{
+ struct intel_screen *intelScreen;
+ GLenum fb_format[3];
+ GLenum fb_type[3];
+ unsigned int api_mask;
+
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+ uint8_t depth_bits[4], stencil_bits[4], msaa_samples_array[1];
+ int color;
+ __DRIconfig **configs = NULL;
+
+ /* Allocate the private area */
+ intelScreen = CALLOC(sizeof *intelScreen);
+ if (!intelScreen) {
+ fprintf(stderr, "\nERROR! Allocating private area failed\n");
+ return GL_FALSE;
+ }
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo(&intelScreen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ intelScreen->driScrnPriv = psp;
+ psp->private = (void *) intelScreen;
+
+ /* Determine chipset ID */
+ if (!intel_get_param(psp, I915_PARAM_CHIPSET_ID,
+ &intelScreen->deviceID))
+ return GL_FALSE;
+
+ api_mask = (1 << __DRI_API_OPENGL);
+#if FEATURE_ES1
+ api_mask |= (1 << __DRI_API_GLES);
+#endif
+#if FEATURE_ES2
+ api_mask |= (1 << __DRI_API_GLES2);
+#endif
+
+ if (IS_9XX(intelScreen->deviceID) || IS_965(intelScreen->deviceID))
+ psp->api_mask = api_mask;
+
+ if (!intel_init_bufmgr(intelScreen))
+ return GL_FALSE;
+
+ psp->extensions = intelScreenExtensions;
+
+ msaa_samples_array[0] = 0;
+
+ fb_format[0] = GL_RGB;
+ fb_type[0] = GL_UNSIGNED_SHORT_5_6_5;
+
+ fb_format[1] = GL_BGR;
+ fb_type[1] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ fb_format[2] = GL_BGRA;
+ fb_type[2] = GL_UNSIGNED_INT_8_8_8_8_REV;
+
+ depth_bits[0] = 0;
+ stencil_bits[0] = 0;
+
+ /* Generate a rich set of useful configs that do not include an
+ * accumulation buffer.
+ */
+ for (color = 0; color < ARRAY_SIZE(fb_format); color++) {
+ __DRIconfig **new_configs;
+ int depth_factor;
+
+ /* Starting with DRI2 protocol version 1.1 we can request a depth/stencil
+ * buffer that has a different number of bits per pixel than the color
+ * buffer. This isn't yet supported here.
+ */
+ if (fb_type[color] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[1] = 16;
+ stencil_bits[1] = 0;
+ } else {
+ depth_bits[1] = 24;
+ stencil_bits[1] = 8;
+ }
+
+ depth_factor = 2;
+
+ new_configs = driCreateConfigs(fb_format[color], fb_type[color],
+ depth_bits,
+ stencil_bits,
+ depth_factor,
+ back_buffer_modes,
+ ARRAY_SIZE(back_buffer_modes),
+ msaa_samples_array,
+ ARRAY_SIZE(msaa_samples_array),
+ GL_FALSE);
+ if (configs == NULL)
+ configs = new_configs;
+ else
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ /* Generate the minimum possible set of configs that include an
+ * accumulation buffer.
+ */
+ for (color = 0; color < ARRAY_SIZE(fb_format); color++) {
+ __DRIconfig **new_configs;
+
+ if (fb_type[color] == GL_UNSIGNED_SHORT_5_6_5) {
+ depth_bits[0] = 16;
+ stencil_bits[0] = 0;
+ } else {
+ depth_bits[0] = 24;
+ stencil_bits[0] = 8;
+ }
+
+ new_configs = driCreateConfigs(fb_format[color], fb_type[color],
+ depth_bits, stencil_bits, 1,
+ back_buffer_modes + 1, 1,
+ msaa_samples_array, 1,
+ GL_TRUE);
+ if (configs == NULL)
+ configs = new_configs;
+ else
+ configs = driConcatConfigs(configs, new_configs);
+ }
+
+ if (configs == NULL) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ return (const __DRIconfig **)configs;
+}
+
+const struct __DriverAPIRec driDriverAPI = {
+ .DestroyScreen = intelDestroyScreen,
+ .CreateContext = intelCreateContext,
+ .DestroyContext = intelDestroyContext,
+ .CreateBuffer = intelCreateBuffer,
+ .DestroyBuffer = intelDestroyBuffer,
+ .MakeCurrent = intelMakeCurrent,
+ .UnbindContext = intelUnbindContext,
+ .InitScreen2 = intelInitScreen2,
+};
+
+/* This is the table of extensions that the loader will dlsym() for. */
+PUBLIC const __DRIextension *__driDriverExtensions[] = {
+ &driCoreExtension.base,
+ &driDRI2Extension.base,
+ NULL
+};
diff --git a/src/mesa/drivers/dri/intel/intel_screen.h b/src/mesa/drivers/dri/intel/intel_screen.h
new file mode 100644
index 0000000000..5863093f00
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_screen.h
@@ -0,0 +1,68 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _INTEL_INIT_H_
+#define _INTEL_INIT_H_
+
+#include <sys/time.h>
+#include "dri_util.h"
+#include "intel_bufmgr.h"
+#include "i915_drm.h"
+#include "xmlconfig.h"
+
+struct intel_screen
+{
+ int deviceID;
+
+ int logTextureGranularity;
+
+ __DRIscreen *driScrnPriv;
+
+ GLboolean no_hw;
+
+ GLboolean no_vbo;
+ dri_bufmgr *bufmgr;
+ struct _mesa_HashTable *named_regions;
+
+ /**
+ * Configuration cache with default values for all contexts
+ */
+ driOptionCache optionCache;
+};
+
+extern GLboolean intelMapScreenRegions(__DRIscreen * sPriv);
+
+extern void intelDestroyContext(__DRIcontext * driContextPriv);
+
+extern GLboolean intelUnbindContext(__DRIcontext * driContextPriv);
+
+extern GLboolean
+intelMakeCurrent(__DRIcontext * driContextPriv,
+ __DRIdrawable * driDrawPriv,
+ __DRIdrawable * driReadPriv);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_span.c b/src/mesa/drivers/dri/intel/intel_span.c
new file mode 100644
index 0000000000..fb840c1020
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_span.c
@@ -0,0 +1,384 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "main/colormac.h"
+
+#include "intel_buffers.h"
+#include "intel_fbo.h"
+#include "intel_screen.h"
+#include "intel_span.h"
+#include "intel_regions.h"
+#include "intel_tex.h"
+
+#include "swrast/swrast.h"
+
+static void
+intel_set_span_functions(struct intel_context *intel,
+ struct gl_renderbuffer *rb);
+
+#undef DBG
+#define DBG 0
+
+#define LOCAL_VARS \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLint yScale = rb->Name ? 1 : -1; \
+ const GLint yBias = rb->Name ? 0 : rb->Height - 1; \
+ int minx = 0, miny = 0; \
+ int maxx = rb->Width; \
+ int maxy = rb->Height; \
+ int pitch = irb->region->pitch * irb->region->cpp; \
+ void *buf = irb->region->buffer->virtual; \
+ GLuint p; \
+ (void) p; \
+ (void)buf; (void)pitch; /* unused for non-gttmap. */ \
+
+#define HW_CLIPLOOP()
+#define HW_ENDCLIPLOOP()
+
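+/* rb->Name is zero only for window-system renderbuffers, which are stored
+ * top-to-bottom, so spans targeting them are flipped in Y; user FBO
+ * renderbuffers keep GL's bottom-up orientation and pass through unchanged.
+ */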
+#define Y_FLIP(_y) ((_y) * yScale + yBias)
+
+#define HW_LOCK()
+
+#define HW_UNLOCK()
+
+/* Convenience macros to avoid typing the address argument over and over */
+#define NO_TILE(_X, _Y) (((_Y) * irb->region->pitch + (_X)) * irb->region->cpp)
+
+/* r5g6b5 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_RGB
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
+#define TAG(x) intel_##x##_RGB565
+#define TAG2(x,y) intel_##x##y_RGB565
+#include "spantmp2.h"
+
+/* a4r4g4b4 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_BGRA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_4_4_4_4_REV
+#define TAG(x) intel_##x##_ARGB4444
+#define TAG2(x,y) intel_##x##y_ARGB4444
+#include "spantmp2.h"
+
+/* a1r5g5b5 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_BGRA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_1_5_5_5_REV
+#define TAG(x) intel_##x##_ARGB1555
+#define TAG2(x,y) intel_##x##y##_ARGB1555
+#include "spantmp2.h"
+
+/* a8r8g8b8 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_BGRA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
+#define TAG(x) intel_##x##_ARGB8888
+#define TAG2(x,y) intel_##x##y##_ARGB8888
+#include "spantmp2.h"
+
+/* x8r8g8b8 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_BGR
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
+#define TAG(x) intel_##x##_xRGB8888
+#define TAG2(x,y) intel_##x##y##_xRGB8888
+#include "spantmp2.h"
+
+/* a8 color span and pixel functions */
+#define SPANTMP_PIXEL_FMT GL_ALPHA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_BYTE
+#define TAG(x) intel_##x##_A8
+#define TAG2(x,y) intel_##x##y##_A8
+#include "spantmp2.h"
+
+#define LOCAL_DEPTH_VARS \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLint yScale = rb->Name ? 1 : -1; \
+ const GLint yBias = rb->Name ? 0 : rb->Height - 1; \
+ int minx = 0, miny = 0; \
+ int maxx = rb->Width; \
+ int maxy = rb->Height; \
+ int pitch = irb->region->pitch * irb->region->cpp; \
+ void *buf = irb->region->buffer->virtual; \
+ (void)buf; (void)pitch; /* unused for non-gttmap. */ \
+
+#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
+
+/* z16 depthbuffer functions. */
+#define VALUE_TYPE GLushort
+#define WRITE_DEPTH(_x, _y, d) \
+ (*(uint16_t *)(irb->region->buffer->virtual + NO_TILE(_x, _y)) = d)
+#define READ_DEPTH(d, _x, _y) \
+ d = *(uint16_t *)(irb->region->buffer->virtual + NO_TILE(_x, _y))
+#define TAG(x) intel_##x##_z16
+#include "depthtmp.h"
+
+/* z24_s8 and z24_x8 depthbuffer functions. */
+#define VALUE_TYPE GLuint
+#define WRITE_DEPTH(_x, _y, d) \
+ (*(uint32_t *)(irb->region->buffer->virtual + NO_TILE(_x, _y)) = d)
+#define READ_DEPTH(d, _x, _y) \
+ d = *(uint32_t *)(irb->region->buffer->virtual + NO_TILE(_x, _y))
+#define TAG(x) intel_##x##_z24_s8
+#include "depthtmp.h"
+
+void
+intel_renderbuffer_map(struct intel_context *intel, struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ if (irb == NULL || irb->region == NULL)
+ return;
+
+ drm_intel_gem_bo_map_gtt(irb->region->buffer);
+
+ intel_set_span_functions(intel, rb);
+}
+
+void
+intel_renderbuffer_unmap(struct intel_context *intel,
+ struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ if (irb == NULL || irb->region == NULL)
+ return;
+
+ drm_intel_gem_bo_unmap_gtt(irb->region->buffer);
+
+ rb->GetRow = NULL;
+ rb->PutRow = NULL;
+}
+
+/**
+ * Map or unmap all the renderbuffers which we may need during
+ * software rendering.
+ * XXX in the future, we could probably convey extra information to
+ * reduce the number of mappings needed. I.e. if doing a glReadPixels
+ * from the depth buffer, we really only need one mapping.
+ *
+ * XXX Rewrite this function someday.
+ * We can probably just loop over all the renderbuffer attachments,
+ * map/unmap all of them, and not worry about the _ColorDrawBuffers
+ * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
+ */
+static void
+intel_map_unmap_framebuffer(struct intel_context *intel,
+ struct gl_framebuffer *fb,
+ GLboolean map)
+{
+ GLuint i;
+
+ /* color draw buffers */
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
+ if (map)
+ intel_renderbuffer_map(intel, fb->_ColorDrawBuffers[i]);
+ else
+ intel_renderbuffer_unmap(intel, fb->_ColorDrawBuffers[i]);
+ }
+
+ /* color read buffer */
+ if (map)
+ intel_renderbuffer_map(intel, fb->_ColorReadBuffer);
+ else
+ intel_renderbuffer_unmap(intel, fb->_ColorReadBuffer);
+
+ /* check for render to textures */
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ struct gl_renderbuffer_attachment *att =
+ fb->Attachment + i;
+ struct gl_texture_object *tex = att->Texture;
+ if (tex) {
+ /* render to texture */
+ ASSERT(att->Renderbuffer);
+ if (map)
+ intel_tex_map_images(intel, intel_texture_object(tex));
+ else
+ intel_tex_unmap_images(intel, intel_texture_object(tex));
+ }
+ }
+
+ /* depth buffer (Note wrapper!) */
+ if (fb->_DepthBuffer) {
+ if (map)
+ intel_renderbuffer_map(intel, fb->_DepthBuffer->Wrapped);
+ else
+ intel_renderbuffer_unmap(intel, fb->_DepthBuffer->Wrapped);
+ }
+
+ /* stencil buffer (Note wrapper!) */
+ if (fb->_StencilBuffer) {
+ if (map)
+ intel_renderbuffer_map(intel, fb->_StencilBuffer->Wrapped);
+ else
+ intel_renderbuffer_unmap(intel, fb->_StencilBuffer->Wrapped);
+ }
+
+ intel_check_front_buffer_rendering(intel);
+}
+
+/**
+ * Prepare for software rendering. Map current read/draw framebuffers'
+ * renderbuffers and all currently bound texture objects.
+ *
+ * Old note: Moved locking out to get reasonable span performance.
+ */
+void
+intelSpanRenderStart(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ intel_flush(&intel->ctx);
+ intel_prepare_render(intel);
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_finalize_mipmap_tree(intel, i);
+ intel_tex_map_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_framebuffer(intel, ctx->DrawBuffer, GL_TRUE);
+ if (ctx->ReadBuffer != ctx->DrawBuffer)
+ intel_map_unmap_framebuffer(intel, ctx->ReadBuffer, GL_TRUE);
+}
+
+/**
+ * Called when done software rendering. Unmap the buffers we mapped in
+ * the above function.
+ */
+void
+intelSpanRenderFinish(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ _swrast_flush(ctx);
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_framebuffer(intel, ctx->DrawBuffer, GL_FALSE);
+ if (ctx->ReadBuffer != ctx->DrawBuffer)
+ intel_map_unmap_framebuffer(intel, ctx->ReadBuffer, GL_FALSE);
+}
+
+
+void
+intelInitSpanFuncs(GLcontext * ctx)
+{
+ struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
+ swdd->SpanRenderStart = intelSpanRenderStart;
+ swdd->SpanRenderFinish = intelSpanRenderFinish;
+}
+
+void
+intel_map_vertex_shader_textures(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_map_images(intel, intel_texture_object(texObj));
+ }
+ }
+}
+
+void
+intel_unmap_vertex_shader_textures(GLcontext *ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ int i;
+
+ if (ctx->VertexProgram._Current == NULL)
+ return;
+
+ for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled &&
+ ctx->VertexProgram._Current->Base.TexturesUsed[i] != 0) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+}
+
+/**
+ * Plug in appropriate span read/write functions for the given renderbuffer.
+ * These are used for the software fallbacks.
+ */
+static void
+intel_set_span_functions(struct intel_context *intel,
+ struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
+
+ switch (irb->Base.Format) {
+ case MESA_FORMAT_A8:
+ intel_InitPointers_A8(rb);
+ break;
+ case MESA_FORMAT_RGB565:
+ intel_InitPointers_RGB565(rb);
+ break;
+ case MESA_FORMAT_ARGB4444:
+ intel_InitPointers_ARGB4444(rb);
+ break;
+ case MESA_FORMAT_ARGB1555:
+ intel_InitPointers_ARGB1555(rb);
+ break;
+ case MESA_FORMAT_XRGB8888:
+ intel_InitPointers_xRGB8888(rb);
+ break;
+ case MESA_FORMAT_ARGB8888:
+ intel_InitPointers_ARGB8888(rb);
+ break;
+ case MESA_FORMAT_Z16:
+ intel_InitDepthPointers_z16(rb);
+ break;
+ case MESA_FORMAT_X8_Z24:
+ case MESA_FORMAT_S8_Z24:
+ intel_InitDepthPointers_z24_s8(rb);
+ break;
+ default:
+ _mesa_problem(NULL,
+ "Unexpected MesaFormat %d in intelSetSpanFunctions",
+ irb->Base.Format);
+ break;
+ }
+}
diff --git a/src/mesa/drivers/dri/intel/intel_span.h b/src/mesa/drivers/dri/intel/intel_span.h
new file mode 100644
index 0000000000..bffe109aa5
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_span.h
@@ -0,0 +1,42 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _INTEL_SPAN_H
+#define _INTEL_SPAN_H
+
+extern void intelInitSpanFuncs(GLcontext * ctx);
+
+extern void intelSpanRenderFinish(GLcontext * ctx);
+extern void intelSpanRenderStart(GLcontext * ctx);
+void intel_renderbuffer_map(struct intel_context *intel,
+ struct gl_renderbuffer *rb);
+void intel_renderbuffer_unmap(struct intel_context *intel,
+ struct gl_renderbuffer *rb);
+void intel_map_vertex_shader_textures(GLcontext *ctx);
+void intel_unmap_vertex_shader_textures(GLcontext *ctx);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_state.c b/src/mesa/drivers/dri/intel/intel_state.c
new file mode 100644
index 0000000000..c5ef909dbf
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_state.c
@@ -0,0 +1,211 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/macros.h"
+#include "main/enums.h"
+#include "main/colormac.h"
+#include "main/dd.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+
+int
+intel_translate_shadow_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_ALWAYS;
+ case GL_LESS:
+ return COMPAREFUNC_LEQUAL;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LESS;
+ case GL_GREATER:
+ return COMPAREFUNC_GEQUAL;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GREATER;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_NEVER;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_NEVER;
+}
+
+int
+intel_translate_compare_func(GLenum func)
+{
+ switch (func) {
+ case GL_NEVER:
+ return COMPAREFUNC_NEVER;
+ case GL_LESS:
+ return COMPAREFUNC_LESS;
+ case GL_LEQUAL:
+ return COMPAREFUNC_LEQUAL;
+ case GL_GREATER:
+ return COMPAREFUNC_GREATER;
+ case GL_GEQUAL:
+ return COMPAREFUNC_GEQUAL;
+ case GL_NOTEQUAL:
+ return COMPAREFUNC_NOTEQUAL;
+ case GL_EQUAL:
+ return COMPAREFUNC_EQUAL;
+ case GL_ALWAYS:
+ return COMPAREFUNC_ALWAYS;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
+ return COMPAREFUNC_ALWAYS;
+}
+
+int
+intel_translate_stencil_op(GLenum op)
+{
+ switch (op) {
+ case GL_KEEP:
+ return STENCILOP_KEEP;
+ case GL_ZERO:
+ return STENCILOP_ZERO;
+ case GL_REPLACE:
+ return STENCILOP_REPLACE;
+ case GL_INCR:
+ return STENCILOP_INCRSAT;
+ case GL_DECR:
+ return STENCILOP_DECRSAT;
+ case GL_INCR_WRAP:
+ return STENCILOP_INCR;
+ case GL_DECR_WRAP:
+ return STENCILOP_DECR;
+ case GL_INVERT:
+ return STENCILOP_INVERT;
+ default:
+ return STENCILOP_ZERO;
+ }
+}
+
+int
+intel_translate_blend_factor(GLenum factor)
+{
+ switch (factor) {
+ case GL_ZERO:
+ return BLENDFACT_ZERO;
+ case GL_SRC_ALPHA:
+ return BLENDFACT_SRC_ALPHA;
+ case GL_ONE:
+ return BLENDFACT_ONE;
+ case GL_SRC_COLOR:
+ return BLENDFACT_SRC_COLR;
+ case GL_ONE_MINUS_SRC_COLOR:
+ return BLENDFACT_INV_SRC_COLR;
+ case GL_DST_COLOR:
+ return BLENDFACT_DST_COLR;
+ case GL_ONE_MINUS_DST_COLOR:
+ return BLENDFACT_INV_DST_COLR;
+ case GL_ONE_MINUS_SRC_ALPHA:
+ return BLENDFACT_INV_SRC_ALPHA;
+ case GL_DST_ALPHA:
+ return BLENDFACT_DST_ALPHA;
+ case GL_ONE_MINUS_DST_ALPHA:
+ return BLENDFACT_INV_DST_ALPHA;
+ case GL_SRC_ALPHA_SATURATE:
+ return BLENDFACT_SRC_ALPHA_SATURATE;
+ case GL_CONSTANT_COLOR:
+ return BLENDFACT_CONST_COLOR;
+ case GL_ONE_MINUS_CONSTANT_COLOR:
+ return BLENDFACT_INV_CONST_COLOR;
+ case GL_CONSTANT_ALPHA:
+ return BLENDFACT_CONST_ALPHA;
+ case GL_ONE_MINUS_CONSTANT_ALPHA:
+ return BLENDFACT_INV_CONST_ALPHA;
+ }
+
+ fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, factor);
+ return BLENDFACT_ZERO;
+}
+
+int
+intel_translate_logic_op(GLenum opcode)
+{
+ switch (opcode) {
+ case GL_CLEAR:
+ return LOGICOP_CLEAR;
+ case GL_AND:
+ return LOGICOP_AND;
+ case GL_AND_REVERSE:
+ return LOGICOP_AND_RVRSE;
+ case GL_COPY:
+ return LOGICOP_COPY;
+ case GL_COPY_INVERTED:
+ return LOGICOP_COPY_INV;
+ case GL_AND_INVERTED:
+ return LOGICOP_AND_INV;
+ case GL_NOOP:
+ return LOGICOP_NOOP;
+ case GL_XOR:
+ return LOGICOP_XOR;
+ case GL_OR:
+ return LOGICOP_OR;
+ case GL_OR_INVERTED:
+ return LOGICOP_OR_INV;
+ case GL_NOR:
+ return LOGICOP_NOR;
+ case GL_EQUIV:
+ return LOGICOP_EQUIV;
+ case GL_INVERT:
+ return LOGICOP_INV;
+ case GL_OR_REVERSE:
+ return LOGICOP_OR_RVRSE;
+ case GL_NAND:
+ return LOGICOP_NAND;
+ case GL_SET:
+ return LOGICOP_SET;
+ default:
+ return LOGICOP_SET;
+ }
+}
+
+/* Fallback to swrast for select and feedback.
+ */
+static void
+intelRenderMode(GLcontext *ctx, GLenum mode)
+{
+ struct intel_context *intel = intel_context(ctx);
+ FALLBACK(intel, INTEL_FALLBACK_RENDERMODE, (mode != GL_RENDER));
+}
+
+
+void
+intelInitStateFuncs(struct dd_function_table *functions)
+{
+ functions->RenderMode = intelRenderMode;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_syncobj.c b/src/mesa/drivers/dri/intel/intel_syncobj.c
new file mode 100644
index 0000000000..c2d86432ff
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_syncobj.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file intel_syncobj.c
+ *
+ * Support for ARB_sync
+ *
+ * ARB_sync is implemented by flushing the current batchbuffer and keeping a
+ * reference on it. We can then check for completion or wait for compeltion
+ * using the normal buffer object mechanisms. This does mean that if an
+ * application is using many sync objects, it will emit small batchbuffers
+ * which may end up being a significant overhead. In other tests of removing
+ * gratuitous batchbuffer syncs in Mesa, it hasn't appeared to be a significant
+ * performance bottleneck, though.
+ */
+
+#include "main/simple_list.h"
+#include "main/imports.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+
+static struct gl_sync_object *
+intel_new_sync_object(GLcontext *ctx, GLuint id)
+{
+ struct intel_sync_object *sync;
+
+   sync = calloc(1, sizeof(struct intel_sync_object));
+   if (sync == NULL)
+      return NULL;
+
+ return &sync->Base;
+}
+
+static void
+intel_delete_sync_object(GLcontext *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ drm_intel_bo_unreference(sync->bo);
+ free(sync);
+}
+
+static void
+intel_fence_sync(GLcontext *ctx, struct gl_sync_object *s,
+ GLenum condition, GLbitfield flags)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ assert(condition == GL_SYNC_GPU_COMMANDS_COMPLETE);
+ intel_batchbuffer_emit_mi_flush(intel->batch);
+
+ sync->bo = intel->batch->buf;
+ drm_intel_bo_reference(sync->bo);
+
+ intel_flush(ctx);
+}
+
+/* We ignore the user-supplied timeout. This is weaselly -- we're allowed to
+ * round to an implementation-dependent accuracy, and right now our
+ * implementation "rounds" to the wait-forever value.
+ *
+ * The fix would be a new kernel function to do the GTT transition with a
+ * timeout.
+ */
+static void intel_client_wait_sync(GLcontext *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo) {
+ drm_intel_bo_wait_rendering(sync->bo);
+ s->StatusFlag = 1;
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ }
+}
+
+/* We have nothing to do for WaitSync. Our GL command stream is sequential,
+ * so given that the sync object has already flushed the batchbuffer,
+ * any batchbuffers coming after this waitsync will naturally not occur until
+ * the previous one is done.
+ */
+static void intel_server_wait_sync(GLcontext *ctx, struct gl_sync_object *s,
+ GLbitfield flags, GLuint64 timeout)
+{
+}
+
+static void intel_check_sync(GLcontext *ctx, struct gl_sync_object *s)
+{
+ struct intel_sync_object *sync = (struct intel_sync_object *)s;
+
+ if (sync->bo && !drm_intel_bo_busy(sync->bo)) {
+ drm_intel_bo_unreference(sync->bo);
+ sync->bo = NULL;
+ s->StatusFlag = 1;
+ }
+}
+
+void intel_init_syncobj_functions(struct dd_function_table *functions)
+{
+ functions->NewSyncObject = intel_new_sync_object;
+ functions->DeleteSyncObject = intel_delete_sync_object;
+ functions->FenceSync = intel_fence_sync;
+ functions->CheckSync = intel_check_sync;
+ functions->ClientWaitSync = intel_client_wait_sync;
+ functions->ServerWaitSync = intel_server_wait_sync;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex.c b/src/mesa/drivers/dri/intel/intel_tex.c
new file mode 100644
index 0000000000..8bb6ae99fb
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex.c
@@ -0,0 +1,228 @@
+#include "swrast/swrast.h"
+#include "main/texobj.h"
+#include "main/teximage.h"
+#include "main/mipmap.h"
+#include "drivers/common/meta.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static GLboolean
+intelIsTextureResident(GLcontext * ctx, struct gl_texture_object *texObj)
+{
+#if 0
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ return
+ intelObj->mt &&
+ intelObj->mt->region &&
+ intel_is_region_resident(intel, intelObj->mt->region);
+#endif
+ return 1;
+}
+
+
+
+static struct gl_texture_image *
+intelNewTextureImage(GLcontext * ctx)
+{
+ DBG("%s\n", __FUNCTION__);
+ (void) ctx;
+ return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
+}
+
+
+static struct gl_texture_object *
+intelNewTextureObject(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
+
+ DBG("%s\n", __FUNCTION__);
+ _mesa_initialize_texture_object(&obj->base, name, target);
+
+ return &obj->base;
+}
+
+static void
+intelDeleteTextureObject(GLcontext *ctx,
+ struct gl_texture_object *texObj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ _mesa_delete_texture_object(ctx, texObj);
+}
+
+
+static void
+intelFreeTextureImageData(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+
+ if (texImage->Data) {
+ _mesa_free_texmemory(texImage->Data);
+ texImage->Data = NULL;
+ }
+}
+
+
+/* The system memcpy (at least on ubuntu 5.10) has problems copying
+ * to agp (writecombined) memory from a source which isn't 64-byte
+ * aligned - there is a 4x performance falloff.
+ *
+ * The x86 __memcpy is immune to this but is slightly slower
+ * (10%-ish) than the system memcpy.
+ *
+ * The sse_memcpy seems to have a slight cliff at 64/32 bytes, but
+ * isn't much faster than x86_memcpy for agp copies.
+ *
+ * TODO: switch dynamically.
+ */
+static void *
+do_memcpy(void *dest, const void *src, size_t n)
+{
+ if ((((unsigned long) src) & 63) || (((unsigned long) dest) & 63)) {
+ return __memcpy(dest, src, n);
+ }
+ else
+ return memcpy(dest, src, n);
+}
+
+
+#if DO_DEBUG && !defined(__ia64__)
+
+#ifndef __x86_64__
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t"
+ "pushl %%ebx\n\t"
+ "cpuid\n\t" ".byte 0x0f, 0x31\n\t"
+ "popl %%ebx\n":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "cc");
+
+ return eax;
+}
+#else
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t" "cpuid\n\t" ".byte 0x0f, 0x31\n\t":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "ebx", "cc");
+
+ return eax;
+}
+#endif
+
+static unsigned
+time_diff(unsigned t, unsigned t2)
+{
+ return ((t < t2) ? t2 - t : 0xFFFFFFFFU - (t - t2 - 1));
+}
+
+
+static void *
+timed_memcpy(void *dest, const void *src, size_t n)
+{
+ void *ret;
+ unsigned t1, t2;
+ double rate;
+
+   if ((((unsigned long) src) & 63) || (((unsigned long) dest) & 63))
+ printf("Warning - non-aligned texture copy!\n");
+
+ t1 = fastrdtsc();
+ ret = do_memcpy(dest, src, n);
+ t2 = fastrdtsc();
+
+ rate = time_diff(t1, t2);
+ rate /= (double) n;
+ printf("timed_memcpy: %u %u --> %f clocks/byte\n", t1, t2, rate);
+ return ret;
+}
+#endif /* DO_DEBUG */
+
+
+/**
+ * Called via ctx->Driver.GenerateMipmap()
+ * This is basically a wrapper for _mesa_meta_GenerateMipmap() which checks
+ * if we'll be using software mipmap generation. In that case, we need to
+ * map/unmap the base level texture image.
+ */
+static void
+intelGenerateMipmap(GLcontext *ctx, GLenum target,
+ struct gl_texture_object *texObj)
+{
+ if (_mesa_meta_check_generate_mipmap_fallback(ctx, target, texObj)) {
+ /* sw path: need to map texture images */
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+ intel_tex_map_level_images(intel, intelObj, texObj->BaseLevel);
+ _mesa_generate_mipmap(ctx, target, texObj);
+ intel_tex_unmap_level_images(intel, intelObj, texObj->BaseLevel);
+
+ {
+ GLuint nr_faces = (texObj->Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face, i;
+ /* Update the level information in our private data in the new images,
+ * since it didn't get set as part of a normal TexImage path.
+ */
+ for (face = 0; face < nr_faces; face++) {
+ for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(texObj->Image[face][i]);
+ if (!intelImage)
+ break;
+ intelImage->level = i;
+ intelImage->face = face;
+ /* Unreference the miptree to signal that the new Data is a
+ * bare pointer from mesa.
+ */
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+ }
+ }
+ }
+ else {
+ _mesa_meta_GenerateMipmap(ctx, target, texObj);
+ }
+}
+
+
+void
+intelInitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->ChooseTextureFormat = intelChooseTextureFormat;
+ functions->GenerateMipmap = intelGenerateMipmap;
+
+ functions->NewTextureObject = intelNewTextureObject;
+ functions->NewTextureImage = intelNewTextureImage;
+ functions->DeleteTexture = intelDeleteTextureObject;
+ functions->FreeTexImageData = intelFreeTextureImageData;
+ functions->UpdateTexturePalette = 0;
+ functions->IsTextureResident = intelIsTextureResident;
+
+#if DO_DEBUG && !defined(__ia64__)
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ functions->TextureMemCpy = timed_memcpy;
+ else
+#endif
+ functions->TextureMemCpy = do_memcpy;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex.h b/src/mesa/drivers/dri/intel/intel_tex.h
new file mode 100644
index 0000000000..4bb012dc65
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex.h
@@ -0,0 +1,71 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef INTELTEX_INC
+#define INTELTEX_INC
+
+#include "main/mtypes.h"
+#include "main/formats.h"
+#include "intel_context.h"
+#include "texmem.h"
+
+
+void intelInitTextureFuncs(struct dd_function_table *functions);
+
+void intelInitTextureImageFuncs(struct dd_function_table *functions);
+
+void intelInitTextureSubImageFuncs(struct dd_function_table *functions);
+
+void intelInitTextureCopyImageFuncs(struct dd_function_table *functions);
+
+gl_format intelChooseTextureFormat(GLcontext *ctx, GLint internalFormat,
+ GLenum format, GLenum type);
+
+void intelSetTexBuffer(__DRIcontext *pDRICtx,
+ GLint target, __DRIdrawable *pDraw);
+void intelSetTexBuffer2(__DRIcontext *pDRICtx,
+ GLint target, GLint format, __DRIdrawable *pDraw);
+
+GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
+
+void intel_tex_map_level_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ int level);
+
+void intel_tex_unmap_level_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ int level);
+
+void intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj);
+
+void intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj);
+
+int intel_compressed_num_bytes(GLuint mesaFormat);
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/intel_tex_copy.c b/src/mesa/drivers/dri/intel/intel_tex_copy.c
new file mode 100644
index 0000000000..224b506c05
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_copy.c
@@ -0,0 +1,330 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/image.h"
+#include "main/teximage.h"
+#include "main/texstate.h"
+#include "main/mipmap.h"
+
+#include "drivers/common/meta.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_fbo.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Get the intel_region which is the source for any glCopyTex[Sub]Image call.
+ *
+ * Do the best we can using the blitter. A future project is to use
+ * the texture engine and fragment programs for these copies.
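+ *
+ * For depth formats the region is only usable when its cpp matches the
+ * request (2 for GL_DEPTH_COMPONENT16-style reads, 4 for packed
+ * depth/stencil); see the switch below.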
+ */
+static const struct intel_region *
+get_teximage_source(struct intel_context *intel, GLenum internalFormat)
+{
+ struct intel_renderbuffer *irb;
+
+ DBG("%s %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(internalFormat));
+
+ switch (internalFormat) {
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 2)
+ return irb->region;
+ return NULL;
+ case GL_DEPTH24_STENCIL8_EXT:
+ case GL_DEPTH_STENCIL_EXT:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 4)
+ return irb->region;
+ return NULL;
+ case GL_RGBA:
+ case GL_RGBA8:
+ irb = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ /* We're required to set alpha to 1.0 in this case, but we can't
+ * do that with the blitter, so fall back. We could use the 3D
+ * engine or do two passes with the blitter, but it doesn't seem
+ * worth it for this case. */
+ if (irb->Base._BaseFormat == GL_RGB)
+ return NULL;
+ return irb->region;
+ case GL_RGB:
+ case GL_RGB8:
+ return intel_readbuf_region(intel);
+ default:
+ return NULL;
+ }
+}
+
+
+static GLboolean
+do_copy_texsubimage(struct intel_context *intel,
+ GLenum target,
+ struct intel_texture_image *intelImage,
+ GLenum internalFormat,
+ GLint dstx, GLint dsty,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ GLcontext *ctx = &intel->ctx;
+ const struct intel_region *src = get_teximage_source(intel, internalFormat);
+
+ if (!intelImage->mt || !src) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s fail %p %p (0x%08x)\n",
+ __FUNCTION__, intelImage->mt, src, internalFormat);
+ return GL_FALSE;
+ }
+
+ if (intelImage->mt->cpp != src->cpp) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s fail %d vs %d cpp\n",
+ __FUNCTION__, intelImage->mt->cpp, src->cpp);
+ return GL_FALSE;
+ }
+
+ /* intel_flush(ctx); */
+ intel_prepare_render(intel);
+ {
+ drm_intel_bo *dst_bo = intel_region_buffer(intel,
+ intelImage->mt->region,
+ INTEL_WRITE_PART);
+ GLuint image_x, image_y;
+ GLshort src_pitch;
+
+ /* get dest x/y in destination texture */
+ intel_miptree_get_image_offset(intelImage->mt,
+ intelImage->level,
+ intelImage->face,
+ 0,
+ &image_x, &image_y);
+
+ /* The blitter can't handle Y-tiled buffers. */
+ if (intelImage->mt->region->tiling == I915_TILING_Y) {
+ return GL_FALSE;
+ }
+
+ if (ctx->ReadBuffer->Name == 0) {
+ /* Flip vertical orientation for system framebuffers */
+ y = ctx->ReadBuffer->Height - (y + height);
+ src_pitch = -src->pitch;
+ } else {
+ /* reading from a FBO, y is already oriented the way we like */
+ src_pitch = src->pitch;
+ }
+
+ /* blit from src buffer to texture */
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_pitch,
+ src->buffer,
+ 0,
+ src->tiling,
+ intelImage->mt->region->pitch,
+ dst_bo,
+ 0,
+ intelImage->mt->region->tiling,
+ src->draw_x + x, src->draw_y + y,
+ image_x + dstx, image_y + dsty,
+ width, height,
+ GL_COPY)) {
+ return GL_FALSE;
+ }
+ }
+
+ return GL_TRUE;
+}
+
+
+static void
+intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLint border)
+{
+ struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ int srcx, srcy, dstx, dsty, height;
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage1D(ctx, target, level, internalFormat,
+ width, border,
+ GL_RGBA, CHAN_TYPE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+ srcx = x;
+ srcy = y;
+ dstx = 0;
+ dsty = 0;
+ height = 1;
+ if (!_mesa_clip_copytexsubimage(ctx,
+ &dstx, &dsty,
+ &srcx, &srcy,
+ &width, &height))
+ return;
+
+ if (!do_copy_texsubimage(intel_context(ctx), target,
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, height))
+ goto fail;
+
+ return;
+
+ fail:
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexImage1D(ctx, target, level, internalFormat, x, y,
+ width, border);
+}
+
+
+static void
+intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLint border)
+{
+ struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ int srcx, srcy, dstx, dsty;
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage2D(ctx, target, level, internalFormat,
+ width, height, border,
+ GL_RGBA, GL_UNSIGNED_BYTE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+
+ srcx = x;
+ srcy = y;
+ dstx = 0;
+ dsty = 0;
+ if (!_mesa_clip_copytexsubimage(ctx,
+ &dstx, &dsty,
+ &srcx, &srcy,
+ &width, &height))
+ return;
+
+ if (!do_copy_texsubimage(intel_context(ctx), target,
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, height))
+ goto fail;
+
+ return;
+
+ fail:
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexImage2D(ctx, target, level, internalFormat, x, y,
+ width, height, border);
+}
+
+
+static void
+intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint x, GLint y, GLsizei width)
+{
+ struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+ /* XXX need to check <border> as in above function? */
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx), target,
+ intel_texture_image(texImage),
+ internalFormat, xoffset, 0, x, y, width, 1)) {
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexSubImage1D(ctx, target, level, xoffset, x, y, width);
+ }
+}
+
+
+static void
+intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ struct gl_texture_unit *texUnit = _mesa_get_current_tex_unit(ctx);
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx), target,
+ intel_texture_image(texImage),
+ internalFormat,
+ xoffset, yoffset, x, y, width, height)) {
+
+ if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ _mesa_meta_CopyTexSubImage2D(ctx, target, level,
+ xoffset, yoffset, x, y, width, height);
+ }
+}
+
+
+void
+intelInitTextureCopyImageFuncs(struct dd_function_table *functions)
+{
+ functions->CopyTexImage1D = intelCopyTexImage1D;
+ functions->CopyTexImage2D = intelCopyTexImage2D;
+ functions->CopyTexSubImage1D = intelCopyTexSubImage1D;
+ functions->CopyTexSubImage2D = intelCopyTexSubImage2D;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_format.c b/src/mesa/drivers/dri/intel/intel_tex_format.c
new file mode 100644
index 0000000000..5f813c0efa
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_format.c
@@ -0,0 +1,228 @@
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "main/enums.h"
+#include "main/formats.h"
+
+/**
+ * Choose hardware texture format given the user's glTexImage parameters.
+ *
+ * It works out that this function is fine for all the supported
+ * hardware. However, there is still a need to map the formats onto
+ * hardware descriptors.
+ *
+ * Note that the i915 can actually support many more formats than
+ * these if we take the step of simply swizzling the colors
+ * immediately after sampling...
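+ *
+ * For example (per the switch below): internalFormat GL_RGB with
+ * format/type GL_RGB/GL_UNSIGNED_SHORT_5_6_5 maps to MESA_FORMAT_RGB565,
+ * while a generic GL_RGBA request maps to MESA_FORMAT_ARGB8888 when the
+ * visual has at least 24 bits of RGB and to MESA_FORMAT_ARGB4444 otherwise.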
+ */
+gl_format
+intelChooseTextureFormat(GLcontext * ctx, GLint internalFormat,
+ GLenum format, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLboolean do32bpt = (intel->ctx.Visual.rgbBits >= 24);
+
+#if 0
+ printf("%s intFmt=0x%x format=0x%x type=0x%x\n",
+ __FUNCTION__, internalFormat, format, type);
+#endif
+
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_COMPRESSED_RGBA:
+ if (format == GL_BGRA) {
+ if (type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) {
+ return MESA_FORMAT_ARGB8888;
+ }
+ else if (type == GL_UNSIGNED_SHORT_4_4_4_4_REV) {
+ return MESA_FORMAT_ARGB4444;
+ }
+ else if (type == GL_UNSIGNED_SHORT_1_5_5_5_REV) {
+ return MESA_FORMAT_ARGB1555;
+ }
+ }
+ return do32bpt ? MESA_FORMAT_ARGB8888 : MESA_FORMAT_ARGB4444;
+
+ case 3:
+ case GL_RGB:
+ case GL_COMPRESSED_RGB:
+ if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
+ return MESA_FORMAT_RGB565;
+ }
+ if (do32bpt) {
+ if (intel->has_xrgb_textures)
+ return MESA_FORMAT_XRGB8888;
+ else
+ return MESA_FORMAT_ARGB8888;
+ } else {
+ return MESA_FORMAT_RGB565;
+ }
+
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ return do32bpt ? MESA_FORMAT_ARGB8888 : MESA_FORMAT_ARGB4444;
+
+ case GL_RGBA4:
+ case GL_RGBA2:
+ return MESA_FORMAT_ARGB4444;
+
+ case GL_RGB5_A1:
+ return MESA_FORMAT_ARGB1555;
+
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ if (intel->has_xrgb_textures)
+ return MESA_FORMAT_XRGB8888;
+ else
+ return MESA_FORMAT_ARGB8888;
+
+ case GL_RGB5:
+ case GL_RGB4:
+ case GL_R3_G3_B2:
+ return MESA_FORMAT_RGB565;
+
+ case GL_ALPHA:
+ case GL_ALPHA4:
+ case GL_ALPHA8:
+ case GL_ALPHA12:
+ case GL_ALPHA16:
+ case GL_COMPRESSED_ALPHA:
+ return MESA_FORMAT_A8;
+
+ case 1:
+ case GL_LUMINANCE:
+ case GL_LUMINANCE4:
+ case GL_LUMINANCE8:
+ case GL_LUMINANCE12:
+ case GL_LUMINANCE16:
+ case GL_COMPRESSED_LUMINANCE:
+ return MESA_FORMAT_L8;
+
+ case GL_LUMINANCE12_ALPHA4:
+ case GL_LUMINANCE12_ALPHA12:
+ case GL_LUMINANCE16_ALPHA16:
+#ifndef I915
+ return MESA_FORMAT_AL1616;
+#else
+ /* FALLTHROUGH */
+#endif
+
+ case 2:
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE4_ALPHA4:
+ case GL_LUMINANCE6_ALPHA2:
+ case GL_LUMINANCE8_ALPHA8:
+ case GL_COMPRESSED_LUMINANCE_ALPHA:
+ return MESA_FORMAT_AL88;
+
+ case GL_INTENSITY:
+ case GL_INTENSITY4:
+ case GL_INTENSITY8:
+ case GL_INTENSITY12:
+ case GL_INTENSITY16:
+ case GL_COMPRESSED_INTENSITY:
+ return MESA_FORMAT_I8;
+
+ case GL_YCBCR_MESA:
+ if (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE)
+ return MESA_FORMAT_YCBCR;
+ else
+ return MESA_FORMAT_YCBCR_REV;
+
+ case GL_COMPRESSED_RGB_FXT1_3DFX:
+ return MESA_FORMAT_RGB_FXT1;
+ case GL_COMPRESSED_RGBA_FXT1_3DFX:
+ return MESA_FORMAT_RGBA_FXT1;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ return MESA_FORMAT_RGB_DXT1;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return MESA_FORMAT_RGBA_DXT1;
+
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return MESA_FORMAT_RGBA_DXT3;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return MESA_FORMAT_RGBA_DXT5;
+
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+#if 0
+ return MESA_FORMAT_Z16;
+#else
+ /* fall-through.
+       * A 16bpp depth texture can't be paired with a stencil buffer, so
+       * always use a combined depth/stencil format instead.
+ */
+#endif
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ return MESA_FORMAT_S8_Z24;
+
+#ifndef I915
+ case GL_SRGB_EXT:
+ case GL_SRGB8_EXT:
+ case GL_SRGB_ALPHA_EXT:
+ case GL_SRGB8_ALPHA8_EXT:
+ case GL_COMPRESSED_SRGB_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_EXT:
+ case GL_COMPRESSED_SLUMINANCE_EXT:
+ case GL_COMPRESSED_SLUMINANCE_ALPHA_EXT:
+ return MESA_FORMAT_SARGB8;
+ case GL_SLUMINANCE_EXT:
+ case GL_SLUMINANCE8_EXT:
+ if (intel->has_luminance_srgb)
+ return MESA_FORMAT_SL8;
+ else
+ return MESA_FORMAT_SARGB8;
+ case GL_SLUMINANCE_ALPHA_EXT:
+ case GL_SLUMINANCE8_ALPHA8_EXT:
+ if (intel->has_luminance_srgb)
+ return MESA_FORMAT_SLA8;
+ else
+ return MESA_FORMAT_SARGB8;
+ case GL_COMPRESSED_SRGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
+ return MESA_FORMAT_SRGB_DXT1;
+
+ /* i915 could also do this */
+ case GL_DUDV_ATI:
+ case GL_DU8DV8_ATI:
+ return MESA_FORMAT_DUDV8;
+ case GL_RGBA_SNORM:
+ case GL_RGBA8_SNORM:
+ return MESA_FORMAT_SIGNED_RGBA8888_REV;
+#endif
+
+ default:
+ fprintf(stderr, "unexpected texture format %s in %s\n",
+ _mesa_lookup_enum_by_nr(internalFormat), __FUNCTION__);
+ return MESA_FORMAT_NONE;
+ }
+
+ return MESA_FORMAT_NONE; /* never get here */
+}
+
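+/* Return the bytes per block divided by the block width for a compressed
+ * format (e.g. 2 for DXT1/FXT1, 4 for DXT3/5); this is the comp_byte value
+ * the miptree code expects.
+ */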
+int intel_compressed_num_bytes(GLuint mesaFormat)
+{
+ GLuint bw, bh;
+ GLuint block_size;
+
+ block_size = _mesa_get_format_bytes(mesaFormat);
+ _mesa_get_format_block_size(mesaFormat, &bw, &bh);
+
+ return block_size / bw;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_image.c b/src/mesa/drivers/dri/intel/intel_tex_image.c
new file mode 100644
index 0000000000..7d33df3599
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_image.c
@@ -0,0 +1,856 @@
+
+#include "main/glheader.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "main/enums.h"
+#include "main/bufferobj.h"
+#include "main/convolve.h"
+#include "main/context.h"
+#include "main/formats.h"
+#include "main/texcompress.h"
+#include "main/texstore.h"
+#include "main/texgetimage.h"
+#include "main/texobj.h"
+#include "main/texstore.h"
+#include "main/teximage.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+#include "intel_fbo.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/* Functions to store texture images. Where possible, mipmap_tree's
+ * will be created or further instantiated with image data, otherwise
+ * images will be stored in malloc'd memory. A validation step is
+ * required to pull those images into a mipmap tree, or otherwise
+ * decide a fallback is required.
+ */
+
+
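+/* Integer ceil(log2(n)) for n >= 1, e.g. logbase2(5) == 3; returns 0 for
+ * n <= 1.
+ */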
+static int
+logbase2(int n)
+{
+ GLint i = 1;
+ GLint log2 = 0;
+
+ while (n > i) {
+ i *= 2;
+ log2++;
+ }
+
+ return log2;
+}
+
+
+/* Don't allocate a tree if (Border != 0) or (any dimension == 1); such
+ * images are stored in malloc'd memory instead.
+ *
+ * Otherwise, if max_level >= level >= min_level, create a tree with
+ * space for textures from min_level down to max_level.
+ *
+ * Otherwise, create a tree with space for textures from (level
+ * 0)..(1x1).  Consider pruning this tree at validation time if the
+ * saving is worth it.
+ */
+static void
+guess_and_alloc_mipmap_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage,
+ GLboolean expect_accelerated_upload)
+{
+ GLuint firstLevel;
+ GLuint lastLevel;
+ GLuint width = intelImage->base.Width;
+ GLuint height = intelImage->base.Height;
+ GLuint depth = intelImage->base.Depth;
+ GLuint l2width, l2height, l2depth;
+ GLuint i, comp_byte = 0;
+ GLuint texelBytes;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->base.Border ||
+ ((intelImage->base._BaseFormat == GL_DEPTH_COMPONENT) &&
+ ((intelObj->base.WrapS == GL_CLAMP_TO_BORDER) ||
+ (intelObj->base.WrapT == GL_CLAMP_TO_BORDER))))
+ return;
+
+ if (intelImage->level > intelObj->base.BaseLevel &&
+ (intelImage->base.Width == 1 ||
+ (intelObj->base.Target != GL_TEXTURE_1D &&
+ intelImage->base.Height == 1) ||
+ (intelObj->base.Target == GL_TEXTURE_3D &&
+ intelImage->base.Depth == 1)))
+ return;
+
+ /* If this image disrespects BaseLevel, allocate from level zero.
+ * Usually BaseLevel == 0, so it's unlikely to happen.
+ */
+ if (intelImage->level < intelObj->base.BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = intelObj->base.BaseLevel;
+
+
+ /* Figure out image dimensions at start level.
+ */
+ for (i = intelImage->level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+
+ /* Guess a reasonable value for lastLevel. This is probably going
+ * to be wrong fairly often and might mean that we have to look at
+ * resizable buffers, or require that buffers implement lazy
+ * pagetable arrangements.
+ */
+ if ((intelObj->base.MinFilter == GL_NEAREST ||
+ intelObj->base.MinFilter == GL_LINEAR) &&
+ intelImage->level == firstLevel &&
+ (intel->gen < 4 || firstLevel == 0)) {
+ lastLevel = firstLevel;
+ }
+ else {
+ l2width = logbase2(width);
+ l2height = logbase2(height);
+ l2depth = logbase2(depth);
+ lastLevel = firstLevel + MAX2(MAX2(l2width, l2height), l2depth);
+ }
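+   /* (For a mipmapped 64x16 base image, for instance, this yields
+    * lastLevel = firstLevel + logbase2(64) = firstLevel + 6.)
+    */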
+
+ assert(!intelObj->mt);
+ if (_mesa_is_format_compressed(intelImage->base.TexFormat))
+ comp_byte = intel_compressed_num_bytes(intelImage->base.TexFormat);
+
+ texelBytes = _mesa_get_format_bytes(intelImage->base.TexFormat);
+
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ intelImage->base._BaseFormat,
+ intelImage->base.InternalFormat,
+ firstLevel,
+ lastLevel,
+ width,
+ height,
+ depth,
+ texelBytes,
+ comp_byte,
+ expect_accelerated_upload);
+
+ DBG("%s - success\n", __FUNCTION__);
+}
+
+
+
+
+static GLuint
+target_to_face(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return ((GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
+ default:
+ return 0;
+ }
+}
+
+/* There are actually quite a few combinations this will work for,
+ * more than what I've listed here.
+ */
+static GLboolean
+check_pbo_format(GLint internalFormat,
+ GLenum format, GLenum type,
+ gl_format mesa_format)
+{
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_RGBA8:
+ return (format == GL_BGRA &&
+ (type == GL_UNSIGNED_BYTE ||
+ type == GL_UNSIGNED_INT_8_8_8_8_REV) &&
+ mesa_format == MESA_FORMAT_ARGB8888);
+ case 3:
+ case GL_RGB:
+ return (format == GL_RGB &&
+ type == GL_UNSIGNED_SHORT_5_6_5 &&
+ mesa_format == MESA_FORMAT_RGB565);
+ case 1:
+ case GL_LUMINANCE:
+ return (format == GL_LUMINANCE &&
+ type == GL_UNSIGNED_BYTE &&
+ mesa_format == MESA_FORMAT_L8);
+ case GL_YCBCR_MESA:
+ return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
+ default:
+ return GL_FALSE;
+ }
+}
+
+
+/* XXX: Do this for TexSubImage also:
+ */
+static GLboolean
+try_pbo_upload(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_x, dst_y, dst_stride;
+ drm_intel_bo *dst_buffer = intel_region_buffer(intel,
+ intelImage->mt->region,
+ INTEL_WRITE_FULL);
+
+ if (!_mesa_is_bufferobj(unpack->BufferObj) ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ DBG("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ /* note: potential 64-bit ptr to 32-bit int cast */
+ src_offset = (GLuint) (unsigned long) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ intel_miptree_get_image_offset(intelImage->mt, intelImage->level,
+ intelImage->face, 0,
+ &dst_x, &dst_y);
+
+ dst_stride = intelImage->mt->region->pitch;
+
+ if (drm_intel_bo_references(intel->batch->buf, dst_buffer))
+ intel_flush(&intel->ctx);
+
+ {
+ drm_intel_bo *src_buffer = intel_bufferobj_buffer(intel, pbo, INTEL_READ);
+
+ if (!intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_stride, src_buffer, src_offset, GL_FALSE,
+ dst_stride, dst_buffer, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, dst_x, dst_y, width, height,
+ GL_COPY)) {
+ return GL_FALSE;
+ }
+ }
+
+ return GL_TRUE;
+}
+
+
+static GLboolean
+try_pbo_zcopy(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_x, dst_y, dst_stride;
+
+ if (!_mesa_is_bufferobj(unpack->BufferObj) ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ DBG("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ /* note: potential 64-bit ptr to 32-bit int cast */
+ src_offset = (GLuint) (unsigned long) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ intel_miptree_get_image_offset(intelImage->mt, intelImage->level,
+ intelImage->face, 0,
+ &dst_x, &dst_y);
+
+ dst_stride = intelImage->mt->region->pitch;
+
+ if (src_stride != dst_stride || dst_x != 0 || dst_y != 0 ||
+ src_offset != 0) {
+ DBG("%s: failure 2\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ intel_region_attach_pbo(intel, intelImage->mt->region, pbo);
+
+ return GL_TRUE;
+}
+
+
+static void
+intelTexImage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage, GLsizei imageSize,
+ GLboolean compressed)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLint postConvWidth = width;
+ GLint postConvHeight = height;
+ GLint texelBytes, sizeInBytes;
+ GLuint dstRowStride = 0, srcRowStride = texImage->RowStride;
+
+ DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);
+
+ intelImage->face = target_to_face(target);
+ intelImage->level = level;
+
+ if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
+ _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
+ &postConvHeight);
+ }
+
+ if (_mesa_is_format_compressed(texImage->TexFormat)) {
+ texelBytes = 0;
+ }
+ else {
+ texelBytes = _mesa_get_format_bytes(texImage->TexFormat);
+
+ /* Minimum pitch of 32 bytes */
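+      /* (e.g. a 4-texel-wide ARGB8888 level, 16 bytes per row, is widened
+       * to 8 texels)
+       */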
+ if (postConvWidth * texelBytes < 32) {
+ postConvWidth = 32 / texelBytes;
+ texImage->RowStride = postConvWidth;
+ }
+
+ if (!intelImage->mt) {
+ assert(texImage->RowStride == postConvWidth);
+ }
+ }
+
+ /* Release the reference to a potentially orphaned buffer.
+ * Release any old malloced memory.
+ */
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ assert(!texImage->Data);
+ }
+ else if (texImage->Data) {
+ _mesa_free_texmemory(texImage->Data);
+ texImage->Data = NULL;
+ }
+
+ /* If this is the only texture image in the tree, could call
+ * bmBufferData with NULL data to free the old block and avoid
+ * waiting on any outstanding fences.
+ */
+ if (intelObj->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level &&
+ intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
+ !intel_miptree_match_image(intelObj->mt, &intelImage->base)) {
+
+ DBG("release it\n");
+ intel_miptree_release(intel, &intelObj->mt);
+ assert(!intelObj->mt);
+ }
+
+ if (!intelObj->mt) {
+ guess_and_alloc_mipmap_tree(intel, intelObj, intelImage, pixels == NULL);
+ if (!intelObj->mt) {
+ DBG("guess_and_alloc_mipmap_tree: failed\n");
+ }
+ }
+
+ assert(!intelImage->mt);
+
+ if (intelObj->mt &&
+ intel_miptree_match_image(intelObj->mt, &intelImage->base)) {
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+ assert(intelImage->mt);
+ } else if (intelImage->base.Border == 0) {
+ int comp_byte = 0;
+ GLuint texelBytes = _mesa_get_format_bytes(intelImage->base.TexFormat);
+ GLenum baseFormat = _mesa_get_format_base_format(intelImage->base.TexFormat);
+ if (_mesa_is_format_compressed(intelImage->base.TexFormat)) {
+ comp_byte =
+ intel_compressed_num_bytes(intelImage->base.TexFormat);
+ }
+
+ /* Didn't fit in the object miptree, but it's suitable for inclusion in
+ * a miptree, so create one just for our level and store it in the image.
+ * It'll get moved into the object miptree at validate time.
+ */
+ intelImage->mt = intel_miptree_create(intel, target,
+ baseFormat,
+ internalFormat,
+ level, level,
+ width, height, depth,
+ texelBytes,
+ comp_byte, pixels == NULL);
+
+ }
+
+ /* PBO fastpaths:
+ */
+ if (dims <= 2 &&
+ intelImage->mt &&
+ _mesa_is_bufferobj(unpack->BufferObj) &&
+ check_pbo_format(internalFormat, format,
+ type, intelImage->base.TexFormat)) {
+
+ DBG("trying pbo upload\n");
+
+      /* Attempt to texture directly from PBO data (zero copy upload).
+       *
+       * Note that this can lead to worse as well as better performance
+       * (in particular when intel_region_cow() is required), so it may be
+       * worth disabling.
+       */
+ if (intelObj->mt == intelImage->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level) {
+
+ if (try_pbo_zcopy(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+
+ DBG("pbo zcopy upload succeeded\n");
+ return;
+ }
+ }
+
+
+ /* Otherwise, attempt to use the blitter for PBO image uploads.
+ */
+ if (try_pbo_upload(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+ DBG("pbo upload succeeded\n");
+ return;
+ }
+
+ DBG("pbo upload failed\n");
+ }
+
+ /* intelCopyTexImage calls this function with pixels == NULL, with
+ * the expectation that the mipmap tree will be set up but nothing
+ * more will be done. This is where those calls return:
+ */
+ if (compressed) {
+ pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
+ unpack,
+ "glCompressedTexImage");
+ } else {
+ pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
+ format, type,
+ pixels, unpack, "glTexImage");
+ }
+
+ if (intelImage->mt) {
+ if (pixels != NULL) {
+ /* Flush any queued rendering with the texture before mapping. */
+ if (drm_intel_bo_references(intel->batch->buf,
+ intelImage->mt->region->buffer)) {
+ intel_flush(ctx);
+ }
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ intelImage->base.ImageOffsets);
+ }
+
+ texImage->RowStride = dstRowStride / intelImage->mt->cpp;
+ }
+ else {
+ /* Allocate regular memory and store the image there temporarily. */
+ if (_mesa_is_format_compressed(texImage->TexFormat)) {
+ sizeInBytes = _mesa_format_image_size(texImage->TexFormat,
+ texImage->Width,
+ texImage->Height,
+ texImage->Depth);
+ dstRowStride =
+ _mesa_format_row_stride(texImage->TexFormat, width);
+ assert(dims != 3);
+ }
+ else {
+ dstRowStride = postConvWidth * texelBytes;
+ sizeInBytes = depth * dstRowStride * postConvHeight;
+ }
+
+ texImage->Data = _mesa_alloc_texmemory(sizeInBytes);
+ }
+
+ DBG("Upload image %dx%dx%d row_len %d "
+ "pitch %d pixels %d compressed %d\n",
+ width, height, depth, width * texelBytes, dstRowStride,
+ pixels ? 1 : 0, compressed);
+
+   /* Copy data.  Would like to know when it's ok for us to e.g. use
+    * the blitter to copy, or use the hardware to do the format
+    * conversion and copy:
+ */
+ if (pixels) {
+ if (compressed) {
+ if (intelImage->mt) {
+ struct intel_region *dst = intelImage->mt->region;
+ _mesa_copy_rect(texImage->Data, dst->cpp, dst->pitch,
+ 0, 0,
+ intelImage->mt->level[level].width,
+ (intelImage->mt->level[level].height+3)/4,
+ pixels,
+ srcRowStride,
+ 0, 0);
+ }
+ else {
+ memcpy(texImage->Data, pixels, imageSize);
+ }
+ }
+ else if (!_mesa_texstore(ctx, dims,
+ texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, unpack)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
+ }
+ }
+
+ _mesa_unmap_teximage_pbo(ctx, unpack);
+
+ if (intelImage->mt) {
+ if (pixels != NULL)
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+}
+
+
+static void
+intelTexImage3D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 3, target, level,
+ internalFormat, width, height, depth, border,
+ format, type, pixels, unpack, texObj, texImage, 0, GL_FALSE);
+}
+
+
+static void
+intelTexImage2D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 2, target, level,
+ internalFormat, width, height, 1, border,
+ format, type, pixels, unpack, texObj, texImage, 0, GL_FALSE);
+}
+
+
+static void
+intelTexImage1D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 1, target, level,
+ internalFormat, width, 1, 1, border,
+ format, type, pixels, unpack, texObj, texImage, 0, GL_FALSE);
+}
+
+
+static void
+intelCompressedTexImage2D( GLcontext *ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLsizei imageSize, const GLvoid *data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage )
+{
+ intelTexImage(ctx, 2, target, level,
+ internalFormat, width, height, 1, border,
+ 0, 0, data, &ctx->Unpack, texObj, texImage, imageSize, GL_TRUE);
+}
+
+
+/**
+ * Need to map texture image into memory before copying image data,
+ * then unmap it.
+ */
+static void
+intel_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage, GLboolean compressed)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ /* If we're reading from a texture that has been rendered to, need to
+ * make sure rendering is complete.
+ * We could probably predicate this on texObj->_RenderToTexture
+ */
+ intel_flush(ctx);
+
+ /* Map */
+ if (intelImage->mt) {
+ /* Image is stored in hardware format in a buffer managed by the
+ * kernel. Need to explicitly map and unmap it.
+ */
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ intelImage->base.RowStride /= intelImage->mt->cpp;
+ }
+ else {
+      /* Otherwise, the image should actually be stored in
+       * intelImage->base.Data.  This is pretty confusing for
+       * everybody; I'd much prefer to separate the two functions of
+       * texImage->Data - storage for texture images in main memory
+       * and access (i.e. mappings) of images.  In other words, we'd
+       * create a new texImage->Map field and leave Data simply for
+       * storage.
+       */
+ assert(intelImage->base.Data);
+ }
+
+
+ if (compressed) {
+ _mesa_get_compressed_teximage(ctx, target, level, pixels,
+ texObj, texImage);
+ }
+ else {
+ _mesa_get_teximage(ctx, target, level, format, type, pixels,
+ texObj, texImage);
+ }
+
+
+ /* Unmap */
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+}
+
+
+static void
+intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intel_get_tex_image(ctx, target, level, format, type, pixels,
+ texObj, texImage, GL_FALSE);
+}
+
+
+static void
+intelGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
+ GLvoid *pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intel_get_tex_image(ctx, target, level, 0, 0, pixels,
+ texObj, texImage, GL_TRUE);
+}
+
+void
+intelSetTexBuffer2(__DRIcontext *pDRICtx, GLint target,
+ GLint texture_format,
+ __DRIdrawable *dPriv)
+{
+ struct gl_framebuffer *fb = dPriv->driverPrivate;
+ struct intel_context *intel = pDRICtx->driverPrivate;
+ GLcontext *ctx = &intel->ctx;
+ struct intel_texture_object *intelObj;
+ struct intel_texture_image *intelImage;
+ struct intel_mipmap_tree *mt;
+ struct intel_renderbuffer *rb;
+ struct gl_texture_object *texObj;
+ struct gl_texture_image *texImage;
+ int level = 0, internalFormat;
+
+ texObj = _mesa_get_current_tex_object(ctx, target);
+ intelObj = intel_texture_object(texObj);
+
+ if (!intelObj)
+ return;
+
+ if (dPriv->lastStamp != dPriv->dri2.stamp ||
+ !pDRICtx->driScreenPriv->dri2.useInvalidate)
+ intel_update_renderbuffers(pDRICtx, dPriv);
+
+ rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
+ /* If the region isn't set, then intel_update_renderbuffers was unable
+ * to get the buffers for the drawable.
+ */
+ if (rb->region == NULL)
+ return;
+
+ if (texture_format == __DRI_TEXTURE_FORMAT_RGB)
+ internalFormat = GL_RGB;
+ else
+ internalFormat = GL_RGBA;
+
+ mt = intel_miptree_create_for_region(intel, target,
+ internalFormat,
+ 0, 0, rb->region, 1, 0);
+ if (mt == NULL)
+ return;
+
+ _mesa_lock_texture(&intel->ctx, texObj);
+
+ texImage = _mesa_get_tex_image(&intel->ctx, texObj, target, level);
+ intelImage = intel_texture_image(texImage);
+
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ assert(!texImage->Data);
+ }
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intelObj->mt = mt;
+ _mesa_init_teximage_fields(&intel->ctx, target, texImage,
+ rb->region->width, rb->region->height, 1,
+ 0, internalFormat);
+
+ intelImage->face = target_to_face(target);
+ intelImage->level = level;
+ if (texture_format == __DRI_TEXTURE_FORMAT_RGB)
+ texImage->TexFormat = MESA_FORMAT_XRGB8888;
+ else
+ texImage->TexFormat = MESA_FORMAT_ARGB8888;
+ texImage->RowStride = rb->region->pitch;
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+
+ if (!intel_miptree_match_image(intelObj->mt, &intelImage->base)) {
+ fprintf(stderr, "miptree doesn't match image\n");
+ }
+
+ _mesa_unlock_texture(&intel->ctx, texObj);
+}
+
+void
+intelSetTexBuffer(__DRIcontext *pDRICtx, GLint target, __DRIdrawable *dPriv)
+{
+ /* The old interface didn't have the format argument, so copy our
+ * implementation's behavior at the time.
+ */
+ intelSetTexBuffer2(pDRICtx, target, __DRI_TEXTURE_FORMAT_RGBA, dPriv);
+}
+
+#if FEATURE_OES_EGL_image
+static void
+intel_image_target_texture_2d(GLcontext *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ struct intel_mipmap_tree *mt;
+ __DRIscreen *screen;
+ __DRIimage *image;
+
+ screen = intel->intelScreen->driScrnPriv;
+ image = screen->dri2.image->lookupEGLImage(intel->driContext, image_handle,
+ intel->driContext->loaderPrivate);
+ if (image == NULL)
+ return;
+
+ mt = intel_miptree_create_for_region(intel, target,
+ image->internal_format,
+ 0, 0, image->region, 1, 0);
+ if (mt == NULL)
+ return;
+
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ assert(!texImage->Data);
+ }
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intelObj->mt = mt;
+ _mesa_init_teximage_fields(&intel->ctx, target, texImage,
+ image->region->width, image->region->height, 1,
+ 0, image->internal_format);
+
+ intelImage->face = target_to_face(target);
+ intelImage->level = 0;
+ texImage->TexFormat = image->format;
+ texImage->RowStride = image->region->pitch;
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+
+ if (!intel_miptree_match_image(intelObj->mt, &intelImage->base))
+ fprintf(stderr, "miptree doesn't match image\n");
+}
+#endif
+
+void
+intelInitTextureImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexImage1D = intelTexImage1D;
+ functions->TexImage2D = intelTexImage2D;
+ functions->TexImage3D = intelTexImage3D;
+ functions->GetTexImage = intelGetTexImage;
+
+ functions->CompressedTexImage2D = intelCompressedTexImage2D;
+ functions->GetCompressedTexImage = intelGetCompressedTexImage;
+
+#if FEATURE_OES_EGL_image
+ functions->EGLImageTargetTexture2D = intel_image_target_texture_2d;
+#endif
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_layout.c b/src/mesa/drivers/dri/intel/intel_tex_layout.c
new file mode 100644
index 0000000000..d39733b6c5
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_layout.c
@@ -0,0 +1,136 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "intel_mipmap_tree.h"
+#include "intel_tex_layout.h"
+#include "intel_context.h"
+#include "main/macros.h"
+
+void intel_get_texture_alignment_unit(GLenum internalFormat, GLuint *w, GLuint *h)
+{
+ switch (internalFormat) {
+ case GL_COMPRESSED_RGB_FXT1_3DFX:
+ case GL_COMPRESSED_RGBA_FXT1_3DFX:
+ *w = 8;
+ *h = 4;
+ break;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ *w = 4;
+ *h = 4;
+ break;
+
+ default:
+ *w = 4;
+ *h = 2;
+ break;
+ }
+}
+
+void i945_miptree_layout_2d(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling, int nr_images)
+{
+ GLuint align_h = 2, align_w = 4;
+ GLuint level;
+ GLuint x = 0;
+ GLuint y = 0;
+ GLuint width = mt->width0;
+ GLuint height = mt->height0;
+
+ mt->total_width = mt->width0;
+ intel_get_texture_alignment_unit(mt->internal_format, &align_w, &align_h);
+
+ if (mt->compressed) {
+ mt->total_width = ALIGN(mt->width0, align_w);
+ }
+
+ /* May need to adjust width to accommodate the placement of
+ * the 2nd mipmap. This occurs when the alignment
+ * constraints of mipmap placement push the right edge of the
+ * 2nd mipmap out past the width of its parent.
+ */
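+ /* For example (illustrative): for a compressed mipmapped texture with
+ * width0 = 4 and align_w = 4, mip1_width = ALIGN(2, 4) + ALIGN(1, 4) = 8,
+ * so total_width grows from 4 to 8.
+ */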
+ if (mt->first_level != mt->last_level) {
+ GLuint mip1_width;
+
+ if (mt->compressed) {
+ mip1_width = ALIGN(minify(mt->width0), align_w)
+ + ALIGN(minify(minify(mt->width0)), align_w);
+ } else {
+ mip1_width = ALIGN(minify(mt->width0), align_w)
+ + minify(minify(mt->width0));
+ }
+
+ if (mip1_width > mt->total_width) {
+ mt->total_width = mip1_width;
+ }
+ }
+
+ mt->total_height = 0;
+
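+ /* Illustrative sketch of the "layout below" packing done by this loop:
+ * level 0 is placed at (0, 0), level 1 directly below it, and levels 2
+ * and up are stacked vertically to the right of level 1.
+ */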
+ for ( level = mt->first_level ; level <= mt->last_level ; level++ ) {
+ GLuint img_height;
+
+ intel_miptree_set_level_info(mt, level, nr_images, x, y, width,
+ height, 1);
+
+ if (mt->compressed)
+ img_height = MAX2(1, height/4);
+ else
+ img_height = ALIGN(height, align_h);
+
+
+ /* Because the images are packed tightly, the final offset is not
+ * necessarily the largest one, so track the maximum:
+ */
+ mt->total_height = MAX2(mt->total_height, y + img_height);
+
+ /* Layout below: step to the right after the second mipmap; otherwise step down.
+ */
+ if (level == mt->first_level + 1) {
+ x += ALIGN(width, align_w);
+ }
+ else {
+ y += img_height;
+ }
+
+ width = minify(width);
+ height = minify(height);
+ }
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_layout.h b/src/mesa/drivers/dri/intel/intel_tex_layout.h
new file mode 100644
index 0000000000..1c8c53e545
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_layout.h
@@ -0,0 +1,44 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+ /*
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ * Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+#include "main/macros.h"
+
+
+static INLINE GLuint minify( GLuint d )
+{
+ return MAX2(1, d>>1);
+}
+
+extern void i945_miptree_layout_2d(struct intel_context *intel,
+ struct intel_mipmap_tree *mt,
+ uint32_t tiling, int nr_images);
+extern void intel_get_texture_alignment_unit(GLenum, GLuint *, GLuint *);
diff --git a/src/mesa/drivers/dri/intel/intel_tex_obj.h b/src/mesa/drivers/dri/intel/intel_tex_obj.h
new file mode 100644
index 0000000000..5f60e0ea4f
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_obj.h
@@ -0,0 +1,80 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _INTEL_TEX_OBJ_H
+#define _INTEL_TEX_OBJ_H
+
+struct intel_texture_object
+{
+ struct gl_texture_object base; /* The "parent" object */
+
+ /* The mipmap tree must include at least these levels once
+ * validated:
+ */
+ GLuint firstLevel;
+ GLuint lastLevel;
+
+ /* Offset for firstLevel image:
+ */
+ GLuint textureOffset;
+
+ /* On validation any active images held in main memory or in other
+ * regions will be copied to this region and the old storage freed.
+ */
+ struct intel_mipmap_tree *mt;
+};
+
+struct intel_texture_image
+{
+ struct gl_texture_image base;
+
+ /* These aren't stored in gl_texture_image
+ */
+ GLuint level;
+ GLuint face;
+
+ /* If intelImage->mt != NULL, image data is stored here.
+ * Else if intelImage->base.Data != NULL, image is stored there.
+ * Else there is no image data.
+ */
+ struct intel_mipmap_tree *mt;
+ GLboolean used_as_render_target;
+};
+
+static INLINE struct intel_texture_object *
+intel_texture_object(struct gl_texture_object *obj)
+{
+ return (struct intel_texture_object *) obj;
+}
+
+static INLINE struct intel_texture_image *
+intel_texture_image(struct gl_texture_image *img)
+{
+ return (struct intel_texture_image *) img;
+}
+
+#endif /* _INTEL_TEX_OBJ_H */
diff --git a/src/mesa/drivers/dri/intel/intel_tex_subimage.c b/src/mesa/drivers/dri/intel/intel_tex_subimage.c
new file mode 100644
index 0000000000..b7ce50a820
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_subimage.c
@@ -0,0 +1,277 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "main/mtypes.h"
+#include "main/texobj.h"
+#include "main/texstore.h"
+#include "main/texcompress.h"
+#include "main/enums.h"
+
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static void
+intelTexSubimage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLint width, GLint height, GLint depth,
+ GLsizei imageSize,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLboolean compressed)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLuint dstRowStride = 0;
+ drm_intel_bo *temp_bo = NULL, *dst_bo = NULL;
+ unsigned int blit_x = 0, blit_y = 0;
+
+ DBG("%s target %s level %d offset %d,%d %dx%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ level, xoffset, yoffset, width, height);
+
+ intel_flush(ctx);
+
+ if (compressed)
+ pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize,
+ pixels, packing,
+ "glCompressedTexImage");
+ else
+ pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, depth,
+ format, type, pixels, packing,
+ "glTexSubImage");
+ if (!pixels)
+ return;
+
+ intel_prepare_render(intel);
+
+ /* Map buffer if necessary. Need to lock to prevent other contexts
+ * from uploading the buffer under us.
+ */
+ if (intelImage->mt) {
+ dst_bo = intel_region_buffer(intel, intelImage->mt->region,
+ INTEL_WRITE_PART);
+
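+ /* If the destination BO is still busy on the GPU and the blitter path
+ * below can handle the layout, stage the upload in a temporary linear
+ * BO and blit it into place afterwards instead of stalling on a mapping
+ * of a busy buffer.
+ */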
+ if (!compressed &&
+ intelImage->mt->region->tiling != I915_TILING_Y &&
+ intel->gen < 6 && target == GL_TEXTURE_2D &&
+ drm_intel_bo_busy(dst_bo))
+ {
+ unsigned long pitch;
+ uint32_t tiling_mode = I915_TILING_NONE;
+ temp_bo = drm_intel_bo_alloc_tiled(intel->bufmgr,
+ "subimage blit bo",
+ width, height,
+ intelImage->mt->cpp,
+ &tiling_mode,
+ &pitch,
+ 0);
+ drm_intel_gem_bo_map_gtt(temp_bo);
+ texImage->Data = temp_bo->virtual;
+ texImage->ImageOffsets[0] = 0;
+ dstRowStride = pitch;
+
+ intel_miptree_get_image_offset(intelImage->mt, level,
+ intelImage->face, 0,
+ &blit_x, &blit_y);
+ blit_x += xoffset;
+ blit_y += yoffset;
+ xoffset = 0;
+ yoffset = 0;
+ } else {
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ texImage->ImageOffsets);
+ }
+ } else {
+ if (_mesa_is_format_compressed(texImage->TexFormat)) {
+ dstRowStride =
+ _mesa_format_row_stride(texImage->TexFormat, width);
+ assert(dims != 3);
+ }
+ else {
+ dstRowStride = texImage->RowStride * _mesa_get_format_bytes(texImage->TexFormat);
+ }
+ }
+
+ assert(dstRowStride);
+
+ if (compressed) {
+ if (intelImage->mt) {
+ struct intel_region *dst = intelImage->mt->region;
+
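+ /* The copy works in rows of 4x4 compression blocks: yoffset and height
+ * are divided by 4, and the width is rounded up to a multiple of 4.
+ */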
+ _mesa_copy_rect(texImage->Data, dst->cpp, dst->pitch,
+ xoffset, yoffset / 4,
+ (width + 3) & ~3, (height + 3) / 4,
+ pixels, (width + 3) & ~3, 0, 0);
+ }
+ else {
+ memcpy(texImage->Data, pixels, imageSize);
+ }
+ }
+ else {
+ if (!_mesa_texstore(ctx, dims, texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data,
+ xoffset, yoffset, zoffset,
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, packing)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ }
+
+ if (temp_bo) {
+ GLboolean ret;
+ unsigned int dst_pitch = intelImage->mt->region->pitch *
+ intelImage->mt->cpp;
+
+ drm_intel_gem_bo_unmap_gtt(temp_bo);
+ texImage->Data = NULL;
+
+ ret = intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ dstRowStride / intelImage->mt->cpp,
+ temp_bo, 0, GL_FALSE,
+ dst_pitch / intelImage->mt->cpp, dst_bo, 0,
+ intelImage->mt->region->tiling,
+ 0, 0, blit_x, blit_y, width, height,
+ GL_COPY);
+ assert(ret);
+ }
+ }
+
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ if (temp_bo) {
+ drm_intel_bo_unreference(temp_bo);
+ temp_bo = NULL;
+ } else if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+}
+
+
+static void
+intelTexSubImage3D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 3,
+ target, level,
+ xoffset, yoffset, zoffset,
+ width, height, depth, 0,
+ format, type, pixels, packing, texObj, texImage, GL_FALSE);
+}
+
+
+static void
+intelTexSubImage2D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 2,
+ target, level,
+ xoffset, yoffset, 0,
+ width, height, 1, 0,
+ format, type, pixels, packing, texObj, texImage, GL_FALSE);
+}
+
+
+static void
+intelTexSubImage1D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 1,
+ target, level,
+ xoffset, 0, 0,
+ width, 1, 1, 0,
+ format, type, pixels, packing, texObj, texImage, GL_FALSE);
+}
+
+static void
+intelCompressedTexSubImage2D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLsizei imageSize,
+ const GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 2,
+ target, level,
+ xoffset, yoffset, 0,
+ width, height, 1, imageSize,
+ format, 0, pixels, &ctx->Unpack, texObj, texImage, GL_TRUE);
+}
+
+
+
+void
+intelInitTextureSubImageFuncs(struct dd_function_table *functions)
+{
+ functions->TexSubImage1D = intelTexSubImage1D;
+ functions->TexSubImage2D = intelTexSubImage2D;
+ functions->TexSubImage3D = intelTexSubImage3D;
+ functions->CompressedTexSubImage2D = intelCompressedTexSubImage2D;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_validate.c b/src/mesa/drivers/dri/intel/intel_tex_validate.c
new file mode 100644
index 0000000000..ed5c5d896b
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_validate.c
@@ -0,0 +1,304 @@
+#include "main/mtypes.h"
+#include "main/macros.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Compute which mipmap levels really need to be sent to the hardware.
+ * This depends on the base image size, GL_TEXTURE_MIN_LOD,
+ * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
+ */
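+/*
+ * Illustrative example: with GL_LINEAR_MIPMAP_LINEAR filtering,
+ * BaseLevel 0, MaxLevel 1000 and a 64x64 base image on non-gen2
+ * hardware, firstLevel is 0 and lastLevel is MIN2(0 + 6, 1000) = 6.
+ */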
+static void
+intel_calculate_first_last_level(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ struct gl_texture_object *tObj = &intelObj->base;
+ const struct gl_texture_image *const baseImage =
+ tObj->Image[0][tObj->BaseLevel];
+
+ /* These must be signed values. MinLod and MaxLod can be negative numbers,
+ * and having firstLevel and lastLevel as signed prevents the need for
+ * extra sign checks.
+ */
+ int firstLevel;
+ int lastLevel;
+
+ /* Yes, this looks overly complicated, but it's all needed.
+ */
+ switch (tObj->Target) {
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_CUBE_MAP:
+ if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
+ /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
+ */
+ firstLevel = lastLevel = tObj->BaseLevel;
+ }
+ else {
+ if (intel->gen == 2) {
+ firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
+ firstLevel = MAX2(firstLevel, tObj->BaseLevel);
+ firstLevel = MIN2(firstLevel, tObj->BaseLevel + baseImage->MaxLog2);
+ lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
+ lastLevel = MAX2(lastLevel, tObj->BaseLevel);
+ lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
+ lastLevel = MIN2(lastLevel, tObj->MaxLevel);
+ lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
+ } else {
+ /* Min/max LOD are taken into account in sampler state. We don't
+ * want to re-layout textures just because clamping has been applied
+ * since it means a bunch of blitting around and probably no memory
+ * savings (since we have to keep the other levels around anyway).
+ */
+ firstLevel = tObj->BaseLevel;
+ lastLevel = MIN2(tObj->BaseLevel + baseImage->MaxLog2,
+ tObj->MaxLevel);
+ /* need at least one level */
+ lastLevel = MAX2(firstLevel, lastLevel);
+ }
+ }
+ break;
+ case GL_TEXTURE_RECTANGLE_NV:
+ case GL_TEXTURE_4D_SGIS:
+ firstLevel = lastLevel = 0;
+ break;
+ default:
+ return;
+ }
+
+ /* save these values */
+ intelObj->firstLevel = firstLevel;
+ intelObj->lastLevel = lastLevel;
+}
+
+/**
+ * Copies the image's contents at its level into the object's miptree,
+ * and updates the image to point at the object's miptree.
+ */
+static void
+copy_image_data_to_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage)
+{
+ if (intelImage->mt) {
+ /* Copy potentially with the blitter:
+ */
+ intel_miptree_image_copy(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level, intelImage->mt);
+
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+ else {
+ assert(intelImage->base.Data != NULL);
+
+ /* More straightforward upload.
+ */
+ intel_miptree_image_data(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level,
+ intelImage->base.Data,
+ intelImage->base.RowStride,
+ intelImage->base.RowStride *
+ intelImage->base.Height);
+ _mesa_align_free(intelImage->base.Data);
+ intelImage->base.Data = NULL;
+ }
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+}
+
+
+/* Ensure the texture object's miptree covers all currently active mipmap
+ * levels, (re)creating the tree and pulling in image data as necessary.
+ * Returns GL_TRUE on success, or GL_FALSE when a software fallback is
+ * required (e.g. a texture with a border).
+ */
+GLuint
+intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+{
+ struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ int comp_byte = 0;
+ int cpp;
+ GLuint face, i;
+ GLuint nr_faces = 0;
+ struct intel_texture_image *firstImage;
+
+ /* We know/require this is true by now:
+ */
+ assert(intelObj->base._Complete);
+
+ /* What levels must the tree include at a minimum?
+ */
+ intel_calculate_first_last_level(intel, intelObj);
+ firstImage = intel_texture_image(tObj->Image[0][intelObj->firstLevel]);
+
+ /* Fallback case:
+ */
+ if (firstImage->base.Border) {
+ if (intelObj->mt) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+ return GL_FALSE;
+ }
+
+
+ /* If both firstImage and intelObj have a tree which can contain
+ * all active images, favour firstImage. Note that because of the
+ * completeness requirement, we know that the image dimensions
+ * will match.
+ */
+ if (firstImage->mt &&
+ firstImage->mt != intelObj->mt &&
+ firstImage->mt->first_level <= intelObj->firstLevel &&
+ firstImage->mt->last_level >= intelObj->lastLevel) {
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intel_miptree_reference(&intelObj->mt, firstImage->mt);
+ }
+
+ if (_mesa_is_format_compressed(firstImage->base.TexFormat)) {
+ comp_byte = intel_compressed_num_bytes(firstImage->base.TexFormat);
+ cpp = comp_byte;
+ }
+ else
+ cpp = _mesa_get_format_bytes(firstImage->base.TexFormat);
+
+ /* Check tree can hold all active levels. Check tree matches
+ * target, imageFormat, etc.
+ *
+ * XXX: For some layouts (eg i945?), the test might have to be
+ * first_level == firstLevel, as the tree isn't valid except at the
+ * original start level. Hope to get around this by
+ * programming minLod, maxLod, baseLevel into the hardware and
+ * leaving the tree alone.
+ */
+ if (intelObj->mt &&
+ (intelObj->mt->target != intelObj->base.Target ||
+ intelObj->mt->internal_format != firstImage->base.InternalFormat ||
+ intelObj->mt->first_level != intelObj->firstLevel ||
+ intelObj->mt->last_level != intelObj->lastLevel ||
+ intelObj->mt->width0 != firstImage->base.Width ||
+ intelObj->mt->height0 != firstImage->base.Height ||
+ intelObj->mt->depth0 != firstImage->base.Depth ||
+ intelObj->mt->cpp != cpp ||
+ intelObj->mt->compressed != _mesa_is_format_compressed(firstImage->base.TexFormat))) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+
+
+ /* May need to create a new tree:
+ */
+ if (!intelObj->mt) {
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ firstImage->base._BaseFormat,
+ firstImage->base.InternalFormat,
+ intelObj->firstLevel,
+ intelObj->lastLevel,
+ firstImage->base.Width,
+ firstImage->base.Height,
+ firstImage->base.Depth,
+ cpp,
+ comp_byte,
+ GL_TRUE);
+ }
+
+ /* Pull in any images not in the object's tree:
+ */
+ nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ /* Need to import images held in main memory or in other trees.
+ * If the image is a render target, its data doesn't have to live in
+ * the object tree (otherwise we'd be FBO incomplete), but we must keep
+ * track of its miptree as still needing to be pulled in, or we'll lose
+ * any rendering already done to it.
+ */
+ if (intelObj->mt != intelImage->mt &&
+ !intelImage->used_as_render_target) {
+ copy_image_data_to_tree(intel, intelObj, intelImage);
+ }
+ }
+ }
+
+ return GL_TRUE;
+}
+
+void
+intel_tex_map_level_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ int level)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face;
+
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][level]);
+
+ if (intelImage && intelImage->mt) {
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ /* convert stride to texels, not bytes */
+ intelImage->base.RowStride /= intelImage->mt->cpp;
+ /* intelImage->base.ImageStride /= intelImage->mt->cpp; */
+ }
+ }
+}
+
+void
+intel_tex_unmap_level_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ int level)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face;
+
+ for (face = 0; face < nr_faces; face++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][level]);
+
+ if (intelImage && intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+ }
+}
+
+void
+intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ int i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++)
+ intel_tex_map_level_images(intel, intelObj, i);
+}
+
+void
+intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ int i;
+
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++)
+ intel_tex_unmap_level_images(intel, intelObj, i);
+}
diff --git a/src/mesa/drivers/dri/intel/server/i830_dri.h b/src/mesa/drivers/dri/intel/server/i830_dri.h
new file mode 100644
index 0000000000..def049e7a6
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/server/i830_dri.h
@@ -0,0 +1,62 @@
+/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/i810/i830_dri.h,v 1.6 2003/09/28 20:15:59 alanh Exp $ */
+
+#ifndef _I830_DRI_H
+#define _I830_DRI_H
+
+#include "xf86drm.h"
+
+#define I830_MAX_DRAWABLES 256
+
+#define I830_MAJOR_VERSION 1
+#define I830_MINOR_VERSION 9
+#define I830_PATCHLEVEL 0
+
+#define I830_REG_SIZE 0x80000
+
+typedef struct _I830DRIRec {
+ drm_handle_t regs;
+ drmSize regsSize;
+
+ drmSize unused1; /* backbufferSize */
+ drm_handle_t unused2; /* backbuffer */
+
+ drmSize unused3; /* depthbufferSize */
+ drm_handle_t unused4; /* depthbuffer */
+
+ drmSize unused5; /* rotatedSize */
+ drm_handle_t unused6; /* rotatedbuffer */
+
+ drm_handle_t unused7; /* textures */
+ int unused8; /* textureSize */
+
+ drm_handle_t unused9; /* agp_buffers */
+ drmSize unused10; /* agp_buf_size */
+
+ int deviceID;
+ int width;
+ int height;
+ int mem;
+ int cpp;
+ int bitsPerPixel;
+
+ int unused11[8]; /* was front/back/depth/rotated offset/pitch */
+
+ int unused12; /* logTextureGranularity */
+ int unused13; /* textureOffset */
+
+ int irq;
+ int sarea_priv_offset;
+} I830DRIRec, *I830DRIPtr;
+
+typedef struct {
+ /* Nothing here yet */
+ int dummy;
+} I830ConfigPrivRec, *I830ConfigPrivPtr;
+
+typedef struct {
+ /* Nothing here yet */
+ int dummy;
+} I830DRIContextRec, *I830DRIContextPtr;
+
+
+#endif
diff --git a/src/mesa/drivers/dri/intel/server/intel.h b/src/mesa/drivers/dri/intel/server/intel.h
new file mode 100644
index 0000000000..6ea72499c1
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/server/intel.h
@@ -0,0 +1,331 @@
+#ifndef _INTEL_H_
+#define _INTEL_H_
+
+#include "xf86drm.h" /* drm_handle_t, etc */
+
+/* Intel */
+#ifndef PCI_CHIP_I810
+#define PCI_CHIP_I810 0x7121
+#define PCI_CHIP_I810_DC100 0x7123
+#define PCI_CHIP_I810_E 0x7125
+#define PCI_CHIP_I815 0x1132
+#define PCI_CHIP_I810_BRIDGE 0x7120
+#define PCI_CHIP_I810_DC100_BRIDGE 0x7122
+#define PCI_CHIP_I810_E_BRIDGE 0x7124
+#define PCI_CHIP_I815_BRIDGE 0x1130
+#endif
+
+#define PCI_CHIP_845_G 0x2562
+#define PCI_CHIP_I830_M 0x3577
+
+#ifndef PCI_CHIP_I855_GM
+#define PCI_CHIP_I855_GM 0x3582
+#define PCI_CHIP_I855_GM_BRIDGE 0x3580
+#endif
+
+#ifndef PCI_CHIP_I865_G
+#define PCI_CHIP_I865_G 0x2572
+#define PCI_CHIP_I865_G_BRIDGE 0x2570
+#endif
+
+#ifndef PCI_CHIP_I915_G
+#define PCI_CHIP_I915_G 0x2582
+#define PCI_CHIP_I915_G_BRIDGE 0x2580
+#endif
+
+#ifndef PCI_CHIP_I915_GM
+#define PCI_CHIP_I915_GM 0x2592
+#define PCI_CHIP_I915_GM_BRIDGE 0x2590
+#endif
+
+#ifndef PCI_CHIP_E7221_G
+#define PCI_CHIP_E7221_G 0x258A
+/* Same as I915_G_BRIDGE */
+#define PCI_CHIP_E7221_G_BRIDGE 0x2580
+#endif
+
+#ifndef PCI_CHIP_I945_G
+#define PCI_CHIP_I945_G 0x2772
+#define PCI_CHIP_I945_G_BRIDGE 0x2770
+#endif
+
+#ifndef PCI_CHIP_I945_GM
+#define PCI_CHIP_I945_GM 0x27A2
+#define PCI_CHIP_I945_GM_BRIDGE 0x27A0
+#endif
+
+#define IS_I810(pI810) (pI810->Chipset == PCI_CHIP_I810 || \
+ pI810->Chipset == PCI_CHIP_I810_DC100 || \
+ pI810->Chipset == PCI_CHIP_I810_E)
+#define IS_I815(pI810) (pI810->Chipset == PCI_CHIP_I815)
+#define IS_I830(pI810) (pI810->Chipset == PCI_CHIP_I830_M)
+#define IS_845G(pI810) (pI810->Chipset == PCI_CHIP_845_G)
+#define IS_I85X(pI810) (pI810->Chipset == PCI_CHIP_I855_GM)
+#define IS_I852(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I852_GM || pI810->variant == I852_GME))
+#define IS_I855(pI810) (pI810->Chipset == PCI_CHIP_I855_GM && (pI810->variant == I855_GM || pI810->variant == I855_GME))
+#define IS_I865G(pI810) (pI810->Chipset == PCI_CHIP_I865_G)
+
+#define IS_I915G(pI810) (pI810->Chipset == PCI_CHIP_I915_G || pI810->Chipset == PCI_CHIP_E7221_G)
+#define IS_I915GM(pI810) (pI810->Chipset == PCI_CHIP_I915_GM)
+#define IS_I945G(pI810) (pI810->Chipset == PCI_CHIP_I945_G)
+#define IS_I945GM(pI810) (pI810->Chipset == PCI_CHIP_I945_GM)
+#define IS_I9XX(pI810) (IS_I915G(pI810) || IS_I915GM(pI810) || IS_I945G(pI810) || IS_I945GM(pI810))
+
+#define IS_MOBILE(pI810) (IS_I830(pI810) || IS_I85X(pI810) || IS_I915GM(pI810) || IS_I945GM(pI810))
+
+#define I830_GMCH_CTRL 0x52
+
+#define I830_GMCH_MEM_MASK 0x1
+#define I830_GMCH_MEM_64M 0x1
+#define I830_GMCH_MEM_128M 0
+
+#define I830_GMCH_GMS_MASK 0x70
+#define I830_GMCH_GMS_DISABLED 0x00
+#define I830_GMCH_GMS_LOCAL 0x10
+#define I830_GMCH_GMS_STOLEN_512 0x20
+#define I830_GMCH_GMS_STOLEN_1024 0x30
+#define I830_GMCH_GMS_STOLEN_8192 0x40
+
+#define I855_GMCH_GMS_MASK (0x7 << 4)
+#define I855_GMCH_GMS_DISABLED 0x00
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915G_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915G_GMCH_GMS_STOLEN_64M (0x7 << 4)
+
+typedef unsigned char Bool;
+#define TRUE 1
+#define FALSE 0
+
+#define PIPE_NONE 0<<0
+#define PIPE_CRT 1<<0
+#define PIPE_TV 1<<1
+#define PIPE_DFP 1<<2
+#define PIPE_LFP 1<<3
+#define PIPE_CRT2 1<<4
+#define PIPE_TV2 1<<5
+#define PIPE_DFP2 1<<6
+#define PIPE_LFP2 1<<7
+
+typedef struct _I830MemPool *I830MemPoolPtr;
+typedef struct _I830MemRange *I830MemRangePtr;
+typedef struct _I830MemRange {
+ long Start;
+ long End;
+ long Size;
+ unsigned long Physical;
+ unsigned long Offset; /* Offset of AGP-allocated portion */
+ unsigned long Alignment;
+ drm_handle_t Key;
+ unsigned long Pitch; // add pitch
+ I830MemPoolPtr Pool;
+} I830MemRange;
+
+typedef struct _I830MemPool {
+ I830MemRange Total;
+ I830MemRange Free;
+ I830MemRange Fixed;
+ I830MemRange Allocated;
+} I830MemPool;
+
+typedef struct {
+ int tail_mask;
+ I830MemRange mem;
+ unsigned char *virtual_start;
+ int head;
+ int tail;
+ int space;
+} I830RingBuffer;
+
+typedef struct _I830Rec {
+ unsigned char *MMIOBase;
+ unsigned char *FbBase;
+ int cpp;
+ uint32_t aper_size;
+ unsigned int bios_version;
+
+ /* These are set in PreInit and never changed. */
+ long FbMapSize;
+ long TotalVideoRam;
+ I830MemRange StolenMemory; /* pre-allocated memory */
+ long BIOSMemorySize; /* min stolen pool size */
+ int BIOSMemSizeLoc;
+
+ /* These change according to what has been allocated. */
+ long FreeMemory;
+ I830MemRange MemoryAperture;
+ I830MemPool StolenPool;
+ long allocatedMemory;
+
+ /* Regions allocated either from the above pools, or from agpgart. */
+ /* for single and dual head configurations */
+ I830MemRange FrontBuffer;
+ I830MemRange FrontBuffer2;
+ I830MemRange Scratch;
+ I830MemRange Scratch2;
+
+ I830RingBuffer *LpRing;
+
+ I830MemRange BackBuffer;
+ I830MemRange DepthBuffer;
+ I830MemRange TexMem;
+ int TexGranularity;
+ I830MemRange ContextMem;
+ int drmMinor;
+ Bool have3DWindows;
+
+ Bool NeedRingBufferLow;
+ Bool allowPageFlip;
+ Bool disableTiling;
+
+ int Chipset;
+ unsigned long LinearAddr;
+ unsigned long MMIOAddr;
+
+ drmSize registerSize; /**< \brief MMIO register map size */
+ drm_handle_t registerHandle; /**< \brief MMIO register map handle */
+ // IOADDRESS ioBase;
+ int irq; /**< \brief IRQ number */
+ int GttBound;
+
+ drm_handle_t ring_map;
+ unsigned int Fence[8];
+
+} I830Rec;
+
+/*
+ * 12288 is set as the maximum, chosen because it is enough for
+ * 1920x1440@32bpp with a 2048-pixel line pitch, with some to spare.
+ */
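+/* Illustrative arithmetic (values are in KB): 2048 pixels * 4 bytes =
+ * 8 KB per line, and 1440 lines * 8 KB = 11520 KB, leaving 768 KB of
+ * the 12288 KB spare.
+ */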
+#define I830_MAXIMUM_VBIOS_MEM 12288
+#define I830_DEFAULT_VIDEOMEM_2D (MB(32) / 1024)
+#define I830_DEFAULT_VIDEOMEM_3D (MB(64) / 1024)
+
+/* Flags for memory allocation function */
+#define FROM_ANYWHERE 0x00000000
+#define FROM_POOL_ONLY 0x00000001
+#define FROM_NEW_ONLY 0x00000002
+#define FROM_MASK 0x0000000f
+
+#define ALLOCATE_AT_TOP 0x00000010
+#define ALLOCATE_AT_BOTTOM 0x00000020
+#define FORCE_GAPS 0x00000040
+
+#define NEED_PHYSICAL_ADDR 0x00000100
+#define ALIGN_BOTH_ENDS 0x00000200
+#define FORCE_LOW 0x00000400
+
+#define ALLOC_NO_TILING 0x00001000
+#define ALLOC_INITIAL 0x00002000
+
+#define ALLOCATE_DRY_RUN 0x80000000
+
+/* Chipset registers for VIDEO BIOS memory RW access */
+#define _855_DRAM_RW_CONTROL 0x58
+#define _845_DRAM_RW_CONTROL 0x90
+#define DRAM_WRITE 0x33330000
+
+#define KB(x) ((x) * 1024)
+#define MB(x) ((x) * KB(1024))
+
+#define GTT_PAGE_SIZE KB(4)
+#define ROUND_TO(x, y) (((x) + (y) - 1) / (y) * (y))
+#define ROUND_DOWN_TO(x, y) ((x) / (y) * (y))
+#define ROUND_TO_PAGE(x) ROUND_TO((x), GTT_PAGE_SIZE)
+#define ROUND_TO_MB(x) ROUND_TO((x), MB(1))
+#define PRIMARY_RINGBUFFER_SIZE KB(128)
+
+
+/* Ring buffer registers, p277, overview p19
+ */
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+
+#define RING_TAIL 0x00
+#define TAIL_ADDR 0x000FFFF8
+#define I830_TAIL_MASK 0x001FFFF8
+
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define I830_HEAD_MASK 0x001FFFFC
+
+#define RING_START 0x08
+#define START_ADDR 0x03FFFFF8
+#define I830_RING_START_MASK 0xFFFFF000
+
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x001FF000
+#define I830_RING_NR_PAGES 0x001FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
+
+
+/* Fence/Tiling ranges [0..7]
+ */
+#define FENCE 0x2000
+#define FENCE_NR 8
+
+#define I915G_FENCE_START_MASK 0x0ff00000
+
+#define I830_FENCE_START_MASK 0x07f80000
+
+#define FENCE_START_MASK 0x03F80000
+#define FENCE_X_MAJOR 0x00000000
+#define FENCE_Y_MAJOR 0x00001000
+#define FENCE_SIZE_MASK 0x00000700
+#define FENCE_SIZE_512K 0x00000000
+#define FENCE_SIZE_1M 0x00000100
+#define FENCE_SIZE_2M 0x00000200
+#define FENCE_SIZE_4M 0x00000300
+#define FENCE_SIZE_8M 0x00000400
+#define FENCE_SIZE_16M 0x00000500
+#define FENCE_SIZE_32M 0x00000600
+#define FENCE_SIZE_64M 0x00000700
+#define I915G_FENCE_SIZE_1M 0x00000000
+#define I915G_FENCE_SIZE_2M 0x00000100
+#define I915G_FENCE_SIZE_4M 0x00000200
+#define I915G_FENCE_SIZE_8M 0x00000300
+#define I915G_FENCE_SIZE_16M 0x00000400
+#define I915G_FENCE_SIZE_32M 0x00000500
+#define I915G_FENCE_SIZE_64M 0x00000600
+#define I915G_FENCE_SIZE_128M 0x00000700
+#define FENCE_PITCH_1 0x00000000
+#define FENCE_PITCH_2 0x00000010
+#define FENCE_PITCH_4 0x00000020
+#define FENCE_PITCH_8 0x00000030
+#define FENCE_PITCH_16 0x00000040
+#define FENCE_PITCH_32 0x00000050
+#define FENCE_PITCH_64 0x00000060
+#define FENCE_VALID 0x00000001
+
+#include <mmio.h>
+
+# define MMIO_IN8(base, offset) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset))
+# define MMIO_IN32(base, offset) \
+ read_MMIO_LE32(base, offset)
+# define MMIO_OUT8(base, offset, val) \
+ *(volatile unsigned char *)(((unsigned char*)(base)) + (offset)) = (val)
+# define MMIO_OUT32(base, offset, val) \
+ *(volatile unsigned int *)(void *)(((unsigned char*)(base)) + (offset)) = CPU_TO_LE32(val)
+
+
+ /* Memory mapped register access macros */
+#define INREG8(addr) MMIO_IN8(MMIO, addr)
+#define INREG(addr) MMIO_IN32(MMIO, addr)
+#define OUTREG8(addr, val) MMIO_OUT8(MMIO, addr, val)
+#define OUTREG(addr, val) MMIO_OUT32(MMIO, addr, val)
+
+#define DSPABASE 0x70184
+
+#endif