author    Eric Anholt <eric@anholt.net>    2007-11-07 10:04:59 -0800
committer Eric Anholt <eric@anholt.net>    2007-11-09 14:27:33 -0800
commit    77a5bcaff43df8d54e0e0ef833726e4b41d7eb36 (patch)
tree      0be8b19b347af2425a211a29c680fed266fa91c8 /src/mesa
parent    8b36166d295beb472ed7cedf1989894665233b98 (diff)
[intel] Move over files that will be shared with 965-fbo work.
Diffstat (limited to 'src/mesa')
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_batchbuffer.c    | 251
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_blit.c           | 492
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_buffer_objects.c | 269
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_buffers.c        | 1210
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c     | 834
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_depthstencil.c   | 283
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_fbo.c            | 688
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_mipmap_tree.c    | 389
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_regions.c        | 484
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_screen.c         | 946
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_span.c           | 410
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex.c            | 193
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex_copy.c       | 303
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex_format.c     | 173
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex_image.c      | 691
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex_subimage.c   | 183
l--------- [-rw-r--r--]  src/mesa/drivers/dri/i915/intel_tex_validate.c   | 273
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.c    | 250
-rw-r--r--  src/mesa/drivers/dri/intel/intel_batchbuffer.h (renamed from src/mesa/drivers/dri/i915/intel_batchbuffer.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_blit.c           | 491
-rw-r--r--  src/mesa/drivers/dri/intel/intel_blit.h (renamed from src/mesa/drivers/dri/i915/intel_blit.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffer_objects.c | 268
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffer_objects.h (renamed from src/mesa/drivers/dri/i915/intel_buffer_objects.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffers.c        | 1209
-rw-r--r--  src/mesa/drivers/dri/intel/intel_buffers.h (renamed from src/mesa/drivers/dri/i915/intel_buffers.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c     | 833
-rw-r--r--  src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h (renamed from src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_depthstencil.c   | 282
-rw-r--r--  src/mesa/drivers/dri/intel/intel_depthstencil.h (renamed from src/mesa/drivers/dri/i915/intel_depthstencil.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_fbo.c            | 687
-rw-r--r--  src/mesa/drivers/dri/intel/intel_fbo.h (renamed from src/mesa/drivers/dri/i915/intel_fbo.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_mipmap_tree.c    | 388
-rw-r--r--  src/mesa/drivers/dri/intel/intel_mipmap_tree.h (renamed from src/mesa/drivers/dri/i915/intel_mipmap_tree.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_regions.c        | 483
-rw-r--r--  src/mesa/drivers/dri/intel/intel_regions.h (renamed from src/mesa/drivers/dri/i915/intel_regions.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_screen.c         | 945
-rw-r--r--  src/mesa/drivers/dri/intel/intel_screen.h (renamed from src/mesa/drivers/dri/i915/intel_screen.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_span.c           | 409
-rw-r--r--  src/mesa/drivers/dri/intel/intel_span.h (renamed from src/mesa/drivers/dri/i915/intel_span.h) | 0
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex.c            | 192
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_copy.c       | 302
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_format.c     | 172
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_image.c      | 690
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_subimage.c   | 182
-rw-r--r--  src/mesa/drivers/dri/intel/intel_tex_validate.c   | 272
45 files changed, 8072 insertions, 8055 deletions
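
The 100644..120000 index lines in the hunks below record each i915 source file changing from a regular file to a symbolic link into the new shared src/mesa/drivers/dri/intel/ directory; the old contents are deleted and the file body becomes the link target. As a rough standalone illustration of what such a link looks like on disk (not part of the commit, which does this through git itself), a small C program using unlink(2)/symlink(2):

/* Illustration only: recreate one of the links introduced by this commit.
 * Paths match the diff below; run from the top of a Mesa checkout.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    const char *target = "../intel/intel_batchbuffer.c";
    const char *link_path = "src/mesa/drivers/dri/i915/intel_batchbuffer.c";

    if (unlink(link_path) != 0)
        perror("unlink");           /* old regular file may already be gone */
    if (symlink(target, link_path) != 0) {
        perror("symlink");
        return 1;
    }
    return 0;
}
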
diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.c b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
index 8ece6d2760..d38cdf31cc 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915/intel_batchbuffer.c
@@ -1,250 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "intel_batchbuffer.h"
-#include "intel_ioctl.h"
-#include "intel_decode.h"
-#include "intel_reg.h"
-
-/* Relocations in kernel space:
- * - pass dma buffer seperately
- * - memory manager knows how to patch
- * - pass list of dependent buffers
- * - pass relocation list
- *
- * Either:
- * - get back an offset for buffer to fire
- * - memory manager knows how to fire buffer
- *
- * Really want the buffer to be AGP and pinned.
- *
- */
-
-/* Cliprect fence: The highest fence protecting a dma buffer
- * containing explicit cliprect information. Like the old drawable
- * lock but irq-driven. X server must wait for this fence to expire
- * before changing cliprects [and then doing sw rendering?]. For
- * other dma buffers, the scheduler will grab current cliprect info
- * and mix into buffer. X server must hold the lock while changing
- * cliprects??? Make per-drawable. Need cliprects in shared memory
- * -- beats storing them with every cmd buffer in the queue.
- *
- * ==> X server must wait for this fence to expire before touching the
- * framebuffer with new cliprects.
- *
- * ==> Cliprect-dependent buffers associated with a
- * cliprect-timestamp. All of the buffers associated with a timestamp
- * must go to hardware before any buffer with a newer timestamp.
- *
- * ==> Dma should be queued per-drawable for correct X/GL
- * synchronization. Or can fences be used for this?
- *
- * Applies to: Blit operations, metaops, X server operations -- X
- * server automatically waits on its own dma to complete before
- * modifying cliprects ???
- */
-
-void
-intel_batchbuffer_reset(struct intel_batchbuffer *batch)
-{
- struct intel_context *intel = batch->intel;
-
- if (batch->buf != NULL) {
- dri_bo_unreference(batch->buf);
- batch->buf = NULL;
- }
-
- batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
- intel->intelScreen->maxBatchSize, 4096,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
- dri_bo_map(batch->buf, GL_TRUE);
- batch->map = batch->buf->virtual;
- batch->size = intel->intelScreen->maxBatchSize;
- batch->ptr = batch->map;
-}
-
-struct intel_batchbuffer *
-intel_batchbuffer_alloc(struct intel_context *intel)
-{
- struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
-
- batch->intel = intel;
- batch->last_fence = NULL;
- intel_batchbuffer_reset(batch);
-
- return batch;
-}
-
-void
-intel_batchbuffer_free(struct intel_batchbuffer *batch)
-{
- if (batch->last_fence) {
- dri_fence_wait(batch->last_fence);
- dri_fence_unreference(batch->last_fence);
- batch->last_fence = NULL;
- }
- if (batch->map) {
- dri_bo_unmap(batch->buf);
- batch->map = NULL;
- }
- dri_bo_unreference(batch->buf);
- batch->buf = NULL;
- free(batch);
-}
-
-
-
-/* TODO: Push this whole function into bufmgr.
- */
-static void
-do_flush_locked(struct intel_batchbuffer *batch,
- GLuint used,
- GLboolean ignore_cliprects, GLboolean allow_unlock)
-{
- struct intel_context *intel = batch->intel;
- void *start;
- GLuint count;
-
- start = dri_process_relocs(batch->buf, &count);
-
- batch->map = NULL;
- batch->ptr = NULL;
- batch->flags = 0;
-
- /* Throw away non-effective packets. Won't work once we have
- * hardware contexts which would preserve statechanges beyond a
- * single buffer.
- */
-
- if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
- if (intel->intelScreen->ttm == GL_TRUE) {
- intel_exec_ioctl(batch->intel,
- used, ignore_cliprects, allow_unlock,
- start, count, &batch->last_fence);
- } else {
- intel_batch_ioctl(batch->intel,
- batch->buf->offset,
- used, ignore_cliprects, allow_unlock);
- }
- }
-
- dri_post_submit(batch->buf, &batch->last_fence);
-
- if (intel->numClipRects == 0 && !ignore_cliprects) {
- if (allow_unlock) {
- /* If we are not doing any actual user-visible rendering,
- * do a sched_yield to keep the app from pegging the cpu while
- * achieving nothing.
- */
- UNLOCK_HARDWARE(intel);
- sched_yield();
- LOCK_HARDWARE(intel);
- }
- intel->vtbl.lost_hardware(intel);
- }
-
- if (INTEL_DEBUG & DEBUG_BATCH) {
- // dri_bo_map(batch->buf, GL_FALSE);
- // intel_decode(ptr, used / 4, batch->buf->offset,
- // intel->intelScreen->deviceID);
- // dri_bo_unmap(batch->buf);
- }
-}
-
-void
-intel_batchbuffer_flush(struct intel_batchbuffer *batch)
-{
- struct intel_context *intel = batch->intel;
- GLuint used = batch->ptr - batch->map;
- GLboolean was_locked = intel->locked;
-
- if (used == 0)
- return;
-
- /* Add the MI_BATCH_BUFFER_END. Always add an MI_FLUSH - this is a
- * performance drain that we would like to avoid.
- */
- if (used & 4) {
- ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->ptr)[1] = 0;
- ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
- used += 12;
- }
- else {
- ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
- ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
- used += 8;
- }
-
- /* TODO: Just pass the relocation list and dma buffer up to the
- * kernel.
- */
- if (!was_locked)
- LOCK_HARDWARE(intel);
-
- do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
- GL_FALSE);
-
- if (!was_locked)
- UNLOCK_HARDWARE(intel);
-
- /* Reset the buffer:
- */
- intel_batchbuffer_reset(batch);
-}
-
-void
-intel_batchbuffer_finish(struct intel_batchbuffer *batch)
-{
- intel_batchbuffer_flush(batch);
- if (batch->last_fence != NULL)
- dri_fence_wait(batch->last_fence);
-}
-
-
-/* This is the only way buffers get added to the validate list.
- */
-GLboolean
-intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
- dri_bo *buffer,
- GLuint flags, GLuint delta)
-{
- dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
- batch->ptr += 4;
-
- return GL_TRUE;
-}
-
-void
-intel_batchbuffer_data(struct intel_batchbuffer *batch,
- const void *data, GLuint bytes, GLuint flags)
-{
- assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(batch, bytes, flags);
- __memcpy(batch->ptr, data, bytes);
- batch->ptr += bytes;
-}
+../intel/intel_batchbuffer.c
\ No newline at end of file
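
The flush path removed above always terminates the batch with MI_BATCH_BUFFER_END and inserts one pad dword when needed so that the total batch length stays a multiple of 8 bytes. A minimal standalone sketch of just that padding rule, using stand-in opcode constants rather than the real intel_reg.h definitions:

/* Sketch of the batch-termination logic in intel_batchbuffer_flush() above:
 * append a flush plus a batch-end command, inserting one pad dword when the
 * batch would otherwise end qword-misaligned.  Opcode values are stand-ins.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_CMD        0x02000000u   /* stand-in for intel->vtbl.flush_cmd() */
#define BATCH_BUFFER_END 0x05000000u   /* stand-in for MI_BATCH_BUFFER_END */

static unsigned terminate_batch(uint32_t *batch, unsigned used_bytes)
{
    unsigned dw = used_bytes / 4;

    if (used_bytes & 4) {              /* odd dword count: pad up to a qword */
        batch[dw + 0] = FLUSH_CMD;
        batch[dw + 1] = 0;             /* pad */
        batch[dw + 2] = BATCH_BUFFER_END;
        used_bytes += 12;
    } else {
        batch[dw + 0] = FLUSH_CMD;
        batch[dw + 1] = BATCH_BUFFER_END;
        used_bytes += 8;
    }
    assert(used_bytes % 8 == 0);
    return used_bytes;
}

int main(void)
{
    uint32_t batch[16] = { 0 };
    printf("12 bytes -> %u bytes\n", terminate_batch(batch, 12)); /* 24 */
    printf("16 bytes -> %u bytes\n", terminate_batch(batch, 16)); /* 24 */
    return 0;
}
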
diff --git a/src/mesa/drivers/dri/i915/intel_blit.c b/src/mesa/drivers/dri/i915/intel_blit.c
index 2761136f47..dd6c8d17c2 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_blit.c
+++ b/src/mesa/drivers/dri/i915/intel_blit.c
@@ -1,491 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include <stdio.h>
-#include <errno.h>
-
-#include "mtypes.h"
-#include "context.h"
-#include "enums.h"
-
-#include "intel_batchbuffer.h"
-#include "intel_blit.h"
-#include "intel_buffers.h"
-#include "intel_context.h"
-#include "intel_fbo.h"
-#include "intel_reg.h"
-#include "intel_regions.h"
-
-#define FILE_DEBUG_FLAG DEBUG_BLIT
-
-/**
- * Copy the back color buffer to the front color buffer.
- * Used for SwapBuffers().
- */
-void
-intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
- const drm_clip_rect_t * rect)
-{
-
- struct intel_context *intel;
- const intelScreenPrivate *intelScreen;
-
- DBG("%s\n", __FUNCTION__);
-
- assert(dPriv);
-
- intel = intelScreenContext(dPriv->driScreenPriv->private);
- if (!intel)
- return;
-
- intelScreen = intel->intelScreen;
-
- if (intel->last_swap_fence) {
- dri_fence_wait(intel->last_swap_fence);
- dri_fence_unreference(intel->last_swap_fence);
- intel->last_swap_fence = NULL;
- }
- intel->last_swap_fence = intel->first_swap_fence;
- intel->first_swap_fence = NULL;
-
- /* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
- * should work regardless.
- */
- LOCK_HARDWARE(intel);
-
- if (dPriv && dPriv->numClipRects) {
- struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
- const struct intel_region *frontRegion
- = intel_get_rb_region(&intel_fb->Base, BUFFER_FRONT_LEFT);
- const struct intel_region *backRegion
- = intel_get_rb_region(&intel_fb->Base, BUFFER_BACK_LEFT);
- const int nbox = dPriv->numClipRects;
- const drm_clip_rect_t *pbox = dPriv->pClipRects;
- const int pitch = frontRegion->pitch;
- const int cpp = frontRegion->cpp;
- int BR13, CMD;
- int i;
-
- ASSERT(intel_fb);
- ASSERT(intel_fb->Base.Name == 0); /* Not a user-created FBO */
- ASSERT(frontRegion);
- ASSERT(backRegion);
- ASSERT(frontRegion->pitch == backRegion->pitch);
- ASSERT(frontRegion->cpp == backRegion->cpp);
-
- if (cpp == 2) {
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- }
- else {
- BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
- }
-
- for (i = 0; i < nbox; i++, pbox++) {
- drm_clip_rect_t box;
-
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height)
- continue;
-
- box = *pbox;
-
- if (rect) {
- if (rect->x1 > box.x1)
- box.x1 = rect->x1;
- if (rect->y1 > box.y1)
- box.y1 = rect->y1;
- if (rect->x2 < box.x2)
- box.x2 = rect->x2;
- if (rect->y2 < box.y2)
- box.y2 = rect->y2;
-
- if (box.x1 > box.x2 || box.y1 > box.y2)
- continue;
- }
-
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
- OUT_BATCH(CMD);
- OUT_BATCH(BR13);
- OUT_BATCH((pbox->y1 << 16) | pbox->x1);
- OUT_BATCH((pbox->y2 << 16) | pbox->x2);
-
- OUT_RELOC(frontRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
- 0);
- OUT_BATCH((pbox->y1 << 16) | pbox->x1);
- OUT_BATCH(BR13 & 0xffff);
- OUT_RELOC(backRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
- 0);
-
- ADVANCE_BATCH();
- }
-
- if (intel->first_swap_fence)
- dri_fence_unreference(intel->first_swap_fence);
- intel_batchbuffer_flush(intel->batch);
- intel->first_swap_fence = intel->batch->last_fence;
- dri_fence_reference(intel->first_swap_fence);
- }
-
- UNLOCK_HARDWARE(intel);
-}
-
-
-
-
-void
-intelEmitFillBlit(struct intel_context *intel,
- GLuint cpp,
- GLshort dst_pitch,
- dri_bo *dst_buffer,
- GLuint dst_offset,
- GLshort x, GLshort y, GLshort w, GLshort h, GLuint color)
-{
- GLuint BR13, CMD;
- BATCH_LOCALS;
-
- dst_pitch *= cpp;
-
- switch (cpp) {
- case 1:
- case 2:
- case 3:
- BR13 = dst_pitch | (0xF0 << 16) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- break;
- case 4:
- BR13 = dst_pitch | (0xF0 << 16) | (1 << 24) | (1 << 25);
- CMD = (XY_COLOR_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
- break;
- default:
- return;
- }
-
- DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
- __FUNCTION__, dst_buffer, dst_pitch, dst_offset, x, y, w, h);
-
-
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
- OUT_BATCH(CMD);
- OUT_BATCH(BR13);
- OUT_BATCH((y << 16) | x);
- OUT_BATCH(((y + h) << 16) | (x + w));
- OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
- OUT_BATCH(color);
- ADVANCE_BATCH();
-}
-
-
-static GLuint translate_raster_op(GLenum logicop)
-{
- switch(logicop) {
- case GL_CLEAR: return 0x00;
- case GL_AND: return 0x88;
- case GL_AND_REVERSE: return 0x44;
- case GL_COPY: return 0xCC;
- case GL_AND_INVERTED: return 0x22;
- case GL_NOOP: return 0xAA;
- case GL_XOR: return 0x66;
- case GL_OR: return 0xEE;
- case GL_NOR: return 0x11;
- case GL_EQUIV: return 0x99;
- case GL_INVERT: return 0x55;
- case GL_OR_REVERSE: return 0xDD;
- case GL_COPY_INVERTED: return 0x33;
- case GL_OR_INVERTED: return 0xBB;
- case GL_NAND: return 0x77;
- case GL_SET: return 0xFF;
- default: return 0;
- }
-}
-
-
-/* Copy BitBlt
- */
-void
-intelEmitCopyBlit(struct intel_context *intel,
- GLuint cpp,
- GLshort src_pitch,
- dri_bo *src_buffer,
- GLuint src_offset,
- GLshort dst_pitch,
- dri_bo *dst_buffer,
- GLuint dst_offset,
- GLshort src_x, GLshort src_y,
- GLshort dst_x, GLshort dst_y,
- GLshort w, GLshort h,
- GLenum logic_op)
-{
- GLuint CMD, BR13;
- int dst_y2 = dst_y + h;
- int dst_x2 = dst_x + w;
- BATCH_LOCALS;
-
-
- DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
- __FUNCTION__,
- src_buffer, src_pitch, src_offset, src_x, src_y,
- dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
-
- src_pitch *= cpp;
- dst_pitch *= cpp;
-
- switch (cpp) {
- case 1:
- case 2:
- case 3:
- BR13 = (((GLint) dst_pitch) & 0xffff) |
- (translate_raster_op(logic_op) << 16) | (1 << 24);
- CMD = XY_SRC_COPY_BLT_CMD;
- break;
- case 4:
- BR13 =
- (((GLint) dst_pitch) & 0xffff) |
- (translate_raster_op(logic_op) << 16) | (1 << 24) | (1 << 25);
- CMD =
- (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
- break;
- default:
- return;
- }
-
- if (dst_y2 < dst_y || dst_x2 < dst_x) {
- return;
- }
-
- /* Initial y values don't seem to work with negative pitches. If
- * we adjust the offsets manually (below), it seems to work fine.
- *
- * On the other hand, if we always adjust, the hardware doesn't
- * know which blit directions to use, so overlapping copypixels get
- * the wrong result.
- */
- if (dst_pitch > 0 && src_pitch > 0) {
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
- OUT_BATCH(CMD);
- OUT_BATCH(BR13);
- OUT_BATCH((dst_y << 16) | dst_x);
- OUT_BATCH((dst_y2 << 16) | dst_x2);
- OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
- OUT_BATCH((src_y << 16) | src_x);
- OUT_BATCH(((GLint) src_pitch & 0xffff));
- OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, src_offset);
- ADVANCE_BATCH();
- }
- else {
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
- OUT_BATCH(CMD);
- OUT_BATCH(BR13);
- OUT_BATCH((0 << 16) | dst_x);
- OUT_BATCH((h << 16) | dst_x2);
- OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
- dst_offset + dst_y * dst_pitch);
- OUT_BATCH((0 << 16) | src_x);
- OUT_BATCH(((GLint) src_pitch & 0xffff));
- OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
- src_offset + src_y * src_pitch);
- ADVANCE_BATCH();
- }
-}
-
-
-/**
- * Use blitting to clear the renderbuffers named by 'flags'.
- * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
- * since that might include software renderbuffers or renderbuffers
- * which we're clearing with triangles.
- * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
- */
-void
-intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
-{
- struct intel_context *intel = intel_context(ctx);
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- GLuint clear_depth;
- GLbitfield skipBuffers = 0;
- BATCH_LOCALS;
-
- DBG("%s %x\n", __FUNCTION__, mask);
-
- /*
- * Compute values for clearing the buffers.
- */
- clear_depth = 0;
- if (mask & BUFFER_BIT_DEPTH) {
- clear_depth = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
- }
- if (mask & BUFFER_BIT_STENCIL) {
- clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
- }
-
- /* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
- * the loop below.
- */
- if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
- skipBuffers = BUFFER_BIT_STENCIL;
- }
-
- /* XXX Move this flush/lock into the following conditional? */
- intelFlush(&intel->ctx);
- LOCK_HARDWARE(intel);
-
- if (intel->numClipRects) {
- GLint cx, cy, cw, ch;
- drm_clip_rect_t clear;
- int i;
-
- /* Get clear bounds after locking */
- cx = fb->_Xmin;
- cy = fb->_Ymin;
- cw = fb->_Xmax - cx;
- ch = fb->_Ymax - cy;
-
- if (fb->Name == 0) {
- /* clearing a window */
-
- /* flip top to bottom */
- clear.x1 = cx + intel->drawX;
- clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
- clear.x2 = clear.x1 + cw;
- clear.y2 = clear.y1 + ch;
- }
- else {
- /* clearing FBO */
- assert(intel->numClipRects == 1);
- assert(intel->pClipRects == &intel->fboRect);
- clear.x1 = cx;
- clear.y1 = cy;
- clear.x2 = clear.x1 + cw;
- clear.y2 = clear.y1 + ch;
- /* no change to mask */
- }
-
- for (i = 0; i < intel->numClipRects; i++) {
- const drm_clip_rect_t *box = &intel->pClipRects[i];
- drm_clip_rect_t b;
- GLuint buf;
- GLuint clearMask = mask; /* use copy, since we modify it below */
- GLboolean all = (cw == fb->Width && ch == fb->Height);
-
- if (!all) {
- intel_intersect_cliprects(&b, &clear, box);
- }
- else {
- b = *box;
- }
-
- if (b.x1 >= b.x2 || b.y1 >= b.y2)
- continue;
-
- if (0)
- _mesa_printf("clear %d,%d..%d,%d, mask %x\n",
- b.x1, b.y1, b.x2, b.y2, mask);
-
- /* Loop over all renderbuffers */
- for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
- const GLbitfield bufBit = 1 << buf;
- if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
- /* OK, clear this renderbuffer */
- struct intel_region *irb_region =
- intel_get_rb_region(fb, buf);
- dri_bo *write_buffer =
- intel_region_buffer(intel->intelScreen, irb_region,
- all ? INTEL_WRITE_FULL :
- INTEL_WRITE_PART);
-
- GLuint clearVal;
- GLint pitch, cpp;
- GLuint BR13, CMD;
-
- ASSERT(irb_region);
-
- pitch = irb_region->pitch;
- cpp = irb_region->cpp;
-
- DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
- __FUNCTION__,
- irb_region->buffer, (pitch * cpp),
- irb_region->draw_offset,
- b.x1, b.y1, b.x2 - b.x1, b.y2 - b.y1);
-
-
- /* Setup the blit command */
- if (cpp == 4) {
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
- if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
- CMD = XY_COLOR_BLT_CMD;
- if (clearMask & BUFFER_BIT_DEPTH)
- CMD |= XY_BLT_WRITE_RGB;
- if (clearMask & BUFFER_BIT_STENCIL)
- CMD |= XY_BLT_WRITE_ALPHA;
- }
- else {
- /* clearing RGBA */
- CMD = XY_COLOR_BLT_CMD |
- XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
- }
- }
- else {
- ASSERT(cpp == 2 || cpp == 0);
- BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
- CMD = XY_COLOR_BLT_CMD;
- }
-
- if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
- clearVal = clear_depth;
- }
- else {
- clearVal = (cpp == 4)
- ? intel->ClearColor8888 : intel->ClearColor565;
- }
- /*
- _mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
- buf, irb->Base.Name);
- */
- intel_wait_flips(intel, INTEL_BATCH_NO_CLIPRECTS);
-
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
- OUT_BATCH(CMD);
- OUT_BATCH(BR13);
- OUT_BATCH((b.y1 << 16) | b.x1);
- OUT_BATCH((b.y2 << 16) | b.x2);
- OUT_RELOC(write_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
- irb_region->draw_offset);
- OUT_BATCH(clearVal);
- ADVANCE_BATCH();
- clearMask &= ~bufBit; /* turn off bit, for faster loop exit */
- }
- }
- }
- intel_batchbuffer_flush(intel->batch);
- }
-
- UNLOCK_HARDWARE(intel);
-}
+../intel/intel_blit.c
\ No newline at end of file
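
The blit helpers removed above all pack the blitter's BR13 dword the same way: the destination pitch in bytes in the low 16 bits, an 8-bit raster-operation code (0xCC for GL_COPY, 0xF0 for a solid fill, see translate_raster_op()) in bits 16..23, bit 24 for the colour-depth field, plus bit 25 for 32-bit-per-pixel surfaces. A small standalone sketch of that packing, mirroring the expressions in intelEmitCopyBlit()/intelEmitFillBlit():

/* Standalone sketch of the BR13 packing used by the removed blit code.
 * Field layout follows the expressions above; nothing here touches hardware.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t build_br13(uint16_t dst_pitch_bytes, uint8_t rop, int cpp)
{
    uint32_t br13 = ((uint32_t)dst_pitch_bytes & 0xffff) | ((uint32_t)rop << 16);

    br13 |= 1u << 24;                  /* set for all supported depths */
    if (cpp == 4)
        br13 |= 1u << 25;              /* extra bit selects 32bpp */
    return br13;
}

int main(void)
{
    /* 1024-pixel-wide ARGB8888 destination, GL_COPY raster op. */
    printf("BR13 = 0x%08x\n", build_br13(1024 * 4, 0xCC, 4));
    return 0;
}
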
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.c b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
index 5348822816..e06dd3c8d3 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i915/intel_buffer_objects.c
@@ -1,268 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "imports.h"
-#include "mtypes.h"
-#include "bufferobj.h"
-
-#include "intel_context.h"
-#include "intel_buffer_objects.h"
-#include "intel_regions.h"
-#include "dri_bufmgr.h"
-
-/** Allocates a new dri_bo to store the data for the buffer object. */
-static void
-intel_bufferobj_alloc_buffer(struct intel_context *intel,
- struct intel_buffer_object *intel_obj)
-{
- intel_obj->buffer = dri_bo_alloc(intel->intelScreen->bufmgr, "bufferobj",
- intel_obj->Base.Size, 64,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
-}
-
-/**
- * There is some duplication between mesa's bufferobjects and our
- * bufmgr buffers. Both have an integer handle and a hashtable to
- * lookup an opaque structure. It would be nice if the handles and
- * internal structure where somehow shared.
- */
-static struct gl_buffer_object *
-intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
-{
- struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
-
- _mesa_initialize_buffer_object(&obj->Base, name, target);
-
- obj->buffer = NULL;
-
- return &obj->Base;
-}
-
-/* Break the COW tie to the region. The region gets to keep the data.
- */
-void
-intel_bufferobj_release_region(struct intel_context *intel,
- struct intel_buffer_object *intel_obj)
-{
- assert(intel_obj->region->buffer == intel_obj->buffer);
- intel_obj->region->pbo = NULL;
- intel_obj->region = NULL;
-
- dri_bo_unreference(intel_obj->buffer);
- intel_obj->buffer = NULL;
-}
-
-/* Break the COW tie to the region. Both the pbo and the region end
- * up with a copy of the data.
- */
-void
-intel_bufferobj_cow(struct intel_context *intel,
- struct intel_buffer_object *intel_obj)
-{
- assert(intel_obj->region);
- intel_region_cow(intel->intelScreen, intel_obj->region);
-}
-
-
-/**
- * Deallocate/free a vertex/pixel buffer object.
- * Called via glDeleteBuffersARB().
- */
-static void
-intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- assert(intel_obj);
-
- if (intel_obj->region) {
- intel_bufferobj_release_region(intel, intel_obj);
- }
- else if (intel_obj->buffer) {
- dri_bo_unreference(intel_obj->buffer);
- }
-
- _mesa_free(intel_obj);
-}
-
-
-
-/**
- * Allocate space for and store data in a buffer object. Any data that was
- * previously stored in the buffer object is lost. If data is NULL,
- * memory will be allocated, but no copy will occur.
- * Called via glBufferDataARB().
- */
-static void
-intel_bufferobj_data(GLcontext * ctx,
- GLenum target,
- GLsizeiptrARB size,
- const GLvoid * data,
- GLenum usage, struct gl_buffer_object *obj)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- intel_obj->Base.Size = size;
- intel_obj->Base.Usage = usage;
-
- if (intel_obj->region)
- intel_bufferobj_release_region(intel, intel_obj);
-
- if (intel_obj->buffer != NULL && intel_obj->buffer->size != size) {
- dri_bo_unreference(intel_obj->buffer);
- intel_obj->buffer = NULL;
- }
-
- intel_bufferobj_alloc_buffer(intel, intel_obj);
-
- if (data != NULL)
- dri_bo_subdata(intel_obj->buffer, 0, size, data);
-}
-
-
-/**
- * Replace data in a subrange of buffer object. If the data range
- * specified by size + offset extends beyond the end of the buffer or
- * if data is NULL, no copy is performed.
- * Called via glBufferSubDataARB().
- */
-static void
-intel_bufferobj_subdata(GLcontext * ctx,
- GLenum target,
- GLintptrARB offset,
- GLsizeiptrARB size,
- const GLvoid * data, struct gl_buffer_object *obj)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- assert(intel_obj);
-
- if (intel_obj->region)
- intel_bufferobj_cow(intel, intel_obj);
-
- dri_bo_subdata(intel_obj->buffer, offset, size, data);
-}
-
-
-/**
- * Called via glGetBufferSubDataARB().
- */
-static void
-intel_bufferobj_get_subdata(GLcontext * ctx,
- GLenum target,
- GLintptrARB offset,
- GLsizeiptrARB size,
- GLvoid * data, struct gl_buffer_object *obj)
-{
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- assert(intel_obj);
- dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
-}
-
-
-
-/**
- * Called via glMapBufferARB().
- */
-static void *
-intel_bufferobj_map(GLcontext * ctx,
- GLenum target,
- GLenum access, struct gl_buffer_object *obj)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- /* XXX: Translate access to flags arg below:
- */
- assert(intel_obj);
-
- if (intel_obj->region)
- intel_bufferobj_cow(intel, intel_obj);
-
- if (intel_obj->buffer == NULL) {
- obj->Pointer = NULL;
- return NULL;
- }
-
- dri_bo_map(intel_obj->buffer, GL_TRUE);
- obj->Pointer = intel_obj->buffer->virtual;
- return obj->Pointer;
-}
-
-
-/**
- * Called via glMapBufferARB().
- */
-static GLboolean
-intel_bufferobj_unmap(GLcontext * ctx,
- GLenum target, struct gl_buffer_object *obj)
-{
- struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
-
- assert(intel_obj);
- if (intel_obj->buffer != NULL) {
- assert(obj->Pointer);
- dri_bo_unmap(intel_obj->buffer);
- obj->Pointer = NULL;
- }
- return GL_TRUE;
-}
-
-dri_bo *
-intel_bufferobj_buffer(struct intel_context *intel,
- struct intel_buffer_object *intel_obj, GLuint flag)
-{
- if (intel_obj->region) {
- if (flag == INTEL_WRITE_PART)
- intel_bufferobj_cow(intel, intel_obj);
- else if (flag == INTEL_WRITE_FULL) {
- intel_bufferobj_release_region(intel, intel_obj);
- intel_bufferobj_alloc_buffer(intel, intel_obj);
- }
- }
-
- return intel_obj->buffer;
-}
-
-void
-intel_bufferobj_init(struct intel_context *intel)
-{
- GLcontext *ctx = &intel->ctx;
-
- ctx->Driver.NewBufferObject = intel_bufferobj_alloc;
- ctx->Driver.DeleteBuffer = intel_bufferobj_free;
- ctx->Driver.BufferData = intel_bufferobj_data;
- ctx->Driver.BufferSubData = intel_bufferobj_subdata;
- ctx->Driver.GetBufferSubData = intel_bufferobj_get_subdata;
- ctx->Driver.MapBuffer = intel_bufferobj_map;
- ctx->Driver.UnmapBuffer = intel_bufferobj_unmap;
-}
+../intel/intel_buffer_objects.c
\ No newline at end of file
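
When a buffer object shares its storage with a region (the PBO paths above), the removed intel_bufferobj_buffer() picks between copy-on-write and simply dropping the tie, depending on whether the caller is about to overwrite part or all of the data. A standalone model of that decision, with stub types standing in for the real driver structures:

/* Model of the write-flag handling in intel_bufferobj_buffer() above:
 * a partial write copies the shared data first (COW), a full overwrite lets
 * the region keep the data and starts from a fresh buffer.  All types and
 * helpers here are stubs, not the real driver ones.
 */
#include <stdbool.h>
#include <stdio.h>

enum write_flag { WRITE_NONE, WRITE_PART, WRITE_FULL };  /* stand-ins for INTEL_WRITE_* */

struct fake_bufferobj {
    bool shares_region;      /* still tied to a region's storage? */
    const char *backing;     /* describes the current backing store */
};

static void resolve_for_write(struct fake_bufferobj *bo, enum write_flag flag)
{
    if (!bo->shares_region)
        return;                                   /* already has private storage */

    if (flag == WRITE_PART) {
        bo->backing = "private copy of the region data";   /* COW */
        bo->shares_region = false;
    } else if (flag == WRITE_FULL) {
        bo->backing = "fresh empty buffer";       /* region keeps its data */
        bo->shares_region = false;
    }
    /* WRITE_NONE: keep sharing, nothing to do. */
}

int main(void)
{
    struct fake_bufferobj bo = { true, "region storage (shared)" };
    resolve_for_write(&bo, WRITE_PART);
    printf("after partial write: %s\n", bo.backing);
    return 0;
}
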
diff --git a/src/mesa/drivers/dri/i915/intel_buffers.c b/src/mesa/drivers/dri/i915/intel_buffers.c
index 1ae8b5feb4..c86daa49f4 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_buffers.c
+++ b/src/mesa/drivers/dri/i915/intel_buffers.c
@@ -1,1209 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "intel_screen.h"
-#include "intel_context.h"
-#include "intel_blit.h"
-#include "intel_buffers.h"
-#include "intel_depthstencil.h"
-#include "intel_fbo.h"
-#include "intel_tris.h"
-#include "intel_regions.h"
-#include "intel_batchbuffer.h"
-#include "intel_reg.h"
-#include "context.h"
-#include "utils.h"
-#include "drirenderbuffer.h"
-#include "framebuffer.h"
-#include "swrast/swrast.h"
-#include "vblank.h"
-
-
-/* This block can be removed when libdrm >= 2.3.1 is required */
-
-#ifndef DRM_IOCTL_I915_FLIP
-
-#define DRM_VBLANK_FLIP 0x8000000
-
-typedef struct drm_i915_flip {
- int pipes;
-} drm_i915_flip_t;
-
-#undef DRM_IOCTL_I915_FLIP
-#define DRM_IOCTL_I915_FLIP DRM_IOW(DRM_COMMAND_BASE + DRM_I915_FLIP, \
- drm_i915_flip_t)
-
-#endif
-
-
-/**
- * XXX move this into a new dri/common/cliprects.c file.
- */
-GLboolean
-intel_intersect_cliprects(drm_clip_rect_t * dst,
- const drm_clip_rect_t * a,
- const drm_clip_rect_t * b)
-{
- GLint bx = b->x1;
- GLint by = b->y1;
- GLint bw = b->x2 - bx;
- GLint bh = b->y2 - by;
-
- if (bx < a->x1)
- bw -= a->x1 - bx, bx = a->x1;
- if (by < a->y1)
- bh -= a->y1 - by, by = a->y1;
- if (bx + bw > a->x2)
- bw = a->x2 - bx;
- if (by + bh > a->y2)
- bh = a->y2 - by;
- if (bw <= 0)
- return GL_FALSE;
- if (bh <= 0)
- return GL_FALSE;
-
- dst->x1 = bx;
- dst->y1 = by;
- dst->x2 = bx + bw;
- dst->y2 = by + bh;
-
- return GL_TRUE;
-}
-
-/**
- * Return pointer to current color drawing region, or NULL.
- */
-struct intel_region *
-intel_drawbuf_region(struct intel_context *intel)
-{
- struct intel_renderbuffer *irbColor =
- intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0][0]);
- if (irbColor)
- return irbColor->region;
- else
- return NULL;
-}
-
-/**
- * Return pointer to current color reading region, or NULL.
- */
-struct intel_region *
-intel_readbuf_region(struct intel_context *intel)
-{
- struct intel_renderbuffer *irb
- = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
- if (irb)
- return irb->region;
- else
- return NULL;
-}
-
-
-
-/**
- * Update the following fields for rendering to a user-created FBO:
- * intel->numClipRects
- * intel->pClipRects
- * intel->drawX
- * intel->drawY
- */
-static void
-intelSetRenderbufferClipRects(struct intel_context *intel)
-{
- assert(intel->ctx.DrawBuffer->Width > 0);
- assert(intel->ctx.DrawBuffer->Height > 0);
- intel->fboRect.x1 = 0;
- intel->fboRect.y1 = 0;
- intel->fboRect.x2 = intel->ctx.DrawBuffer->Width;
- intel->fboRect.y2 = intel->ctx.DrawBuffer->Height;
- intel->numClipRects = 1;
- intel->pClipRects = &intel->fboRect;
- intel->drawX = 0;
- intel->drawY = 0;
-}
-
-
-/**
- * As above, but for rendering to front buffer of a window.
- * \sa intelSetRenderbufferClipRects
- */
-static void
-intelSetFrontClipRects(struct intel_context *intel)
-{
- __DRIdrawablePrivate *dPriv = intel->driDrawable;
-
- if (!dPriv)
- return;
-
- intel->numClipRects = dPriv->numClipRects;
- intel->pClipRects = dPriv->pClipRects;
- intel->drawX = dPriv->x;
- intel->drawY = dPriv->y;
-}
-
-
-/**
- * As above, but for rendering to back buffer of a window.
- */
-static void
-intelSetBackClipRects(struct intel_context *intel)
-{
- __DRIdrawablePrivate *dPriv = intel->driDrawable;
- struct intel_framebuffer *intel_fb;
-
- if (!dPriv)
- return;
-
- intel_fb = dPriv->driverPrivate;
-
- if (intel_fb->pf_active || dPriv->numBackClipRects == 0) {
- /* use the front clip rects */
- intel->numClipRects = dPriv->numClipRects;
- intel->pClipRects = dPriv->pClipRects;
- intel->drawX = dPriv->x;
- intel->drawY = dPriv->y;
- }
- else {
- /* use the back clip rects */
- intel->numClipRects = dPriv->numBackClipRects;
- intel->pClipRects = dPriv->pBackClipRects;
- intel->drawX = dPriv->backX;
- intel->drawY = dPriv->backY;
- }
-}
-
-
-/**
- * This will be called whenever the currently bound window is moved/resized.
- * XXX: actually, it seems to NOT be called when the window is only moved (BP).
- */
-void
-intelWindowMoved(struct intel_context *intel)
-{
- GLcontext *ctx = &intel->ctx;
- __DRIdrawablePrivate *dPriv = intel->driDrawable;
- struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
-
- if (!intel->ctx.DrawBuffer) {
- /* when would this happen? -BP */
- intelSetFrontClipRects(intel);
- }
- else if (intel->ctx.DrawBuffer->Name != 0) {
- /* drawing to user-created FBO - do nothing */
- /* Cliprects would be set from intelDrawBuffer() */
- }
- else {
- /* drawing to a window */
- switch (intel_fb->Base._ColorDrawBufferMask[0]) {
- case BUFFER_BIT_FRONT_LEFT:
- intelSetFrontClipRects(intel);
- break;
- case BUFFER_BIT_BACK_LEFT:
- intelSetBackClipRects(intel);
- break;
- default:
- /* glDrawBuffer(GL_NONE or GL_FRONT_AND_BACK): software fallback */
- intelSetFrontClipRects(intel);
- }
- }
-
- if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 7) {
- drmI830Sarea *sarea = intel->sarea;
- drm_clip_rect_t drw_rect = { .x1 = dPriv->x, .x2 = dPriv->x + dPriv->w,
- .y1 = dPriv->y, .y2 = dPriv->y + dPriv->h };
- drm_clip_rect_t planeA_rect = { .x1 = sarea->planeA_x, .y1 = sarea->planeA_y,
- .x2 = sarea->planeA_x + sarea->planeA_w,
- .y2 = sarea->planeA_y + sarea->planeA_h };
- drm_clip_rect_t planeB_rect = { .x1 = sarea->planeB_x, .y1 = sarea->planeB_y,
- .x2 = sarea->planeB_x + sarea->planeB_w,
- .y2 = sarea->planeB_y + sarea->planeB_h };
- GLint areaA = driIntersectArea( drw_rect, planeA_rect );
- GLint areaB = driIntersectArea( drw_rect, planeB_rect );
- GLuint flags = dPriv->vblFlags;
- GLboolean pf_active;
- GLint pf_planes;
-
- /* Update page flipping info
- */
- pf_planes = 0;
-
- if (areaA > 0)
- pf_planes |= 1;
-
- if (areaB > 0)
- pf_planes |= 2;
-
- intel_fb->pf_current_page = (intel->sarea->pf_current_page >>
- (intel_fb->pf_planes & 0x2)) & 0x3;
-
- intel_fb->pf_num_pages = intel->intelScreen->third.handle ? 3 : 2;
-
- pf_active = pf_planes && (pf_planes & intel->sarea->pf_active) == pf_planes;
-
- if (INTEL_DEBUG & DEBUG_LOCK)
- if (pf_active != intel_fb->pf_active)
- _mesa_printf("%s - Page flipping %sactive\n", __progname,
- pf_active ? "" : "in");
-
- if (pf_active) {
- /* Sync pages between planes if flipping on both at the same time */
- if (pf_planes == 0x3 && pf_planes != intel_fb->pf_planes &&
- (intel->sarea->pf_current_page & 0x3) !=
- (((intel->sarea->pf_current_page) >> 2) & 0x3)) {
- drm_i915_flip_t flip;
-
- if (intel_fb->pf_current_page ==
- (intel->sarea->pf_current_page & 0x3)) {
- /* XXX: This is ugly, but emitting two flips 'in a row' can cause
- * lockups for unknown reasons.
- */
- intel->sarea->pf_current_page =
- intel->sarea->pf_current_page & 0x3;
- intel->sarea->pf_current_page |=
- ((intel_fb->pf_current_page + intel_fb->pf_num_pages - 1) %
- intel_fb->pf_num_pages) << 2;
-
- flip.pipes = 0x2;
- } else {
- intel->sarea->pf_current_page =
- intel->sarea->pf_current_page & (0x3 << 2);
- intel->sarea->pf_current_page |=
- (intel_fb->pf_current_page + intel_fb->pf_num_pages - 1) %
- intel_fb->pf_num_pages;
-
- flip.pipes = 0x1;
- }
-
- drmCommandWrite(intel->driFd, DRM_I915_FLIP, &flip, sizeof(flip));
- }
-
- intel_fb->pf_planes = pf_planes;
- }
-
- intel_fb->pf_active = pf_active;
- intel_flip_renderbuffers(intel_fb);
- intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
-
- /* Update vblank info
- */
- if (areaB > areaA || (areaA == areaB && areaB > 0)) {
- flags = dPriv->vblFlags | VBLANK_FLAG_SECONDARY;
- } else {
- flags = dPriv->vblFlags & ~VBLANK_FLAG_SECONDARY;
- }
-
- /* Check to see if we changed pipes */
- if (flags != dPriv->vblFlags && dPriv->vblFlags &&
- !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ)) {
- int64_t count;
- drmVBlank vbl;
- int i;
-
- /*
- * Deal with page flipping
- */
- vbl.request.type = DRM_VBLANK_ABSOLUTE;
-
- if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
- vbl.request.type |= DRM_VBLANK_SECONDARY;
- }
-
- for (i = 0; i < intel_fb->pf_num_pages; i++) {
- if (!intel_fb->color_rb[i] ||
- (intel_fb->vbl_waited - intel_fb->color_rb[i]->vbl_pending) <=
- (1<<23))
- continue;
-
- vbl.request.sequence = intel_fb->color_rb[i]->vbl_pending;
- drmWaitVBlank(intel->driFd, &vbl);
- }
-
- /*
- * Update msc_base from old pipe
- */
- driDrawableGetMSC32(dPriv->driScreenPriv, dPriv, &count);
- dPriv->msc_base = count;
- /*
- * Then get new vblank_base and vblSeq values
- */
- dPriv->vblFlags = flags;
- driGetCurrentVBlank(dPriv);
- dPriv->vblank_base = dPriv->vblSeq;
-
- intel_fb->vbl_waited = dPriv->vblSeq;
-
- for (i = 0; i < intel_fb->pf_num_pages; i++) {
- if (intel_fb->color_rb[i])
- intel_fb->color_rb[i]->vbl_pending = intel_fb->vbl_waited;
- }
- }
- } else {
- dPriv->vblFlags &= ~VBLANK_FLAG_SECONDARY;
- }
-
- /* Update Mesa's notion of window size */
- driUpdateFramebufferSize(ctx, dPriv);
- intel_fb->Base.Initialized = GL_TRUE; /* XXX remove someday */
-
- /* Update hardware scissor */
- ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
- ctx->Scissor.Width, ctx->Scissor.Height);
-
- /* Re-calculate viewport related state */
- ctx->Driver.DepthRange( ctx, ctx->Viewport.Near, ctx->Viewport.Far );
-}
-
-
-
-/* A true meta version of this would be very simple and additionally
- * machine independent. Maybe we'll get there one day.
- */
-static void
-intelClearWithTris(struct intel_context *intel, GLbitfield mask)
-{
- GLcontext *ctx = &intel->ctx;
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- drm_clip_rect_t clear;
-
- if (INTEL_DEBUG & DEBUG_BLIT)
- _mesa_printf("%s 0x%x\n", __FUNCTION__, mask);
-
- LOCK_HARDWARE(intel);
-
- /* XXX FBO: was: intel->driDrawable->numClipRects */
- if (intel->numClipRects) {
- GLint cx, cy, cw, ch;
- GLuint buf;
-
- intel->vtbl.install_meta_state(intel);
-
- /* Get clear bounds after locking */
- cx = fb->_Xmin;
- cy = fb->_Ymin;
- ch = fb->_Ymax - cx;
- cw = fb->_Xmax - cy;
-
- /* note: regardless of 'all', cx, cy, cw, ch are now correct */
- clear.x1 = cx;
- clear.y1 = cy;
- clear.x2 = cx + cw;
- clear.y2 = cy + ch;
-
- /* Back and stencil cliprects are the same. Try and do both
- * buffers at once:
- */
- if (mask &
- (BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH)) {
- struct intel_region *backRegion =
- intel_get_rb_region(fb, BUFFER_BACK_LEFT);
- struct intel_region *depthRegion =
- intel_get_rb_region(fb, BUFFER_DEPTH);
- const GLuint clearColor = (backRegion && backRegion->cpp == 4)
- ? intel->ClearColor8888 : intel->ClearColor565;
-
- intel->vtbl.meta_draw_region(intel, backRegion, depthRegion);
-
- if (mask & BUFFER_BIT_BACK_LEFT)
- intel->vtbl.meta_color_mask(intel, GL_TRUE);
- else
- intel->vtbl.meta_color_mask(intel, GL_FALSE);
-
- if (mask & BUFFER_BIT_STENCIL)
- intel->vtbl.meta_stencil_replace(intel,
- intel->ctx.Stencil.WriteMask[0],
- intel->ctx.Stencil.Clear);
- else
- intel->vtbl.meta_no_stencil_write(intel);
-
- if (mask & BUFFER_BIT_DEPTH)
- intel->vtbl.meta_depth_replace(intel);
- else
- intel->vtbl.meta_no_depth_write(intel);
-
- /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
- * drawing origin may not be correctly emitted.
- */
- intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, intel->ctx.Depth.Clear, clearColor, 0, 0, 0, 0); /* texcoords */
-
- mask &=
- ~(BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH);
- }
-
- /* clear the remaining (color) renderbuffers */
- for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
- const GLuint bufBit = 1 << buf;
- if (mask & bufBit) {
- struct intel_renderbuffer *irbColor =
- intel_renderbuffer(fb->Attachment[buf].Renderbuffer);
- GLuint color = (irbColor->region->cpp == 4)
- ? intel->ClearColor8888 : intel->ClearColor565;
-
- ASSERT(irbColor);
-
- intel->vtbl.meta_no_depth_write(intel);
- intel->vtbl.meta_no_stencil_write(intel);
- intel->vtbl.meta_color_mask(intel, GL_TRUE);
- intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
-
- /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
- * drawing origin may not be correctly emitted.
- */
- intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, 0, /* depth clear val */
- color, 0, 0, 0, 0); /* texcoords */
-
- mask &= ~bufBit;
- }
- }
-
- intel->vtbl.leave_meta_state(intel);
- intel_batchbuffer_flush(intel->batch);
- }
- UNLOCK_HARDWARE(intel);
-}
-
-
-
-
-/**
- * Copy the window contents named by dPriv to the rotated (or reflected)
- * color buffer.
- * srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
- */
-void
-intelRotateWindow(struct intel_context *intel,
- __DRIdrawablePrivate * dPriv, GLuint srcBuf)
-{
- intelScreenPrivate *screen = intel->intelScreen;
- drm_clip_rect_t fullRect;
- struct intel_framebuffer *intel_fb;
- struct intel_region *src;
- const drm_clip_rect_t *clipRects;
- int numClipRects;
- int i;
- GLenum format, type;
-
- int xOrig, yOrig;
- int origNumClipRects;
- drm_clip_rect_t *origRects;
-
- /*
- * set up hardware state
- */
- intelFlush(&intel->ctx);
-
- LOCK_HARDWARE(intel);
-
- if (!intel->numClipRects) {
- UNLOCK_HARDWARE(intel);
- return;
- }
-
- intel->vtbl.install_meta_state(intel);
-
- intel->vtbl.meta_no_depth_write(intel);
- intel->vtbl.meta_no_stencil_write(intel);
- intel->vtbl.meta_color_mask(intel, GL_FALSE);
-
-
- /* save current drawing origin and cliprects (restored at end) */
- xOrig = intel->drawX;
- yOrig = intel->drawY;
- origNumClipRects = intel->numClipRects;
- origRects = intel->pClipRects;
-
- /*
- * set drawing origin, cliprects for full-screen access to rotated screen
- */
- fullRect.x1 = 0;
- fullRect.y1 = 0;
- fullRect.x2 = screen->rotatedWidth;
- fullRect.y2 = screen->rotatedHeight;
- intel->drawX = 0;
- intel->drawY = 0;
- intel->numClipRects = 1;
- intel->pClipRects = &fullRect;
-
- intel->vtbl.meta_draw_region(intel, screen->rotated_region, NULL); /* ? */
-
- intel_fb = dPriv->driverPrivate;
-
- if ((srcBuf == BUFFER_BIT_BACK_LEFT && !intel_fb->pf_active)) {
- src = intel_get_rb_region(&intel_fb->Base, BUFFER_BACK_LEFT);
- clipRects = dPriv->pBackClipRects;
- numClipRects = dPriv->numBackClipRects;
- }
- else {
- src = intel_get_rb_region(&intel_fb->Base, BUFFER_FRONT_LEFT);
- clipRects = dPriv->pClipRects;
- numClipRects = dPriv->numClipRects;
- }
-
- if (src->cpp == 4) {
- format = GL_BGRA;
- type = GL_UNSIGNED_BYTE;
- }
- else {
- format = GL_BGR;
- type = GL_UNSIGNED_SHORT_5_6_5_REV;
- }
-
- /* set the whole screen up as a texture to avoid alignment issues */
- intel->vtbl.meta_tex_rect_source(intel,
- src->buffer,
- screen->width,
- screen->height, src->pitch, format, type);
-
- intel->vtbl.meta_texture_blend_replace(intel);
-
- /*
- * loop over the source window's cliprects
- */
- for (i = 0; i < numClipRects; i++) {
- int srcX0 = clipRects[i].x1;
- int srcY0 = clipRects[i].y1;
- int srcX1 = clipRects[i].x2;
- int srcY1 = clipRects[i].y2;
- GLfloat verts[4][2], tex[4][2];
- int j;
-
- /* build vertices for four corners of clip rect */
- verts[0][0] = srcX0;
- verts[0][1] = srcY0;
- verts[1][0] = srcX1;
- verts[1][1] = srcY0;
- verts[2][0] = srcX1;
- verts[2][1] = srcY1;
- verts[3][0] = srcX0;
- verts[3][1] = srcY1;
-
- /* .. and texcoords */
- tex[0][0] = srcX0;
- tex[0][1] = srcY0;
- tex[1][0] = srcX1;
- tex[1][1] = srcY0;
- tex[2][0] = srcX1;
- tex[2][1] = srcY1;
- tex[3][0] = srcX0;
- tex[3][1] = srcY1;
-
- /* transform coords to rotated screen coords */
-
- for (j = 0; j < 4; j++) {
- matrix23TransformCoordf(&screen->rotMatrix,
- &verts[j][0], &verts[j][1]);
- }
-
- /* draw polygon to map source image to dest region */
- intel_meta_draw_poly(intel, 4, verts, 0, 0, tex);
-
- } /* cliprect loop */
-
- intel->vtbl.leave_meta_state(intel);
- intel_batchbuffer_flush(intel->batch);
-
- /* restore original drawing origin and cliprects */
- intel->drawX = xOrig;
- intel->drawY = yOrig;
- intel->numClipRects = origNumClipRects;
- intel->pClipRects = origRects;
-
- UNLOCK_HARDWARE(intel);
-}
-
-
-/**
- * Called by ctx->Driver.Clear.
- */
-static void
-intelClear(GLcontext *ctx, GLbitfield mask)
-{
- struct intel_context *intel = intel_context(ctx);
- const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask);
- GLbitfield tri_mask = 0;
- GLbitfield blit_mask = 0;
- GLbitfield swrast_mask = 0;
- struct gl_framebuffer *fb = ctx->DrawBuffer;
- GLuint i;
-
- if (0)
- fprintf(stderr, "%s\n", __FUNCTION__);
-
- /* HW color buffers (front, back, aux, generic FBO, etc) */
- if (colorMask == ~0) {
- /* clear all R,G,B,A */
- /* XXX FBO: need to check if colorbuffers are software RBOs! */
- blit_mask |= (mask & BUFFER_BITS_COLOR);
- }
- else {
- /* glColorMask in effect */
- tri_mask |= (mask & BUFFER_BITS_COLOR);
- }
-
- /* HW stencil */
- if (mask & BUFFER_BIT_STENCIL) {
- const struct intel_region *stencilRegion
- = intel_get_rb_region(fb, BUFFER_STENCIL);
- if (stencilRegion) {
- /* have hw stencil */
- if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
- /* not clearing all stencil bits, so use triangle clearing */
- tri_mask |= BUFFER_BIT_STENCIL;
- }
- else {
- /* clearing all stencil bits, use blitting */
- blit_mask |= BUFFER_BIT_STENCIL;
- }
- }
- }
-
- /* HW depth */
- if (mask & BUFFER_BIT_DEPTH) {
- /* clear depth with whatever method is used for stencil (see above) */
- if (tri_mask & BUFFER_BIT_STENCIL)
- tri_mask |= BUFFER_BIT_DEPTH;
- else
- blit_mask |= BUFFER_BIT_DEPTH;
- }
-
- /* SW fallback clearing */
- swrast_mask = mask & ~tri_mask & ~blit_mask;
-
- for (i = 0; i < BUFFER_COUNT; i++) {
- GLuint bufBit = 1 << i;
- if ((blit_mask | tri_mask) & bufBit) {
- if (!fb->Attachment[i].Renderbuffer->ClassID) {
- blit_mask &= ~bufBit;
- tri_mask &= ~bufBit;
- swrast_mask |= bufBit;
- }
- }
- }
-
-
- intelFlush(ctx); /* XXX intelClearWithBlit also does this */
-
- if (blit_mask)
- intelClearWithBlit(ctx, blit_mask);
-
- if (tri_mask)
- intelClearWithTris(intel, tri_mask);
-
- if (swrast_mask)
- _swrast_Clear(ctx, swrast_mask);
-}
-
-
-/* Emit wait for pending flips */
-void
-intel_wait_flips(struct intel_context *intel, GLuint batch_flags)
-{
- struct intel_framebuffer *intel_fb =
- (struct intel_framebuffer *) intel->ctx.DrawBuffer;
- struct intel_renderbuffer *intel_rb =
- intel_get_renderbuffer(&intel_fb->Base,
- intel_fb->Base._ColorDrawBufferMask[0] ==
- BUFFER_BIT_FRONT_LEFT ? BUFFER_FRONT_LEFT :
- BUFFER_BACK_LEFT);
-
- if (intel_fb->Base.Name == 0 && intel_rb->pf_pending == intel_fb->pf_seq) {
- GLint pf_planes = intel_fb->pf_planes;
- BATCH_LOCALS;
-
- /* Wait for pending flips to take effect */
- BEGIN_BATCH(2, batch_flags);
- OUT_BATCH(pf_planes & 0x1 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP)
- : 0);
- OUT_BATCH(pf_planes & 0x2 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_B_FLIP)
- : 0);
- ADVANCE_BATCH();
-
- intel_rb->pf_pending--;
- }
-}
-
-
-/* Flip the front & back buffers
- */
-static GLboolean
-intelPageFlip(const __DRIdrawablePrivate * dPriv)
-{
- struct intel_context *intel;
- int ret;
- struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
-
- if (INTEL_DEBUG & DEBUG_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
-
- assert(dPriv);
- assert(dPriv->driContextPriv);
- assert(dPriv->driContextPriv->driverPrivate);
-
- intel = (struct intel_context *) dPriv->driContextPriv->driverPrivate;
-
- if (intel->intelScreen->drmMinor < 9)
- return GL_FALSE;
-
- intelFlush(&intel->ctx);
-
- ret = 0;
-
- LOCK_HARDWARE(intel);
-
- if (dPriv->numClipRects && intel_fb->pf_active) {
- drm_i915_flip_t flip;
-
- flip.pipes = intel_fb->pf_planes;
-
- ret = drmCommandWrite(intel->driFd, DRM_I915_FLIP, &flip, sizeof(flip));
- }
-
- UNLOCK_HARDWARE(intel);
-
- if (ret || !intel_fb->pf_active)
- return GL_FALSE;
-
- if (!dPriv->numClipRects) {
- usleep(10000); /* throttle invisible client 10ms */
- }
-
- intel_fb->pf_current_page = (intel->sarea->pf_current_page >>
- (intel_fb->pf_planes & 0x2)) & 0x3;
-
- if (dPriv->numClipRects != 0) {
- intel_get_renderbuffer(&intel_fb->Base, BUFFER_FRONT_LEFT)->pf_pending =
- intel_get_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT)->pf_pending =
- ++intel_fb->pf_seq;
- }
-
- intel_flip_renderbuffers(intel_fb);
- intel_draw_buffer(&intel->ctx, &intel_fb->Base);
-
- return GL_TRUE;
-}
-
-#if 0
-void
-intelSwapBuffers(__DRIdrawablePrivate * dPriv)
-{
- if (dPriv->driverPrivate) {
- const struct gl_framebuffer *fb
- = (struct gl_framebuffer *) dPriv->driverPrivate;
- if (fb->Visual.doubleBufferMode) {
- GET_CURRENT_CONTEXT(ctx);
- if (ctx && ctx->DrawBuffer == fb) {
- _mesa_notifySwapBuffers(ctx); /* flush pending rendering */
- }
- if (intel->doPageFlip) {
- intelPageFlip(dPriv);
- }
- else {
- intelCopyBuffer(dPriv);
- }
- }
- }
- else {
- _mesa_problem(NULL,
- "dPriv has no gl_framebuffer pointer in intelSwapBuffers");
- }
-}
-#else
-/* Trunk version:
- */
-
-static GLboolean
-intelScheduleSwap(__DRIdrawablePrivate * dPriv, GLboolean *missed_target)
-{
- struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
- unsigned int interval;
- struct intel_context *intel =
- intelScreenContext(dPriv->driScreenPriv->private);
- const intelScreenPrivate *intelScreen = intel->intelScreen;
- unsigned int target;
- drm_i915_vblank_swap_t swap;
- GLboolean ret;
-
- if (!dPriv->vblFlags ||
- (dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) ||
- intelScreen->current_rotation != 0 ||
- intelScreen->drmMinor < (intel_fb->pf_active ? 9 : 6))
- return GL_FALSE;
-
- interval = driGetVBlankInterval(dPriv);
-
- swap.seqtype = DRM_VBLANK_ABSOLUTE;
-
- if (dPriv->vblFlags & VBLANK_FLAG_SYNC) {
- swap.seqtype |= DRM_VBLANK_NEXTONMISS;
- } else if (interval == 0) {
- return GL_FALSE;
- }
-
- swap.drawable = dPriv->hHWDrawable;
- target = swap.sequence = dPriv->vblSeq + interval;
-
- if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
- swap.seqtype |= DRM_VBLANK_SECONDARY;
- }
-
- LOCK_HARDWARE(intel);
-
- intel_batchbuffer_flush(intel->batch);
-
- if ( intel_fb->pf_active ) {
- swap.seqtype |= DRM_VBLANK_FLIP;
-
- intel_fb->pf_current_page = (((intel->sarea->pf_current_page >>
- (intel_fb->pf_planes & 0x2)) & 0x3) + 1) %
- intel_fb->pf_num_pages;
- }
-
- if (!drmCommandWriteRead(intel->driFd, DRM_I915_VBLANK_SWAP, &swap,
- sizeof(swap))) {
- dPriv->vblSeq = swap.sequence;
- swap.sequence -= target;
- *missed_target = swap.sequence > 0 && swap.sequence <= (1 << 23);
-
- intel_get_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT)->vbl_pending =
- intel_get_renderbuffer(&intel_fb->Base,
- BUFFER_FRONT_LEFT)->vbl_pending =
- dPriv->vblSeq;
-
- if (swap.seqtype & DRM_VBLANK_FLIP) {
- intel_flip_renderbuffers(intel_fb);
- intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
- }
-
- ret = GL_TRUE;
- } else {
- if (swap.seqtype & DRM_VBLANK_FLIP) {
- intel_fb->pf_current_page = ((intel->sarea->pf_current_page >>
- (intel_fb->pf_planes & 0x2)) & 0x3) %
- intel_fb->pf_num_pages;
- }
-
- ret = GL_FALSE;
- }
-
- UNLOCK_HARDWARE(intel);
-
- return ret;
-}
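A minimal standalone sketch of the missed-target test used above (function and variable names are illustrative, not driver code). After DRM_I915_VBLANK_SWAP returns, the kernel reports the vblank sequence at which the swap was actually queued; unsigned wraparound arithmetic then decides whether that sequence already lies past the requested target:

    #include <stdint.h>
    #include <stdio.h>

    /* Nonzero when 'completed' is ahead of 'requested': with a wrapping
     * 32-bit counter, the difference lands in (0, 2^23] only if the
     * requested sequence has already been passed. */
    static int swap_missed_target(uint32_t completed, uint32_t requested)
    {
        uint32_t diff = completed - requested;   /* modulo-2^32 arithmetic */
        return diff > 0 && diff <= (1u << 23);
    }

    int main(void)
    {
        printf("%d\n", swap_missed_target(105u, 100u));      /* 1: missed */
        printf("%d\n", swap_missed_target(100u, 105u));      /* 0: still pending */
        printf("%d\n", swap_missed_target(3u, 0xfffffffeu)); /* 1: missed across the wrap */
        return 0;
    }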
-
-void
-intelSwapBuffers(__DRIdrawablePrivate * dPriv)
-{
- if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
- GET_CURRENT_CONTEXT(ctx);
- struct intel_context *intel;
-
- if (ctx == NULL)
- return;
-
- intel = intel_context(ctx);
-
- if (ctx->Visual.doubleBufferMode) {
- intelScreenPrivate *screen = intel->intelScreen;
- GLboolean missed_target;
- struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
- int64_t ust;
-
- _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
-
- if (screen->current_rotation != 0 ||
- !intelScheduleSwap(dPriv, &missed_target)) {
- driWaitForVBlank(dPriv, &missed_target);
-
- if (screen->current_rotation != 0 || !intelPageFlip(dPriv)) {
- intelCopyBuffer(dPriv, NULL);
- }
-
- if (screen->current_rotation != 0) {
- intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
- }
- }
-
- intel_fb->swap_count++;
- (*dri_interface->getUST) (&ust);
- if (missed_target) {
- intel_fb->swap_missed_count++;
- intel_fb->swap_missed_ust = ust - intel_fb->swap_ust;
- }
-
- intel_fb->swap_ust = ust;
- }
- }
- else {
- /* XXX this shouldn't be an error but we can't handle it for now */
- fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
- }
-}
-#endif
-
-void
-intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
-{
- if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
- struct intel_context *intel =
- (struct intel_context *) dPriv->driContextPriv->driverPrivate;
- GLcontext *ctx = &intel->ctx;
-
- if (ctx->Visual.doubleBufferMode) {
- drm_clip_rect_t rect;
- rect.x1 = x + dPriv->x;
- rect.y1 = (dPriv->h - y - h) + dPriv->y;
- rect.x2 = rect.x1 + w;
- rect.y2 = rect.y1 + h;
- _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
- intelCopyBuffer(dPriv, &rect);
- }
- }
- else {
- /* XXX this shouldn't be an error but we can't handle it for now */
- fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
- }
-}
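The y1 computation in intelCopySubBuffer above flips the sub-rectangle from OpenGL's lower-left origin to the screen's top-left origin before adding the drawable offset. A standalone sketch with made-up field names (not the __DRIdrawablePrivate layout):

    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    /* draw_x/draw_y: drawable position on screen; draw_h: drawable height;
     * x/y/w/h: sub-rectangle in GL window coordinates (origin bottom-left). */
    static struct rect gl_rect_to_screen(int draw_x, int draw_y, int draw_h,
                                         int x, int y, int w, int h)
    {
        struct rect r;
        r.x1 = draw_x + x;
        r.y1 = draw_y + (draw_h - y - h);  /* flip bottom-up to top-down */
        r.x2 = r.x1 + w;
        r.y2 = r.y1 + h;
        return r;
    }

    int main(void)
    {
        /* 100-pixel-tall drawable at (10,20): a 20x10 GL rect at (0,0) is the
         * window's bottom-left corner, so screen y1 = 20 + (100 - 0 - 10) = 110. */
        struct rect r = gl_rect_to_screen(10, 20, 100, 0, 0, 20, 10);
        printf("(%d,%d)-(%d,%d)\n", r.x1, r.y1, r.x2, r.y2);
        return 0;
    }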
-
-
-/**
- * Update the hardware state for drawing into a window or framebuffer object.
- *
- * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
- * places within the driver.
- *
- * Basically, this needs to be called any time the current framebuffer
- * changes, the renderbuffers change, or we need to draw into different
- * color buffers.
- */
-void
-intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_region *colorRegion, *depthRegion = NULL;
- struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
- int front = 0; /* drawing to front color buffer? */
-
- if (!fb) {
- /* this can happen during the initial context initialization */
- return;
- }
-
- /* Do this here, not in core Mesa, since this function is called from
- * many places within the driver.
- */
- if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
- /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
- _mesa_update_framebuffer(ctx);
- /* this updates the DrawBuffer's Width/Height if it's a FBO */
- _mesa_update_draw_buffer_bounds(ctx);
- }
-
- if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
- /* this may occur when we're called by glBindFramebuffer() during
- * the process of someone setting up renderbuffers, etc.
- */
- /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
- return;
- }
-
- if (fb->Name)
- intel_validate_paired_depth_stencil(ctx, fb);
-
- /*
- * How many color buffers are we drawing into?
- */
- if (fb->_NumColorDrawBuffers[0] != 1
-#if 0
- /* XXX FBO temporary - always use software rendering */
- || 1
-#endif
- ) {
- /* writing to 0 color buffers or to more than one */
- /*_mesa_debug(ctx, "Software rendering\n");*/
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
- front = 1; /* might not have back color buffer */
- }
- else {
- /* draw to exactly one color buffer */
- /*_mesa_debug(ctx, "Hardware rendering\n");*/
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
- if (fb->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
- front = 1;
- }
- }
-
- /*
- * Get the intel_renderbuffer for the colorbuffer we're drawing into.
- * And set up cliprects.
- */
- if (fb->Name == 0) {
- /* drawing to window system buffer */
- if (front) {
- intelSetFrontClipRects(intel);
- colorRegion = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
- }
- else {
- intelSetBackClipRects(intel);
- colorRegion = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
- }
- }
- else {
- /* drawing to user-created FBO */
- struct intel_renderbuffer *irb;
- intelSetRenderbufferClipRects(intel);
- irb = intel_renderbuffer(fb->_ColorDrawBuffers[0][0]);
- colorRegion = (irb && irb->region) ? irb->region : NULL;
- }
-
- /* Update culling direction which changes depending on the
- * orientation of the buffer:
- */
- if (ctx->Driver.FrontFace)
- ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
- else
- ctx->NewState |= _NEW_POLYGON;
-
- if (!colorRegion) {
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
- }
- else {
- FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
- }
-
- /***
- *** Get depth buffer region and check if we need a software fallback.
- *** Note that the depth buffer is usually a DEPTH_STENCIL buffer.
- ***/
- if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
- irbDepth = intel_renderbuffer(fb->_DepthBuffer->Wrapped);
- if (irbDepth && irbDepth->region) {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
- depthRegion = irbDepth->region;
- }
- else {
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
- depthRegion = NULL;
- }
- }
- else {
- /* not using depth buffer */
- FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
- depthRegion = NULL;
- }
-
- /***
- *** Stencil buffer
- *** This can only be hardware accelerated if we're using a
- *** combined DEPTH_STENCIL buffer (for now anyway).
- ***/
- if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
- irbStencil = intel_renderbuffer(fb->_StencilBuffer->Wrapped);
- if (irbStencil && irbStencil->region) {
- ASSERT(irbStencil->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
- /* need to re-compute stencil hw state */
- ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
- if (!depthRegion)
- depthRegion = irbStencil->region;
- }
- else {
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
- }
- }
- else {
- /* XXX FBO: instead of FALSE, pass ctx->Stencil.Enabled ??? */
- FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
- /* need to re-compute stencil hw state */
- ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
- }
-
- /*
- * Update depth test state
- */
- if (ctx->Depth.Test && fb->Visual.depthBits > 0) {
- ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE);
- }
- else {
- ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE);
- }
-
- /**
- ** Release old regions, reference new regions
- **/
-#if 0 /* XXX FBO: this seems to be redundant with i915_state_draw_region() */
- if (intel->draw_region != colorRegion) {
- intel_region_release(&intel->draw_region);
- intel_region_reference(&intel->draw_region, colorRegion);
- }
- if (intel->intelScreen->depth_region != depthRegion) {
- intel_region_release(&intel->intelScreen->depth_region);
- intel_region_reference(&intel->intelScreen->depth_region, depthRegion);
- }
-#endif
-
- intel->vtbl.set_draw_region(intel, colorRegion, depthRegion);
-
- /* update viewport since it depends on window size */
- ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
- ctx->Viewport.Width, ctx->Viewport.Height);
-
- /* Update hardware scissor */
- ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
- ctx->Scissor.Width, ctx->Scissor.Height);
-}
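The fallback handling above boils down to a small decision: hardware rendering is used only when exactly one color buffer is targeted and that buffer has a backing region (depth and stencil are checked analogously). A compact standalone sketch of just the color-buffer part (names are illustrative, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    static bool needs_draw_buffer_fallback(int num_color_draw_buffers,
                                           bool color_region_present)
    {
        if (num_color_draw_buffers != 1)
            return true;                /* 0 or several buffers: software path */
        return !color_region_present;   /* no backing region: software path */
    }

    int main(void)
    {
        printf("%d\n", needs_draw_buffer_fallback(1, true));   /* 0: hardware */
        printf("%d\n", needs_draw_buffer_fallback(2, true));   /* 1: fallback */
        printf("%d\n", needs_draw_buffer_fallback(1, false));  /* 1: fallback */
        return 0;
    }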
-
-
-static void
-intelDrawBuffer(GLcontext * ctx, GLenum mode)
-{
- intel_draw_buffer(ctx, ctx->DrawBuffer);
-}
-
-
-static void
-intelReadBuffer(GLcontext * ctx, GLenum mode)
-{
- if (ctx->ReadBuffer == ctx->DrawBuffer) {
- /* This will update FBO completeness status.
- * A framebuffer will be incomplete if the GL_READ_BUFFER setting
- * refers to a missing renderbuffer. Calling glReadBuffer can set
- * that straight and can make the drawing buffer complete.
- */
- intel_draw_buffer(ctx, ctx->DrawBuffer);
- }
- /* Generally, functions which read pixels (glReadPixels, glCopyPixels, etc)
- * reference ctx->ReadBuffer and do appropriate state checks.
- */
-}
-
-
-void
-intelInitBufferFuncs(struct dd_function_table *functions)
-{
- functions->Clear = intelClear;
- functions->DrawBuffer = intelDrawBuffer;
- functions->ReadBuffer = intelReadBuffer;
-}
+../intel/intel_buffers.c \ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
index 32d9886091..e9df5c6279 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
+++ b/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.c
@@ -1,833 +1 @@
-/**************************************************************************
- *
- * Copyright © 2007 Red Hat Inc.
- * Copyright © 2007 Intel Corporation
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
- * Eric Anholt <eric@anholt.net>
- * Dave Airlie <airlied@linux.ie>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include "glthread.h"
-#include "errno.h"
-#include "mtypes.h"
-#include "dri_bufmgr.h"
-#include "string.h"
-#include "imports.h"
-
-#include "i915_drm.h"
-
-#include "intel_bufmgr_ttm.h"
-
-#define BUFMGR_DEBUG 0
-
-struct intel_reloc_info
-{
- GLuint type;
- GLuint reloc;
- GLuint delta; /* not needed? */
- GLuint index;
- drm_handle_t handle;
-};
-
-struct intel_bo_node
-{
- drmMMListHead head;
- drmBO *buf;
- struct drm_i915_op_arg bo_arg;
- unsigned long arg0;
- unsigned long arg1;
- void (*destroy)(void *);
- void *priv;
-};
-
-struct intel_bo_reloc_list
-{
- drmMMListHead head;
- drmBO buf;
- uint32_t *relocs;
-};
-
-struct intel_bo_reloc_node
-{
- drmMMListHead head;
- drm_handle_t handle;
- uint32_t nr_reloc_types;
- struct intel_bo_reloc_list type_list;
-};
-
-struct intel_bo_list {
- unsigned numCurrent;
- drmMMListHead list;
- void (*destroy)(void *node);
-};
-
-typedef struct _dri_bufmgr_ttm {
- dri_bufmgr bufmgr;
-
- int fd;
- _glthread_Mutex mutex;
- unsigned int fence_type;
- unsigned int fence_type_flush;
-
- uint32_t max_relocs;
- /** ttm relocation list */
- struct intel_bo_list list;
- struct intel_bo_list reloc_list;
-
-} dri_bufmgr_ttm;
-
-typedef struct _dri_bo_ttm {
- dri_bo bo;
-
- int refcount; /* Protected by bufmgr->mutex */
- drmBO drm_bo;
- const char *name;
-} dri_bo_ttm;
-
-typedef struct _dri_fence_ttm
-{
- dri_fence fence;
-
- int refcount; /* Protected by bufmgr->mutex */
- const char *name;
- drmFence drm_fence;
-} dri_fence_ttm;
-
-
-static void intel_bo_free_list(struct intel_bo_list *list)
-{
- struct intel_bo_node *node;
- drmMMListHead *l;
-
- l = list->list.next;
- while(l != &list->list) {
- DRMLISTDEL(l);
- node = DRMLISTENTRY(struct intel_bo_node, l, head);
- list->destroy(node);
- l = list->list.next;
- list->numCurrent--;
- }
-}
-
-static void generic_destroy(void *nodep)
-{
- free(nodep);
-}
-
-static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void (*destroy)(void *))
-{
- DRMINITLISTHEAD(&list->list);
- list->numCurrent = 0;
- if (destroy)
- list->destroy = destroy;
- else
- list->destroy = generic_destroy;
- return 0;
-}
-
-
-static struct drm_i915_op_arg *
-intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list, GLuint *count_p)
-{
- struct intel_bo_node *node;
- struct intel_bo_reloc_node *rl_node;
- drmMMListHead *l, *rl;
- struct drm_i915_op_arg *arg, *first;
- struct drm_bo_op_req *req;
- uint64_t *prevNext = NULL;
- GLuint count = 0;
-
- first = NULL;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(struct intel_bo_node, l, head);
-
- arg = &node->bo_arg;
- req = &arg->d.req;
-
- if (!first)
- first = arg;
-
- if (prevNext)
- *prevNext = (unsigned long) arg;
-
- memset(arg, 0, sizeof(*arg));
- prevNext = &arg->next;
- req->bo_req.handle = node->buf->handle;
- req->op = drm_bo_validate;
- req->bo_req.flags = node->arg0;
- req->bo_req.hint = 0;
- req->bo_req.mask = node->arg1;
- req->bo_req.fence_class = 0; /* Backwards compat. */
- arg->reloc_handle = 0;
-
- for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
- rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
-
- if (rl_node->handle == node->buf->handle) {
- arg->reloc_handle = rl_node->type_list.buf.handle;
- }
- }
- count++;
- }
-
- if (!first)
- return 0;
-
- *count_p = count;
- return first;
-}
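intel_setup_validate_list above chains the per-buffer op args by remembering a pointer to the previous entry's next field and patching it once the following entry is known. The same linking idiom in a standalone sketch (made-up struct, not the DRM types):

    #include <stdint.h>
    #include <stdio.h>

    struct op_arg {
        uint64_t next;     /* address of the following op_arg, 0 if last */
        int      handle;   /* illustrative payload */
    };

    int main(void)
    {
        struct op_arg args[3];
        struct op_arg *first = NULL;
        uint64_t *prev_next = NULL;

        for (int i = 0; i < 3; i++) {
            struct op_arg *arg = &args[i];
            if (!first)
                first = arg;
            if (prev_next)
                *prev_next = (uintptr_t) arg;  /* patch the predecessor's link */
            arg->next = 0;
            arg->handle = 100 + i;
            prev_next = &arg->next;
        }

        for (struct op_arg *a = first; a != NULL;
             a = (struct op_arg *) (uintptr_t) a->next)
            printf("handle %d\n", a->handle);
        return 0;
    }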
-
-static void intel_free_validate_list(int fd, struct intel_bo_list *list)
-{
- struct intel_bo_node *node;
- drmMMListHead *l;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(struct intel_bo_node, l, head);
-
- if (node->destroy)
- (*node->destroy)(node->priv);
-
- }
-}
-
-static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
-{
- struct intel_bo_reloc_node *reloc_node;
- drmMMListHead *rl, *tmp;
-
- for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
- reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
-
- DRMLISTDEL(rl);
-
- if (reloc_node->nr_reloc_types > 1) {
-
- /* TODO */
- }
-
- drmBOUnmap(fd, &reloc_node->type_list.buf);
- drmBOUnreference(fd, &reloc_node->type_list.buf);
- free(reloc_node);
- }
-}
-
-static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, unsigned flags,
- unsigned mask, int *itemLoc, void (*destroy_cb)(void *))
-{
- struct intel_bo_node *node, *cur;
- drmMMListHead *l;
- int count = 0;
- int ret = 0;
- drmBO *buf_bo = &((dri_bo_ttm *)buf)->drm_bo;
- cur = NULL;
-
- for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(struct intel_bo_node, l, head);
- if (node->buf->handle == buf_bo->handle) {
- cur = node;
- break;
- }
- count++;
- }
-
- if (!cur) {
- cur = drmMalloc(sizeof(*cur));
- if (!cur) {
- return -ENOMEM;
- }
- cur->buf = buf_bo;
- cur->priv = buf;
- cur->arg0 = flags;
- cur->arg1 = mask;
- cur->destroy = destroy_cb;
- ret = 1;
-
- DRMLISTADDTAIL(&cur->head, &list->list);
-
- } else {
- unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
- unsigned memFlags = cur->arg0 & flags & memMask;
-
- if (!memFlags) {
- return -EINVAL;
- }
- if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
- return -EINVAL;
- }
- cur->arg1 |= mask;
- cur->arg0 = memFlags | ((cur->arg0 | flags) &
- cur->arg1 & ~DRM_BO_MASK_MEM);
- }
- *itemLoc = count;
- return ret;
-}
-
-
-#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * sizeof(uint32_t))
-
-static int intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type, int max_relocs)
-{
- int ret;
-
- /* should allocate a drmBO here */
- ret = drmBOCreate(fd, RELOC_BUF_SIZE(max_relocs), 0,
- NULL,
- DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CACHED,
- 0, &cur_type->buf);
- if (ret)
- return ret;
-
- ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void **)&cur_type->relocs);
- if (ret)
- return ret;
- return 0;
-}
-
-
-static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info, uint32_t max_relocs)
-{
- struct intel_bo_reloc_node *rl_node, *cur;
- drmMMListHead *rl, *l;
- int ret = 0;
- uint32_t *reloc_start;
- int num_relocs;
- struct intel_bo_reloc_list *cur_type;
-
- cur = NULL;
-
- for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
- rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
- if (rl_node->handle == reloc_info->handle) {
- cur = rl_node;
- break;
- }
- }
-
- if (!cur) {
-
- cur = malloc(sizeof(*cur));
- if (!cur)
- return -ENOMEM;
-
- cur->nr_reloc_types = 1;
- cur->handle = reloc_info->handle;
- cur_type = &cur->type_list;
-
- DRMINITLISTHEAD(&cur->type_list.head);
- ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
- if (ret) {
- return -1;
- }
- DRMLISTADDTAIL(&cur->head, &reloc_list->list);
-
- cur_type->relocs[0] = 0 | (reloc_info->type << 16);
- cur_type->relocs[1] = 0; /* next reloc buffer handle is 0 */
-
- } else {
- int found = 0;
- if ((cur->type_list.relocs[0] >> 16) == reloc_info->type) {
- cur_type = &cur->type_list;
- found = 1;
- } else {
- for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
- cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
- if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type) {
- found = 1;
- break;
- }
- }
- }
-
- /* didn't find the relocation type */
- if (!found) {
- cur_type = malloc(sizeof(*cur_type));
- if (!cur_type) {
- return -ENOMEM;
- }
-
- ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
- DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
-
- cur_type->relocs[0] = (reloc_info->type << 16);
- cur_type->relocs[1] = 0;
-
- cur->nr_reloc_types++;
- }
- }
-
- reloc_start = cur_type->relocs;
-
- num_relocs = (reloc_start[0] & 0xffff);
-
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER] = reloc_info->reloc;
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+1] = reloc_info->delta;
- reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+2] = reloc_info->index;
- reloc_start[0]++;
- if (((reloc_start[0] & 0xffff)) > (max_relocs)) {
- return -ENOMEM;
- }
- return 0;
-}
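The relocation buffer written above is a flat array of dwords: a small header (record count and type in dword 0, a chained-buffer handle in dword 1) followed by fixed-stride records of (batch offset, delta, target buffer index). A standalone sketch of that layout; the header and stride sizes here are illustrative stand-ins for I915_RELOC_HEADER and I915_RELOC0_STRIDE from i915_drm.h:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define RELOC_HEADER 2   /* stand-in: dword 0 = count|type, dword 1 = next handle */
    #define RELOC_STRIDE 3   /* stand-in: offset, delta, buffer index per record */

    #define RELOC_BUF_DWORDS(n) (RELOC_HEADER + (n) * RELOC_STRIDE)

    static void add_reloc(uint32_t *buf, uint32_t offset, uint32_t delta,
                          uint32_t index)
    {
        uint32_t n = buf[0] & 0xffff;            /* low 16 bits: record count */
        uint32_t *rec = buf + RELOC_HEADER + n * RELOC_STRIDE;
        rec[0] = offset;
        rec[1] = delta;
        rec[2] = index;
        buf[0]++;                                /* bump the count */
    }

    int main(void)
    {
        uint32_t *buf = calloc(RELOC_BUF_DWORDS(16), sizeof(uint32_t));
        if (!buf)
            return 1;
        buf[0] = 0 | (0 << 16);                  /* reloc type 0, zero records */
        buf[1] = 0;                              /* no chained reloc buffer */
        add_reloc(buf, 0x80, 0, 3);
        printf("records: %u\n", (unsigned) (buf[0] & 0xffff));
        free(buf);
        return 0;
    }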
-
-
-#if 0
-int
-driFenceSignaled(DriFenceObject * fence, unsigned type)
-{
- int signaled;
- int ret;
-
- if (fence == NULL)
- return GL_TRUE;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
- _glthread_UNLOCK_MUTEX(fence->mutex);
- BM_CKFATAL(ret);
- return signaled;
-}
-#endif
-
-static dri_bo *
-dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
- unsigned long size, unsigned int alignment,
- unsigned int location_mask)
-{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- unsigned int pageSize = getpagesize();
- int ret;
- unsigned int flags, hint;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- /* The mask argument doesn't do anything for us that we want other than
- * determine which pool (TTM or local) the buffer is allocated into, so just
- * pass all of the allocation class flags.
- */
- flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_EXE;
- /* No hints we want to use. */
- hint = 0;
-
- ret = drmBOCreate(ttm_bufmgr->fd, size, alignment / pageSize,
- NULL, flags, hint, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return &ttm_buf->bo;
-}
-
-/* Our TTM backend doesn't allow creation of static buffers, as that requires
- * privelege for the non-fake case, and the lock in the fake case where we were
- * working around the X Server not creating buffers and passing handles to us.
- */
-static dri_bo *
-dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
- unsigned long offset, unsigned long size, void *virtual,
- unsigned int location_mask)
-{
- return NULL;
-}
-
-/** Returns a dri_bo wrapping the given buffer object handle.
- *
- * This can be used when one application needs to pass a buffer object
- * to another.
- */
-dri_bo *
-intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
- unsigned int handle)
-{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_bo_ttm *ttm_buf;
- int ret;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_buf = malloc(sizeof(*ttm_buf));
- if (!ttm_buf)
- return NULL;
-
- ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
- if (ret != 0) {
- free(ttm_buf);
- return NULL;
- }
- ttm_buf->bo.size = ttm_buf->drm_bo.size;
- ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
- ttm_buf->bo.virtual = NULL;
- ttm_buf->bo.bufmgr = bufmgr;
- ttm_buf->name = name;
- ttm_buf->refcount = 1;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo, handle,
- ttm_buf->name);
-#endif
-
- return &ttm_buf->bo;
-}
-
-static void
-dri_ttm_bo_reference(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ttm_buf->refcount++;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static void
-dri_ttm_bo_unreference(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- if (!buf)
- return;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--ttm_buf->refcount == 0) {
- int ret;
-
- ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
- if (ret != 0) {
- fprintf(stderr, "drmBOUnreference failed (%s): %s\n", ttm_buf->name,
- strerror(-ret));
- }
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unreference final: %p (%s)\n",
- &ttm_buf->bo, ttm_buf->name);
-#endif
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(buf);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static int
-dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
- unsigned int flags;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- flags = DRM_BO_FLAG_READ;
- if (write_enable)
- flags |= DRM_BO_FLAG_WRITE;
-
- assert(buf->virtual == NULL);
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
-}
-
-static int
-dri_ttm_bo_unmap(dri_bo *buf)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
-
- if (buf == NULL)
- return 0;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
-
- assert(buf->virtual != NULL);
-
- buf->virtual = NULL;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
-#endif
-
- return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
-}
-
-/* Returns a dri_fence wrapping the fence described by a kernel fence arg.
- *
- * This can be used to wrap a fence returned by the kernel (for example,
- * from a batchbuffer submission) in the dri_fence interface.
- */
-dri_fence *
-intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
- drm_fence_arg_t *arg)
-{
- dri_bufmgr_ttm *ttm_bufmgr;
- dri_fence_ttm *ttm_fence;
-
- ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
-
- ttm_fence = malloc(sizeof(*ttm_fence));
- if (!ttm_fence)
- return NULL;
-
- ttm_fence->drm_fence.handle = arg->handle;
- ttm_fence->drm_fence.fence_class = arg->fence_class;
- ttm_fence->drm_fence.type = arg->type;
- ttm_fence->drm_fence.flags = arg->flags;
- ttm_fence->drm_fence.signaled = 0;
- ttm_fence->drm_fence.sequence = arg->sequence;
-
- ttm_fence->fence.bufmgr = bufmgr;
- ttm_fence->name = name;
- ttm_fence->refcount = 1;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
- ttm_fence->name);
-#endif
-
- return &ttm_fence->fence;
-}
-
-
-static void
-dri_ttm_fence_reference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ++fence_ttm->refcount;
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
-#endif
-}
-
-static void
-dri_ttm_fence_unreference(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm;
-
- if (!fence)
- return;
-
- bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
-#endif
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- if (--fence_ttm->refcount == 0) {
- int ret;
-
- ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
- if (ret != 0) {
- fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
- fence_ttm->name, strerror(-ret));
- }
-
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- free(fence);
- return;
- }
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
-}
-
-static void
-dri_ttm_fence_wait(dri_fence *fence)
-{
- dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
- int ret;
-
- _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
- ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
- _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
- if (ret != 0) {
- _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
- __FILE__, __LINE__, ret, fence_ttm->name);
- abort();
- }
-
-#if BUFMGR_DEBUG
- fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
- fence_ttm->name);
-#endif
-}
-
-static void
-dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
-
- intel_bo_free_list(&bufmgr_ttm->list);
- intel_bo_free_list(&bufmgr_ttm->reloc_list);
-
- _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
- free(bufmgr);
-}
-
-
-static void intel_dribo_destroy_callback(void *priv)
-{
- dri_bo *dribo = priv;
-
- if (dribo) {
- dri_bo_unreference(dribo);
- }
-}
-
-static void
-dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
- dri_bo *relocatee)
-{
- dri_bo_ttm *ttm_buf = (dri_bo_ttm *)batch_buf;
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- int newItem;
- struct intel_reloc_info reloc;
- int mask;
- int ret;
-
- mask = DRM_BO_MASK_MEM;
- mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
-
- ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask, &newItem, intel_dribo_destroy_callback);
- if (ret < 0)
- return;
-
- if (ret == 1) {
- dri_bo_reference(relocatee);
- }
-
- reloc.type = I915_RELOC_TYPE_0;
- reloc.reloc = offset;
- reloc.delta = delta;
- reloc.index = newItem;
- reloc.handle = ttm_buf->drm_bo.handle;
-
- intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc, bufmgr_ttm->max_relocs);
- return;
-}
-
-
-static void *
-dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
- void *ptr;
- int itemLoc;
-
- dri_bo_unmap(batch_buf);
-
- intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
- DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
-
- ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list, count);
-
- return ptr;
-}
-
-static void
-dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
-{
- dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
-
- intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
- intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);
-
- intel_bo_free_list(&bufmgr_ttm->list);
-}
-
-/**
- * Initializes the TTM buffer manager, which uses the kernel to allocate, map,
- * and manage buffer objects.
- *
- * \param fd File descriptor of the opened DRM device.
- * \param fence_type Driver-specific fence type used for fences with no flush.
- * \param fence_type_flush Driver-specific fence type used for fences with a
- * flush.
- */
-dri_bufmgr *
-intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
- unsigned int fence_type_flush, int batch_size)
-{
- dri_bufmgr_ttm *bufmgr_ttm;
-
- bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
- bufmgr_ttm->fd = fd;
- bufmgr_ttm->fence_type = fence_type;
- bufmgr_ttm->fence_type_flush = fence_type_flush;
- _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
-
- /* let's go with one relocation for every four dwords - purely heuristic */
- bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;
-
- intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
- intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
-
- bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
- bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
- bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
- bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
- bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
- bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
- bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
- bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
- bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
- bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
- bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
- bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
- bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
- return &bufmgr_ttm->bufmgr;
-}
-
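A quick check of the "one relocation for every four dwords" heuristic above, under an assumed batch size (illustrative, not the driver's value); a 16 KiB batch yields 1024 relocation slots:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int batch_size = 16 * 1024;   /* bytes; illustrative only */
        uint32_t max_relocs = batch_size / sizeof(uint32_t) / 4;
        printf("max_relocs = %u\n", (unsigned) max_relocs);   /* prints 1024 */
        return 0;
    }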
+../intel/intel_bufmgr_ttm.c \ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_depthstencil.c b/src/mesa/drivers/dri/i915/intel_depthstencil.c
index d269a85a3c..4ac4ae690a 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_depthstencil.c
+++ b/src/mesa/drivers/dri/i915/intel_depthstencil.c
@@ -1,282 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "glheader.h"
-#include "imports.h"
-#include "context.h"
-#include "depthstencil.h"
-#include "fbobject.h"
-#include "framebuffer.h"
-#include "hash.h"
-#include "mtypes.h"
-#include "renderbuffer.h"
-
-#include "intel_context.h"
-#include "intel_fbo.h"
-#include "intel_depthstencil.h"
-#include "intel_regions.h"
-
-
-/**
- * The GL_EXT_framebuffer_object allows the user to create their own
- * framebuffer objects consisting of color renderbuffers (0 or more),
- * depth renderbuffers (0 or 1) and stencil renderbuffers (0 or 1).
- *
- * The spec considers depth and stencil renderbuffers to be totally independent
- * buffers. In reality, most graphics hardware today uses a combined
- * depth+stencil buffer (one 32-bit pixel = 24 bits of Z + 8 bits of stencil).
- *
- * This causes difficulty because the user may create some number of depth
- * renderbuffers and some number of stencil renderbuffers and bind them
- * together in framebuffers in any combination.
- *
- * This code manages all that.
- *
- * 1. Depth renderbuffers are always allocated in hardware as 32bpp
- * GL_DEPTH24_STENCIL8 buffers.
- *
- * 2. Stencil renderbuffers are initially allocated in software as 8bpp
- * GL_STENCIL_INDEX8 buffers.
- *
- * 3. Depth and Stencil renderbuffers use the PairedStencil and PairedDepth
- * fields (respectively) to indicate if the buffer's currently paired
- * with another stencil or depth buffer (respectively).
- *
- * 4. When a depth and stencil buffer are initially both attached to the
- * current framebuffer, we merge the stencil buffer values into the
- * depth buffer (really a depth+stencil buffer). The hardware then uses
- * the combined buffer.
- *
- * 5. Whenever a depth or stencil buffer is reallocated (with
- * glRenderbufferStorage) we undo the pairing and copy the stencil values
- * from the combined depth/stencil buffer back to the stencil-only buffer.
- *
- * 6. We also undo the pairing when we find a change in buffer bindings.
- *
- * 7. If a framebuffer is only using a depth renderbuffer (no stencil), we
- * just use the combined depth/stencil buffer and ignore the stencil values.
- *
- * 8. If a framebuffer is only using a stencil renderbuffer (no depth) we have
- * to promote the 8bpp software stencil buffer to a 32bpp hardware
- * depth+stencil buffer.
- *
- */
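The numbered points above collapse into a small decision on what to do for a given pair of attachments. A compact standalone sketch (the enum and function are illustrative, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    enum pairing_action {
        PAIR_NOTHING,          /* user supplied a combined depth/stencil buffer */
        PAIR_INTERLEAVE,       /* merge stencil values into the depth buffer */
        PAIR_UNPAIR_DEPTH,     /* depth only: drop any stale pairing */
        PAIR_PROMOTE_STENCIL,  /* stencil only: promote to 32bpp depth+stencil */
    };

    static enum pairing_action
    choose_pairing(bool has_depth, bool has_stencil, bool same_buffer)
    {
        if (has_depth && has_stencil)
            return same_buffer ? PAIR_NOTHING : PAIR_INTERLEAVE;
        if (has_depth)
            return PAIR_UNPAIR_DEPTH;
        if (has_stencil)
            return PAIR_PROMOTE_STENCIL;
        return PAIR_NOTHING;
    }

    int main(void)
    {
        printf("%d\n", choose_pairing(true, true, false));   /* PAIR_INTERLEAVE */
        printf("%d\n", choose_pairing(false, true, false));  /* PAIR_PROMOTE_STENCIL */
        printf("%d\n", choose_pairing(true, false, false));  /* PAIR_UNPAIR_DEPTH */
        return 0;
    }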
-
-
-
-static void
-map_regions(GLcontext * ctx,
- struct intel_renderbuffer *depthRb,
- struct intel_renderbuffer *stencilRb)
-{
- struct intel_context *intel = intel_context(ctx);
- if (depthRb && depthRb->region) {
- intel_region_map(intel->intelScreen, depthRb->region);
- depthRb->pfMap = depthRb->region->map;
- depthRb->pfPitch = depthRb->region->pitch;
- }
- if (stencilRb && stencilRb->region) {
- intel_region_map(intel->intelScreen, stencilRb->region);
- stencilRb->pfMap = stencilRb->region->map;
- stencilRb->pfPitch = stencilRb->region->pitch;
- }
-}
-
-static void
-unmap_regions(GLcontext * ctx,
- struct intel_renderbuffer *depthRb,
- struct intel_renderbuffer *stencilRb)
-{
- struct intel_context *intel = intel_context(ctx);
- if (depthRb && depthRb->region) {
- intel_region_unmap(intel->intelScreen, depthRb->region);
- depthRb->pfMap = NULL;
- depthRb->pfPitch = 0;
- }
- if (stencilRb && stencilRb->region) {
- intel_region_unmap(intel->intelScreen, stencilRb->region);
- stencilRb->pfMap = NULL;
- stencilRb->pfPitch = 0;
- }
-}
-
-
-
-/**
- * Undo the pairing/interleaving between depth and stencil buffers.
- * irb should be a depth/stencil or stencil renderbuffer.
- */
-void
-intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb)
-{
- if (irb->PairedStencil) {
- /* irb is a depth/stencil buffer */
- struct gl_renderbuffer *stencilRb;
- struct intel_renderbuffer *stencilIrb;
-
- ASSERT(irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
-
- stencilRb = _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
- stencilIrb = intel_renderbuffer(stencilRb);
- if (stencilIrb) {
- /* need to extract stencil values from the depth buffer */
- ASSERT(stencilIrb->PairedDepth == irb->Base.Name);
- map_regions(ctx, irb, stencilIrb);
- _mesa_extract_stencil(ctx, &irb->Base, &stencilIrb->Base);
- unmap_regions(ctx, irb, stencilIrb);
- stencilIrb->PairedDepth = 0;
- }
- irb->PairedStencil = 0;
- }
- else if (irb->PairedDepth) {
- /* irb is a stencil buffer */
- struct gl_renderbuffer *depthRb;
- struct intel_renderbuffer *depthIrb;
-
- ASSERT(irb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
- irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
-
- depthRb = _mesa_lookup_renderbuffer(ctx, irb->PairedDepth);
- depthIrb = intel_renderbuffer(depthRb);
- if (depthIrb) {
- /* need to extract stencil values from the depth buffer */
- ASSERT(depthIrb->PairedStencil == irb->Base.Name);
- map_regions(ctx, depthIrb, irb);
- _mesa_extract_stencil(ctx, &depthIrb->Base, &irb->Base);
- unmap_regions(ctx, depthIrb, irb);
- depthIrb->PairedStencil = 0;
- }
- irb->PairedDepth = 0;
- }
- else {
- _mesa_problem(ctx, "Problem in undo_depth_stencil_pairing");
- }
-
- ASSERT(irb->PairedStencil == 0);
- ASSERT(irb->PairedDepth == 0);
-}
-
-
-/**
- * Examine the depth and stencil renderbuffers which are attached to the
- * framebuffer. If both depth and stencil are attached, make sure that the
- * renderbuffers are 'paired' (combined). If only depth or only stencil is
- * attached, undo any previous pairing.
- *
- * Must be called if NewState & _NEW_BUFFER (when renderbuffer attachments
- * change, for example).
- */
-void
-intel_validate_paired_depth_stencil(GLcontext * ctx,
- struct gl_framebuffer *fb)
-{
- struct intel_renderbuffer *depthRb, *stencilRb;
-
- depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
- stencilRb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
-
- if (depthRb && stencilRb) {
- if (depthRb == stencilRb) {
- /* Using a user-created combined depth/stencil buffer.
- * Nothing to do.
- */
- ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_STENCIL_EXT);
- ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
- }
- else {
- /* Separate depth/stencil buffers, need to interleave now */
- ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_COMPONENT);
- ASSERT(stencilRb->Base._BaseFormat == GL_STENCIL_INDEX);
- /* may need to interleave depth/stencil now */
- if (depthRb->PairedStencil == stencilRb->Base.Name) {
- /* OK, the depth and stencil buffers are already interleaved */
- ASSERT(stencilRb->PairedDepth == depthRb->Base.Name);
- }
- else {
- /* need to setup new pairing/interleaving */
- if (depthRb->PairedStencil) {
- intel_unpair_depth_stencil(ctx, depthRb);
- }
- if (stencilRb->PairedDepth) {
- intel_unpair_depth_stencil(ctx, stencilRb);
- }
-
- ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
- ASSERT(stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
- stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
-
- /* establish new pairing: interleave stencil into depth buffer */
- map_regions(ctx, depthRb, stencilRb);
- _mesa_insert_stencil(ctx, &depthRb->Base, &stencilRb->Base);
- unmap_regions(ctx, depthRb, stencilRb);
- depthRb->PairedStencil = stencilRb->Base.Name;
- stencilRb->PairedDepth = depthRb->Base.Name;
- }
-
- }
- }
- else if (depthRb) {
- /* Depth buffer but no stencil buffer.
- * We'll use a GL_DEPTH24_STENCIL8 buffer and ignore the stencil bits.
- */
- /* can't assert this until storage is allocated:
- ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
- */
- /* undo any previous pairing */
- if (depthRb->PairedStencil) {
- intel_unpair_depth_stencil(ctx, depthRb);
- }
- }
- else if (stencilRb) {
- /* Stencil buffer but no depth buffer.
- * Since h/w doesn't typically support just 8bpp stencil w/out Z,
- * we'll use a GL_DEPTH24_STENCIL8 buffer and ignore the depth bits.
- */
- /* undo any previous pairing */
- if (stencilRb->PairedDepth) {
- intel_unpair_depth_stencil(ctx, stencilRb);
- }
- if (stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
- /* promote buffer to GL_DEPTH24_STENCIL8 for hw rendering */
- _mesa_promote_stencil(ctx, &stencilRb->Base);
- ASSERT(stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
- }
- }
-
- /* Finally, update the fb->_DepthBuffer and fb->_StencilBuffer fields */
- _mesa_update_depth_buffer(ctx, fb, BUFFER_DEPTH);
- if (depthRb && depthRb->PairedStencil)
- _mesa_update_stencil_buffer(ctx, fb, BUFFER_DEPTH);
- else
- _mesa_update_stencil_buffer(ctx, fb, BUFFER_STENCIL);
-
-
- /* The hardware should use fb->Attachment[BUFFER_DEPTH].Renderbuffer
- * first, if present, then fb->Attachment[BUFFER_STENCIL].Renderbuffer
- * if present.
- */
-}
+../intel/intel_depthstencil.c \ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_fbo.c b/src/mesa/drivers/dri/i915/intel_fbo.c
index 6f99f401c7..a19f86dcc5 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_fbo.c
+++ b/src/mesa/drivers/dri/i915/intel_fbo.c
@@ -1,687 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-
-#include "imports.h"
-#include "mtypes.h"
-#include "fbobject.h"
-#include "framebuffer.h"
-#include "renderbuffer.h"
-#include "context.h"
-#include "texformat.h"
-#include "texrender.h"
-
-#include "intel_context.h"
-#include "intel_buffers.h"
-#include "intel_depthstencil.h"
-#include "intel_fbo.h"
-#include "intel_mipmap_tree.h"
-#include "intel_regions.h"
-#include "intel_span.h"
-
-
-#define FILE_DEBUG_FLAG DEBUG_FBO
-
-#define INTEL_RB_CLASS 0x12345678
-
-
-/* XXX FBO: move this to intel_context.h (inlined) */
-/**
- * Return a gl_renderbuffer ptr casted to intel_renderbuffer.
- * NULL will be returned if the rb isn't really an intel_renderbuffer.
- * This is determined by checking the ClassID.
- */
-struct intel_renderbuffer *
-intel_renderbuffer(struct gl_renderbuffer *rb)
-{
- struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
- if (irb && irb->Base.ClassID == INTEL_RB_CLASS) {
- /*_mesa_warning(NULL, "Returning non-intel Rb\n");*/
- return irb;
- }
- else
- return NULL;
-}
-
-
-struct intel_renderbuffer *
-intel_get_renderbuffer(struct gl_framebuffer *fb, GLuint attIndex)
-{
- return intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
-}
-
-
-void
-intel_flip_renderbuffers(struct intel_framebuffer *intel_fb)
-{
- int current_page = intel_fb->pf_current_page;
- int next_page = (current_page + 1) % intel_fb->pf_num_pages;
- struct gl_renderbuffer *tmp_rb;
-
- /* Exchange renderbuffers if necessary but make sure their reference counts
- * are preserved.
- */
- if (intel_fb->color_rb[current_page] &&
- intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
- &intel_fb->color_rb[current_page]->Base) {
- tmp_rb = NULL;
- _mesa_reference_renderbuffer(&tmp_rb,
- intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
- tmp_rb = &intel_fb->color_rb[current_page]->Base;
- _mesa_reference_renderbuffer(
- &intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
- _mesa_reference_renderbuffer(&tmp_rb, NULL);
- }
-
- if (intel_fb->color_rb[next_page] &&
- intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
- &intel_fb->color_rb[next_page]->Base) {
- tmp_rb = NULL;
- _mesa_reference_renderbuffer(&tmp_rb,
- intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
- tmp_rb = &intel_fb->color_rb[next_page]->Base;
- _mesa_reference_renderbuffer(
- &intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
- _mesa_reference_renderbuffer(&tmp_rb, NULL);
- }
-}
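Page flipping above advances a front-buffer index modulo the number of flip pages and swaps which renderbuffer is attached where. A standalone sketch of the index cycling (values illustrative):

    #include <stdio.h>

    int main(void)
    {
        int pf_num_pages = 3;   /* e.g. triple buffering */
        int current_page = 0;

        for (int i = 0; i < 5; i++) {
            int next_page = (current_page + 1) % pf_num_pages;
            printf("flip %d: front page %d, back page %d\n",
                   i, current_page, next_page);
            current_page = next_page;
        }
        return 0;
    }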
-
-
-struct intel_region *
-intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
-{
- struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
-
- if (irb)
- return irb->region;
- else
- return NULL;
-}
-
-
-
-/**
- * Create a new framebuffer object.
- */
-static struct gl_framebuffer *
-intel_new_framebuffer(GLcontext * ctx, GLuint name)
-{
- /* Only drawable state in intel_framebuffer at this time, just use Mesa's
- * class
- */
- return _mesa_new_framebuffer(ctx, name);
-}
-
-
-static void
-intel_delete_renderbuffer(struct gl_renderbuffer *rb)
-{
- GET_CURRENT_CONTEXT(ctx);
- struct intel_context *intel = intel_context(ctx);
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
-
- ASSERT(irb);
-
- if (irb->PairedStencil || irb->PairedDepth) {
- intel_unpair_depth_stencil(ctx, irb);
- }
-
- if (intel && irb->region) {
- intel_region_release(&irb->region);
- }
-
- _mesa_free(irb);
-}
-
-
-
-/**
- * Return a pointer to a specific pixel in a renderbuffer.
- */
-static void *
-intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
- GLint x, GLint y)
-{
- /* By returning NULL we force all software rendering to go through
- * the span routines.
- */
- return NULL;
-}
-
-
-
-/**
- * Called via glRenderbufferStorageEXT() to set the format and allocate
- * storage for a user-created renderbuffer.
- */
-static GLboolean
-intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
- GLenum internalFormat,
- GLuint width, GLuint height)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- GLboolean softwareBuffer = GL_FALSE;
- int cpp;
-
- ASSERT(rb->Name != 0);
-
- switch (internalFormat) {
- case GL_R3_G3_B2:
- case GL_RGB4:
- case GL_RGB5:
- rb->_ActualFormat = GL_RGB5;
- rb->DataType = GL_UNSIGNED_BYTE;
- rb->RedBits = 5;
- rb->GreenBits = 6;
- rb->BlueBits = 5;
- cpp = 2;
- break;
- case GL_RGB:
- case GL_RGB8:
- case GL_RGB10:
- case GL_RGB12:
- case GL_RGB16:
- case GL_RGBA:
- case GL_RGBA2:
- case GL_RGBA4:
- case GL_RGB5_A1:
- case GL_RGBA8:
- case GL_RGB10_A2:
- case GL_RGBA12:
- case GL_RGBA16:
- rb->_ActualFormat = GL_RGBA8;
- rb->DataType = GL_UNSIGNED_BYTE;
- rb->RedBits = 8;
- rb->GreenBits = 8;
- rb->BlueBits = 8;
- rb->AlphaBits = 8;
- cpp = 4;
- break;
- case GL_STENCIL_INDEX:
- case GL_STENCIL_INDEX1_EXT:
- case GL_STENCIL_INDEX4_EXT:
- case GL_STENCIL_INDEX8_EXT:
- case GL_STENCIL_INDEX16_EXT:
- /* alloc a depth+stencil buffer */
- rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
- rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
- rb->StencilBits = 8;
- cpp = 4;
- break;
- case GL_DEPTH_COMPONENT16:
- rb->_ActualFormat = GL_DEPTH_COMPONENT16;
- rb->DataType = GL_UNSIGNED_SHORT;
- rb->DepthBits = 16;
- cpp = 2;
- break;
- case GL_DEPTH_COMPONENT:
- case GL_DEPTH_COMPONENT24:
- case GL_DEPTH_COMPONENT32:
- rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
- rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
- rb->DepthBits = 24;
- cpp = 4;
- break;
- case GL_DEPTH_STENCIL_EXT:
- case GL_DEPTH24_STENCIL8_EXT:
- rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
- rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
- rb->DepthBits = 24;
- rb->StencilBits = 8;
- cpp = 4;
- break;
- default:
- _mesa_problem(ctx,
- "Unexpected format in intel_alloc_renderbuffer_storage");
- return GL_FALSE;
- }
-
- intelFlush(ctx);
-
- /* free old region */
- if (irb->region) {
- intel_region_release(&irb->region);
- }
-
- /* allocate new memory region/renderbuffer */
- if (softwareBuffer) {
- return _mesa_soft_renderbuffer_storage(ctx, rb, internalFormat,
- width, height);
- }
- else {
- /* Choose a pitch to match hardware requirements:
- */
- GLuint pitch = ((cpp * width + 63) & ~63) / cpp;
-
- /* alloc hardware renderbuffer */
- DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width,
- height, pitch);
-
- irb->region = intel_region_alloc(intel->intelScreen, cpp, pitch, height);
- if (!irb->region)
- return GL_FALSE; /* out of memory? */
-
- ASSERT(irb->region->buffer);
-
- rb->Width = width;
- rb->Height = height;
-
- /* This sets the Get/PutRow/Value functions */
- intel_set_span_functions(&irb->Base);
-
- return GL_TRUE;
- }
-}
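The pitch chosen above rounds the row byte count up to a 64-byte multiple and converts it back to pixels. A standalone check of that arithmetic:

    #include <stdio.h>

    static unsigned aligned_pitch_pixels(unsigned cpp, unsigned width)
    {
        return ((cpp * width + 63) & ~63u) / cpp;
    }

    int main(void)
    {
        /* 4 bytes/pixel, 100 px wide: 400 bytes rounds up to 448 -> 112 px. */
        printf("%u\n", aligned_pitch_pixels(4, 100));
        /* 2 bytes/pixel, 33 px wide: 66 bytes rounds up to 128 -> 64 px. */
        printf("%u\n", aligned_pitch_pixels(2, 33));
        return 0;
    }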
-
-
-
-/**
- * Called for each hardware renderbuffer when a _window_ is resized.
- * Just update fields.
- * Not used for user-created renderbuffers!
- */
-static GLboolean
-intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
- GLenum internalFormat, GLuint width, GLuint height)
-{
- ASSERT(rb->Name == 0);
- rb->Width = width;
- rb->Height = height;
- rb->_ActualFormat = internalFormat;
-
- return GL_TRUE;
-}
-
-static void
-intel_resize_buffers(GLcontext *ctx, struct gl_framebuffer *fb,
- GLuint width, GLuint height)
-{
- struct intel_framebuffer *intel_fb = (struct intel_framebuffer*)fb;
- int i;
-
- _mesa_resize_framebuffer(ctx, fb, width, height);
-
- fb->Initialized = GL_TRUE; /* XXX remove someday */
-
- if (fb->Name != 0) {
- return;
- }
-
- /* Make sure all window system renderbuffers are up to date */
- for (i = 0; i < 3; i++) {
- struct gl_renderbuffer *rb = intel_fb->color_rb[i]
- ? &intel_fb->color_rb[i]->Base : NULL;
-
- /* only resize if present and size is changing */
- if (rb && (rb->Width != width || rb->Height != height)) {
- rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
- }
- }
-}
-
-static GLboolean
-intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
- GLenum internalFormat, GLuint width, GLuint height)
-{
- _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
- return GL_FALSE;
-}
-
-
-
-/**
- * Create a new intel_renderbuffer which corresponds to an on-screen window,
- * not a user-created renderbuffer.
- * \param width the screen width
- * \param height the screen height
- */
-struct intel_renderbuffer *
-intel_create_renderbuffer(GLenum intFormat, GLsizei width, GLsizei height,
- int offset, int pitch, int cpp, void *map)
-{
- GET_CURRENT_CONTEXT(ctx);
-
- struct intel_renderbuffer *irb;
- const GLuint name = 0;
-
- irb = CALLOC_STRUCT(intel_renderbuffer);
- if (!irb) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
- return NULL;
- }
-
- _mesa_init_renderbuffer(&irb->Base, name);
- irb->Base.ClassID = INTEL_RB_CLASS;
-
- switch (intFormat) {
- case GL_RGB5:
- irb->Base._ActualFormat = GL_RGB5;
- irb->Base._BaseFormat = GL_RGBA;
- irb->Base.RedBits = 5;
- irb->Base.GreenBits = 6;
- irb->Base.BlueBits = 5;
- irb->Base.DataType = GL_UNSIGNED_BYTE;
- cpp = 2;
- break;
- case GL_RGBA8:
- irb->Base._ActualFormat = GL_RGBA8;
- irb->Base._BaseFormat = GL_RGBA;
- irb->Base.RedBits = 8;
- irb->Base.GreenBits = 8;
- irb->Base.BlueBits = 8;
- irb->Base.AlphaBits = 8;
- irb->Base.DataType = GL_UNSIGNED_BYTE;
- cpp = 4;
- break;
- case GL_STENCIL_INDEX8_EXT:
- irb->Base._ActualFormat = GL_STENCIL_INDEX8_EXT;
- irb->Base._BaseFormat = GL_STENCIL_INDEX;
- irb->Base.StencilBits = 8;
- irb->Base.DataType = GL_UNSIGNED_BYTE;
- cpp = 1;
- break;
- case GL_DEPTH_COMPONENT16:
- irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
- irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
- irb->Base.DepthBits = 16;
- irb->Base.DataType = GL_UNSIGNED_SHORT;
- cpp = 2;
- break;
- case GL_DEPTH_COMPONENT24:
- irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
- irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
- irb->Base.DepthBits = 24;
- irb->Base.DataType = GL_UNSIGNED_INT;
- cpp = 4;
- break;
- case GL_DEPTH24_STENCIL8_EXT:
- irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
- irb->Base._BaseFormat = GL_DEPTH_STENCIL_EXT;
- irb->Base.DepthBits = 24;
- irb->Base.StencilBits = 8;
- irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
- cpp = 4;
- break;
- default:
- _mesa_problem(NULL,
- "Unexpected intFormat in intel_create_renderbuffer");
- return NULL;
- }
-
- irb->Base.InternalFormat = intFormat;
-
- /* intel-specific methods */
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_alloc_window_storage;
- irb->Base.GetPointer = intel_get_pointer;
- /* This sets the Get/PutRow/Value functions */
- intel_set_span_functions(&irb->Base);
-
- irb->pfMap = map;
- irb->pfPitch = pitch / cpp; /* in pixels */
-
-#if 00
- irb->region = intel_region_create_static(intel,
- DRM_MM_TT,
- offset, map, cpp, width, height);
-#endif
-
- return irb;
-}
-
-
-/**
- * Create a new renderbuffer object.
- * Typically called via glBindRenderbufferEXT().
- */
-static struct gl_renderbuffer *
-intel_new_renderbuffer(GLcontext * ctx, GLuint name)
-{
- /*struct intel_context *intel = intel_context(ctx); */
- struct intel_renderbuffer *irb;
-
- irb = CALLOC_STRUCT(intel_renderbuffer);
- if (!irb) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
- return NULL;
- }
-
- _mesa_init_renderbuffer(&irb->Base, name);
- irb->Base.ClassID = INTEL_RB_CLASS;
-
- /* intel-specific methods */
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
- irb->Base.GetPointer = intel_get_pointer;
- /* span routines set in alloc_storage function */
-
- return &irb->Base;
-}
-
-
-/**
- * Called via glBindFramebufferEXT().
- */
-static void
-intel_bind_framebuffer(GLcontext * ctx, GLenum target,
- struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
-{
- if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
- intel_draw_buffer(ctx, fb);
- /* Integer depth range depends on depth buffer bits */
- ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
- }
- else {
- /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
- }
-}
-
-
-/**
- * Called via glFramebufferRenderbufferEXT().
- */
-static void
-intel_framebuffer_renderbuffer(GLcontext * ctx,
- struct gl_framebuffer *fb,
- GLenum attachment, struct gl_renderbuffer *rb)
-{
- DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
-
- intelFlush(ctx);
-
- _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
- intel_draw_buffer(ctx, fb);
-}
-
-
-/**
- * When glFramebufferTexture[123]D is called this function sets up the
- * gl_renderbuffer wrapper around the texture image.
- * This will have the region info needed for hardware rendering.
- */
-static struct intel_renderbuffer *
-intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
-{
- const GLuint name = ~0; /* not significant, but distinct for debugging */
- struct intel_renderbuffer *irb;
-
- /* make an intel_renderbuffer to wrap the texture image */
- irb = CALLOC_STRUCT(intel_renderbuffer);
- if (!irb) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
- return NULL;
- }
-
- _mesa_init_renderbuffer(&irb->Base, name);
- irb->Base.ClassID = INTEL_RB_CLASS;
-
- if (texImage->TexFormat == &_mesa_texformat_argb8888) {
- irb->Base._ActualFormat = GL_RGBA8;
- irb->Base._BaseFormat = GL_RGBA;
- DBG("Render to RGBA8 texture OK\n");
- }
- else if (texImage->TexFormat == &_mesa_texformat_rgb565) {
- irb->Base._ActualFormat = GL_RGB5;
- irb->Base._BaseFormat = GL_RGB;
- DBG("Render to RGB5 texture OK\n");
- }
- else if (texImage->TexFormat == &_mesa_texformat_z16) {
- irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
- irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
- DBG("Render to DEPTH16 texture OK\n");
- }
- else {
- DBG("Render to texture BAD FORMAT %d\n",
- texImage->TexFormat->MesaFormat);
- _mesa_free(irb);
- return NULL;
- }
-
- irb->Base.InternalFormat = irb->Base._ActualFormat;
- irb->Base.Width = texImage->Width;
- irb->Base.Height = texImage->Height;
- irb->Base.DataType = GL_UNSIGNED_BYTE; /* FBO XXX fix */
- irb->Base.RedBits = texImage->TexFormat->RedBits;
- irb->Base.GreenBits = texImage->TexFormat->GreenBits;
- irb->Base.BlueBits = texImage->TexFormat->BlueBits;
- irb->Base.AlphaBits = texImage->TexFormat->AlphaBits;
- irb->Base.DepthBits = texImage->TexFormat->DepthBits;
-
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_nop_alloc_storage;
- intel_set_span_functions(&irb->Base);
-
- irb->RenderToTexture = GL_TRUE;
-
- return irb;
-}
-
-
-/**
- * Called by glFramebufferTexture[123]DEXT() (and other places) to
- * prepare for rendering into texture memory. This might be called
- * many times to choose different texture levels, cube faces, etc
- * before intel_finish_render_texture() is ever called.
- */
-static void
-intel_render_texture(GLcontext * ctx,
- struct gl_framebuffer *fb,
- struct gl_renderbuffer_attachment *att)
-{
- struct gl_texture_image *newImage
- = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
- struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
- struct intel_texture_image *intel_image;
- GLuint imageOffset;
-
- (void) fb;
-
- ASSERT(newImage);
-
- if (!irb) {
- irb = intel_wrap_texture(ctx, newImage);
- if (irb) {
- /* bind the wrapper to the attachment point */
- _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
- }
- else {
- /* fallback to software rendering */
- _mesa_render_texture(ctx, fb, att);
- return;
- }
- }
-
- DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
- _glthread_GetID(),
- att->Texture->Name, newImage->Width, newImage->Height,
- irb->Base.RefCount);
-
- /* point the renderbuffer's region to the texture image region */
- intel_image = intel_texture_image(newImage);
- if (irb->region != intel_image->mt->region) {
- if (irb->region)
- intel_region_release(&irb->region);
- intel_region_reference(&irb->region, intel_image->mt->region);
- }
-
- /* compute offset of the particular 2D image within the texture region */
- imageOffset = intel_miptree_image_offset(intel_image->mt,
- att->CubeMapFace,
- att->TextureLevel);
-
- if (att->Texture->Target == GL_TEXTURE_3D) {
- const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
- att->TextureLevel);
- imageOffset += offsets[att->Zoffset];
- }
-
- /* store that offset in the region */
- intel_image->mt->region->draw_offset = imageOffset;
-
- /* update drawing region, etc */
- intel_draw_buffer(ctx, fb);
-}
-
-
-/**
- * Called by Mesa when rendering to a texture is done.
- */
-static void
-intel_finish_render_texture(GLcontext * ctx,
- struct gl_renderbuffer_attachment *att)
-{
- struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
-
- DBG("End render texture (tid %x) tex %u\n", _glthread_GetID(), att->Texture->Name);
-
- if (irb) {
- /* just release the region */
- intel_region_release(&irb->region);
- }
- else if (att->Renderbuffer) {
- /* software fallback */
- _mesa_finish_render_texture(ctx, att);
- /* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
- }
-}
-
-
-/**
- * Do one-time context initializations related to GL_EXT_framebuffer_object.
- * Hook in device driver functions.
- */
-void
-intel_fbo_init(struct intel_context *intel)
-{
- intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
- intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
- intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
- intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
- intel->ctx.Driver.RenderTexture = intel_render_texture;
- intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
- intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
-}
+../intel/intel_fbo.c \ No newline at end of file
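
The render-to-texture path in intel_render_texture()/intel_miptree_image_offset() above reduces to a small piece of offset arithmetic: the value stored in region->draw_offset is the mipmap level's byte offset plus, for cube maps, a per-face pixel offset scaled by bytes per pixel. A minimal stand-alone sketch of that cube-map case follows; the struct layout, helper name, and sample numbers are simplified stand-ins for illustration, not the driver's real intel_mipmap_tree.

#include <stdio.h>

/* Simplified stand-in for one mipmap level of an intel_mipmap_tree. */
struct level_info {
   unsigned level_offset;     /* byte offset of the level within the region */
   unsigned image_offset[6];  /* per-face offsets, stored in pixels */
   unsigned nr_images;
};

/* Cube-map case: byte offset = level offset + face offset (pixels) * cpp. */
static unsigned
image_byte_offset(const struct level_info *lvl, unsigned face, unsigned cpp)
{
   return lvl->level_offset + lvl->image_offset[face] * cpp;
}

int main(void)
{
   struct level_info lvl = { .level_offset = 4096,
                             .image_offset = { 0, 64, 128, 192, 256, 320 },
                             .nr_images = 6 };

   /* Rendering to cube face 2 of a 4-byte-per-pixel texture:
    * 4096 + 128 * 4 = 4608 bytes into the region. */
   printf("draw_offset = %u bytes\n", image_byte_offset(&lvl, 2, 4));
   return 0;
}
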
diff --git a/src/mesa/drivers/dri/i915/intel_mipmap_tree.c b/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
index 2c167a9ab7..242fed0b6a 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
+++ b/src/mesa/drivers/dri/i915/intel_mipmap_tree.c
@@ -1,388 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "intel_context.h"
-#include "intel_mipmap_tree.h"
-#include "intel_regions.h"
-#include "enums.h"
-
-#define FILE_DEBUG_FLAG DEBUG_MIPTREE
-
-static GLenum
-target_to_target(GLenum target)
-{
- switch (target) {
- case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
- case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
- case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
- return GL_TEXTURE_CUBE_MAP_ARB;
- default:
- return target;
- }
-}
-
-struct intel_mipmap_tree *
-intel_miptree_create(struct intel_context *intel,
- GLenum target,
- GLenum internal_format,
- GLuint first_level,
- GLuint last_level,
- GLuint width0,
- GLuint height0,
- GLuint depth0, GLuint cpp, GLuint compress_byte)
-{
- GLboolean ok;
- struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
-
- DBG("%s target %s format %s level %d..%d\n", __FUNCTION__,
- _mesa_lookup_enum_by_nr(target),
- _mesa_lookup_enum_by_nr(internal_format), first_level, last_level);
-
- mt->target = target_to_target(target);
- mt->internal_format = internal_format;
- mt->first_level = first_level;
- mt->last_level = last_level;
- mt->width0 = width0;
- mt->height0 = height0;
- mt->depth0 = depth0;
- mt->cpp = compress_byte ? compress_byte : cpp;
- mt->compressed = compress_byte ? 1 : 0;
- mt->refcount = 1;
-
- switch (intel->intelScreen->deviceID) {
- case PCI_CHIP_I945_G:
- case PCI_CHIP_I945_GM:
- case PCI_CHIP_I945_GME:
- case PCI_CHIP_G33_G:
- case PCI_CHIP_Q33_G:
- case PCI_CHIP_Q35_G:
- ok = i945_miptree_layout(mt);
- break;
- case PCI_CHIP_I915_G:
- case PCI_CHIP_I915_GM:
- case PCI_CHIP_I830_M:
- case PCI_CHIP_I855_GM:
- case PCI_CHIP_I865_G:
- default:
- /* All the i830 chips and the i915 use this layout:
- */
- ok = i915_miptree_layout(mt);
- break;
- }
-
- if (ok) {
- if (!mt->compressed) {
- int align;
-
- if (intel->intelScreen->ttm) {
- /* XXX: Align pitch to multiple of 64 bytes for now to allow
- * render-to-texture to work in all cases. This should probably be
- * replaced at some point by some scheme to only do this when really
- * necessary.
- */
- align = 63;
- } else {
- align = 3;
- }
-
- mt->pitch = (mt->pitch * cpp + align) & ~align;
-
- /* XXX: At least the i915 seems very upset when the pitch is a multiple
- * of 1024 and sometimes 512 bytes - performance can drop by several
- * times. Go to the next multiple of the required alignment for now.
- */
- if (!(mt->pitch & 511))
- mt->pitch += align + 1;
-
- mt->pitch /= cpp;
- }
-
- mt->region = intel_region_alloc(intel->intelScreen,
- mt->cpp, mt->pitch, mt->total_height);
- }
-
- if (!mt->region) {
- free(mt);
- return NULL;
- }
-
- return mt;
-}
-
-
-void
-intel_miptree_reference(struct intel_mipmap_tree **dst,
- struct intel_mipmap_tree *src)
-{
- src->refcount++;
- *dst = src;
- DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
-}
-
-void
-intel_miptree_release(struct intel_context *intel,
- struct intel_mipmap_tree **mt)
-{
- if (!*mt)
- return;
-
- DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
- if (--(*mt)->refcount <= 0) {
- GLuint i;
-
- DBG("%s deleting %p\n", __FUNCTION__, *mt);
-
- intel_region_release(&((*mt)->region));
-
- for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
- if ((*mt)->level[i].image_offset)
- free((*mt)->level[i].image_offset);
-
- free(*mt);
- }
- *mt = NULL;
-}
-
-
-
-
-/* Can the image be pulled into a unified mipmap tree? This mirrors
- * the completeness test in a lot of ways.
- *
- * Not sure whether I want to pass gl_texture_image here.
- */
-GLboolean
-intel_miptree_match_image(struct intel_mipmap_tree *mt,
- struct gl_texture_image *image,
- GLuint face, GLuint level)
-{
- /* Images with borders are never pulled into mipmap trees.
- */
- if (image->Border)
- return GL_FALSE;
-
- if (image->InternalFormat != mt->internal_format ||
- image->IsCompressed != mt->compressed)
- return GL_FALSE;
-
- /* Test image dimensions against the base level image adjusted for
- * minification. This will also catch images not present in the
- * tree, changed targets, etc.
- */
- if (image->Width != mt->level[level].width ||
- image->Height != mt->level[level].height ||
- image->Depth != mt->level[level].depth)
- return GL_FALSE;
-
- return GL_TRUE;
-}
-
-
-void
-intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
- GLuint level,
- GLuint nr_images,
- GLuint x, GLuint y, GLuint w, GLuint h, GLuint d)
-{
-
- mt->level[level].width = w;
- mt->level[level].height = h;
- mt->level[level].depth = d;
- mt->level[level].level_offset = (x + y * mt->pitch) * mt->cpp;
- mt->level[level].nr_images = nr_images;
-
- DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
- level, w, h, d, x, y, mt->level[level].level_offset);
-
- /* Not sure when this would happen, but anyway:
- */
- if (mt->level[level].image_offset) {
- free(mt->level[level].image_offset);
- mt->level[level].image_offset = NULL;
- }
-
- assert(nr_images);
-
- mt->level[level].image_offset = malloc(nr_images * sizeof(GLuint));
- mt->level[level].image_offset[0] = 0;
-}
-
-
-
-void
-intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
- GLuint level, GLuint img, GLuint x, GLuint y)
-{
- if (img == 0 && level == 0)
- assert(x == 0 && y == 0);
-
- assert(img < mt->level[level].nr_images);
-
- mt->level[level].image_offset[img] = (x + y * mt->pitch);
-
- DBG("%s level %d img %d pos %d,%d image_offset %x\n",
- __FUNCTION__, level, img, x, y, mt->level[level].image_offset[img]);
-}
-
-
-/* Although we use the image_offset[] array to store relative offsets
- * to cube faces, Mesa doesn't know anything about this and expects
- * each cube face to be treated as a separate image.
- *
- * These functions present that view to mesa:
- */
-const GLuint *
-intel_miptree_depth_offsets(struct intel_mipmap_tree *mt, GLuint level)
-{
- static const GLuint zero = 0;
-
- if (mt->target != GL_TEXTURE_3D || mt->level[level].nr_images == 1)
- return &zero;
- else
- return mt->level[level].image_offset;
-}
-
-
-GLuint
-intel_miptree_image_offset(struct intel_mipmap_tree * mt,
- GLuint face, GLuint level)
-{
- if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
- return (mt->level[level].level_offset +
- mt->level[level].image_offset[face] * mt->cpp);
- else
- return mt->level[level].level_offset;
-}
-
-
-
-/**
- * Map a teximage in a mipmap tree.
- * \param row_stride returns row stride in bytes
- * \param image_stride returns image stride in bytes (for 3D textures).
- * \return address of mapping
- */
-GLubyte *
-intel_miptree_image_map(struct intel_context * intel,
- struct intel_mipmap_tree * mt,
- GLuint face,
- GLuint level,
- GLuint * row_stride, GLuint * image_offsets)
-{
- DBG("%s \n", __FUNCTION__);
-
- if (row_stride)
- *row_stride = mt->pitch * mt->cpp;
-
- if (image_offsets)
- memcpy(image_offsets, mt->level[level].image_offset,
- mt->level[level].depth * sizeof(GLuint));
-
- return (intel_region_map(intel->intelScreen, mt->region) +
- intel_miptree_image_offset(mt, face, level));
-}
-
-void
-intel_miptree_image_unmap(struct intel_context *intel,
- struct intel_mipmap_tree *mt)
-{
- DBG("%s\n", __FUNCTION__);
- intel_region_unmap(intel->intelScreen, mt->region);
-}
-
-
-
-/* Upload data for a particular image.
- */
-void
-intel_miptree_image_data(struct intel_context *intel,
- struct intel_mipmap_tree *dst,
- GLuint face,
- GLuint level,
- void *src,
- GLuint src_row_pitch, GLuint src_image_pitch)
-{
- GLuint depth = dst->level[level].depth;
- GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
- const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
- GLuint i;
- GLuint height = 0;
-
- DBG("%s\n", __FUNCTION__);
- for (i = 0; i < depth; i++) {
- height = dst->level[level].height;
- if(dst->compressed)
- height /= 4;
- intel_region_data(intel->intelScreen, dst->region,
- dst_offset + dst_depth_offset[i], /* dst_offset */
- 0, 0, /* dstx, dsty */
- src,
- src_row_pitch,
- 0, 0, /* source x, y */
- dst->level[level].width, height); /* width, height */
-
- src += src_image_pitch * dst->cpp;
- }
-}
-
-extern GLuint intel_compressed_alignment(GLenum);
-/* Copy mipmap image between trees
- */
-void
-intel_miptree_image_copy(struct intel_context *intel,
- struct intel_mipmap_tree *dst,
- GLuint face, GLuint level,
- struct intel_mipmap_tree *src)
-{
- GLuint width = src->level[level].width;
- GLuint height = src->level[level].height;
- GLuint depth = src->level[level].depth;
- GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
- GLuint src_offset = intel_miptree_image_offset(src, face, level);
- const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
- const GLuint *src_depth_offset = intel_miptree_depth_offsets(src, level);
- GLuint i;
-
- if (dst->compressed) {
- GLuint alignment = intel_compressed_alignment(dst->internal_format);
- height = (height + 3) / 4;
- width = ((width + alignment - 1) & ~(alignment - 1));
- }
-
- for (i = 0; i < depth; i++) {
- intel_region_copy(intel->intelScreen,
- dst->region, dst_offset + dst_depth_offset[i],
- 0,
- 0,
- src->region, src_offset + src_depth_offset[i],
- 0, 0, width, height);
- }
-
-}
+../intel/intel_mipmap_tree.c \ No newline at end of file
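
The pitch rounding in intel_miptree_create() above is the part most worth double-checking: the pixel pitch is converted to bytes, rounded up to a 64-byte boundary when TTM is in use (4 bytes otherwise), nudged off multiples of 512 bytes to avoid the performance cliff noted in the comment, and converted back to pixels. A minimal stand-alone sketch of the same arithmetic, with an illustrative helper name and sample values:

#include <stdio.h>

static unsigned
round_pitch(unsigned pitch_pixels, unsigned cpp, int using_ttm)
{
   unsigned align = using_ttm ? 63 : 3;      /* mask for 64- or 4-byte rounding */
   unsigned pitch = (pitch_pixels * cpp + align) & ~align;   /* now in bytes */

   if (!(pitch & 511))                       /* multiple of 512 bytes? */
      pitch += align + 1;                    /* step past it */

   return pitch / cpp;                       /* back to pixels */
}

int main(void)
{
   /* A 512-pixel-wide RGBA tree under TTM: 2048 bytes lands on a 512-byte
    * multiple, so it is bumped to 2112 bytes, i.e. 528 pixels. */
   printf("pitch = %u pixels\n", round_pitch(512, 4, 1));
   return 0;
}
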
diff --git a/src/mesa/drivers/dri/i915/intel_regions.c b/src/mesa/drivers/dri/i915/intel_regions.c
index 7799fdbb11..89b2f15c10 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_regions.c
+++ b/src/mesa/drivers/dri/i915/intel_regions.c
@@ -1,483 +1 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/* Provide additional functionality on top of bufmgr buffers:
- * - 2d semantics and blit operations
- * - refcounting of buffers for multiple images in a buffer.
- * - refcounting of buffer mappings.
- * - some logic for moving the buffers to the best memory pools for
- * given operations.
- *
- * Most of this is to make it easier to implement the fixed-layout
- * mipmap tree required by intel hardware in the face of GL's
- * programming interface where each image can be specified in random
- * order and it isn't clear what layout the tree should have until the
- * last moment.
- */
-
-#include "intel_context.h"
-#include "intel_regions.h"
-#include "intel_blit.h"
-#include "intel_buffer_objects.h"
-#include "dri_bufmgr.h"
-#include "intel_bufmgr_ttm.h"
-#include "intel_batchbuffer.h"
-
-#define FILE_DEBUG_FLAG DEBUG_REGION
-
-void
-intel_region_idle(intelScreenPrivate *intelScreen, struct intel_region *region)
-{
- DBG("%s\n", __FUNCTION__);
- /* XXX: Using this function is likely bogus -- it ought to only have been
- * used before a map, anyway, but leave this cheap implementation of it
- * for now.
- */
- if (region && region->buffer) {
- /* Mapping it for read will ensure that any acceleration to the region
- * would have landed already.
- */
- dri_bo_map(region->buffer, GL_TRUE);
- dri_bo_unmap(region->buffer);
- }
-}
-
-/* XXX: Thread safety?
- */
-GLubyte *
-intel_region_map(intelScreenPrivate *intelScreen, struct intel_region *region)
-{
- DBG("%s\n", __FUNCTION__);
- if (!region->map_refcount++) {
- if (region->pbo)
- intel_region_cow(intelScreen, region);
-
- dri_bo_map(region->buffer, GL_TRUE);
- region->map = region->buffer->virtual;
- }
-
- return region->map;
-}
-
-void
-intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *region)
-{
- DBG("%s\n", __FUNCTION__);
- if (!--region->map_refcount) {
- dri_bo_unmap(region->buffer);
- region->map = NULL;
- }
-}
-
-struct intel_region *
-intel_region_alloc(intelScreenPrivate *intelScreen,
- GLuint cpp, GLuint pitch, GLuint height)
-{
- struct intel_region *region = calloc(sizeof(*region), 1);
-
- DBG("%s\n", __FUNCTION__);
-
- region->cpp = cpp;
- region->pitch = pitch;
- region->height = height; /* needed? */
- region->refcount = 1;
-
- region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
- pitch * cpp * height, 64, DRM_BO_FLAG_MEM_TT);
- return region;
-}
-
-void
-intel_region_reference(struct intel_region **dst, struct intel_region *src)
-{
- assert(*dst == NULL);
- if (src) {
- src->refcount++;
- *dst = src;
- }
-}
-
-void
-intel_region_release(struct intel_region **region)
-{
- if (!*region)
- return;
-
- DBG("%s %d\n", __FUNCTION__, (*region)->refcount - 1);
-
- ASSERT((*region)->refcount > 0);
- (*region)->refcount--;
-
- if ((*region)->refcount == 0) {
- assert((*region)->map_refcount == 0);
-
- if ((*region)->pbo)
- (*region)->pbo->region = NULL;
- (*region)->pbo = NULL;
- dri_bo_unreference((*region)->buffer);
- free(*region);
- }
- *region = NULL;
-}
-
-
-struct intel_region *
-intel_region_create_static(intelScreenPrivate *intelScreen,
- GLuint mem_type,
- unsigned int bo_handle,
- GLuint offset,
- void *virtual,
- GLuint cpp, GLuint pitch, GLuint height)
-{
- struct intel_region *region = calloc(sizeof(*region), 1);
- DBG("%s\n", __FUNCTION__);
-
- region->cpp = cpp;
- region->pitch = pitch;
- region->height = height; /* needed? */
- region->refcount = 1;
-
- if (intelScreen->ttm) {
- assert(bo_handle != -1);
- region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
- "static region",
- bo_handle);
- } else {
- region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
- "static region",
- offset, pitch * cpp * height,
- virtual,
- DRM_BO_FLAG_MEM_TT);
- }
-
- return region;
-}
-
-
-
-void
-intel_region_update_static(intelScreenPrivate *intelScreen,
- struct intel_region *region,
- GLuint mem_type,
- unsigned int bo_handle,
- GLuint offset,
- void *virtual,
- GLuint cpp, GLuint pitch, GLuint height)
-{
- DBG("%s\n", __FUNCTION__);
-
- region->cpp = cpp;
- region->pitch = pitch;
- region->height = height; /* needed? */
-
- /*
- * We use a "shared" buffer type to indicate buffers created and
- * shared by others.
- */
-
- dri_bo_unreference(region->buffer);
- if (intelScreen->ttm) {
- assert(bo_handle != -1);
- region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
- "static region",
- bo_handle);
- } else {
- region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
- "static region",
- offset, pitch * cpp * height,
- virtual,
- DRM_BO_FLAG_MEM_TT);
- }
-}
-
-
-
-/*
- * XXX Move this into core Mesa?
- */
-static void
-_mesa_copy_rect(GLubyte * dst,
- GLuint cpp,
- GLuint dst_pitch,
- GLuint dst_x,
- GLuint dst_y,
- GLuint width,
- GLuint height,
- const GLubyte * src,
- GLuint src_pitch, GLuint src_x, GLuint src_y)
-{
- GLuint i;
-
- dst_pitch *= cpp;
- src_pitch *= cpp;
- dst += dst_x * cpp;
- src += src_x * cpp;
- dst += dst_y * dst_pitch;
- src += src_y * src_pitch;
- width *= cpp;
-
- if (width == dst_pitch && width == src_pitch)
- memcpy(dst, src, height * width);
- else {
- for (i = 0; i < height; i++) {
- memcpy(dst, src, width);
- dst += dst_pitch;
- src += src_pitch;
- }
- }
-}
-
-
-/* Upload data to a rectangular sub-region. Lots of choices how to do this:
- *
- * - memcpy by span to current destination
- * - upload data as new buffer and blit
- *
- * Currently always memcpy.
- */
-void
-intel_region_data(intelScreenPrivate *intelScreen,
- struct intel_region *dst,
- GLuint dst_offset,
- GLuint dstx, GLuint dsty,
- const void *src, GLuint src_pitch,
- GLuint srcx, GLuint srcy, GLuint width, GLuint height)
-{
- struct intel_context *intel = intelScreenContext(intelScreen);
-
- DBG("%s\n", __FUNCTION__);
-
- if (intel == NULL)
- return;
-
- if (dst->pbo) {
- if (dstx == 0 &&
- dsty == 0 && width == dst->pitch && height == dst->height)
- intel_region_release_pbo(intelScreen, dst);
- else
- intel_region_cow(intelScreen, dst);
- }
-
-
- LOCK_HARDWARE(intel);
-
- _mesa_copy_rect(intel_region_map(intelScreen, dst) + dst_offset,
- dst->cpp,
- dst->pitch,
- dstx, dsty, width, height, src, src_pitch, srcx, srcy);
-
- intel_region_unmap(intelScreen, dst);
-
- UNLOCK_HARDWARE(intel);
-
-}
-
-/* Copy rectangular sub-regions. Need better logic about when to
- * push buffers into AGP - will currently do so whenever possible.
- */
-void
-intel_region_copy(intelScreenPrivate *intelScreen,
- struct intel_region *dst,
- GLuint dst_offset,
- GLuint dstx, GLuint dsty,
- struct intel_region *src,
- GLuint src_offset,
- GLuint srcx, GLuint srcy, GLuint width, GLuint height)
-{
- struct intel_context *intel = intelScreenContext(intelScreen);
-
- DBG("%s\n", __FUNCTION__);
-
- if (intel == NULL)
- return;
-
- if (dst->pbo) {
- if (dstx == 0 &&
- dsty == 0 && width == dst->pitch && height == dst->height)
- intel_region_release_pbo(intelScreen, dst);
- else
- intel_region_cow(intelScreen, dst);
- }
-
- assert(src->cpp == dst->cpp);
-
- intelEmitCopyBlit(intel,
- dst->cpp,
- src->pitch, src->buffer, src_offset,
- dst->pitch, dst->buffer, dst_offset,
- srcx, srcy, dstx, dsty, width, height,
- GL_COPY);
-}
-
-/* Fill a rectangular sub-region. Need better logic about when to
- * push buffers into AGP - will currently do so whenever possible.
- */
-void
-intel_region_fill(intelScreenPrivate *intelScreen,
- struct intel_region *dst,
- GLuint dst_offset,
- GLuint dstx, GLuint dsty,
- GLuint width, GLuint height, GLuint color)
-{
- struct intel_context *intel = intelScreenContext(intelScreen);
-
- DBG("%s\n", __FUNCTION__);
-
- if (intel == NULL)
- return;
-
- if (dst->pbo) {
- if (dstx == 0 &&
- dsty == 0 && width == dst->pitch && height == dst->height)
- intel_region_release_pbo(intelScreen, dst);
- else
- intel_region_cow(intelScreen, dst);
- }
-
- intelEmitFillBlit(intel,
- dst->cpp,
- dst->pitch, dst->buffer, dst_offset,
- dstx, dsty, width, height, color);
-}
-
-/* Attach to a pbo, discarding our data. Effectively zero-copy upload
- * the pbo's data.
- */
-void
-intel_region_attach_pbo(intelScreenPrivate *intelScreen,
- struct intel_region *region,
- struct intel_buffer_object *pbo)
-{
- if (region->pbo == pbo)
- return;
-
- /* If there is already a pbo attached, break the cow tie now.
- * Don't call intel_region_release_pbo() as that would
- * unnecessarily allocate a new buffer we would have to immediately
- * discard.
- */
- if (region->pbo) {
- region->pbo->region = NULL;
- region->pbo = NULL;
- }
-
- if (region->buffer) {
- dri_bo_unreference(region->buffer);
- region->buffer = NULL;
- }
-
- region->pbo = pbo;
- region->pbo->region = region;
- dri_bo_reference(pbo->buffer);
- region->buffer = pbo->buffer;
-}
-
-
-/* Break the COW tie to the pbo and allocate a new buffer.
- * The pbo gets to keep the data.
- */
-void
-intel_region_release_pbo(intelScreenPrivate *intelScreen,
- struct intel_region *region)
-{
- assert(region->buffer == region->pbo->buffer);
- region->pbo->region = NULL;
- region->pbo = NULL;
- dri_bo_unreference(region->buffer);
- region->buffer = NULL;
-
- region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
- region->pitch * region->cpp * region->height,
- 64, DRM_BO_FLAG_MEM_TT);
-}
-
-/* Break the COW tie to the pbo. Both the pbo and the region end up
- * with a copy of the data.
- */
-void
-intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
-{
- struct intel_context *intel = intelScreenContext(intelScreen);
- struct intel_buffer_object *pbo = region->pbo;
-
- if (intel == NULL)
- return;
-
- intel_region_release_pbo(intelScreen, region);
-
- assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
-
- DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);
-
- /* Now blit from the texture buffer to the new buffer:
- */
-
- intel_batchbuffer_flush(intel->batch);
-
- if (!intel->locked) {
- LOCK_HARDWARE(intel);
- intelEmitCopyBlit(intel,
- region->cpp,
- region->pitch,
- region->buffer, 0,
- region->pitch,
- pbo->buffer, 0,
- 0, 0, 0, 0,
- region->pitch, region->height,
- GL_COPY);
-
- intel_batchbuffer_flush(intel->batch);
- UNLOCK_HARDWARE(intel);
- }
- else {
- intelEmitCopyBlit(intel,
- region->cpp,
- region->pitch,
- region->buffer, 0,
- region->pitch,
- pbo->buffer, 0,
- 0, 0, 0, 0,
- region->pitch, region->height,
- GL_COPY);
-
- intel_batchbuffer_flush(intel->batch);
- }
-}
-
-dri_bo *
-intel_region_buffer(intelScreenPrivate *intelScreen,
- struct intel_region *region, GLuint flag)
-{
- if (region->pbo) {
- if (flag == INTEL_WRITE_PART)
- intel_region_cow(intelScreen, region);
- else if (flag == INTEL_WRITE_FULL)
- intel_region_release_pbo(intelScreen, region);
- }
-
- return region->buffer;
-}
+../intel/intel_regions.c \ No newline at end of file
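
intel_region_map()/intel_region_unmap() above implement a refcounted mapping: the buffer object is mapped only when the count goes from 0 to 1 and unmapped only when it drops back to 0, so nested map/unmap pairs are cheap. A self-contained sketch of that pattern, with plain malloc'd memory standing in for the dri_bo (names and sizes are illustrative only):

#include <assert.h>
#include <stdlib.h>

struct region {
   void *backing;       /* stands in for region->buffer (a dri_bo) */
   void *map;           /* current CPU mapping, NULL while unmapped */
   int map_refcount;
};

static void *
region_map(struct region *r)
{
   if (!r->map_refcount++)
      r->map = r->backing;     /* the real code calls dri_bo_map() here */
   return r->map;
}

static void
region_unmap(struct region *r)
{
   assert(r->map_refcount > 0);
   if (!--r->map_refcount)
      r->map = NULL;           /* the real code calls dri_bo_unmap() here */
}

int main(void)
{
   struct region r = { .backing = malloc(4096) };

   void *a = region_map(&r);   /* first map: actually maps the buffer */
   void *b = region_map(&r);   /* nested map: refcount only */
   assert(a == b);

   region_unmap(&r);
   region_unmap(&r);           /* last unmap drops the mapping */
   assert(r.map == NULL);

   free(r.backing);
   return 0;
}
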
diff --git a/src/mesa/drivers/dri/i915/intel_screen.c b/src/mesa/drivers/dri/i915/intel_screen.c
index 25f5efa7bc..f2db48272b 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_screen.c
+++ b/src/mesa/drivers/dri/i915/intel_screen.c
@@ -1,945 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "glheader.h"
-#include "context.h"
-#include "framebuffer.h"
-#include "matrix.h"
-#include "renderbuffer.h"
-#include "simple_list.h"
-#include "utils.h"
-#include "vblank.h"
-#include "xmlpool.h"
-
-
-#include "intel_screen.h"
-
-#include "intel_buffers.h"
-#include "intel_tex.h"
-#include "intel_span.h"
-#include "intel_tris.h"
-#include "intel_ioctl.h"
-#include "intel_fbo.h"
-
-#include "i830_dri.h"
-#include "dri_bufmgr.h"
-#include "intel_regions.h"
-#include "intel_batchbuffer.h"
-
-#include "intel_bufmgr_ttm.h"
-
-PUBLIC const char __driConfigOptions[] =
- DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
- DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
- DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
- DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
- DRI_CONF_FORCE_S3TC_ENABLE(false)
- DRI_CONF_ALLOW_LARGE_TEXTURES(1)
- DRI_CONF_SECTION_END DRI_CONF_END;
- const GLuint __driNConfigOptions = 4;
-
-#ifdef USE_NEW_INTERFACE
- static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
-#endif /*USE_NEW_INTERFACE */
-
- extern const struct dri_extension card_extensions[];
- extern const struct dri_extension ttm_extensions[];
-
-/**
- * Map all the memory regions described by the screen.
- * \return GL_TRUE if success, GL_FALSE if error.
- */
-GLboolean
-intelMapScreenRegions(__DRIscreenPrivate * sPriv)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
-
- if (intelScreen->front.handle) {
- if (drmMap(sPriv->fd,
- intelScreen->front.handle,
- intelScreen->front.size,
- (drmAddress *) & intelScreen->front.map) != 0) {
- _mesa_problem(NULL, "drmMap(frontbuffer) failed!");
- return GL_FALSE;
- }
- }
- else {
- _mesa_warning(NULL, "no front buffer handle in intelMapScreenRegions!");
- }
-
- if (0)
- _mesa_printf("Back 0x%08x ", intelScreen->back.handle);
- if (drmMap(sPriv->fd,
- intelScreen->back.handle,
- intelScreen->back.size,
- (drmAddress *) & intelScreen->back.map) != 0) {
- intelUnmapScreenRegions(intelScreen);
- return GL_FALSE;
- }
-
- if (intelScreen->third.handle) {
- if (0)
- _mesa_printf("Third 0x%08x ", intelScreen->third.handle);
- if (drmMap(sPriv->fd,
- intelScreen->third.handle,
- intelScreen->third.size,
- (drmAddress *) & intelScreen->third.map) != 0) {
- intelUnmapScreenRegions(intelScreen);
- return GL_FALSE;
- }
- }
-
- if (0)
- _mesa_printf("Depth 0x%08x ", intelScreen->depth.handle);
- if (drmMap(sPriv->fd,
- intelScreen->depth.handle,
- intelScreen->depth.size,
- (drmAddress *) & intelScreen->depth.map) != 0) {
- intelUnmapScreenRegions(intelScreen);
- return GL_FALSE;
- }
-
- if (0)
- _mesa_printf("TEX 0x%08x ", intelScreen->tex.handle);
- if (intelScreen->tex.size != 0) {
- if (drmMap(sPriv->fd,
- intelScreen->tex.handle,
- intelScreen->tex.size,
- (drmAddress *) & intelScreen->tex.map) != 0) {
- intelUnmapScreenRegions(intelScreen);
- return GL_FALSE;
- }
- }
-
- if (0)
- printf("Mappings: front: %p back: %p third: %p depth: %p tex: %p\n",
- intelScreen->front.map,
- intelScreen->back.map, intelScreen->third.map,
- intelScreen->depth.map, intelScreen->tex.map);
- return GL_TRUE;
-}
-
-/** Driver-specific fence emit implementation for the fake memory manager. */
-static unsigned int
-intel_fence_emit(void *private)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
- unsigned int fence;
-
- /* XXX: Need to emit a flush, if we haven't already (at least with the
- * current batchbuffer implementation, we have).
- */
-
- fence = intelEmitIrqLocked(intelScreen);
-
- return fence;
-}
-
-/** Driver-specific fence wait implementation for the fake memory manager. */
-static int
-intel_fence_wait(void *private, unsigned int cookie)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
-
- intelWaitIrq(intelScreen, cookie);
-
- return 0;
-}
-
-static struct intel_region *
-intel_recreate_static(intelScreenPrivate *intelScreen,
- struct intel_region *region,
- intelRegion *region_desc,
- GLuint mem_type)
-{
- if (region) {
- intel_region_update_static(intelScreen, region, mem_type,
- region_desc->bo_handle, region_desc->offset,
- region_desc->map, intelScreen->cpp,
- region_desc->pitch / intelScreen->cpp,
- intelScreen->height);
- } else {
- region = intel_region_create_static(intelScreen, mem_type,
- region_desc->bo_handle,
- region_desc->offset,
- region_desc->map, intelScreen->cpp,
- region_desc->pitch / intelScreen->cpp,
- intelScreen->height);
- }
-
- assert(region->buffer != NULL);
-
- return region;
-}
-
-
-/* Create intel_region structs to describe the static front,back,depth
- * buffers created by the xserver.
- *
- * Although FBOs mean we no longer use these as render targets in
- * all circumstances, they won't go away until the back and depth
- * buffers become private, and the front and rotated buffers will
- * remain even then.
- *
- * Note that these don't allocate video memory, just describe
- * allocations already made by the X server.
- */
-static void
-intel_recreate_static_regions(intelScreenPrivate *intelScreen)
-{
- intelScreen->front_region =
- intel_recreate_static(intelScreen,
- intelScreen->front_region,
- &intelScreen->front,
- DRM_BO_FLAG_MEM_TT);
-
- /* The rotated region is only used for old DDXes that didn't handle rotation
- * on their own.
- */
- if (intelScreen->driScrnPriv->ddx_version.minor < 8) {
- intelScreen->rotated_region =
- intel_recreate_static(intelScreen,
- intelScreen->rotated_region,
- &intelScreen->rotated,
- DRM_BO_FLAG_MEM_TT);
- }
-
- intelScreen->back_region =
- intel_recreate_static(intelScreen,
- intelScreen->back_region,
- &intelScreen->back,
- DRM_BO_FLAG_MEM_TT);
-
- if (intelScreen->third.handle) {
- intelScreen->third_region =
- intel_recreate_static(intelScreen,
- intelScreen->third_region,
- &intelScreen->third,
- DRM_BO_FLAG_MEM_TT);
- }
-
- /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
- * private buffers.
- */
- intelScreen->depth_region =
- intel_recreate_static(intelScreen,
- intelScreen->depth_region,
- &intelScreen->depth,
- DRM_BO_FLAG_MEM_TT);
-}
-
-/**
- * Use the information in the sarea to update the screen parameters
- * related to screen rotation. Needs to be called locked.
- */
-void
-intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
-
- intelUnmapScreenRegions(intelScreen);
- intelUpdateScreenFromSAREA(intelScreen, sarea);
- if (!intelMapScreenRegions(sPriv)) {
- fprintf(stderr, "ERROR Remapping screen regions!!!\n");
- }
- intel_recreate_static_regions(intelScreen);
-}
-
-
-void
-intelUnmapScreenRegions(intelScreenPrivate * intelScreen)
-{
-#define REALLY_UNMAP 1
- if (intelScreen->front.map) {
-#if REALLY_UNMAP
- if (drmUnmap(intelScreen->front.map, intelScreen->front.size) != 0)
- printf("drmUnmap front failed!\n");
-#endif
- intelScreen->front.map = NULL;
- }
- if (intelScreen->back.map) {
-#if REALLY_UNMAP
- if (drmUnmap(intelScreen->back.map, intelScreen->back.size) != 0)
- printf("drmUnmap back failed!\n");
-#endif
- intelScreen->back.map = NULL;
- }
- if (intelScreen->third.map) {
-#if REALLY_UNMAP
- if (drmUnmap(intelScreen->third.map, intelScreen->third.size) != 0)
- printf("drmUnmap third failed!\n");
-#endif
- intelScreen->third.map = NULL;
- }
- if (intelScreen->depth.map) {
-#if REALLY_UNMAP
- drmUnmap(intelScreen->depth.map, intelScreen->depth.size);
- intelScreen->depth.map = NULL;
-#endif
- }
- if (intelScreen->tex.map) {
-#if REALLY_UNMAP
- drmUnmap(intelScreen->tex.map, intelScreen->tex.size);
- intelScreen->tex.map = NULL;
-#endif
- }
-}
-
-
-static void
-intelPrintDRIInfo(intelScreenPrivate * intelScreen,
- __DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
-{
- fprintf(stderr, "*** Front size: 0x%x offset: 0x%x pitch: %d\n",
- intelScreen->front.size, intelScreen->front.offset,
- intelScreen->front.pitch);
- fprintf(stderr, "*** Back size: 0x%x offset: 0x%x pitch: %d\n",
- intelScreen->back.size, intelScreen->back.offset,
- intelScreen->back.pitch);
- fprintf(stderr, "*** Depth size: 0x%x offset: 0x%x pitch: %d\n",
- intelScreen->depth.size, intelScreen->depth.offset,
- intelScreen->depth.pitch);
- fprintf(stderr, "*** Rotated size: 0x%x offset: 0x%x pitch: %d\n",
- intelScreen->rotated.size, intelScreen->rotated.offset,
- intelScreen->rotated.pitch);
- fprintf(stderr, "*** Texture size: 0x%x offset: 0x%x\n",
- intelScreen->tex.size, intelScreen->tex.offset);
- fprintf(stderr, "*** Memory : 0x%x\n", gDRIPriv->mem);
-}
-
-
-static void
-intelPrintSAREA(const drmI830Sarea * sarea)
-{
- fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width,
- sarea->height);
- fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
- fprintf(stderr,
- "SAREA: front offset: 0x%08x size: 0x%x handle: 0x%x\n",
- sarea->front_offset, sarea->front_size,
- (unsigned) sarea->front_handle);
- fprintf(stderr,
- "SAREA: back offset: 0x%08x size: 0x%x handle: 0x%x\n",
- sarea->back_offset, sarea->back_size,
- (unsigned) sarea->back_handle);
- fprintf(stderr, "SAREA: depth offset: 0x%08x size: 0x%x handle: 0x%x\n",
- sarea->depth_offset, sarea->depth_size,
- (unsigned) sarea->depth_handle);
- fprintf(stderr, "SAREA: tex offset: 0x%08x size: 0x%x handle: 0x%x\n",
- sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
- fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
- fprintf(stderr,
- "SAREA: rotated offset: 0x%08x size: 0x%x\n",
- sarea->rotated_offset, sarea->rotated_size);
- fprintf(stderr, "SAREA: rotated pitch: %d\n", sarea->rotated_pitch);
-}
-
-
-/**
- * A number of the screen parameters are obtained/computed from
- * information in the SAREA. This function updates those parameters.
- */
-void
-intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
- drmI830Sarea * sarea)
-{
- intelScreen->width = sarea->width;
- intelScreen->height = sarea->height;
-
- intelScreen->front.offset = sarea->front_offset;
- intelScreen->front.pitch = sarea->pitch * intelScreen->cpp;
- intelScreen->front.handle = sarea->front_handle;
- intelScreen->front.size = sarea->front_size;
-
- intelScreen->back.offset = sarea->back_offset;
- intelScreen->back.pitch = sarea->pitch * intelScreen->cpp;
- intelScreen->back.handle = sarea->back_handle;
- intelScreen->back.size = sarea->back_size;
-
- if (intelScreen->driScrnPriv->ddx_version.minor >= 8) {
- intelScreen->third.offset = sarea->third_offset;
- intelScreen->third.pitch = sarea->pitch * intelScreen->cpp;
- intelScreen->third.handle = sarea->third_handle;
- intelScreen->third.size = sarea->third_size;
- }
-
- intelScreen->depth.offset = sarea->depth_offset;
- intelScreen->depth.pitch = sarea->pitch * intelScreen->cpp;
- intelScreen->depth.handle = sarea->depth_handle;
- intelScreen->depth.size = sarea->depth_size;
-
- if (intelScreen->driScrnPriv->ddx_version.minor >= 9) {
- intelScreen->front.bo_handle = sarea->front_bo_handle;
- intelScreen->back.bo_handle = sarea->back_bo_handle;
- intelScreen->third.bo_handle = sarea->third_bo_handle;
- intelScreen->depth.bo_handle = sarea->depth_bo_handle;
- } else {
- intelScreen->front.bo_handle = -1;
- intelScreen->back.bo_handle = -1;
- intelScreen->third.bo_handle = -1;
- intelScreen->depth.bo_handle = -1;
- }
-
- intelScreen->tex.offset = sarea->tex_offset;
- intelScreen->logTextureGranularity = sarea->log_tex_granularity;
- intelScreen->tex.handle = sarea->tex_handle;
- intelScreen->tex.size = sarea->tex_size;
-
- intelScreen->rotated.offset = sarea->rotated_offset;
- intelScreen->rotated.pitch = sarea->rotated_pitch * intelScreen->cpp;
- intelScreen->rotated.size = sarea->rotated_size;
- intelScreen->current_rotation = sarea->rotation;
- matrix23Rotate(&intelScreen->rotMatrix,
- sarea->width, sarea->height, sarea->rotation);
- intelScreen->rotatedWidth = sarea->virtualX;
- intelScreen->rotatedHeight = sarea->virtualY;
-
- if (0)
- intelPrintSAREA(sarea);
-}
-
-static const __DRItexOffsetExtension intelTexOffsetExtension = {
- { __DRI_TEX_OFFSET },
- intelSetTexOffset,
-};
-
-static const __DRIextension *intelExtensions[] = {
- &driReadDrawableExtension,
- &driCopySubBufferExtension.base,
- &driSwapControlExtension.base,
- &driFrameTrackingExtension.base,
- &driMediaStreamCounterExtension.base,
- &intelTexOffsetExtension.base,
- NULL
-};
-
-
-static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
-{
- intelScreenPrivate *intelScreen;
- I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;
- drmI830Sarea *sarea;
-
- if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
- fprintf(stderr,
- "\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
- return GL_FALSE;
- }
-
- /* Allocate the private area */
- intelScreen = (intelScreenPrivate *) CALLOC(sizeof(intelScreenPrivate));
- if (!intelScreen) {
- fprintf(stderr, "\nERROR! Allocating private area failed\n");
- return GL_FALSE;
- }
- /* parse information in __driConfigOptions */
- driParseOptionInfo(&intelScreen->optionCache,
- __driConfigOptions, __driNConfigOptions);
-
- intelScreen->driScrnPriv = sPriv;
- sPriv->private = (void *) intelScreen;
- intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
- sarea = (drmI830Sarea *)
- (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
-
- intelScreen->deviceID = gDRIPriv->deviceID;
- if (intelScreen->deviceID == PCI_CHIP_I865_G)
- intelScreen->maxBatchSize = 4096;
- else
- intelScreen->maxBatchSize = BATCH_SZ;
-
- intelScreen->mem = gDRIPriv->mem;
- intelScreen->cpp = gDRIPriv->cpp;
-
- switch (gDRIPriv->bitsPerPixel) {
- case 16:
- intelScreen->fbFormat = DV_PF_565;
- break;
- case 32:
- intelScreen->fbFormat = DV_PF_8888;
- break;
- default:
- exit(1);
- break;
- }
-
- intelUpdateScreenFromSAREA(intelScreen, sarea);
-
- if (!intelMapScreenRegions(sPriv)) {
- fprintf(stderr, "\nERROR! mapping regions\n");
- _mesa_free(intelScreen);
- sPriv->private = NULL;
- return GL_FALSE;
- }
-
- intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
-
- if (0)
- intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
-
- intelScreen->drmMinor = sPriv->drm_version.minor;
-
- /* Determine if IRQs are active */
- {
- int ret;
- drmI830GetParam gp;
-
- gp.param = I830_PARAM_IRQ_ACTIVE;
- gp.value = &intelScreen->irq_active;
-
- ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
- &gp, sizeof(gp));
- if (ret) {
- fprintf(stderr, "drmI830GetParam: %d\n", ret);
- return GL_FALSE;
- }
- }
-
- /* Determine if batchbuffers are allowed */
- {
- int ret;
- drmI830GetParam gp;
-
- gp.param = I830_PARAM_ALLOW_BATCHBUFFER;
- gp.value = &intelScreen->allow_batchbuffer;
-
- ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
- &gp, sizeof(gp));
- if (ret) {
- fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
- return GL_FALSE;
- }
- }
-
- sPriv->extensions = intelExtensions;
-
- /* If we've got a new enough DDX that's initializing TTM and giving us
- * object handles for the shared buffers, use that.
- */
- intelScreen->ttm = GL_FALSE;
- if (getenv("INTEL_NO_TTM") == NULL &&
- intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
- intelScreen->drmMinor >= 11 &&
- intelScreen->front.bo_handle != -1) {
- intelScreen->bufmgr = intel_bufmgr_ttm_init(sPriv->fd,
- DRM_FENCE_TYPE_EXE,
- DRM_FENCE_TYPE_EXE |
- DRM_I915_FENCE_TYPE_RW,
- BATCH_SZ);
- if (intelScreen->bufmgr != NULL)
- intelScreen->ttm = GL_TRUE;
- }
- /* Otherwise, use the classic buffer manager. */
- if (intelScreen->bufmgr == NULL) {
- if (intelScreen->tex.size == 0) {
- fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
- __func__, __LINE__);
- return GL_FALSE;
- }
- fprintf(stderr, "[%s:%u] Failed to init TTM buffer manager, falling back"
- " to classic.\n", __func__, __LINE__);
- intelScreen->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
- intelScreen->tex.map,
- intelScreen->tex.size,
- intel_fence_emit,
- intel_fence_wait,
- intelScreen);
- }
-
- intel_recreate_static_regions(intelScreen);
-
- return GL_TRUE;
-}
-
-
-static void
-intelDestroyScreen(__DRIscreenPrivate * sPriv)
-{
- intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
-
- intelUnmapScreenRegions(intelScreen);
-
- dri_bufmgr_destroy(intelScreen->bufmgr);
- FREE(intelScreen);
- sPriv->private = NULL;
-}
-
-
-/**
- * This is called when we need to set up GL rendering to a new X window.
- */
-static GLboolean
-intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
- __DRIdrawablePrivate * driDrawPriv,
- const __GLcontextModes * mesaVis, GLboolean isPixmap)
-{
- intelScreenPrivate *screen = (intelScreenPrivate *) driScrnPriv->private;
-
- if (isPixmap) {
- return GL_FALSE; /* not implemented */
- }
- else {
- GLboolean swStencil = (mesaVis->stencilBits > 0 &&
- mesaVis->depthBits != 24);
- GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
-
- struct intel_framebuffer *intel_fb = CALLOC_STRUCT(intel_framebuffer);
-
- if (!intel_fb)
- return GL_FALSE;
-
- _mesa_initialize_framebuffer(&intel_fb->Base, mesaVis);
-
- /* setup the hardware-based renderbuffers */
- {
- intel_fb->color_rb[0]
- = intel_create_renderbuffer(rgbFormat,
- screen->width, screen->height,
- screen->front.offset,
- screen->front.pitch,
- screen->cpp,
- screen->front.map);
- intel_set_span_functions(&intel_fb->color_rb[0]->Base);
- _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_FRONT_LEFT,
- &intel_fb->color_rb[0]->Base);
- }
-
- if (mesaVis->doubleBufferMode) {
- intel_fb->color_rb[1]
- = intel_create_renderbuffer(rgbFormat,
- screen->width, screen->height,
- screen->back.offset,
- screen->back.pitch,
- screen->cpp,
- screen->back.map);
- intel_set_span_functions(&intel_fb->color_rb[1]->Base);
- _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT,
- &intel_fb->color_rb[1]->Base);
-
- if (screen->third.handle) {
- struct gl_renderbuffer *tmp_rb = NULL;
-
- intel_fb->color_rb[2]
- = intel_create_renderbuffer(rgbFormat,
- screen->width, screen->height,
- screen->third.offset,
- screen->third.pitch,
- screen->cpp,
- screen->third.map);
- intel_set_span_functions(&intel_fb->color_rb[2]->Base);
- _mesa_reference_renderbuffer(&tmp_rb, &intel_fb->color_rb[2]->Base);
- }
- }
-
- if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
- /* combined depth/stencil buffer */
- struct intel_renderbuffer *depthStencilRb
- = intel_create_renderbuffer(GL_DEPTH24_STENCIL8_EXT,
- screen->width, screen->height,
- screen->depth.offset,
- screen->depth.pitch,
- screen->cpp, /* 4! */
- screen->depth.map);
- intel_set_span_functions(&depthStencilRb->Base);
- /* note: bind RB to two attachment points */
- _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH,
- &depthStencilRb->Base);
- _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_STENCIL,
- &depthStencilRb->Base);
- }
- else if (mesaVis->depthBits == 16) {
- /* just 16-bit depth buffer, no hw stencil */
- struct intel_renderbuffer *depthRb
- = intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
- screen->width, screen->height,
- screen->depth.offset,
- screen->depth.pitch,
- screen->cpp, /* 2! */
- screen->depth.map);
- intel_set_span_functions(&depthRb->Base);
- _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH, &depthRb->Base);
- }
-
- /* now add any/all software-based renderbuffers we may need */
- _mesa_add_soft_renderbuffers(&intel_fb->Base,
- GL_FALSE, /* never sw color */
- GL_FALSE, /* never sw depth */
- swStencil, mesaVis->accumRedBits > 0,
- GL_FALSE, /* never sw alpha */
- GL_FALSE /* never sw aux */ );
- driDrawPriv->driverPrivate = (void *) intel_fb;
-
- return GL_TRUE;
- }
-}
-
-static void
-intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
-{
- _mesa_unreference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate)));
-}
-
-
-/**
- * Get information about previous buffer swaps.
- */
-static int
-intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
-{
- struct intel_framebuffer *intel_fb;
-
- if ((dPriv == NULL) || (dPriv->driverPrivate == NULL)
- || (sInfo == NULL)) {
- return -1;
- }
-
- intel_fb = dPriv->driverPrivate;
- sInfo->swap_count = intel_fb->swap_count;
- sInfo->swap_ust = intel_fb->swap_ust;
- sInfo->swap_missed_count = intel_fb->swap_missed_count;
-
- sInfo->swap_missed_usage = (sInfo->swap_missed_count != 0)
- ? driCalculateSwapUsage(dPriv, 0, intel_fb->swap_missed_ust)
- : 0.0;
-
- return 0;
-}
-
-
-/* There are probably better ways to do this, such as an
- * init-designated function to register chipids and createcontext
- * functions.
- */
-extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
- __DRIcontextPrivate * driContextPriv,
- void *sharedContextPrivate);
-
-extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
- __DRIcontextPrivate * driContextPriv,
- void *sharedContextPrivate);
-
-
-
-
-static GLboolean
-intelCreateContext(const __GLcontextModes * mesaVis,
- __DRIcontextPrivate * driContextPriv,
- void *sharedContextPrivate)
-{
- __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
- intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
-
- switch (intelScreen->deviceID) {
- /* Don't deal with i830 until texture work complete:
- */
- case PCI_CHIP_845_G:
- case PCI_CHIP_I830_M:
- case PCI_CHIP_I855_GM:
- case PCI_CHIP_I865_G:
- return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
-
- case PCI_CHIP_I915_G:
- case PCI_CHIP_I915_GM:
- case PCI_CHIP_I945_G:
- case PCI_CHIP_I945_GM:
- case PCI_CHIP_I945_GME:
- case PCI_CHIP_G33_G:
- case PCI_CHIP_Q35_G:
- case PCI_CHIP_Q33_G:
- return i915CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
-
- default:
- fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
- return GL_FALSE;
- }
-}
-
-
-static const struct __DriverAPIRec intelAPI = {
- .DestroyScreen = intelDestroyScreen,
- .CreateContext = intelCreateContext,
- .DestroyContext = intelDestroyContext,
- .CreateBuffer = intelCreateBuffer,
- .DestroyBuffer = intelDestroyBuffer,
- .SwapBuffers = intelSwapBuffers,
- .MakeCurrent = intelMakeCurrent,
- .UnbindContext = intelUnbindContext,
- .GetSwapInfo = intelGetSwapInfo,
- .GetMSC = driGetMSC32,
- .GetDrawableMSC = driDrawableGetMSC32,
- .WaitForMSC = driWaitForMSC32,
- .WaitForSBC = NULL,
- .SwapBuffersMSC = NULL,
- .CopySubBuffer = intelCopySubBuffer,
- .setTexOffset = intelSetTexOffset,
-};
-
-
-static __GLcontextModes *
-intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
- unsigned stencil_bits, GLboolean have_back_buffer)
-{
- __GLcontextModes *modes;
- __GLcontextModes *m;
- unsigned num_modes;
- unsigned depth_buffer_factor;
- unsigned back_buffer_factor;
- GLenum fb_format;
- GLenum fb_type;
-
- /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
- * support pageflipping at all.
- */
- static const GLenum back_buffer_modes[] = {
- GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
- };
-
- u_int8_t depth_bits_array[3];
- u_int8_t stencil_bits_array[3];
-
-
- depth_bits_array[0] = 0;
- depth_bits_array[1] = depth_bits;
- depth_bits_array[2] = depth_bits;
-
- /* Just like with the accumulation buffer, always provide some modes
- * with a stencil buffer. It will be a sw fallback, but some apps won't
- * care about that.
- */
- stencil_bits_array[0] = 0;
- stencil_bits_array[1] = 0;
- if (depth_bits == 24)
- stencil_bits_array[1] = (stencil_bits == 0) ? 8 : stencil_bits;
-
- stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;
-
- depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
- back_buffer_factor = (have_back_buffer) ? 3 : 1;
-
- num_modes = depth_buffer_factor * back_buffer_factor * 4;
-
- if (pixel_bits == 16) {
- fb_format = GL_RGB;
- fb_type = GL_UNSIGNED_SHORT_5_6_5;
- }
- else {
- fb_format = GL_BGRA;
- fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
- }
-
- modes =
- (*dri_interface->createContextModes) (num_modes,
- sizeof(__GLcontextModes));
- m = modes;
- if (!driFillInModes(&m, fb_format, fb_type,
- depth_bits_array, stencil_bits_array,
- depth_buffer_factor, back_buffer_modes,
- back_buffer_factor, GLX_TRUE_COLOR)) {
- fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
- __LINE__);
- return NULL;
- }
- if (!driFillInModes(&m, fb_format, fb_type,
- depth_bits_array, stencil_bits_array,
- depth_buffer_factor, back_buffer_modes,
- back_buffer_factor, GLX_DIRECT_COLOR)) {
- fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
- __LINE__);
- return NULL;
- }
-
- /* Mark the visual as slow if there are "fake" stencil bits.
- */
- for (m = modes; m != NULL; m = m->next) {
- if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
- m->visualRating = GLX_SLOW_CONFIG;
- }
- }
-
- return modes;
-}
-
-
-/**
- * This is the driver specific part of the createNewScreen entry point.
- *
- * \todo maybe fold this into intelInitDriver
- *
- * \return the __GLcontextModes supported by this driver
- */
-PUBLIC __GLcontextModes *__driDriverInitScreen(__DRIscreenPrivate *psp)
-{
- static const __DRIversion ddx_expected = { 1, 5, 0 };
- static const __DRIversion dri_expected = { 4, 0, 0 };
- static const __DRIversion drm_expected = { 1, 5, 0 };
- I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;
-
- psp->DriverAPI = intelAPI;
-
- if (!driCheckDriDdxDrmVersions2("i915",
- &psp->dri_version, &dri_expected,
- &psp->ddx_version, &ddx_expected,
- &psp->drm_version, &drm_expected)) {
- return NULL;
- }
-
- /* Calling driInitExtensions here, with a NULL context pointer,
- * does not actually enable the extensions. It just makes sure
- * that all the dispatch offsets for all the extensions that
- * *might* be enabled are known. This is needed because the
- * dispatch offsets need to be known when _mesa_context_create is
- * called, but we can't enable the extensions until we have a
- * context pointer.
- *
- * Hello chicken. Hello egg. How are you two today?
- */
- driInitExtensions(NULL, card_extensions, GL_FALSE);
- driInitExtensions(NULL, ttm_extensions, GL_FALSE);
-
- if (!intelInitDriver(psp))
- return NULL;
-
- return intelFillInModes(dri_priv->cpp * 8,
- (dri_priv->cpp == 2) ? 16 : 24,
- (dri_priv->cpp == 2) ? 0 : 8, 1);
-}
-
-struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen)
-{
- /*
- * This should probably change to have the screen allocate a dummy
- * context at screen creation. For now just use the current context.
- */
-
- GET_CURRENT_CONTEXT(ctx);
- if (ctx == NULL) {
- _mesa_problem(NULL, "No current context in intelScreenContext\n");
- return NULL;
- }
- return intel_context(ctx);
-}
-
+../intel/intel_screen.c \ No newline at end of file
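
The buffer-manager choice in intelInitDriver() above comes down to a single predicate: TTM is attempted only when the INTEL_NO_TTM override is unset, the DDX minor version is at least 9, the DRM minor version is at least 11, and the X server supplied a buffer-object handle for the front buffer; anything else falls back to the classic manager built on the static texture aperture. A minimal sketch of that check (function name and sample values are illustrative):

#include <stdio.h>
#include <stdlib.h>

static int
want_ttm(int ddx_minor, int drm_minor, int front_bo_handle)
{
   return getenv("INTEL_NO_TTM") == NULL &&
          ddx_minor >= 9 &&
          drm_minor >= 11 &&
          front_bo_handle != -1;
}

int main(void)
{
   /* e.g. DDX 1.9 and DRM 1.11 with a shared front-buffer handle */
   printf("use TTM: %d\n", want_ttm(9, 11, 42));
   return 0;
}
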
diff --git a/src/mesa/drivers/dri/i915/intel_span.c b/src/mesa/drivers/dri/i915/intel_span.c
index d1f8ef06be..05e5e8e583 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_span.c
+++ b/src/mesa/drivers/dri/i915/intel_span.c
@@ -1,409 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "glheader.h"
-#include "macros.h"
-#include "mtypes.h"
-#include "colormac.h"
-
-#include "intel_fbo.h"
-#include "intel_screen.h"
-#include "intel_span.h"
-#include "intel_regions.h"
-#include "intel_ioctl.h"
-#include "intel_tex.h"
-
-#include "swrast/swrast.h"
-
-/*
- break intelWriteRGBASpan_ARGB8888
-*/
-
-#undef DBG
-#define DBG 0
-
-#define LOCAL_VARS \
- struct intel_context *intel = intel_context(ctx); \
- struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
- const GLint yScale = irb->RenderToTexture ? 1 : -1; \
- const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
- GLubyte *buf = (GLubyte *) irb->pfMap \
- + (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
- GLuint p; \
- assert(irb->pfMap);\
- (void) p;
-
-/* XXX FBO: this is identical to the macro in spantmp2.h except we get
- * the cliprect info from the context, not the driDrawable.
- * Move this into spantmp2.h someday.
- */
-#define HW_CLIPLOOP() \
- do { \
- int _nc = intel->numClipRects; \
- while ( _nc-- ) { \
- int minx = intel->pClipRects[_nc].x1 - intel->drawX; \
- int miny = intel->pClipRects[_nc].y1 - intel->drawY; \
- int maxx = intel->pClipRects[_nc].x2 - intel->drawX; \
- int maxy = intel->pClipRects[_nc].y2 - intel->drawY;
-
-
-
-
-#define Y_FLIP(_y) ((_y) * yScale + yBias)
-
-#define HW_LOCK()
-
-#define HW_UNLOCK()
-
-/* 16 bit, RGB565 color spanline and pixel functions
- */
-#define SPANTMP_PIXEL_FMT GL_RGB
-#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
-
-#define TAG(x) intel##x##_RGB565
-#define TAG2(x,y) intel##x##_RGB565##y
-#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
-#include "spantmp2.h"
-
-/* 32 bit, ARGB8888 color spanline and pixel functions
- */
-#define SPANTMP_PIXEL_FMT GL_BGRA
-#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
-
-#define TAG(x) intel##x##_ARGB8888
-#define TAG2(x,y) intel##x##_ARGB8888##y
-#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
-#include "spantmp2.h"
-
-
-#define LOCAL_DEPTH_VARS \
- struct intel_context *intel = intel_context(ctx); \
- struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
- const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
- const GLint yScale = irb->RenderToTexture ? 1 : -1; \
- const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
- char *buf = (char *) irb->pfMap/*XXX use region->map*/ + \
- (intel->drawY * pitch + intel->drawX) * irb->region->cpp;
-
-
-#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
-
-/**
- ** 16-bit depthbuffer functions.
- **/
-#define WRITE_DEPTH( _x, _y, d ) \
- ((GLushort *)buf)[(_x) + (_y) * pitch] = d;
-
-#define READ_DEPTH( d, _x, _y ) \
- d = ((GLushort *)buf)[(_x) + (_y) * pitch];
-
-
-#define TAG(x) intel##x##_z16
-#include "depthtmp.h"
-
-
-/**
- ** 24/8-bit interleaved depth/stencil functions
- ** Note: we're actually reading back combined depth+stencil values.
- ** The wrappers in main/depthstencil.c are used to extract the depth
- ** and stencil values.
- **/
-/* Change ZZZS -> SZZZ */
-#define WRITE_DEPTH( _x, _y, d ) { \
- GLuint tmp = ((d) >> 8) | ((d) << 24); \
- ((GLuint *)buf)[(_x) + (_y) * pitch] = tmp; \
-}
-
-/* Change SZZZ -> ZZZS */
-#define READ_DEPTH( d, _x, _y ) { \
- GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
- d = (tmp << 8) | (tmp >> 24); \
-}
-
-#define TAG(x) intel##x##_z24_s8
-#include "depthtmp.h"
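/* Illustrative sketch added by the editor (not part of the removed file):
 * WRITE_DEPTH and READ_DEPTH above are exact inverses.  With depth
 * 0xABCDEF in the top 24 bits and stencil 0x12 in the low byte (ZZZS),
 * the buffer holds 0x12ABCDEF (SZZZ) and reading swizzles it back.
 */
#include <assert.h>
#include <stdint.h>

static void
z24_s8_swizzle_roundtrip_example(void)
{
   uint32_t d = 0xABCDEF12u;                           /* ZZZS              */
   uint32_t stored = (d >> 8) | (d << 24);             /* WRITE_DEPTH: SZZZ */
   uint32_t readback = (stored << 8) | (stored >> 24); /* READ_DEPTH: ZZZS  */

   assert(stored == 0x12ABCDEFu);
   assert(readback == d);
}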
-
-
-/**
- ** 8-bit stencil function (XXX FBO: This is obsolete)
- **/
-#define WRITE_STENCIL( _x, _y, d ) { \
- GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
- tmp &= 0xffffff; \
- tmp |= ((d) << 24); \
- ((GLuint *) buf)[(_x) + (_y) * pitch] = tmp; \
-}
-
-#define READ_STENCIL( d, _x, _y ) \
- d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;
-
-#define TAG(x) intel##x##_z24_s8
-#include "stenciltmp.h"
-
-
-
-/**
- * Map or unmap all the renderbuffers which we may need during
- * software rendering.
- * XXX in the future, we could probably convey extra information to
- * reduce the number of mappings needed. I.e. if doing a glReadPixels
- * from the depth buffer, we really only need one mapping.
- *
- * XXX Rewrite this function someday.
- * We can probably just loop over all the renderbuffer attachments,
- * map/unmap all of them, and not worry about the _ColorDrawBuffers
- * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
- */
-static void
-intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
-{
- GLcontext *ctx = &intel->ctx;
- GLuint i, j;
- struct intel_renderbuffer *irb;
-
- /* color draw buffers */
- for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
- for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
- struct gl_renderbuffer *rb =
- ctx->DrawBuffer->_ColorDrawBuffers[i][j];
- irb = intel_renderbuffer(rb);
- if (irb) {
- /* this is a user-created intel_renderbuffer */
- if (irb->region) {
- if (map)
- intel_region_map(intel->intelScreen, irb->region);
- else
- intel_region_unmap(intel->intelScreen, irb->region);
- irb->pfMap = irb->region->map;
- irb->pfPitch = irb->region->pitch;
- }
- }
- }
- }
-
- /* check for render to textures */
- for (i = 0; i < BUFFER_COUNT; i++) {
- struct gl_renderbuffer_attachment *att =
- ctx->DrawBuffer->Attachment + i;
- struct gl_texture_object *tex = att->Texture;
- if (tex) {
- /* render to texture */
- ASSERT(att->Renderbuffer);
- if (map) {
- struct gl_texture_image *texImg;
- texImg = tex->Image[att->CubeMapFace][att->TextureLevel];
- intel_tex_map_images(intel, intel_texture_object(tex));
- }
- else {
- intel_tex_unmap_images(intel, intel_texture_object(tex));
- }
- }
- }
-
- /* color read buffers */
- irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
- if (irb && irb->region) {
- if (map)
- intel_region_map(intel->intelScreen, irb->region);
- else
- intel_region_unmap(intel->intelScreen, irb->region);
- irb->pfMap = irb->region->map;
- irb->pfPitch = irb->region->pitch;
- }
-
- /* Account for front/back color page flipping.
- * The span routines use the pfMap and pfPitch fields which will
- * swap the front/back region map/pitch if we're page flipped.
- * Do this after mapping, above, so the map field is valid.
- */
-#if 0
- if (map && ctx->DrawBuffer->Name == 0) {
- struct intel_renderbuffer *irbFront
- = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
- struct intel_renderbuffer *irbBack
- = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
- if (irbBack) {
- /* double buffered */
- if (intel->sarea->pf_current_page == 0) {
- irbFront->pfMap = irbFront->region->map;
- irbFront->pfPitch = irbFront->region->pitch;
- irbBack->pfMap = irbBack->region->map;
- irbBack->pfPitch = irbBack->region->pitch;
- }
- else {
- irbFront->pfMap = irbBack->region->map;
- irbFront->pfPitch = irbBack->region->pitch;
- irbBack->pfMap = irbFront->region->map;
- irbBack->pfPitch = irbFront->region->pitch;
- }
- }
- }
-#endif
-
- /* depth buffer (Note wrapper!) */
- if (ctx->DrawBuffer->_DepthBuffer) {
- irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
- if (irb && irb->region && irb->Base.Name != 0) {
- if (map) {
- intel_region_map(intel->intelScreen, irb->region);
- irb->pfMap = irb->region->map;
- irb->pfPitch = irb->region->pitch;
- }
- else {
- intel_region_unmap(intel->intelScreen, irb->region);
- irb->pfMap = NULL;
- irb->pfPitch = 0;
- }
- }
- }
-
- /* stencil buffer (Note wrapper!) */
- if (ctx->DrawBuffer->_StencilBuffer) {
- irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
- if (irb && irb->region && irb->Base.Name != 0) {
- if (map) {
- intel_region_map(intel->intelScreen, irb->region);
- irb->pfMap = irb->region->map;
- irb->pfPitch = irb->region->pitch;
- }
- else {
- intel_region_unmap(intel->intelScreen, irb->region);
- irb->pfMap = NULL;
- irb->pfPitch = 0;
- }
- }
- }
-}
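/* Minimal sketch added by the editor (not part of the removed file) of the
 * rewrite suggested at the top of intel_map_unmap_buffers(): walk every
 * attachment of the draw buffer instead of special-casing the
 * _ColorDrawBuffers/_DepthBuffer/_StencilBuffer fields.  Assumes each
 * attachment's Renderbuffer, when present, wraps an intel region.
 */
static void
intel_map_unmap_attachments_sketch(struct intel_context *intel, GLboolean map)
{
   GLcontext *ctx = &intel->ctx;
   GLuint i;

   for (i = 0; i < BUFFER_COUNT; i++) {
      struct gl_renderbuffer *rb = ctx->DrawBuffer->Attachment[i].Renderbuffer;
      struct intel_renderbuffer *irb = rb ? intel_renderbuffer(rb) : NULL;

      if (!irb || !irb->region)
         continue;

      if (map) {
         intel_region_map(intel->intelScreen, irb->region);
         irb->pfMap = irb->region->map;
         irb->pfPitch = irb->region->pitch;
      }
      else {
         intel_region_unmap(intel->intelScreen, irb->region);
         irb->pfMap = NULL;
         irb->pfPitch = 0;
      }
   }
}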
-
-
-
-/**
- * Prepare for software rendering. Map the current read/draw framebuffers'
- * renderbuffers and all currently bound texture objects.
- *
- * Old note: Moved locking out to get reasonable span performance.
- */
-void
-intelSpanRenderStart(GLcontext * ctx)
-{
- struct intel_context *intel = intel_context(ctx);
- GLuint i;
-
- intelFinish(&intel->ctx);
- LOCK_HARDWARE(intel);
-
-#if 0
- /* Just map the framebuffer and all textures. Bufmgr code will
- * take care of waiting on the necessary fences:
- */
- intel_region_map(intel->intelScreen, intel->front_region);
- intel_region_map(intel->intelScreen, intel->back_region);
- intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
-#endif
-
- for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled) {
- struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
- intel_tex_map_images(intel, intel_texture_object(texObj));
- }
- }
-
- intel_map_unmap_buffers(intel, GL_TRUE);
-}
-
-/**
- * Called when done with software rendering. Unmap the buffers we mapped in
- * the above function.
- */
-void
-intelSpanRenderFinish(GLcontext * ctx)
-{
- struct intel_context *intel = intel_context(ctx);
- GLuint i;
-
- _swrast_flush(ctx);
-
- /* Now unmap the framebuffer:
- */
-#if 0
- intel_region_unmap(intel, intel->front_region);
- intel_region_unmap(intel, intel->back_region);
- intel_region_unmap(intel, intel->intelScreen->depth_region);
-#endif
-
- for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
- if (ctx->Texture.Unit[i]._ReallyEnabled) {
- struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
- intel_tex_unmap_images(intel, intel_texture_object(texObj));
- }
- }
-
- intel_map_unmap_buffers(intel, GL_FALSE);
-
- UNLOCK_HARDWARE(intel);
-}
-
-
-void
-intelInitSpanFuncs(GLcontext * ctx)
-{
- struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
- swdd->SpanRenderStart = intelSpanRenderStart;
- swdd->SpanRenderFinish = intelSpanRenderFinish;
-}
-
-
-/**
- * Plug in appropriate span read/write functions for the given renderbuffer.
- * These are used for the software fallbacks.
- */
-void
-intel_set_span_functions(struct gl_renderbuffer *rb)
-{
- if (rb->_ActualFormat == GL_RGB5) {
- /* 565 RGB */
- intelInitPointers_RGB565(rb);
- }
- else if (rb->_ActualFormat == GL_RGBA8) {
- /* 8888 RGBA */
- intelInitPointers_ARGB8888(rb);
- }
- else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
- intelInitDepthPointers_z16(rb);
- }
- else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24 || /* XXX FBO remove */
- rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
- intelInitDepthPointers_z24_s8(rb);
- }
- else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) { /* XXX FBO remove */
- intelInitStencilPointers_z24_s8(rb);
- }
- else {
- _mesa_problem(NULL,
- "Unexpected _ActualFormat in intelSetSpanFunctions");
- }
-}
+../intel/intel_span.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex.c b/src/mesa/drivers/dri/i915/intel_tex.c
index b08dee43bc..d77ce749a3 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex.c
+++ b/src/mesa/drivers/dri/i915/intel_tex.c
@@ -1,192 +1 @@
-#include "texobj.h"
-#include "intel_context.h"
-#include "intel_mipmap_tree.h"
-#include "intel_tex.h"
-
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
-
-static GLboolean
-intelIsTextureResident(GLcontext * ctx, struct gl_texture_object *texObj)
-{
-#if 0
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_object *intelObj = intel_texture_object(texObj);
-
- return
- intelObj->mt &&
- intelObj->mt->region &&
- intel_is_region_resident(intel, intelObj->mt->region);
-#endif
- return 1;
-}
-
-
-
-static struct gl_texture_image *
-intelNewTextureImage(GLcontext * ctx)
-{
- DBG("%s\n", __FUNCTION__);
- (void) ctx;
- return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
-}
-
-
-static struct gl_texture_object *
-intelNewTextureObject(GLcontext * ctx, GLuint name, GLenum target)
-{
- struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
-
- DBG("%s\n", __FUNCTION__);
- _mesa_initialize_texture_object(&obj->base, name, target);
-
- return &obj->base;
-}
-
-static void
-intelDeleteTextureObject(GLcontext *ctx,
- struct gl_texture_object *texObj)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_object *intelObj = intel_texture_object(texObj);
-
- if (intelObj->mt)
- intel_miptree_release(intel, &intelObj->mt);
-
- _mesa_delete_texture_object(ctx, texObj);
-}
-
-
-static void
-intelFreeTextureImageData(GLcontext * ctx, struct gl_texture_image *texImage)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_image *intelImage = intel_texture_image(texImage);
-
- DBG("%s\n", __FUNCTION__);
-
- if (intelImage->mt) {
- intel_miptree_release(intel, &intelImage->mt);
- }
-
- if (texImage->Data) {
- free(texImage->Data);
- texImage->Data = NULL;
- }
-}
-
-
-/* The system memcpy (at least on Ubuntu 5.10) has problems copying
- * to AGP (write-combined) memory from a source which isn't 64-byte
- * aligned - there is a 4x performance falloff.
- *
- * The x86 __memcpy is immune to this but is slightly slower
- * (10%-ish) than the system memcpy.
- *
- * The sse_memcpy seems to have a slight cliff at 64/32 bytes, but
- * isn't much faster than x86_memcpy for agp copies.
- *
- * TODO: switch dynamically.
- */
-static void *
-do_memcpy(void *dest, const void *src, size_t n)
-{
- if ((((unsigned) src) & 63) || (((unsigned) dest) & 63)) {
- return __memcpy(dest, src, n);
- }
- else
- return memcpy(dest, src, n);
-}
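/* Editor sketch (not part of the removed file): given the roughly 4x penalty
 * for unaligned sources described above, a caller that cannot control source
 * alignment could bounce the data through a 64-byte-aligned staging buffer
 * before writing into write-combined memory.  This is only one option, not
 * what the driver does.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static void *
copy_via_aligned_staging(void *wc_dest, const void *src, size_t n)
{
   void *staging = NULL;

   /* Already 64-byte aligned, or no memory for staging: copy directly. */
   if ((((uintptr_t) src) & 63) == 0 || posix_memalign(&staging, 64, n) != 0)
      return memcpy(wc_dest, src, n);

   memcpy(staging, src, n);      /* cacheable -> cacheable               */
   memcpy(wc_dest, staging, n);  /* 64-byte-aligned source -> WC memory  */
   free(staging);
   return wc_dest;
}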
-
-
-#if DO_DEBUG
-
-#ifndef __x86_64__
-static unsigned
-fastrdtsc(void)
-{
- unsigned eax;
- __asm__ volatile ("\t"
- "pushl %%ebx\n\t"
- "cpuid\n\t" ".byte 0x0f, 0x31\n\t"
- "popl %%ebx\n":"=a" (eax)
- :"0"(0)
- :"ecx", "edx", "cc");
-
- return eax;
-}
-#else
-static unsigned
-fastrdtsc(void)
-{
- unsigned eax;
- __asm__ volatile ("\t" "cpuid\n\t" ".byte 0x0f, 0x31\n\t":"=a" (eax)
- :"0"(0)
- :"ecx", "edx", "ebx", "cc");
-
- return eax;
-}
-#endif
-
-static unsigned
-time_diff(unsigned t, unsigned t2)
-{
- return ((t < t2) ? t2 - t : 0xFFFFFFFFU - (t - t2 - 1));
-}
-
-
-static void *
-timed_memcpy(void *dest, const void *src, size_t n)
-{
- void *ret;
- unsigned t1, t2;
- double rate;
-
- if ((((unsigned) src) & 63) || (((unsigned) dest) & 63))
- _mesa_printf("Warning - non-aligned texture copy!\n");
-
- t1 = fastrdtsc();
- ret = do_memcpy(dest, src, n);
- t2 = fastrdtsc();
-
- rate = time_diff(t1, t2);
- rate /= (double) n;
- _mesa_printf("timed_memcpy: %u %u --> %f clocks/byte\n", t1, t2, rate);
- return ret;
-}
-#endif /* DO_DEBUG */
-
-
-void
-intelInitTextureFuncs(struct dd_function_table *functions)
-{
- functions->ChooseTextureFormat = intelChooseTextureFormat;
- functions->TexImage1D = intelTexImage1D;
- functions->TexImage2D = intelTexImage2D;
- functions->TexImage3D = intelTexImage3D;
- functions->TexSubImage1D = intelTexSubImage1D;
- functions->TexSubImage2D = intelTexSubImage2D;
- functions->TexSubImage3D = intelTexSubImage3D;
- functions->CopyTexImage1D = intelCopyTexImage1D;
- functions->CopyTexImage2D = intelCopyTexImage2D;
- functions->CopyTexSubImage1D = intelCopyTexSubImage1D;
- functions->CopyTexSubImage2D = intelCopyTexSubImage2D;
- functions->GetTexImage = intelGetTexImage;
-
- /* compressed texture functions */
- functions->CompressedTexImage2D = intelCompressedTexImage2D;
- functions->GetCompressedTexImage = intelGetCompressedTexImage;
-
- functions->NewTextureObject = intelNewTextureObject;
- functions->NewTextureImage = intelNewTextureImage;
- functions->DeleteTexture = intelDeleteTextureObject;
- functions->FreeTexImageData = intelFreeTextureImageData;
- functions->UpdateTexturePalette = 0;
- functions->IsTextureResident = intelIsTextureResident;
-
-#if DO_DEBUG
- if (INTEL_DEBUG & DEBUG_BUFMGR)
- functions->TextureMemCpy = timed_memcpy;
- else
-#endif
- functions->TextureMemCpy = do_memcpy;
-}
+../intel/intel_tex.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex_copy.c b/src/mesa/drivers/dri/i915/intel_tex_copy.c
index b85a25642a..87196c5d1e 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex_copy.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_copy.c
@@ -1,302 +1 @@
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "mtypes.h"
-#include "enums.h"
-#include "image.h"
-#include "teximage.h"
-#include "swrast/swrast.h"
-
-#include "intel_screen.h"
-#include "intel_context.h"
-#include "intel_batchbuffer.h"
-#include "intel_buffers.h"
-#include "intel_mipmap_tree.h"
-#include "intel_regions.h"
-#include "intel_fbo.h"
-#include "intel_tex.h"
-#include "intel_blit.h"
-#include "intel_pixel.h"
-
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
-
-/**
- * Get the intel_region which is the source for any glCopyTex[Sub]Image call.
- *
- * Do the best we can using the blitter. A future project is to use
- * the texture engine and fragment programs for these copies.
- */
-static const struct intel_region *
-get_teximage_source(struct intel_context *intel, GLenum internalFormat)
-{
- struct intel_renderbuffer *irb;
-
- DBG("%s %s\n", __FUNCTION__,
- _mesa_lookup_enum_by_nr(internalFormat));
-
- switch (internalFormat) {
- case GL_DEPTH_COMPONENT:
- case GL_DEPTH_COMPONENT16_ARB:
- irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
- if (irb && irb->region && irb->region->cpp == 2)
- return irb->region;
- return NULL;
- case GL_DEPTH24_STENCIL8_EXT:
- case GL_DEPTH_STENCIL_EXT:
- irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
- if (irb && irb->region && irb->region->cpp == 4)
- return irb->region;
- return NULL;
- case GL_RGBA:
- case GL_RGBA8:
- return intel_readbuf_region(intel);
- case GL_RGB:
- if (intel->intelScreen->cpp == 2)
- return intel_readbuf_region(intel);
- return NULL;
- default:
- return NULL;
- }
-}
-
-
-static GLboolean
-do_copy_texsubimage(struct intel_context *intel,
- struct intel_texture_image *intelImage,
- GLenum internalFormat,
- GLint dstx, GLint dsty,
- GLint x, GLint y, GLsizei width, GLsizei height)
-{
- GLcontext *ctx = &intel->ctx;
- const struct intel_region *src =
- get_teximage_source(intel, internalFormat);
-
- if (!intelImage->mt || !src) {
- DBG("%s fail %p %p\n", __FUNCTION__, intelImage->mt, src);
- return GL_FALSE;
- }
-
- intelFlush(ctx);
- LOCK_HARDWARE(intel);
- {
- GLuint image_offset = intel_miptree_image_offset(intelImage->mt,
- intelImage->face,
- intelImage->level);
- const GLint orig_x = x;
- const GLint orig_y = y;
- const struct gl_framebuffer *fb = ctx->DrawBuffer;
-
- if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax,
- &x, &y, &width, &height)) {
- /* Update dst for clipped src. Need to also clip the source rect.
- */
- dstx += x - orig_x;
- dsty += y - orig_y;
-
- if (ctx->ReadBuffer->Name == 0) {
- /* reading from a window, adjust x, y */
- __DRIdrawablePrivate *dPriv = intel->driDrawable;
- GLuint window_y;
- /* window_y = position of window on screen if y=0=bottom */
- window_y = intel->intelScreen->height - (dPriv->y + dPriv->h);
- y = window_y + y;
- x += dPriv->x;
- }
- else {
- /* reading from a FBO */
- /* invert Y */
- y = ctx->ReadBuffer->Height - y - 1;
- }
-
-
- /* A bit of fiddling to get the blitter to work with -ve
- * pitches. But we get a nice inverted blit this way, so it's
- * worth it:
- */
- intelEmitCopyBlit(intel,
- intelImage->mt->cpp,
- -src->pitch,
- src->buffer,
- src->height * src->pitch * src->cpp,
- intelImage->mt->pitch,
- intelImage->mt->region->buffer,
- image_offset,
- x, y + height, dstx, dsty, width, height,
- GL_COPY); /* ? */
-
- intel_batchbuffer_flush(intel->batch);
- }
- }
-
-
- UNLOCK_HARDWARE(intel);
-
-#if 0
- /* GL_SGIS_generate_mipmap -- this can be accelerated now.
- * XXX Add a ctx->Driver.GenerateMipmaps() function?
- */
- if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
- intel_generate_mipmap(ctx, target,
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
- texObj);
- }
-#endif
-
- return GL_TRUE;
-}
-
-
-
-
-
-void
-intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
- GLenum internalFormat,
- GLint x, GLint y, GLsizei width, GLint border)
-{
- struct gl_texture_unit *texUnit =
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_object *texObj =
- _mesa_select_tex_object(ctx, texUnit, target);
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, level);
-
- if (border)
- goto fail;
-
- /* Set up or redefine the texture object, mipmap tree and texture
- * image. Don't populate yet.
- */
- ctx->Driver.TexImage1D(ctx, target, level, internalFormat,
- width, border,
- GL_RGBA, CHAN_TYPE, NULL,
- &ctx->DefaultPacking, texObj, texImage);
-
- if (!do_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- internalFormat, 0, 0, x, y, width, 1))
- goto fail;
-
- return;
-
- fail:
- _swrast_copy_teximage1d(ctx, target, level, internalFormat, x, y,
- width, border);
-}
-
-void
-intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
- GLenum internalFormat,
- GLint x, GLint y, GLsizei width, GLsizei height,
- GLint border)
-{
- struct gl_texture_unit *texUnit =
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_object *texObj =
- _mesa_select_tex_object(ctx, texUnit, target);
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, level);
-
- if (border)
- goto fail;
-
- /* Set up or redefine the texture object, mipmap tree and texture
- * image. Don't populate yet.
- */
- ctx->Driver.TexImage2D(ctx, target, level, internalFormat,
- width, height, border,
- GL_RGBA, CHAN_TYPE, NULL,
- &ctx->DefaultPacking, texObj, texImage);
-
-
- if (!do_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- internalFormat, 0, 0, x, y, width, height))
- goto fail;
-
- return;
-
- fail:
- _swrast_copy_teximage2d(ctx, target, level, internalFormat, x, y,
- width, height, border);
-}
-
-
-void
-intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
- GLint xoffset, GLint x, GLint y, GLsizei width)
-{
- struct gl_texture_unit *texUnit =
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_object *texObj =
- _mesa_select_tex_object(ctx, texUnit, target);
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, level);
- GLenum internalFormat = texImage->InternalFormat;
-
- /* XXX need to check <border> as in above function? */
-
- /* Need to check texture is compatible with source format.
- */
-
- if (!do_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- internalFormat, xoffset, 0, x, y, width, 1)) {
- _swrast_copy_texsubimage1d(ctx, target, level, xoffset, x, y, width);
- }
-}
-
-
-
-void
-intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
- GLint xoffset, GLint yoffset,
- GLint x, GLint y, GLsizei width, GLsizei height)
-{
- struct gl_texture_unit *texUnit =
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
- struct gl_texture_object *texObj =
- _mesa_select_tex_object(ctx, texUnit, target);
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, level);
- GLenum internalFormat = texImage->InternalFormat;
-
-
- /* Need to check texture is compatible with source format.
- */
-
- if (!do_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- internalFormat,
- xoffset, yoffset, x, y, width, height)) {
-
- DBG("%s - fallback to swrast\n", __FUNCTION__);
-
- _swrast_copy_texsubimage2d(ctx, target, level,
- xoffset, yoffset, x, y, width, height);
- }
-}
+../intel/intel_tex_copy.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex_format.c b/src/mesa/drivers/dri/i915/intel_tex_format.c
index 6e058dff69..3415f75470 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex_format.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_format.c
@@ -1,172 +1 @@
-#include "intel_context.h"
-#include "intel_tex.h"
-#include "texformat.h"
-#include "enums.h"
-
-/* It works out that this function is fine for all the supported
- * hardware. However, there is still a need to map the formats onto
- * hardware descriptors.
- */
-/* Note that the i915 can actually support many more formats than
- * these if we take the step of simply swizzling the colors
- * immediately after sampling...
- */
-const struct gl_texture_format *
-intelChooseTextureFormat(GLcontext * ctx, GLint internalFormat,
- GLenum format, GLenum type)
-{
- struct intel_context *intel = intel_context(ctx);
- const GLboolean do32bpt = (intel->intelScreen->cpp == 4);
-
- switch (internalFormat) {
- case 4:
- case GL_RGBA:
- case GL_COMPRESSED_RGBA:
- if (format == GL_BGRA) {
- if (type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) {
- return &_mesa_texformat_argb8888;
- }
- else if (type == GL_UNSIGNED_SHORT_4_4_4_4_REV) {
- return &_mesa_texformat_argb4444;
- }
- else if (type == GL_UNSIGNED_SHORT_1_5_5_5_REV) {
- return &_mesa_texformat_argb1555;
- }
- }
- return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
-
- case 3:
- case GL_RGB:
- case GL_COMPRESSED_RGB:
- if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
- return &_mesa_texformat_rgb565;
- }
- return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_rgb565;
-
- case GL_RGBA8:
- case GL_RGB10_A2:
- case GL_RGBA12:
- case GL_RGBA16:
- return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
-
- case GL_RGBA4:
- case GL_RGBA2:
- return &_mesa_texformat_argb4444;
-
- case GL_RGB5_A1:
- return &_mesa_texformat_argb1555;
-
- case GL_RGB8:
- case GL_RGB10:
- case GL_RGB12:
- case GL_RGB16:
- return &_mesa_texformat_argb8888;
-
- case GL_RGB5:
- case GL_RGB4:
- case GL_R3_G3_B2:
- return &_mesa_texformat_rgb565;
-
- case GL_ALPHA:
- case GL_ALPHA4:
- case GL_ALPHA8:
- case GL_ALPHA12:
- case GL_ALPHA16:
- case GL_COMPRESSED_ALPHA:
- return &_mesa_texformat_a8;
-
- case 1:
- case GL_LUMINANCE:
- case GL_LUMINANCE4:
- case GL_LUMINANCE8:
- case GL_LUMINANCE12:
- case GL_LUMINANCE16:
- case GL_COMPRESSED_LUMINANCE:
- return &_mesa_texformat_l8;
-
- case 2:
- case GL_LUMINANCE_ALPHA:
- case GL_LUMINANCE4_ALPHA4:
- case GL_LUMINANCE6_ALPHA2:
- case GL_LUMINANCE8_ALPHA8:
- case GL_LUMINANCE12_ALPHA4:
- case GL_LUMINANCE12_ALPHA12:
- case GL_LUMINANCE16_ALPHA16:
- case GL_COMPRESSED_LUMINANCE_ALPHA:
- return &_mesa_texformat_al88;
-
- case GL_INTENSITY:
- case GL_INTENSITY4:
- case GL_INTENSITY8:
- case GL_INTENSITY12:
- case GL_INTENSITY16:
- case GL_COMPRESSED_INTENSITY:
- return &_mesa_texformat_i8;
-
- case GL_YCBCR_MESA:
- if (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE)
- return &_mesa_texformat_ycbcr;
- else
- return &_mesa_texformat_ycbcr_rev;
-
- case GL_COMPRESSED_RGB_FXT1_3DFX:
- return &_mesa_texformat_rgb_fxt1;
- case GL_COMPRESSED_RGBA_FXT1_3DFX:
- return &_mesa_texformat_rgba_fxt1;
-
- case GL_RGB_S3TC:
- case GL_RGB4_S3TC:
- case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
- return &_mesa_texformat_rgb_dxt1;
-
- case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
- return &_mesa_texformat_rgba_dxt1;
-
- case GL_RGBA_S3TC:
- case GL_RGBA4_S3TC:
- case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
- return &_mesa_texformat_rgba_dxt3;
-
- case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
- return &_mesa_texformat_rgba_dxt5;
-
- case GL_DEPTH_COMPONENT:
- case GL_DEPTH_COMPONENT16:
- case GL_DEPTH_COMPONENT24:
- case GL_DEPTH_COMPONENT32:
- return &_mesa_texformat_z16;
-
- case GL_DEPTH_STENCIL_EXT:
- case GL_DEPTH24_STENCIL8_EXT:
- return &_mesa_texformat_z24_s8;
-
- default:
- fprintf(stderr, "unexpected texture format %s in %s\n",
- _mesa_lookup_enum_by_nr(internalFormat), __FUNCTION__);
- return NULL;
- }
-
- return NULL; /* never get here */
-}
-
-int intel_compressed_num_bytes(GLuint mesaFormat)
-{
- int bytes = 0;
- switch(mesaFormat) {
-
- case MESA_FORMAT_RGB_FXT1:
- case MESA_FORMAT_RGBA_FXT1:
- case MESA_FORMAT_RGB_DXT1:
- case MESA_FORMAT_RGBA_DXT1:
- bytes = 2;
- break;
-
- case MESA_FORMAT_RGBA_DXT3:
- case MESA_FORMAT_RGBA_DXT5:
- bytes = 4;
- break;
-
- default:
- break;
- }
-
- return bytes;
-}
+../intel/intel_tex_format.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex_image.c b/src/mesa/drivers/dri/i915/intel_tex_image.c
index 197cf35ebe..567abe4974 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex_image.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_image.c
@@ -1,690 +1 @@
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "glheader.h"
-#include "macros.h"
-#include "mtypes.h"
-#include "enums.h"
-#include "colortab.h"
-#include "convolve.h"
-#include "context.h"
-#include "simple_list.h"
-#include "texcompress.h"
-#include "texformat.h"
-#include "texobj.h"
-#include "texstore.h"
-
-#include "intel_context.h"
-#include "intel_mipmap_tree.h"
-#include "intel_buffer_objects.h"
-#include "intel_batchbuffer.h"
-#include "intel_tex.h"
-#include "intel_ioctl.h"
-#include "intel_blit.h"
-
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
-
-/* Functions to store texture images. Where possible, mipmap trees
- * will be created or further instantiated with image data, otherwise
- * images will be stored in malloc'd memory. A validation step is
- * required to pull those images into a mipmap tree, or otherwise
- * decide a fallback is required.
- */
-
-
-static int
-logbase2(int n)
-{
- GLint i = 1;
- GLint log2 = 0;
-
- while (n > i) {
- i *= 2;
- log2++;
- }
-
- return log2;
-}
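/* Editor note (not part of the removed file): logbase2() rounds up, so a
 * non-power-of-two dimension still gets enough mip levels, e.g.
 * logbase2(1) == 0, logbase2(8) == 3, logbase2(9) == 4.
 */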
-
-
-/* Store the image in memory if (Border != 0) or (any dimension ==
- * 1).
- *
- * Otherwise, if max_level >= level >= min_level, create tree with
- * space for textures from min_level down to max_level.
- *
- * Otherwise, create tree with space for textures from (level
- * 0)..(1x1). Consider pruning this tree at a validation if the
- * saving is worth it.
- */
-static void
-guess_and_alloc_mipmap_tree(struct intel_context *intel,
- struct intel_texture_object *intelObj,
- struct intel_texture_image *intelImage)
-{
- GLuint firstLevel;
- GLuint lastLevel;
- GLuint width = intelImage->base.Width;
- GLuint height = intelImage->base.Height;
- GLuint depth = intelImage->base.Depth;
- GLuint l2width, l2height, l2depth;
- GLuint i, comp_byte = 0;
-
- DBG("%s\n", __FUNCTION__);
-
- if (intelImage->base.Border)
- return;
-
- if (intelImage->level > intelObj->base.BaseLevel &&
- (intelImage->base.Width == 1 ||
- (intelObj->base.Target != GL_TEXTURE_1D &&
- intelImage->base.Height == 1) ||
- (intelObj->base.Target == GL_TEXTURE_3D &&
- intelImage->base.Depth == 1)))
- return;
-
- /* If this image disrespects BaseLevel, allocate from level zero.
- * Usually BaseLevel == 0, so it's unlikely to happen.
- */
- if (intelImage->level < intelObj->base.BaseLevel)
- firstLevel = 0;
- else
- firstLevel = intelObj->base.BaseLevel;
-
-
- /* Figure out image dimensions at start level.
- */
- for (i = intelImage->level; i > firstLevel; i--) {
- width <<= 1;
- if (height != 1)
- height <<= 1;
- if (depth != 1)
- depth <<= 1;
- }
-
- /* Guess a reasonable value for lastLevel. This is probably going
- * to be wrong fairly often and might mean that we have to look at
- * resizable buffers, or require that buffers implement lazy
- * pagetable arrangements.
- */
- if ((intelObj->base.MinFilter == GL_NEAREST ||
- intelObj->base.MinFilter == GL_LINEAR) &&
- intelImage->level == firstLevel) {
- lastLevel = firstLevel;
- }
- else {
- l2width = logbase2(width);
- l2height = logbase2(height);
- l2depth = logbase2(depth);
- lastLevel = firstLevel + MAX2(MAX2(l2width, l2height), l2depth);
- }
-
- assert(!intelObj->mt);
- if (intelImage->base.IsCompressed)
- comp_byte = intel_compressed_num_bytes(intelImage->base.TexFormat->MesaFormat);
- intelObj->mt = intel_miptree_create(intel,
- intelObj->base.Target,
- intelImage->base.InternalFormat,
- firstLevel,
- lastLevel,
- width,
- height,
- depth,
- intelImage->base.TexFormat->TexelBytes,
- comp_byte);
-
- DBG("%s - success\n", __FUNCTION__);
-}
-
-
-
-
-static GLuint
-target_to_face(GLenum target)
-{
- switch (target) {
- case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
- case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
- case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
- case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
- return ((GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
- default:
- return 0;
- }
-}
-
-/* There are actually quite a few combinations this will work for,
- * more than what I've listed here.
- */
-static GLboolean
-check_pbo_format(GLint internalFormat,
- GLenum format, GLenum type,
- const struct gl_texture_format *mesa_format)
-{
- switch (internalFormat) {
- case 4:
- case GL_RGBA:
- return (format == GL_BGRA &&
- (type == GL_UNSIGNED_BYTE ||
- type == GL_UNSIGNED_INT_8_8_8_8_REV) &&
- mesa_format == &_mesa_texformat_argb8888);
- case 3:
- case GL_RGB:
- return (format == GL_RGB &&
- type == GL_UNSIGNED_SHORT_5_6_5 &&
- mesa_format == &_mesa_texformat_rgb565);
- case GL_YCBCR_MESA:
- return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
- default:
- return GL_FALSE;
- }
-}
-
-
-/* XXX: Do this for TexSubImage also:
- */
-static GLboolean
-try_pbo_upload(struct intel_context *intel,
- struct intel_texture_image *intelImage,
- const struct gl_pixelstore_attrib *unpack,
- GLint internalFormat,
- GLint width, GLint height,
- GLenum format, GLenum type, const void *pixels)
-{
- struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
- GLuint src_offset, src_stride;
- GLuint dst_offset, dst_stride;
-
- if (!pbo ||
- intel->ctx._ImageTransferState ||
- unpack->SkipPixels || unpack->SkipRows) {
- _mesa_printf("%s: failure 1\n", __FUNCTION__);
- return GL_FALSE;
- }
-
- src_offset = (GLuint) pixels;
-
- if (unpack->RowLength > 0)
- src_stride = unpack->RowLength;
- else
- src_stride = width;
-
- dst_offset = intel_miptree_image_offset(intelImage->mt,
- intelImage->face,
- intelImage->level);
-
- dst_stride = intelImage->mt->pitch;
-
- intelFlush(&intel->ctx);
- LOCK_HARDWARE(intel);
- {
- dri_bo *src_buffer = intel_bufferobj_buffer(intel, pbo, INTEL_READ);
- dri_bo *dst_buffer = intel_region_buffer(intel->intelScreen,
- intelImage->mt->region,
- INTEL_WRITE_FULL);
-
-
- intelEmitCopyBlit(intel,
- intelImage->mt->cpp,
- src_stride, src_buffer, src_offset,
- dst_stride, dst_buffer, dst_offset,
- 0, 0, 0, 0, width, height,
- GL_COPY);
-
- intel_batchbuffer_flush(intel->batch);
- }
- UNLOCK_HARDWARE(intel);
-
- return GL_TRUE;
-}
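/* Editor sketch (not part of the removed file): the (GLuint) pixels cast in
 * try_pbo_upload() works because, once a pixel-unpack buffer object is
 * bound, the client's "pixels" argument is a byte offset into that buffer
 * rather than a CPU pointer.  Hypothetical client-side usage, assuming a
 * GL 2.1 context:
 */
#include <GL/gl.h>   /* may also need <GL/glext.h> for GL 1.5+ entry points */

static void
upload_texture_from_pbo_example(GLuint pbo, GLsizei width, GLsizei height,
                                const void *pixel_data)
{
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
   glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4, pixel_data,
                GL_STREAM_DRAW);

   /* (void *) 0 is an offset of zero into the PBO; the driver sees
    * src_offset == 0 and can blit straight from the buffer object.
    * GL_BGRA + GL_UNSIGNED_INT_8_8_8_8_REV is one of the combinations
    * check_pbo_format() accepts.
    */
   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
                GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, (void *) 0);

   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
}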
-
-
-
-static GLboolean
-try_pbo_zcopy(struct intel_context *intel,
- struct intel_texture_image *intelImage,
- const struct gl_pixelstore_attrib *unpack,
- GLint internalFormat,
- GLint width, GLint height,
- GLenum format, GLenum type, const void *pixels)
-{
- struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
- GLuint src_offset, src_stride;
- GLuint dst_offset, dst_stride;
-
- if (!pbo ||
- intel->ctx._ImageTransferState ||
- unpack->SkipPixels || unpack->SkipRows) {
- _mesa_printf("%s: failure 1\n", __FUNCTION__);
- return GL_FALSE;
- }
-
- src_offset = (GLuint) pixels;
-
- if (unpack->RowLength > 0)
- src_stride = unpack->RowLength;
- else
- src_stride = width;
-
- dst_offset = intel_miptree_image_offset(intelImage->mt,
- intelImage->face,
- intelImage->level);
-
- dst_stride = intelImage->mt->pitch;
-
- if (src_stride != dst_stride || dst_offset != 0 || src_offset != 0) {
- _mesa_printf("%s: failure 2\n", __FUNCTION__);
- return GL_FALSE;
- }
-
- intel_region_attach_pbo(intel->intelScreen, intelImage->mt->region, pbo);
-
- return GL_TRUE;
-}
-
-
-
-
-
-
-static void
-intelTexImage(GLcontext * ctx,
- GLint dims,
- GLenum target, GLint level,
- GLint internalFormat,
- GLint width, GLint height, GLint depth,
- GLint border,
- GLenum format, GLenum type, const void *pixels,
- const struct gl_pixelstore_attrib *unpack,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage, GLsizei imageSize, int compressed)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_object *intelObj = intel_texture_object(texObj);
- struct intel_texture_image *intelImage = intel_texture_image(texImage);
- GLint postConvWidth = width;
- GLint postConvHeight = height;
- GLint texelBytes, sizeInBytes;
- GLuint dstRowStride;
-
-
- DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
- _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);
-
- intelFlush(ctx);
-
- intelImage->face = target_to_face(target);
- intelImage->level = level;
-
- if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
- _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
- &postConvHeight);
- }
-
- /* choose the texture format */
- texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
- format, type);
-
- _mesa_set_fetch_functions(texImage, dims);
-
- if (texImage->TexFormat->TexelBytes == 0) {
- /* must be a compressed format */
- texelBytes = 0;
- texImage->IsCompressed = GL_TRUE;
- texImage->CompressedSize =
- ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
- texImage->Height, texImage->Depth,
- texImage->TexFormat->MesaFormat);
- } else {
- texelBytes = texImage->TexFormat->TexelBytes;
-
- /* Minimum pitch of 32 bytes */
- if (postConvWidth * texelBytes < 32) {
- postConvWidth = 32 / texelBytes;
- texImage->RowStride = postConvWidth;
- }
-
- assert(texImage->RowStride == postConvWidth);
- }
-
- /* Release the reference to a potentially orphaned buffer.
- * Release any old malloced memory.
- */
- if (intelImage->mt) {
- intel_miptree_release(intel, &intelImage->mt);
- assert(!texImage->Data);
- }
- else if (texImage->Data) {
- _mesa_align_free(texImage->Data);
- }
-
- /* If this is the only texture image in the tree, could call
- * bmBufferData with NULL data to free the old block and avoid
- * waiting on any outstanding fences.
- */
- if (intelObj->mt &&
- intelObj->mt->first_level == level &&
- intelObj->mt->last_level == level &&
- intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
- !intel_miptree_match_image(intelObj->mt, &intelImage->base,
- intelImage->face, intelImage->level)) {
-
- DBG("release it\n");
- intel_miptree_release(intel, &intelObj->mt);
- assert(!intelObj->mt);
- }
-
- if (!intelObj->mt) {
- guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
- if (!intelObj->mt) {
- DBG("guess_and_alloc_mipmap_tree: failed\n");
- }
- }
-
- assert(!intelImage->mt);
-
- if (intelObj->mt &&
- intel_miptree_match_image(intelObj->mt, &intelImage->base,
- intelImage->face, intelImage->level)) {
-
- intel_miptree_reference(&intelImage->mt, intelObj->mt);
- assert(intelImage->mt);
- }
-
- if (!intelImage->mt)
- DBG("XXX: Image did not fit into tree - storing in local memory!\n");
-
- /* PBO fastpaths:
- */
- if (dims <= 2 &&
- intelImage->mt &&
- intel_buffer_object(unpack->BufferObj) &&
- check_pbo_format(internalFormat, format,
- type, intelImage->base.TexFormat)) {
-
- DBG("trying pbo upload\n");
-
- /* Attempt to texture directly from PBO data (zero copy upload).
- *
- * Currently disabled, as it can lead to worse as well as better
- * performance (in particular when intel_region_cow() is
- * required).
- */
- if (intelObj->mt == intelImage->mt &&
- intelObj->mt->first_level == level &&
- intelObj->mt->last_level == level) {
-
- if (try_pbo_zcopy(intel, intelImage, unpack,
- internalFormat,
- width, height, format, type, pixels)) {
-
- DBG("pbo zcopy upload succeeded\n");
- return;
- }
- }
-
-
- /* Otherwise, attempt to use the blitter for PBO image uploads.
- */
- if (try_pbo_upload(intel, intelImage, unpack,
- internalFormat,
- width, height, format, type, pixels)) {
- DBG("pbo upload succeeded\n");
- return;
- }
-
- DBG("pbo upload failed\n");
- }
-
-
-
- /* intelCopyTexImage calls this function with pixels == NULL, with
- * the expectation that the mipmap tree will be set up but nothing
- * more will be done. This is where those calls return:
- */
- if (compressed) {
- pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
- unpack,
- "glCompressedTexImage");
- } else {
- pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
- format, type,
- pixels, unpack, "glTexImage");
- }
- if (!pixels)
- return;
-
-
- if (intelImage->mt)
- intel_region_idle(intel->intelScreen, intelImage->mt->region);
-
- LOCK_HARDWARE(intel);
-
- if (intelImage->mt) {
- texImage->Data = intel_miptree_image_map(intel,
- intelImage->mt,
- intelImage->face,
- intelImage->level,
- &dstRowStride,
- intelImage->base.ImageOffsets);
- }
- else {
- /* Allocate regular memory and store the image there temporarily. */
- if (texImage->IsCompressed) {
- sizeInBytes = texImage->CompressedSize;
- dstRowStride =
- _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
- assert(dims != 3);
- }
- else {
- dstRowStride = postConvWidth * texelBytes;
- sizeInBytes = depth * dstRowStride * postConvHeight;
- }
-
- texImage->Data = malloc(sizeInBytes);
- }
-
- DBG("Upload image %dx%dx%d row_len %x "
- "pitch %x\n",
- width, height, depth, width * texelBytes, dstRowStride);
-
- /* Copy data. Would like to know when it's ok for us to eg. use
- * the blitter to copy. Or, use the hardware to do the format
- * conversion and copy:
- */
- if (compressed) {
- memcpy(texImage->Data, pixels, imageSize);
- } else if (!texImage->TexFormat->StoreImage(ctx, dims,
- texImage->_BaseFormat,
- texImage->TexFormat,
- texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
- dstRowStride,
- texImage->ImageOffsets,
- width, height, depth,
- format, type, pixels, unpack)) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
- }
-
- _mesa_unmap_teximage_pbo(ctx, unpack);
-
- if (intelImage->mt) {
- intel_miptree_image_unmap(intel, intelImage->mt);
- texImage->Data = NULL;
- }
-
- UNLOCK_HARDWARE(intel);
-
-#if 0
- /* GL_SGIS_generate_mipmap -- this can be accelerated now.
- */
- if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
- intel_generate_mipmap(ctx, target,
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
- texObj);
- }
-#endif
-}
-
-void
-intelTexImage3D(GLcontext * ctx,
- GLenum target, GLint level,
- GLint internalFormat,
- GLint width, GLint height, GLint depth,
- GLint border,
- GLenum format, GLenum type, const void *pixels,
- const struct gl_pixelstore_attrib *unpack,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- intelTexImage(ctx, 3, target, level,
- internalFormat, width, height, depth, border,
- format, type, pixels, unpack, texObj, texImage, 0, 0);
-}
-
-
-void
-intelTexImage2D(GLcontext * ctx,
- GLenum target, GLint level,
- GLint internalFormat,
- GLint width, GLint height, GLint border,
- GLenum format, GLenum type, const void *pixels,
- const struct gl_pixelstore_attrib *unpack,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- intelTexImage(ctx, 2, target, level,
- internalFormat, width, height, 1, border,
- format, type, pixels, unpack, texObj, texImage, 0, 0);
-}
-
-void
-intelTexImage1D(GLcontext * ctx,
- GLenum target, GLint level,
- GLint internalFormat,
- GLint width, GLint border,
- GLenum format, GLenum type, const void *pixels,
- const struct gl_pixelstore_attrib *unpack,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- intelTexImage(ctx, 1, target, level,
- internalFormat, width, 1, 1, border,
- format, type, pixels, unpack, texObj, texImage, 0, 0);
-}
-
-void intelCompressedTexImage2D( GLcontext *ctx, GLenum target, GLint level,
- GLint internalFormat,
- GLint width, GLint height, GLint border,
- GLsizei imageSize, const GLvoid *data,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage )
-{
- intelTexImage(ctx, 2, target, level,
- internalFormat, width, height, 1, border,
- 0, 0, data, &ctx->Unpack, texObj, texImage, imageSize, 1);
-}
-
-/**
- * Need to map texture image into memory before copying image data,
- * then unmap it.
- */
-static void
-intel_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
- GLenum format, GLenum type, GLvoid * pixels,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage, int compressed)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_image *intelImage = intel_texture_image(texImage);
-
- /* Map */
- if (intelImage->mt) {
- /* Image is stored in hardware format in a buffer managed by the
- * kernel. Need to explicitly map and unmap it.
- */
- intelImage->base.Data =
- intel_miptree_image_map(intel,
- intelImage->mt,
- intelImage->face,
- intelImage->level,
- &intelImage->base.RowStride,
- intelImage->base.ImageOffsets);
- intelImage->base.RowStride /= intelImage->mt->cpp;
- }
- else {
- /* Otherwise, the image should actually be stored in
- * intelImage->base.Data. This is pretty confusing for
- * everybody, I'd much prefer to separate the two functions of
- * texImage->Data - storage for texture images in main memory
- * and access (ie mappings) of images. In other words, we'd
- * create a new texImage->Map field and leave Data simply for
- * storage.
- */
- assert(intelImage->base.Data);
- }
-
-
- if (compressed) {
- _mesa_get_compressed_teximage(ctx, target, level, pixels,
- texObj, texImage);
- } else {
- _mesa_get_teximage(ctx, target, level, format, type, pixels,
- texObj, texImage);
- }
-
-
- /* Unmap */
- if (intelImage->mt) {
- intel_miptree_image_unmap(intel, intelImage->mt);
- intelImage->base.Data = NULL;
- }
-}
-
-void
-intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
- GLenum format, GLenum type, GLvoid * pixels,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- intel_get_tex_image(ctx, target, level, format, type, pixels,
- texObj, texImage, 0);
-
-
-}
-
-void
-intelGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
- GLvoid *pixels,
- const struct gl_texture_object *texObj,
- const struct gl_texture_image *texImage)
-{
- intel_get_tex_image(ctx, target, level, 0, 0, pixels,
- texObj, texImage, 1);
-
-}
-
-void
-intelSetTexOffset(__DRIcontext *pDRICtx, GLint texname,
- unsigned long long offset, GLint depth, GLuint pitch)
-{
- struct intel_context *intel = (struct intel_context*)
- ((__DRIcontextPrivate*)pDRICtx->private)->driverPrivate;
- struct gl_texture_object *tObj = _mesa_lookup_texture(&intel->ctx, texname);
- struct intel_texture_object *intelObj = intel_texture_object(tObj);
-
- if (!intelObj)
- return;
-
- if (intelObj->mt)
- intel_miptree_release(intel, &intelObj->mt);
-
- intelObj->imageOverride = GL_TRUE;
- intelObj->depthOverride = depth;
- intelObj->pitchOverride = pitch;
-
- if (offset)
- intelObj->textureOffset = offset;
-}
+../intel/intel_tex_image.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex_subimage.c b/src/mesa/drivers/dri/i915/intel_tex_subimage.c
index 3935787806..b3a8a3d7ca 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex_subimage.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_subimage.c
@@ -1,182 +1 @@
-
-/**************************************************************************
- *
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#include "mtypes.h"
-#include "texobj.h"
-#include "texstore.h"
-#include "enums.h"
-
-#include "intel_context.h"
-#include "intel_tex.h"
-#include "intel_mipmap_tree.h"
-
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
-
-static void
-intelTexSubimage(GLcontext * ctx,
- GLint dims,
- GLenum target, GLint level,
- GLint xoffset, GLint yoffset, GLint zoffset,
- GLint width, GLint height, GLint depth,
- GLenum format, GLenum type, const void *pixels,
- const struct gl_pixelstore_attrib *packing,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_texture_image *intelImage = intel_texture_image(texImage);
- GLuint dstRowStride;
-
- DBG("%s target %s level %d offset %d,%d %dx%d\n", __FUNCTION__,
- _mesa_lookup_enum_by_nr(target),
- level, xoffset, yoffset, width, height);
-
- intelFlush(ctx);
-
- pixels =
- _mesa_validate_pbo_teximage(ctx, dims, width, height, depth, format,
- type, pixels, packing, "glTexSubImage2D");
- if (!pixels)
- return;
-
- if (intelImage->mt)
- intel_region_idle(intel->intelScreen, intelImage->mt->region);
-
- LOCK_HARDWARE(intel);
-
- /* Map buffer if necessary. Need to lock to prevent other contexts
- * from uploading the buffer under us.
- */
- if (intelImage->mt)
- texImage->Data = intel_miptree_image_map(intel,
- intelImage->mt,
- intelImage->face,
- intelImage->level,
- &dstRowStride,
- texImage->ImageOffsets);
-
- assert(dstRowStride);
-
- if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
- texImage->TexFormat,
- texImage->Data,
- xoffset, yoffset, zoffset,
- dstRowStride,
- texImage->ImageOffsets,
- width, height, depth,
- format, type, pixels, packing)) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
- }
-
-#if 0
- /* GL_SGIS_generate_mipmap */
- if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
- _mesa_generate_mipmap(ctx, target,
- &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
- texObj);
- }
-#endif
-
- _mesa_unmap_teximage_pbo(ctx, packing);
-
- if (intelImage->mt) {
- intel_miptree_image_unmap(intel, intelImage->mt);
- texImage->Data = NULL;
- }
-
- UNLOCK_HARDWARE(intel);
-}
-
-
-
-
-
-void
-intelTexSubImage3D(GLcontext * ctx,
- GLenum target,
- GLint level,
- GLint xoffset, GLint yoffset, GLint zoffset,
- GLsizei width, GLsizei height, GLsizei depth,
- GLenum format, GLenum type,
- const GLvoid * pixels,
- const struct gl_pixelstore_attrib *packing,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
-
- intelTexSubimage(ctx, 3,
- target, level,
- xoffset, yoffset, zoffset,
- width, height, depth,
- format, type, pixels, packing, texObj, texImage);
-
-}
-
-
-
-void
-intelTexSubImage2D(GLcontext * ctx,
- GLenum target,
- GLint level,
- GLint xoffset, GLint yoffset,
- GLsizei width, GLsizei height,
- GLenum format, GLenum type,
- const GLvoid * pixels,
- const struct gl_pixelstore_attrib *packing,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
-
- intelTexSubimage(ctx, 2,
- target, level,
- xoffset, yoffset, 0,
- width, height, 1,
- format, type, pixels, packing, texObj, texImage);
-
-}
-
-
-void
-intelTexSubImage1D(GLcontext * ctx,
- GLenum target,
- GLint level,
- GLint xoffset,
- GLsizei width,
- GLenum format, GLenum type,
- const GLvoid * pixels,
- const struct gl_pixelstore_attrib *packing,
- struct gl_texture_object *texObj,
- struct gl_texture_image *texImage)
-{
- intelTexSubimage(ctx, 1,
- target, level,
- xoffset, 0, 0,
- width, 1, 1,
- format, type, pixels, packing, texObj, texImage);
-
-}
+../intel/intel_tex_subimage.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/i915/intel_tex_validate.c b/src/mesa/drivers/dri/i915/intel_tex_validate.c
index af18c26d55..41a75674c2 100644..120000
--- a/src/mesa/drivers/dri/i915/intel_tex_validate.c
+++ b/src/mesa/drivers/dri/i915/intel_tex_validate.c
@@ -1,272 +1 @@
-#include "mtypes.h"
-#include "macros.h"
-
-#include "intel_context.h"
-#include "intel_batchbuffer.h"
-#include "intel_mipmap_tree.h"
-#include "intel_tex.h"
-
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
-
-/**
- * Compute which mipmap levels really need to be sent to the hardware.
- * This depends on the base image size, GL_TEXTURE_MIN_LOD,
- * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
- */
-static void
-intel_calculate_first_last_level(struct intel_texture_object *intelObj)
-{
- struct gl_texture_object *tObj = &intelObj->base;
- const struct gl_texture_image *const baseImage =
- tObj->Image[0][tObj->BaseLevel];
-
- /* These must be signed values. MinLod and MaxLod can be negative numbers,
- * and having firstLevel and lastLevel as signed prevents the need for
- * extra sign checks.
- */
- int firstLevel;
- int lastLevel;
-
- /* Yes, this looks overly complicated, but it's all needed.
- */
- switch (tObj->Target) {
- case GL_TEXTURE_1D:
- case GL_TEXTURE_2D:
- case GL_TEXTURE_3D:
- case GL_TEXTURE_CUBE_MAP:
- if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
- /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
- */
- firstLevel = lastLevel = tObj->BaseLevel;
- }
- else {
- firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
- firstLevel = MAX2(firstLevel, tObj->BaseLevel);
- lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
- lastLevel = MAX2(lastLevel, tObj->BaseLevel);
- lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
- lastLevel = MIN2(lastLevel, tObj->MaxLevel);
- lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
- }
- break;
- case GL_TEXTURE_RECTANGLE_NV:
- case GL_TEXTURE_4D_SGIS:
- firstLevel = lastLevel = 0;
- break;
- default:
- return;
- }
-
- /* save these values */
- intelObj->firstLevel = firstLevel;
- intelObj->lastLevel = lastLevel;
-}
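/* Editor note (not part of the removed file) -- a worked example of the
 * clamping above: BaseLevel 0, MaxLevel 10, MinLod 2.4, MaxLod 5.0 and a
 * 256x256 base image (MaxLog2 == 8) with a mipmapping MinFilter give
 *
 *   firstLevel = MAX2(0 + (GLint)(2.4 + 0.5), 0)               = 2
 *   lastLevel  = MIN2(MIN2(0 + (GLint)(5.0 + 0.5), 0 + 8), 10) = 5
 *
 * so only levels 2..5 need to be present in the hardware tree.
 */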
-
-static void
-copy_image_data_to_tree(struct intel_context *intel,
- struct intel_texture_object *intelObj,
- struct intel_texture_image *intelImage)
-{
- if (intelImage->mt) {
- /* Copy potentially with the blitter:
- */
- intel_miptree_image_copy(intel,
- intelObj->mt,
- intelImage->face,
- intelImage->level, intelImage->mt);
-
- intel_miptree_release(intel, &intelImage->mt);
- }
- else {
- assert(intelImage->base.Data != NULL);
-
- /* More straightforward upload.
- */
- intel_miptree_image_data(intel,
- intelObj->mt,
- intelImage->face,
- intelImage->level,
- intelImage->base.Data,
- intelImage->base.RowStride,
- intelImage->base.RowStride *
- intelImage->base.Height);
- _mesa_align_free(intelImage->base.Data);
- intelImage->base.Data = NULL;
- }
-
- intel_miptree_reference(&intelImage->mt, intelObj->mt);
-}
-
-
-/*
- */
-GLuint
-intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
-{
- struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
- struct intel_texture_object *intelObj = intel_texture_object(tObj);
- int comp_byte = 0;
- int cpp;
-
- GLuint face, i;
- GLuint nr_faces = 0;
- struct intel_texture_image *firstImage;
-
- GLboolean need_flush = GL_FALSE;
-
- /* We know/require this is true by now:
- */
- assert(intelObj->base._Complete);
-
- /* What levels must the tree include at a minimum?
- */
- intel_calculate_first_last_level(intelObj);
- firstImage =
- intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);
-
- /* Fallback case:
- */
- if (firstImage->base.Border) {
- if (intelObj->mt) {
- intel_miptree_release(intel, &intelObj->mt);
- }
- return GL_FALSE;
- }
-
-
- /* If both firstImage and intelObj have a tree which can contain
- * all active images, favour firstImage. Note that because of the
- * completeness requirement, we know that the image dimensions
- * will match.
- */
- if (firstImage->mt &&
- firstImage->mt != intelObj->mt &&
- firstImage->mt->first_level <= intelObj->firstLevel &&
- firstImage->mt->last_level >= intelObj->lastLevel) {
-
- if (intelObj->mt)
- intel_miptree_release(intel, &intelObj->mt);
-
- intel_miptree_reference(&intelObj->mt, firstImage->mt);
- }
-
- if (firstImage->base.IsCompressed) {
- comp_byte = intel_compressed_num_bytes(firstImage->base.TexFormat->MesaFormat);
- cpp = comp_byte;
- }
- else cpp = firstImage->base.TexFormat->TexelBytes;
-
- /* Check tree can hold all active levels. Check tree matches
- * target, imageFormat, etc.
- *
- * XXX: For some layouts (eg i945?), the test might have to be
- * first_level == firstLevel, as the tree isn't valid except at the
- * original start level. Hope to get around this by
- * programming minLod, maxLod, baseLevel into the hardware and
- * leaving the tree alone.
- */
- if (intelObj->mt &&
- (intelObj->mt->target != intelObj->base.Target ||
- intelObj->mt->internal_format != firstImage->base.InternalFormat ||
- intelObj->mt->first_level != intelObj->firstLevel ||
- intelObj->mt->last_level != intelObj->lastLevel ||
- intelObj->mt->width0 != firstImage->base.Width ||
- intelObj->mt->height0 != firstImage->base.Height ||
- intelObj->mt->depth0 != firstImage->base.Depth ||
- intelObj->mt->cpp != cpp ||
- intelObj->mt->compressed != firstImage->base.IsCompressed)) {
- intel_miptree_release(intel, &intelObj->mt);
- }
-
-
- /* May need to create a new tree:
- */
- if (!intelObj->mt) {
- intelObj->mt = intel_miptree_create(intel,
- intelObj->base.Target,
- firstImage->base.InternalFormat,
- intelObj->firstLevel,
- intelObj->lastLevel,
- firstImage->base.Width,
- firstImage->base.Height,
- firstImage->base.Depth,
- cpp,
- comp_byte);
- }
-
- /* Pull in any images not in the object's tree:
- */
- nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
- for (face = 0; face < nr_faces; face++) {
- for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
- struct intel_texture_image *intelImage =
- intel_texture_image(intelObj->base.Image[face][i]);
-
- /* Need to import images in main memory or held in other trees.
- */
- if (intelObj->mt != intelImage->mt) {
- copy_image_data_to_tree(intel, intelObj, intelImage);
- need_flush = GL_TRUE;
- }
- }
- }
-
- if (need_flush)
- intel_batchbuffer_flush(intel->batch);
-
- return GL_TRUE;
-}
-
-
-
-void
-intel_tex_map_images(struct intel_context *intel,
- struct intel_texture_object *intelObj)
-{
- GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
- GLuint face, i;
-
- DBG("%s\n", __FUNCTION__);
-
- for (face = 0; face < nr_faces; face++) {
- for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
- struct intel_texture_image *intelImage =
- intel_texture_image(intelObj->base.Image[face][i]);
-
- if (intelImage->mt) {
- intelImage->base.Data =
- intel_miptree_image_map(intel,
- intelImage->mt,
- intelImage->face,
- intelImage->level,
- &intelImage->base.RowStride,
- intelImage->base.ImageOffsets);
- /* convert stride to texels, not bytes */
- intelImage->base.RowStride /= intelImage->mt->cpp;
-/* intelImage->base.ImageStride /= intelImage->mt->cpp; */
- }
- }
- }
-}
-
-
-
-void
-intel_tex_unmap_images(struct intel_context *intel,
- struct intel_texture_object *intelObj)
-{
- GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
- GLuint face, i;
-
- for (face = 0; face < nr_faces; face++) {
- for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
- struct intel_texture_image *intelImage =
- intel_texture_image(intelObj->base.Image[face][i]);
-
- if (intelImage->mt) {
- intel_miptree_image_unmap(intel, intelImage->mt);
- intelImage->base.Data = NULL;
- }
- }
- }
-}
+../intel/intel_tex_validate.c
\ No newline at end of file
diff --git a/src/mesa/drivers/dri/intel/intel_batchbuffer.c b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
new file mode 100644
index 0000000000..8ece6d2760
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.c
@@ -0,0 +1,250 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_batchbuffer.h"
+#include "intel_ioctl.h"
+#include "intel_decode.h"
+#include "intel_reg.h"
+
+/* Relocations in kernel space:
+ * - pass dma buffer separately
+ * - memory manager knows how to patch
+ * - pass list of dependent buffers
+ * - pass relocation list
+ *
+ * Either:
+ * - get back an offset for buffer to fire
+ * - memory manager knows how to fire buffer
+ *
+ * Really want the buffer to be AGP and pinned.
+ *
+ */
+
+/* Cliprect fence: The highest fence protecting a dma buffer
+ * containing explicit cliprect information. Like the old drawable
+ * lock but irq-driven. X server must wait for this fence to expire
+ * before changing cliprects [and then doing sw rendering?]. For
+ * other dma buffers, the scheduler will grab current cliprect info
+ * and mix into buffer. X server must hold the lock while changing
+ * cliprects??? Make per-drawable. Need cliprects in shared memory
+ * -- beats storing them with every cmd buffer in the queue.
+ *
+ * ==> X server must wait for this fence to expire before touching the
+ * framebuffer with new cliprects.
+ *
+ * ==> Cliprect-dependent buffers associated with a
+ * cliprect-timestamp. All of the buffers associated with a timestamp
+ * must go to hardware before any buffer with a newer timestamp.
+ *
+ * ==> Dma should be queued per-drawable for correct X/GL
+ * synchronization. Or can fences be used for this?
+ *
+ * Applies to: Blit operations, metaops, X server operations -- X
+ * server automatically waits on its own dma to complete before
+ * modifying cliprects ???
+ */
+
+void
+intel_batchbuffer_reset(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+
+ if (batch->buf != NULL) {
+ dri_bo_unreference(batch->buf);
+ batch->buf = NULL;
+ }
+
+ batch->buf = dri_bo_alloc(intel->intelScreen->bufmgr, "batchbuffer",
+ intel->intelScreen->maxBatchSize, 4096,
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
+ dri_bo_map(batch->buf, GL_TRUE);
+ batch->map = batch->buf->virtual;
+ batch->size = intel->intelScreen->maxBatchSize;
+ batch->ptr = batch->map;
+}
+
+struct intel_batchbuffer *
+intel_batchbuffer_alloc(struct intel_context *intel)
+{
+ struct intel_batchbuffer *batch = calloc(sizeof(*batch), 1);
+
+ batch->intel = intel;
+ batch->last_fence = NULL;
+ intel_batchbuffer_reset(batch);
+
+ return batch;
+}
+
+void
+intel_batchbuffer_free(struct intel_batchbuffer *batch)
+{
+ if (batch->last_fence) {
+ dri_fence_wait(batch->last_fence);
+ dri_fence_unreference(batch->last_fence);
+ batch->last_fence = NULL;
+ }
+ if (batch->map) {
+ dri_bo_unmap(batch->buf);
+ batch->map = NULL;
+ }
+ dri_bo_unreference(batch->buf);
+ batch->buf = NULL;
+ free(batch);
+}
+
+
+
+/* TODO: Push this whole function into bufmgr.
+ */
+static void
+do_flush_locked(struct intel_batchbuffer *batch,
+ GLuint used,
+ GLboolean ignore_cliprects, GLboolean allow_unlock)
+{
+ struct intel_context *intel = batch->intel;
+ void *start;
+ GLuint count;
+
+ start = dri_process_relocs(batch->buf, &count);
+
+ batch->map = NULL;
+ batch->ptr = NULL;
+ batch->flags = 0;
+
+ /* Throw away non-effective packets. This won't work once we have
+ * hardware contexts, which would preserve state changes beyond a
+ * single buffer.
+ */
+
+ if (!(intel->numClipRects == 0 && !ignore_cliprects)) {
+ if (intel->intelScreen->ttm == GL_TRUE) {
+ intel_exec_ioctl(batch->intel,
+ used, ignore_cliprects, allow_unlock,
+ start, count, &batch->last_fence);
+ } else {
+ intel_batch_ioctl(batch->intel,
+ batch->buf->offset,
+ used, ignore_cliprects, allow_unlock);
+ }
+ }
+
+ dri_post_submit(batch->buf, &batch->last_fence);
+
+ if (intel->numClipRects == 0 && !ignore_cliprects) {
+ if (allow_unlock) {
+ /* If we are not doing any actual user-visible rendering,
+ * do a sched_yield to keep the app from pegging the cpu while
+ * achieving nothing.
+ */
+ UNLOCK_HARDWARE(intel);
+ sched_yield();
+ LOCK_HARDWARE(intel);
+ }
+ intel->vtbl.lost_hardware(intel);
+ }
+
+ if (INTEL_DEBUG & DEBUG_BATCH) {
+ // dri_bo_map(batch->buf, GL_FALSE);
+ // intel_decode(ptr, used / 4, batch->buf->offset,
+ // intel->intelScreen->deviceID);
+ // dri_bo_unmap(batch->buf);
+ }
+}
+
+void
+intel_batchbuffer_flush(struct intel_batchbuffer *batch)
+{
+ struct intel_context *intel = batch->intel;
+ GLuint used = batch->ptr - batch->map;
+ GLboolean was_locked = intel->locked;
+
+ if (used == 0)
+ return;
+
+ /* Add the MI_BATCH_BUFFER_END. We currently always emit an MI_FLUSH as
+ * well, which is a performance drain that we would like to avoid.
+ */
+ if (used & 4) {
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->ptr)[1] = 0;
+ ((int *) batch->ptr)[2] = MI_BATCH_BUFFER_END;
+ used += 12;
+ }
+ else {
+ ((int *) batch->ptr)[0] = intel->vtbl.flush_cmd();
+ ((int *) batch->ptr)[1] = MI_BATCH_BUFFER_END;
+ used += 8;
+ }
+
+ /* TODO: Just pass the relocation list and dma buffer up to the
+ * kernel.
+ */
+ if (!was_locked)
+ LOCK_HARDWARE(intel);
+
+ do_flush_locked(batch, used, !(batch->flags & INTEL_BATCH_CLIPRECTS),
+ GL_FALSE);
+
+ if (!was_locked)
+ UNLOCK_HARDWARE(intel);
+
+ /* Reset the buffer:
+ */
+ intel_batchbuffer_reset(batch);
+}
+
+void
+intel_batchbuffer_finish(struct intel_batchbuffer *batch)
+{
+ intel_batchbuffer_flush(batch);
+ if (batch->last_fence != NULL)
+ dri_fence_wait(batch->last_fence);
+}
+
+
+/* This is the only way buffers get added to the validate list.
+ */
+GLboolean
+intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
+ dri_bo *buffer,
+ GLuint flags, GLuint delta)
+{
+ dri_emit_reloc(batch->buf, flags, delta, batch->ptr - batch->map, buffer);
+ batch->ptr += 4;
+
+ return GL_TRUE;
+}
+
+void
+intel_batchbuffer_data(struct intel_batchbuffer *batch,
+ const void *data, GLuint bytes, GLuint flags)
+{
+ assert((bytes & 3) == 0);
+ intel_batchbuffer_require_space(batch, bytes, flags);
+ __memcpy(batch->ptr, data, bytes);
+ batch->ptr += bytes;
+}
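+
+/* Illustrative sketch (hypothetical helper, not from the original sources):
+ * how a caller might combine the helpers above to emit a packet that
+ * references a buffer object.  This is roughly what the OUT_BATCH/OUT_RELOC
+ * macros used elsewhere in this patch boil down to; the zeroed command
+ * dwords are placeholders.
+ */
+#if 0
+static void
+example_emit_packet(struct intel_batchbuffer *batch, dri_bo *bo)
+{
+ GLuint dwords[2] = { 0, 0 }; /* placeholder command + flags dwords */
+
+ /* Make sure the three dwords fit in the current batch. */
+ intel_batchbuffer_require_space(batch, 3 * 4, INTEL_BATCH_NO_CLIPRECTS);
+
+ intel_batchbuffer_data(batch, dwords, sizeof(dwords),
+ INTEL_BATCH_NO_CLIPRECTS);
+
+ /* The buffer's final offset is patched in at flush time. */
+ intel_batchbuffer_emit_reloc(batch, bo,
+ DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
+
+ intel_batchbuffer_flush(batch);
+}
+#endif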
diff --git a/src/mesa/drivers/dri/i915/intel_batchbuffer.h b/src/mesa/drivers/dri/intel/intel_batchbuffer.h
index b5c7a783a7..b5c7a783a7 100644
--- a/src/mesa/drivers/dri/i915/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/intel/intel_batchbuffer.h
diff --git a/src/mesa/drivers/dri/intel/intel_blit.c b/src/mesa/drivers/dri/intel/intel_blit.c
new file mode 100644
index 0000000000..2761136f47
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_blit.c
@@ -0,0 +1,491 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include <stdio.h>
+#include <errno.h>
+
+#include "mtypes.h"
+#include "context.h"
+#include "enums.h"
+
+#include "intel_batchbuffer.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_reg.h"
+#include "intel_regions.h"
+
+#define FILE_DEBUG_FLAG DEBUG_BLIT
+
+/**
+ * Copy the back color buffer to the front color buffer.
+ * Used for SwapBuffers().
+ */
+void
+intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
+ const drm_clip_rect_t * rect)
+{
+
+ struct intel_context *intel;
+ const intelScreenPrivate *intelScreen;
+
+ DBG("%s\n", __FUNCTION__);
+
+ assert(dPriv);
+
+ intel = intelScreenContext(dPriv->driScreenPriv->private);
+ if (!intel)
+ return;
+
+ intelScreen = intel->intelScreen;
+
+ if (intel->last_swap_fence) {
+ dri_fence_wait(intel->last_swap_fence);
+ dri_fence_unreference(intel->last_swap_fence);
+ intel->last_swap_fence = NULL;
+ }
+ intel->last_swap_fence = intel->first_swap_fence;
+ intel->first_swap_fence = NULL;
+
+ /* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
+ * should work regardless.
+ */
+ LOCK_HARDWARE(intel);
+
+ if (dPriv && dPriv->numClipRects) {
+ struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
+ const struct intel_region *frontRegion
+ = intel_get_rb_region(&intel_fb->Base, BUFFER_FRONT_LEFT);
+ const struct intel_region *backRegion
+ = intel_get_rb_region(&intel_fb->Base, BUFFER_BACK_LEFT);
+ const int nbox = dPriv->numClipRects;
+ const drm_clip_rect_t *pbox = dPriv->pClipRects;
+ const int pitch = frontRegion->pitch;
+ const int cpp = frontRegion->cpp;
+ int BR13, CMD;
+ int i;
+
+ ASSERT(intel_fb);
+ ASSERT(intel_fb->Base.Name == 0); /* Not a user-created FBO */
+ ASSERT(frontRegion);
+ ASSERT(backRegion);
+ ASSERT(frontRegion->pitch == backRegion->pitch);
+ ASSERT(frontRegion->cpp == backRegion->cpp);
+
+ if (cpp == 2) {
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ }
+ else {
+ BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
+ }
+
+ for (i = 0; i < nbox; i++, pbox++) {
+ drm_clip_rect_t box;
+
+ if (pbox->x1 > pbox->x2 ||
+ pbox->y1 > pbox->y2 ||
+ pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height)
+ continue;
+
+ box = *pbox;
+
+ if (rect) {
+ if (rect->x1 > box.x1)
+ box.x1 = rect->x1;
+ if (rect->y1 > box.y1)
+ box.y1 = rect->y1;
+ if (rect->x2 < box.x2)
+ box.x2 = rect->x2;
+ if (rect->y2 < box.y2)
+ box.y2 = rect->y2;
+
+ if (box.x1 > box.x2 || box.y1 > box.y2)
+ continue;
+ }
+
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((pbox->y1 << 16) | pbox->x1);
+ OUT_BATCH((pbox->y2 << 16) | pbox->x2);
+
+ OUT_RELOC(frontRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ 0);
+ OUT_BATCH((pbox->y1 << 16) | pbox->x1);
+ OUT_BATCH(BR13 & 0xffff);
+ OUT_RELOC(backRegion->buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ 0);
+
+ ADVANCE_BATCH();
+ }
+
+ if (intel->first_swap_fence)
+ dri_fence_unreference(intel->first_swap_fence);
+ intel_batchbuffer_flush(intel->batch);
+ intel->first_swap_fence = intel->batch->last_fence;
+ dri_fence_reference(intel->first_swap_fence);
+ }
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+
+void
+intelEmitFillBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort dst_pitch,
+ dri_bo *dst_buffer,
+ GLuint dst_offset,
+ GLshort x, GLshort y, GLshort w, GLshort h, GLuint color)
+{
+ GLuint BR13, CMD;
+ BATCH_LOCALS;
+
+ dst_pitch *= cpp;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = dst_pitch | (0xF0 << 16) | (1 << 24);
+ CMD = XY_COLOR_BLT_CMD;
+ break;
+ case 4:
+ BR13 = dst_pitch | (0xF0 << 16) | (1 << 24) | (1 << 25);
+ CMD = (XY_COLOR_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__, dst_buffer, dst_pitch, dst_offset, x, y, w, h);
+
+
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((y << 16) | x);
+ OUT_BATCH(((y + h) << 16) | (x + w));
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
+ OUT_BATCH(color);
+ ADVANCE_BATCH();
+}
+
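+/* Illustrative sketch (hypothetical helper, not from the original sources):
+ * filling a 32x32 rectangle at the origin of a 32-bit-per-pixel region with
+ * solid white.  'region' stands in for any struct intel_region.
+ */
+#if 0
+static void
+example_fill_rect(struct intel_context *intel, struct intel_region *region)
+{
+ intelEmitFillBlit(intel,
+ region->cpp, /* bytes per pixel */
+ region->pitch, /* pitch in pixels */
+ region->buffer, /* destination dri_bo */
+ 0, /* byte offset into the buffer */
+ 0, 0, 32, 32, /* x, y, w, h */
+ 0xffffffff); /* packed clear color (white) */
+}
+#endif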
+
+static GLuint translate_raster_op(GLenum logicop)
+{
+ switch(logicop) {
+ case GL_CLEAR: return 0x00;
+ case GL_AND: return 0x88;
+ case GL_AND_REVERSE: return 0x44;
+ case GL_COPY: return 0xCC;
+ case GL_AND_INVERTED: return 0x22;
+ case GL_NOOP: return 0xAA;
+ case GL_XOR: return 0x66;
+ case GL_OR: return 0xEE;
+ case GL_NOR: return 0x11;
+ case GL_EQUIV: return 0x99;
+ case GL_INVERT: return 0x55;
+ case GL_OR_REVERSE: return 0xDD;
+ case GL_COPY_INVERTED: return 0x33;
+ case GL_OR_INVERTED: return 0xBB;
+ case GL_NAND: return 0x77;
+ case GL_SET: return 0xFF;
+ default: return 0;
+ }
+}
+
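+/* Note: the 0xCC hard-coded into BR13 by intelCopyBuffer() above is this
+ * table's GL_COPY entry, while the 0xF0 used by the fill/clear blits is the
+ * pattern-copy raster op (the "pattern" being the solid color dword).
+ */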
+
+/* Copy BitBlt
+ */
+void
+intelEmitCopyBlit(struct intel_context *intel,
+ GLuint cpp,
+ GLshort src_pitch,
+ dri_bo *src_buffer,
+ GLuint src_offset,
+ GLshort dst_pitch,
+ dri_bo *dst_buffer,
+ GLuint dst_offset,
+ GLshort src_x, GLshort src_y,
+ GLshort dst_x, GLshort dst_y,
+ GLshort w, GLshort h,
+ GLenum logic_op)
+{
+ GLuint CMD, BR13;
+ int dst_y2 = dst_y + h;
+ int dst_x2 = dst_x + w;
+ BATCH_LOCALS;
+
+
+ DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ src_buffer, src_pitch, src_offset, src_x, src_y,
+ dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
+
+ src_pitch *= cpp;
+ dst_pitch *= cpp;
+
+ switch (cpp) {
+ case 1:
+ case 2:
+ case 3:
+ BR13 = (((GLint) dst_pitch) & 0xffff) |
+ (translate_raster_op(logic_op) << 16) | (1 << 24);
+ CMD = XY_SRC_COPY_BLT_CMD;
+ break;
+ case 4:
+ BR13 =
+ (((GLint) dst_pitch) & 0xffff) |
+ (translate_raster_op(logic_op) << 16) | (1 << 24) | (1 << 25);
+ CMD =
+ (XY_SRC_COPY_BLT_CMD | XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB);
+ break;
+ default:
+ return;
+ }
+
+ if (dst_y2 < dst_y || dst_x2 < dst_x) {
+ return;
+ }
+
+ /* Initial y values don't seem to work with negative pitches. If
+ * we adjust the offsets manually (below), it seems to work fine.
+ *
+ * On the other hand, if we always adjust, the hardware doesn't
+ * know which blit directions to use, so overlapping copypixels get
+ * the wrong result.
+ */
+ if (dst_pitch > 0 && src_pitch > 0) {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((dst_y << 16) | dst_x);
+ OUT_BATCH((dst_y2 << 16) | dst_x2);
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE, dst_offset);
+ OUT_BATCH((src_y << 16) | src_x);
+ OUT_BATCH(((GLint) src_pitch & 0xffff));
+ OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, src_offset);
+ ADVANCE_BATCH();
+ }
+ else {
+ BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((0 << 16) | dst_x);
+ OUT_BATCH((h << 16) | dst_x2);
+ OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ dst_offset + dst_y * dst_pitch);
+ OUT_BATCH((0 << 16) | src_x);
+ OUT_BATCH(((GLint) src_pitch & 0xffff));
+ OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+ src_offset + src_y * src_pitch);
+ ADVANCE_BATCH();
+ }
+}
+
+
+/**
+ * Use blitting to clear the renderbuffers named by 'flags'.
+ * Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
+ * since that might include software renderbuffers or renderbuffers
+ * which we're clearing with triangles.
+ * \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
+ */
+void
+intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint clear_depth;
+ GLbitfield skipBuffers = 0;
+ BATCH_LOCALS;
+
+ DBG("%s %x\n", __FUNCTION__, mask);
+
+ /*
+ * Compute values for clearing the buffers.
+ */
+ clear_depth = 0;
+ if (mask & BUFFER_BIT_DEPTH) {
+ clear_depth = (GLuint) (fb->_DepthMax * ctx->Depth.Clear);
+ }
+ if (mask & BUFFER_BIT_STENCIL) {
+ clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
+ }
+
+ /* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
+ * the loop below.
+ */
+ if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
+ skipBuffers = BUFFER_BIT_STENCIL;
+ }
+
+ /* XXX Move this flush/lock into the following conditional? */
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+ if (intel->numClipRects) {
+ GLint cx, cy, cw, ch;
+ drm_clip_rect_t clear;
+ int i;
+
+ /* Get clear bounds after locking */
+ cx = fb->_Xmin;
+ cy = fb->_Ymin;
+ cw = fb->_Xmax - cx;
+ ch = fb->_Ymax - cy;
+
+ if (fb->Name == 0) {
+ /* clearing a window */
+
+ /* flip top to bottom */
+ clear.x1 = cx + intel->drawX;
+ clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
+ clear.x2 = clear.x1 + cw;
+ clear.y2 = clear.y1 + ch;
+ }
+ else {
+ /* clearing FBO */
+ assert(intel->numClipRects == 1);
+ assert(intel->pClipRects == &intel->fboRect);
+ clear.x1 = cx;
+ clear.y1 = cy;
+ clear.x2 = clear.x1 + cw;
+ clear.y2 = clear.y1 + ch;
+ /* no change to mask */
+ }
+
+ for (i = 0; i < intel->numClipRects; i++) {
+ const drm_clip_rect_t *box = &intel->pClipRects[i];
+ drm_clip_rect_t b;
+ GLuint buf;
+ GLuint clearMask = mask; /* use copy, since we modify it below */
+ GLboolean all = (cw == fb->Width && ch == fb->Height);
+
+ if (!all) {
+ intel_intersect_cliprects(&b, &clear, box);
+ }
+ else {
+ b = *box;
+ }
+
+ if (b.x1 >= b.x2 || b.y1 >= b.y2)
+ continue;
+
+ if (0)
+ _mesa_printf("clear %d,%d..%d,%d, mask %x\n",
+ b.x1, b.y1, b.x2, b.y2, mask);
+
+ /* Loop over all renderbuffers */
+ for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
+ const GLbitfield bufBit = 1 << buf;
+ if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
+ /* OK, clear this renderbuffer */
+ struct intel_region *irb_region =
+ intel_get_rb_region(fb, buf);
+ dri_bo *write_buffer =
+ intel_region_buffer(intel->intelScreen, irb_region,
+ all ? INTEL_WRITE_FULL :
+ INTEL_WRITE_PART);
+
+ GLuint clearVal;
+ GLint pitch, cpp;
+ GLuint BR13, CMD;
+
+ ASSERT(irb_region);
+
+ pitch = irb_region->pitch;
+ cpp = irb_region->cpp;
+
+ DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
+ __FUNCTION__,
+ irb_region->buffer, (pitch * cpp),
+ irb_region->draw_offset,
+ b.x1, b.y1, b.x2 - b.x1, b.y2 - b.y1);
+
+
+ /* Setup the blit command */
+ if (cpp == 4) {
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ CMD = XY_COLOR_BLT_CMD;
+ if (clearMask & BUFFER_BIT_DEPTH)
+ CMD |= XY_BLT_WRITE_RGB;
+ if (clearMask & BUFFER_BIT_STENCIL)
+ CMD |= XY_BLT_WRITE_ALPHA;
+ }
+ else {
+ /* clearing RGBA */
+ CMD = XY_COLOR_BLT_CMD |
+ XY_BLT_WRITE_ALPHA | XY_BLT_WRITE_RGB;
+ }
+ }
+ else {
+ ASSERT(cpp == 2 || cpp == 0);
+ BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
+ CMD = XY_COLOR_BLT_CMD;
+ }
+
+ if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
+ clearVal = clear_depth;
+ }
+ else {
+ clearVal = (cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+ }
+ /*
+ _mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
+ buf, irb->Base.Name);
+ */
+ intel_wait_flips(intel, INTEL_BATCH_NO_CLIPRECTS);
+
+ BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ OUT_BATCH(CMD);
+ OUT_BATCH(BR13);
+ OUT_BATCH((b.y1 << 16) | b.x1);
+ OUT_BATCH((b.y2 << 16) | b.x2);
+ OUT_RELOC(write_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
+ irb_region->draw_offset);
+ OUT_BATCH(clearVal);
+ ADVANCE_BATCH();
+ clearMask &= ~bufBit; /* turn off bit, for faster loop exit */
+ }
+ }
+ }
+ intel_batchbuffer_flush(intel->batch);
+ }
+
+ UNLOCK_HARDWARE(intel);
+}
diff --git a/src/mesa/drivers/dri/i915/intel_blit.h b/src/mesa/drivers/dri/intel/intel_blit.h
index a66af86359..a66af86359 100644
--- a/src/mesa/drivers/dri/i915/intel_blit.h
+++ b/src/mesa/drivers/dri/intel/intel_blit.h
diff --git a/src/mesa/drivers/dri/intel/intel_buffer_objects.c b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
new file mode 100644
index 0000000000..5348822816
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.c
@@ -0,0 +1,268 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "imports.h"
+#include "mtypes.h"
+#include "bufferobj.h"
+
+#include "intel_context.h"
+#include "intel_buffer_objects.h"
+#include "intel_regions.h"
+#include "dri_bufmgr.h"
+
+/** Allocates a new dri_bo to store the data for the buffer object. */
+static void
+intel_bufferobj_alloc_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ intel_obj->buffer = dri_bo_alloc(intel->intelScreen->bufmgr, "bufferobj",
+ intel_obj->Base.Size, 64,
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_CACHED_MAPPED);
+}
+
+/**
+ * There is some duplication between mesa's bufferobjects and our
+ * bufmgr buffers. Both have an integer handle and a hashtable to
+ * lookup an opaque structure. It would be nice if the handles and
+ * internal structure where somehow shared.
+ */
+static struct gl_buffer_object *
+intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
+
+ _mesa_initialize_buffer_object(&obj->Base, name, target);
+
+ obj->buffer = NULL;
+
+ return &obj->Base;
+}
+
+/* Break the COW tie to the region. The region gets to keep the data.
+ */
+void
+intel_bufferobj_release_region(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region->buffer == intel_obj->buffer);
+ intel_obj->region->pbo = NULL;
+ intel_obj->region = NULL;
+
+ dri_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+}
+
+/* Break the COW tie to the region. Both the pbo and the region end
+ * up with a copy of the data.
+ */
+void
+intel_bufferobj_cow(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ assert(intel_obj->region);
+ intel_region_cow(intel->intelScreen, intel_obj->region);
+}
+
+
+/**
+ * Deallocate/free a vertex/pixel buffer object.
+ * Called via glDeleteBuffersARB().
+ */
+static void
+intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ if (intel_obj->region) {
+ intel_bufferobj_release_region(intel, intel_obj);
+ }
+ else if (intel_obj->buffer) {
+ dri_bo_unreference(intel_obj->buffer);
+ }
+
+ _mesa_free(intel_obj);
+}
+
+
+
+/**
+ * Allocate space for and store data in a buffer object. Any data that was
+ * previously stored in the buffer object is lost. If data is NULL,
+ * memory will be allocated, but no copy will occur.
+ * Called via glBufferDataARB().
+ */
+static void
+intel_bufferobj_data(GLcontext * ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ const GLvoid * data,
+ GLenum usage, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ intel_obj->Base.Size = size;
+ intel_obj->Base.Usage = usage;
+
+ if (intel_obj->region)
+ intel_bufferobj_release_region(intel, intel_obj);
+
+ if (intel_obj->buffer != NULL && intel_obj->buffer->size != size) {
+ dri_bo_unreference(intel_obj->buffer);
+ intel_obj->buffer = NULL;
+ }
+
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+
+ if (data != NULL)
+ dri_bo_subdata(intel_obj->buffer, 0, size, data);
+}
+
+
+/**
+ * Replace data in a subrange of buffer object. If the data range
+ * specified by size + offset extends beyond the end of the buffer or
+ * if data is NULL, no copy is performed.
+ * Called via glBufferSubDataARB().
+ */
+static void
+intel_bufferobj_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ const GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ dri_bo_subdata(intel_obj->buffer, offset, size, data);
+}
+
+
+/**
+ * Called via glGetBufferSubDataARB().
+ */
+static void
+intel_bufferobj_get_subdata(GLcontext * ctx,
+ GLenum target,
+ GLintptrARB offset,
+ GLsizeiptrARB size,
+ GLvoid * data, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ dri_bo_get_subdata(intel_obj->buffer, offset, size, data);
+}
+
+
+
+/**
+ * Called via glMapBufferARB().
+ */
+static void *
+intel_bufferobj_map(GLcontext * ctx,
+ GLenum target,
+ GLenum access, struct gl_buffer_object *obj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ /* XXX: Translate access to flags arg below:
+ */
+ assert(intel_obj);
+
+ if (intel_obj->region)
+ intel_bufferobj_cow(intel, intel_obj);
+
+ if (intel_obj->buffer == NULL) {
+ obj->Pointer = NULL;
+ return NULL;
+ }
+
+ dri_bo_map(intel_obj->buffer, GL_TRUE);
+ obj->Pointer = intel_obj->buffer->virtual;
+ return obj->Pointer;
+}
+
+
+/**
+ * Called via glMapBufferARB().
+ */
+static GLboolean
+intel_bufferobj_unmap(GLcontext * ctx,
+ GLenum target, struct gl_buffer_object *obj)
+{
+ struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
+
+ assert(intel_obj);
+ if (intel_obj->buffer != NULL) {
+ assert(obj->Pointer);
+ dri_bo_unmap(intel_obj->buffer);
+ obj->Pointer = NULL;
+ }
+ return GL_TRUE;
+}
+
+dri_bo *
+intel_bufferobj_buffer(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj, GLuint flag)
+{
+ if (intel_obj->region) {
+ if (flag == INTEL_WRITE_PART)
+ intel_bufferobj_cow(intel, intel_obj);
+ else if (flag == INTEL_WRITE_FULL) {
+ intel_bufferobj_release_region(intel, intel_obj);
+ intel_bufferobj_alloc_buffer(intel, intel_obj);
+ }
+ }
+
+ return intel_obj->buffer;
+}
+
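+/* Illustrative sketch (hypothetical helper, not from the original sources):
+ * resolving a buffer object to its dri_bo before rewriting it completely.
+ * INTEL_WRITE_FULL lets the COW tie to a region be dropped without copying,
+ * whereas INTEL_WRITE_PART forces the copy first.
+ */
+#if 0
+static dri_bo *
+example_buffer_for_full_rewrite(struct intel_context *intel,
+ struct intel_buffer_object *intel_obj)
+{
+ /* Every byte will be overwritten, so shared region data can simply be
+ * abandoned rather than copied.
+ */
+ return intel_bufferobj_buffer(intel, intel_obj, INTEL_WRITE_FULL);
+}
+#endif
+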
+void
+intel_bufferobj_init(struct intel_context *intel)
+{
+ GLcontext *ctx = &intel->ctx;
+
+ ctx->Driver.NewBufferObject = intel_bufferobj_alloc;
+ ctx->Driver.DeleteBuffer = intel_bufferobj_free;
+ ctx->Driver.BufferData = intel_bufferobj_data;
+ ctx->Driver.BufferSubData = intel_bufferobj_subdata;
+ ctx->Driver.GetBufferSubData = intel_bufferobj_get_subdata;
+ ctx->Driver.MapBuffer = intel_bufferobj_map;
+ ctx->Driver.UnmapBuffer = intel_bufferobj_unmap;
+}
diff --git a/src/mesa/drivers/dri/i915/intel_buffer_objects.h b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
index db579a8ae4..db579a8ae4 100644
--- a/src/mesa/drivers/dri/i915/intel_buffer_objects.h
+++ b/src/mesa/drivers/dri/intel/intel_buffer_objects.h
diff --git a/src/mesa/drivers/dri/intel/intel_buffers.c b/src/mesa/drivers/dri/intel/intel_buffers.c
new file mode 100644
index 0000000000..1ae8b5feb4
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_buffers.c
@@ -0,0 +1,1209 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_blit.h"
+#include "intel_buffers.h"
+#include "intel_depthstencil.h"
+#include "intel_fbo.h"
+#include "intel_tris.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+#include "intel_reg.h"
+#include "context.h"
+#include "utils.h"
+#include "drirenderbuffer.h"
+#include "framebuffer.h"
+#include "swrast/swrast.h"
+#include "vblank.h"
+
+
+/* This block can be removed when libdrm >= 2.3.1 is required */
+
+#ifndef DRM_IOCTL_I915_FLIP
+
+#define DRM_VBLANK_FLIP 0x8000000
+
+typedef struct drm_i915_flip {
+ int pipes;
+} drm_i915_flip_t;
+
+#undef DRM_IOCTL_I915_FLIP
+#define DRM_IOCTL_I915_FLIP DRM_IOW(DRM_COMMAND_BASE + DRM_I915_FLIP, \
+ drm_i915_flip_t)
+
+#endif
+
+
+/**
+ * XXX move this into a new dri/common/cliprects.c file.
+ */
+GLboolean
+intel_intersect_cliprects(drm_clip_rect_t * dst,
+ const drm_clip_rect_t * a,
+ const drm_clip_rect_t * b)
+{
+ GLint bx = b->x1;
+ GLint by = b->y1;
+ GLint bw = b->x2 - bx;
+ GLint bh = b->y2 - by;
+
+ if (bx < a->x1)
+ bw -= a->x1 - bx, bx = a->x1;
+ if (by < a->y1)
+ bh -= a->y1 - by, by = a->y1;
+ if (bx + bw > a->x2)
+ bw = a->x2 - bx;
+ if (by + bh > a->y2)
+ bh = a->y2 - by;
+ if (bw <= 0)
+ return GL_FALSE;
+ if (bh <= 0)
+ return GL_FALSE;
+
+ dst->x1 = bx;
+ dst->y1 = by;
+ dst->x2 = bx + bw;
+ dst->y2 = by + bh;
+
+ return GL_TRUE;
+}
+
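+/* Illustrative sketch (hypothetical helper, not from the original sources):
+ * clamping a caller-supplied rectangle to the bounds of a drawable.
+ */
+#if 0
+static GLboolean
+example_clip_to_drawable(const __DRIdrawablePrivate *dPriv,
+ const drm_clip_rect_t *rect,
+ drm_clip_rect_t *out)
+{
+ drm_clip_rect_t bounds = { .x1 = dPriv->x, .y1 = dPriv->y,
+ .x2 = dPriv->x + dPriv->w,
+ .y2 = dPriv->y + dPriv->h };
+
+ /* Returns GL_FALSE when the intersection is empty. */
+ return intel_intersect_cliprects(out, &bounds, rect);
+}
+#endif
+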
+/**
+ * Return pointer to current color drawing region, or NULL.
+ */
+struct intel_region *
+intel_drawbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0][0]);
+ if (irbColor)
+ return irbColor->region;
+ else
+ return NULL;
+}
+
+/**
+ * Return pointer to current color reading region, or NULL.
+ */
+struct intel_region *
+intel_readbuf_region(struct intel_context *intel)
+{
+ struct intel_renderbuffer *irb
+ = intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+
+
+/**
+ * Update the following fields for rendering to a user-created FBO:
+ * intel->numClipRects
+ * intel->pClipRects
+ * intel->drawX
+ * intel->drawY
+ */
+static void
+intelSetRenderbufferClipRects(struct intel_context *intel)
+{
+ assert(intel->ctx.DrawBuffer->Width > 0);
+ assert(intel->ctx.DrawBuffer->Height > 0);
+ intel->fboRect.x1 = 0;
+ intel->fboRect.y1 = 0;
+ intel->fboRect.x2 = intel->ctx.DrawBuffer->Width;
+ intel->fboRect.y2 = intel->ctx.DrawBuffer->Height;
+ intel->numClipRects = 1;
+ intel->pClipRects = &intel->fboRect;
+ intel->drawX = 0;
+ intel->drawY = 0;
+}
+
+
+/**
+ * As above, but for rendering to front buffer of a window.
+ * \sa intelSetRenderbufferClipRects
+ */
+static void
+intelSetFrontClipRects(struct intel_context *intel)
+{
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+
+ if (!dPriv)
+ return;
+
+ intel->numClipRects = dPriv->numClipRects;
+ intel->pClipRects = dPriv->pClipRects;
+ intel->drawX = dPriv->x;
+ intel->drawY = dPriv->y;
+}
+
+
+/**
+ * As above, but for rendering to back buffer of a window.
+ */
+static void
+intelSetBackClipRects(struct intel_context *intel)
+{
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ struct intel_framebuffer *intel_fb;
+
+ if (!dPriv)
+ return;
+
+ intel_fb = dPriv->driverPrivate;
+
+ if (intel_fb->pf_active || dPriv->numBackClipRects == 0) {
+ /* use the front clip rects */
+ intel->numClipRects = dPriv->numClipRects;
+ intel->pClipRects = dPriv->pClipRects;
+ intel->drawX = dPriv->x;
+ intel->drawY = dPriv->y;
+ }
+ else {
+ /* use the back clip rects */
+ intel->numClipRects = dPriv->numBackClipRects;
+ intel->pClipRects = dPriv->pBackClipRects;
+ intel->drawX = dPriv->backX;
+ intel->drawY = dPriv->backY;
+ }
+}
+
+
+/**
+ * This will be called whenever the currently bound window is moved/resized.
+ * XXX: actually, it seems to NOT be called when the window is only moved (BP).
+ */
+void
+intelWindowMoved(struct intel_context *intel)
+{
+ GLcontext *ctx = &intel->ctx;
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
+
+ if (!intel->ctx.DrawBuffer) {
+ /* when would this happen? -BP */
+ intelSetFrontClipRects(intel);
+ }
+ else if (intel->ctx.DrawBuffer->Name != 0) {
+ /* drawing to user-created FBO - do nothing */
+ /* Cliprects would be set from intelDrawBuffer() */
+ }
+ else {
+ /* drawing to a window */
+ switch (intel_fb->Base._ColorDrawBufferMask[0]) {
+ case BUFFER_BIT_FRONT_LEFT:
+ intelSetFrontClipRects(intel);
+ break;
+ case BUFFER_BIT_BACK_LEFT:
+ intelSetBackClipRects(intel);
+ break;
+ default:
+ /* glDrawBuffer(GL_NONE or GL_FRONT_AND_BACK): software fallback */
+ intelSetFrontClipRects(intel);
+ }
+ }
+
+ if (intel->intelScreen->driScrnPriv->ddx_version.minor >= 7) {
+ drmI830Sarea *sarea = intel->sarea;
+ drm_clip_rect_t drw_rect = { .x1 = dPriv->x, .x2 = dPriv->x + dPriv->w,
+ .y1 = dPriv->y, .y2 = dPriv->y + dPriv->h };
+ drm_clip_rect_t planeA_rect = { .x1 = sarea->planeA_x, .y1 = sarea->planeA_y,
+ .x2 = sarea->planeA_x + sarea->planeA_w,
+ .y2 = sarea->planeA_y + sarea->planeA_h };
+ drm_clip_rect_t planeB_rect = { .x1 = sarea->planeB_x, .y1 = sarea->planeB_y,
+ .x2 = sarea->planeB_x + sarea->planeB_w,
+ .y2 = sarea->planeB_y + sarea->planeB_h };
+ GLint areaA = driIntersectArea( drw_rect, planeA_rect );
+ GLint areaB = driIntersectArea( drw_rect, planeB_rect );
+ GLuint flags = dPriv->vblFlags;
+ GLboolean pf_active;
+ GLint pf_planes;
+
+ /* Update page flipping info
+ */
+ pf_planes = 0;
+
+ if (areaA > 0)
+ pf_planes |= 1;
+
+ if (areaB > 0)
+ pf_planes |= 2;
+
+ intel_fb->pf_current_page = (intel->sarea->pf_current_page >>
+ (intel_fb->pf_planes & 0x2)) & 0x3;
+
+ intel_fb->pf_num_pages = intel->intelScreen->third.handle ? 3 : 2;
+
+ pf_active = pf_planes && (pf_planes & intel->sarea->pf_active) == pf_planes;
+
+ if (INTEL_DEBUG & DEBUG_LOCK)
+ if (pf_active != intel_fb->pf_active)
+ _mesa_printf("%s - Page flipping %sactive\n", __progname,
+ pf_active ? "" : "in");
+
+ if (pf_active) {
+ /* Sync pages between planes if flipping on both at the same time */
+ if (pf_planes == 0x3 && pf_planes != intel_fb->pf_planes &&
+ (intel->sarea->pf_current_page & 0x3) !=
+ (((intel->sarea->pf_current_page) >> 2) & 0x3)) {
+ drm_i915_flip_t flip;
+
+ if (intel_fb->pf_current_page ==
+ (intel->sarea->pf_current_page & 0x3)) {
+ /* XXX: This is ugly, but emitting two flips 'in a row' can cause
+ * lockups for unknown reasons.
+ */
+ intel->sarea->pf_current_page =
+ intel->sarea->pf_current_page & 0x3;
+ intel->sarea->pf_current_page |=
+ ((intel_fb->pf_current_page + intel_fb->pf_num_pages - 1) %
+ intel_fb->pf_num_pages) << 2;
+
+ flip.pipes = 0x2;
+ } else {
+ intel->sarea->pf_current_page =
+ intel->sarea->pf_current_page & (0x3 << 2);
+ intel->sarea->pf_current_page |=
+ (intel_fb->pf_current_page + intel_fb->pf_num_pages - 1) %
+ intel_fb->pf_num_pages;
+
+ flip.pipes = 0x1;
+ }
+
+ drmCommandWrite(intel->driFd, DRM_I915_FLIP, &flip, sizeof(flip));
+ }
+
+ intel_fb->pf_planes = pf_planes;
+ }
+
+ intel_fb->pf_active = pf_active;
+ intel_flip_renderbuffers(intel_fb);
+ intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
+
+ /* Update vblank info
+ */
+ if (areaB > areaA || (areaA == areaB && areaB > 0)) {
+ flags = dPriv->vblFlags | VBLANK_FLAG_SECONDARY;
+ } else {
+ flags = dPriv->vblFlags & ~VBLANK_FLAG_SECONDARY;
+ }
+
+ /* Check to see if we changed pipes */
+ if (flags != dPriv->vblFlags && dPriv->vblFlags &&
+ !(dPriv->vblFlags & VBLANK_FLAG_NO_IRQ)) {
+ int64_t count;
+ drmVBlank vbl;
+ int i;
+
+ /*
+ * Deal with page flipping
+ */
+ vbl.request.type = DRM_VBLANK_ABSOLUTE;
+
+ if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
+ vbl.request.type |= DRM_VBLANK_SECONDARY;
+ }
+
+ for (i = 0; i < intel_fb->pf_num_pages; i++) {
+ if (!intel_fb->color_rb[i] ||
+ (intel_fb->vbl_waited - intel_fb->color_rb[i]->vbl_pending) <=
+ (1<<23))
+ continue;
+
+ vbl.request.sequence = intel_fb->color_rb[i]->vbl_pending;
+ drmWaitVBlank(intel->driFd, &vbl);
+ }
+
+ /*
+ * Update msc_base from old pipe
+ */
+ driDrawableGetMSC32(dPriv->driScreenPriv, dPriv, &count);
+ dPriv->msc_base = count;
+ /*
+ * Then get new vblank_base and vblSeq values
+ */
+ dPriv->vblFlags = flags;
+ driGetCurrentVBlank(dPriv);
+ dPriv->vblank_base = dPriv->vblSeq;
+
+ intel_fb->vbl_waited = dPriv->vblSeq;
+
+ for (i = 0; i < intel_fb->pf_num_pages; i++) {
+ if (intel_fb->color_rb[i])
+ intel_fb->color_rb[i]->vbl_pending = intel_fb->vbl_waited;
+ }
+ }
+ } else {
+ dPriv->vblFlags &= ~VBLANK_FLAG_SECONDARY;
+ }
+
+ /* Update Mesa's notion of window size */
+ driUpdateFramebufferSize(ctx, dPriv);
+ intel_fb->Base.Initialized = GL_TRUE; /* XXX remove someday */
+
+ /* Update hardware scissor */
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+
+ /* Re-calculate viewport related state */
+ ctx->Driver.DepthRange( ctx, ctx->Viewport.Near, ctx->Viewport.Far );
+}
+
+
+
+/* A true meta version of this would be very simple and additionally
+ * machine independent. Maybe we'll get there one day.
+ */
+static void
+intelClearWithTris(struct intel_context *intel, GLbitfield mask)
+{
+ GLcontext *ctx = &intel->ctx;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ drm_clip_rect_t clear;
+
+ if (INTEL_DEBUG & DEBUG_BLIT)
+ _mesa_printf("%s 0x%x\n", __FUNCTION__, mask);
+
+ LOCK_HARDWARE(intel);
+
+ /* XXX FBO: was: intel->driDrawable->numClipRects */
+ if (intel->numClipRects) {
+ GLint cx, cy, cw, ch;
+ GLuint buf;
+
+ intel->vtbl.install_meta_state(intel);
+
+ /* Get clear bounds after locking */
+ cx = fb->_Xmin;
+ cy = fb->_Ymin;
+ cw = fb->_Xmax - cx;
+ ch = fb->_Ymax - cy;
+
+ /* note: regardless of 'all', cx, cy, cw, ch are now correct */
+ clear.x1 = cx;
+ clear.y1 = cy;
+ clear.x2 = cx + cw;
+ clear.y2 = cy + ch;
+
+ /* Back and stencil cliprects are the same. Try and do both
+ * buffers at once:
+ */
+ if (mask &
+ (BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH)) {
+ struct intel_region *backRegion =
+ intel_get_rb_region(fb, BUFFER_BACK_LEFT);
+ struct intel_region *depthRegion =
+ intel_get_rb_region(fb, BUFFER_DEPTH);
+ const GLuint clearColor = (backRegion && backRegion->cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+
+ intel->vtbl.meta_draw_region(intel, backRegion, depthRegion);
+
+ if (mask & BUFFER_BIT_BACK_LEFT)
+ intel->vtbl.meta_color_mask(intel, GL_TRUE);
+ else
+ intel->vtbl.meta_color_mask(intel, GL_FALSE);
+
+ if (mask & BUFFER_BIT_STENCIL)
+ intel->vtbl.meta_stencil_replace(intel,
+ intel->ctx.Stencil.WriteMask[0],
+ intel->ctx.Stencil.Clear);
+ else
+ intel->vtbl.meta_no_stencil_write(intel);
+
+ if (mask & BUFFER_BIT_DEPTH)
+ intel->vtbl.meta_depth_replace(intel);
+ else
+ intel->vtbl.meta_no_depth_write(intel);
+
+ /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
+ * drawing origin may not be correctly emitted.
+ */
+ intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, intel->ctx.Depth.Clear, clearColor, 0, 0, 0, 0); /* texcoords */
+
+ mask &=
+ ~(BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH);
+ }
+
+ /* clear the remaining (color) renderbuffers */
+ for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
+ const GLuint bufBit = 1 << buf;
+ if (mask & bufBit) {
+ struct intel_renderbuffer *irbColor =
+ intel_renderbuffer(fb->Attachment[buf].Renderbuffer);
+ GLuint color = (irbColor->region->cpp == 4)
+ ? intel->ClearColor8888 : intel->ClearColor565;
+
+ ASSERT(irbColor);
+
+ intel->vtbl.meta_no_depth_write(intel);
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_color_mask(intel, GL_TRUE);
+ intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
+
+ /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
+ * drawing origin may not be correctly emitted.
+ */
+ intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, 0, /* depth clear val */
+ color, 0, 0, 0, 0); /* texcoords */
+
+ mask &= ~bufBit;
+ }
+ }
+
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+
+/**
+ * Copy the window contents named by dPriv to the rotated (or reflected)
+ * color buffer.
+ * srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
+ */
+void
+intelRotateWindow(struct intel_context *intel,
+ __DRIdrawablePrivate * dPriv, GLuint srcBuf)
+{
+ intelScreenPrivate *screen = intel->intelScreen;
+ drm_clip_rect_t fullRect;
+ struct intel_framebuffer *intel_fb;
+ struct intel_region *src;
+ const drm_clip_rect_t *clipRects;
+ int numClipRects;
+ int i;
+ GLenum format, type;
+
+ int xOrig, yOrig;
+ int origNumClipRects;
+ drm_clip_rect_t *origRects;
+
+ /*
+ * set up hardware state
+ */
+ intelFlush(&intel->ctx);
+
+ LOCK_HARDWARE(intel);
+
+ if (!intel->numClipRects) {
+ UNLOCK_HARDWARE(intel);
+ return;
+ }
+
+ intel->vtbl.install_meta_state(intel);
+
+ intel->vtbl.meta_no_depth_write(intel);
+ intel->vtbl.meta_no_stencil_write(intel);
+ intel->vtbl.meta_color_mask(intel, GL_FALSE);
+
+
+ /* save current drawing origin and cliprects (restored at end) */
+ xOrig = intel->drawX;
+ yOrig = intel->drawY;
+ origNumClipRects = intel->numClipRects;
+ origRects = intel->pClipRects;
+
+ /*
+ * set drawing origin, cliprects for full-screen access to rotated screen
+ */
+ fullRect.x1 = 0;
+ fullRect.y1 = 0;
+ fullRect.x2 = screen->rotatedWidth;
+ fullRect.y2 = screen->rotatedHeight;
+ intel->drawX = 0;
+ intel->drawY = 0;
+ intel->numClipRects = 1;
+ intel->pClipRects = &fullRect;
+
+ intel->vtbl.meta_draw_region(intel, screen->rotated_region, NULL); /* ? */
+
+ intel_fb = dPriv->driverPrivate;
+
+ if ((srcBuf == BUFFER_BIT_BACK_LEFT && !intel_fb->pf_active)) {
+ src = intel_get_rb_region(&intel_fb->Base, BUFFER_BACK_LEFT);
+ clipRects = dPriv->pBackClipRects;
+ numClipRects = dPriv->numBackClipRects;
+ }
+ else {
+ src = intel_get_rb_region(&intel_fb->Base, BUFFER_FRONT_LEFT);
+ clipRects = dPriv->pClipRects;
+ numClipRects = dPriv->numClipRects;
+ }
+
+ if (src->cpp == 4) {
+ format = GL_BGRA;
+ type = GL_UNSIGNED_BYTE;
+ }
+ else {
+ format = GL_BGR;
+ type = GL_UNSIGNED_SHORT_5_6_5_REV;
+ }
+
+ /* set the whole screen up as a texture to avoid alignment issues */
+ intel->vtbl.meta_tex_rect_source(intel,
+ src->buffer,
+ screen->width,
+ screen->height, src->pitch, format, type);
+
+ intel->vtbl.meta_texture_blend_replace(intel);
+
+ /*
+ * loop over the source window's cliprects
+ */
+ for (i = 0; i < numClipRects; i++) {
+ int srcX0 = clipRects[i].x1;
+ int srcY0 = clipRects[i].y1;
+ int srcX1 = clipRects[i].x2;
+ int srcY1 = clipRects[i].y2;
+ GLfloat verts[4][2], tex[4][2];
+ int j;
+
+ /* build vertices for four corners of clip rect */
+ verts[0][0] = srcX0;
+ verts[0][1] = srcY0;
+ verts[1][0] = srcX1;
+ verts[1][1] = srcY0;
+ verts[2][0] = srcX1;
+ verts[2][1] = srcY1;
+ verts[3][0] = srcX0;
+ verts[3][1] = srcY1;
+
+ /* .. and texcoords */
+ tex[0][0] = srcX0;
+ tex[0][1] = srcY0;
+ tex[1][0] = srcX1;
+ tex[1][1] = srcY0;
+ tex[2][0] = srcX1;
+ tex[2][1] = srcY1;
+ tex[3][0] = srcX0;
+ tex[3][1] = srcY1;
+
+ /* transform coords to rotated screen coords */
+
+ for (j = 0; j < 4; j++) {
+ matrix23TransformCoordf(&screen->rotMatrix,
+ &verts[j][0], &verts[j][1]);
+ }
+
+ /* draw polygon to map source image to dest region */
+ intel_meta_draw_poly(intel, 4, verts, 0, 0, tex);
+
+ } /* cliprect loop */
+
+ intel->vtbl.leave_meta_state(intel);
+ intel_batchbuffer_flush(intel->batch);
+
+ /* restore original drawing origin and cliprects */
+ intel->drawX = xOrig;
+ intel->drawY = yOrig;
+ intel->numClipRects = origNumClipRects;
+ intel->pClipRects = origRects;
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+/**
+ * Called by ctx->Driver.Clear.
+ */
+static void
+intelClear(GLcontext *ctx, GLbitfield mask)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask);
+ GLbitfield tri_mask = 0;
+ GLbitfield blit_mask = 0;
+ GLbitfield swrast_mask = 0;
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ GLuint i;
+
+ if (0)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ /* HW color buffers (front, back, aux, generic FBO, etc) */
+ if (colorMask == ~0) {
+ /* clear all R,G,B,A */
+ /* XXX FBO: need to check if colorbuffers are software RBOs! */
+ blit_mask |= (mask & BUFFER_BITS_COLOR);
+ }
+ else {
+ /* glColorMask in effect */
+ tri_mask |= (mask & BUFFER_BITS_COLOR);
+ }
+
+ /* HW stencil */
+ if (mask & BUFFER_BIT_STENCIL) {
+ const struct intel_region *stencilRegion
+ = intel_get_rb_region(fb, BUFFER_STENCIL);
+ if (stencilRegion) {
+ /* have hw stencil */
+ if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
+ /* not clearing all stencil bits, so use triangle clearing */
+ tri_mask |= BUFFER_BIT_STENCIL;
+ }
+ else {
+ /* clearing all stencil bits, use blitting */
+ blit_mask |= BUFFER_BIT_STENCIL;
+ }
+ }
+ }
+
+ /* HW depth */
+ if (mask & BUFFER_BIT_DEPTH) {
+ /* clear depth with whatever method is used for stencil (see above) */
+ if (tri_mask & BUFFER_BIT_STENCIL)
+ tri_mask |= BUFFER_BIT_DEPTH;
+ else
+ blit_mask |= BUFFER_BIT_DEPTH;
+ }
+
+ /* SW fallback clearing */
+ swrast_mask = mask & ~tri_mask & ~blit_mask;
+
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ GLuint bufBit = 1 << i;
+ if ((blit_mask | tri_mask) & bufBit) {
+ if (!fb->Attachment[i].Renderbuffer->ClassID) {
+ blit_mask &= ~bufBit;
+ tri_mask &= ~bufBit;
+ swrast_mask |= bufBit;
+ }
+ }
+ }
+
+
+ intelFlush(ctx); /* XXX intelClearWithBlit also does this */
+
+ if (blit_mask)
+ intelClearWithBlit(ctx, blit_mask);
+
+ if (tri_mask)
+ intelClearWithTris(intel, tri_mask);
+
+ if (swrast_mask)
+ _swrast_Clear(ctx, swrast_mask);
+}
+
+
+/* Emit wait for pending flips */
+void
+intel_wait_flips(struct intel_context *intel, GLuint batch_flags)
+{
+ struct intel_framebuffer *intel_fb =
+ (struct intel_framebuffer *) intel->ctx.DrawBuffer;
+ struct intel_renderbuffer *intel_rb =
+ intel_get_renderbuffer(&intel_fb->Base,
+ intel_fb->Base._ColorDrawBufferMask[0] ==
+ BUFFER_BIT_FRONT_LEFT ? BUFFER_FRONT_LEFT :
+ BUFFER_BACK_LEFT);
+
+ if (intel_fb->Base.Name == 0 && intel_rb->pf_pending == intel_fb->pf_seq) {
+ GLint pf_planes = intel_fb->pf_planes;
+ BATCH_LOCALS;
+
+ /* Wait for pending flips to take effect */
+ BEGIN_BATCH(2, batch_flags);
+ OUT_BATCH(pf_planes & 0x1 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP)
+ : 0);
+ OUT_BATCH(pf_planes & 0x2 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_B_FLIP)
+ : 0);
+ ADVANCE_BATCH();
+
+ intel_rb->pf_pending--;
+ }
+}
+
+
+/* Flip the front & back buffers
+ */
+static GLboolean
+intelPageFlip(const __DRIdrawablePrivate * dPriv)
+{
+ struct intel_context *intel;
+ int ret;
+ struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
+
+ if (INTEL_DEBUG & DEBUG_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ assert(dPriv);
+ assert(dPriv->driContextPriv);
+ assert(dPriv->driContextPriv->driverPrivate);
+
+ intel = (struct intel_context *) dPriv->driContextPriv->driverPrivate;
+
+ if (intel->intelScreen->drmMinor < 9)
+ return GL_FALSE;
+
+ intelFlush(&intel->ctx);
+
+ ret = 0;
+
+ LOCK_HARDWARE(intel);
+
+ if (dPriv->numClipRects && intel_fb->pf_active) {
+ drm_i915_flip_t flip;
+
+ flip.pipes = intel_fb->pf_planes;
+
+ ret = drmCommandWrite(intel->driFd, DRM_I915_FLIP, &flip, sizeof(flip));
+ }
+
+ UNLOCK_HARDWARE(intel);
+
+ if (ret || !intel_fb->pf_active)
+ return GL_FALSE;
+
+ if (!dPriv->numClipRects) {
+ usleep(10000); /* throttle invisible client 10ms */
+ }
+
+ intel_fb->pf_current_page = (intel->sarea->pf_current_page >>
+ (intel_fb->pf_planes & 0x2)) & 0x3;
+
+ if (dPriv->numClipRects != 0) {
+ intel_get_renderbuffer(&intel_fb->Base, BUFFER_FRONT_LEFT)->pf_pending =
+ intel_get_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT)->pf_pending =
+ ++intel_fb->pf_seq;
+ }
+
+ intel_flip_renderbuffers(intel_fb);
+ intel_draw_buffer(&intel->ctx, &intel_fb->Base);
+
+ return GL_TRUE;
+}
+
+#if 0
+void
+intelSwapBuffers(__DRIdrawablePrivate * dPriv)
+{
+ if (dPriv->driverPrivate) {
+ const struct gl_framebuffer *fb
+ = (struct gl_framebuffer *) dPriv->driverPrivate;
+ if (fb->Visual.doubleBufferMode) {
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx && ctx->DrawBuffer == fb) {
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering */
+ }
+ if (intel->doPageFlip) {
+ intelPageFlip(dPriv);
+ }
+ else {
+ intelCopyBuffer(dPriv);
+ }
+ }
+ }
+ else {
+ _mesa_problem(NULL,
+ "dPriv has no gl_framebuffer pointer in intelSwapBuffers");
+ }
+}
+#else
+/* Trunk version:
+ */
+
+static GLboolean
+intelScheduleSwap(__DRIdrawablePrivate * dPriv, GLboolean *missed_target)
+{
+ struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
+ unsigned int interval;
+ struct intel_context *intel =
+ intelScreenContext(dPriv->driScreenPriv->private);
+ const intelScreenPrivate *intelScreen = intel->intelScreen;
+ unsigned int target;
+ drm_i915_vblank_swap_t swap;
+ GLboolean ret;
+
+ if (!dPriv->vblFlags ||
+ (dPriv->vblFlags & VBLANK_FLAG_NO_IRQ) ||
+ intelScreen->current_rotation != 0 ||
+ intelScreen->drmMinor < (intel_fb->pf_active ? 9 : 6))
+ return GL_FALSE;
+
+ interval = driGetVBlankInterval(dPriv);
+
+ swap.seqtype = DRM_VBLANK_ABSOLUTE;
+
+ if (dPriv->vblFlags & VBLANK_FLAG_SYNC) {
+ swap.seqtype |= DRM_VBLANK_NEXTONMISS;
+ } else if (interval == 0) {
+ return GL_FALSE;
+ }
+
+ swap.drawable = dPriv->hHWDrawable;
+ target = swap.sequence = dPriv->vblSeq + interval;
+
+ if ( dPriv->vblFlags & VBLANK_FLAG_SECONDARY ) {
+ swap.seqtype |= DRM_VBLANK_SECONDARY;
+ }
+
+ LOCK_HARDWARE(intel);
+
+ intel_batchbuffer_flush(intel->batch);
+
+ if ( intel_fb->pf_active ) {
+ swap.seqtype |= DRM_VBLANK_FLIP;
+
+ intel_fb->pf_current_page = (((intel->sarea->pf_current_page >>
+ (intel_fb->pf_planes & 0x2)) & 0x3) + 1) %
+ intel_fb->pf_num_pages;
+ }
+
+ if (!drmCommandWriteRead(intel->driFd, DRM_I915_VBLANK_SWAP, &swap,
+ sizeof(swap))) {
+ dPriv->vblSeq = swap.sequence;
+ swap.sequence -= target;
+ *missed_target = swap.sequence > 0 && swap.sequence <= (1 << 23);
+
+ intel_get_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT)->vbl_pending =
+ intel_get_renderbuffer(&intel_fb->Base,
+ BUFFER_FRONT_LEFT)->vbl_pending =
+ dPriv->vblSeq;
+
+ if (swap.seqtype & DRM_VBLANK_FLIP) {
+ intel_flip_renderbuffers(intel_fb);
+ intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
+ }
+
+ ret = GL_TRUE;
+ } else {
+ if (swap.seqtype & DRM_VBLANK_FLIP) {
+ intel_fb->pf_current_page = ((intel->sarea->pf_current_page >>
+ (intel_fb->pf_planes & 0x2)) & 0x3) %
+ intel_fb->pf_num_pages;
+ }
+
+ ret = GL_FALSE;
+ }
+
+ UNLOCK_HARDWARE(intel);
+
+ return ret;
+}
+
+void
+intelSwapBuffers(__DRIdrawablePrivate * dPriv)
+{
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel;
+
+ if (ctx == NULL)
+ return;
+
+ intel = intel_context(ctx);
+
+ if (ctx->Visual.doubleBufferMode) {
+ intelScreenPrivate *screen = intel->intelScreen;
+ GLboolean missed_target;
+ struct intel_framebuffer *intel_fb = dPriv->driverPrivate;
+ int64_t ust;
+
+      _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
+
+ if (screen->current_rotation != 0 ||
+ !intelScheduleSwap(dPriv, &missed_target)) {
+ driWaitForVBlank(dPriv, &missed_target);
+
+ if (screen->current_rotation != 0 || !intelPageFlip(dPriv)) {
+ intelCopyBuffer(dPriv, NULL);
+ }
+
+ if (screen->current_rotation != 0) {
+ intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
+ }
+ }
+
+ intel_fb->swap_count++;
+ (*dri_interface->getUST) (&ust);
+ if (missed_target) {
+ intel_fb->swap_missed_count++;
+ intel_fb->swap_missed_ust = ust - intel_fb->swap_ust;
+ }
+
+ intel_fb->swap_ust = ust;
+ }
+ }
+ else {
+ /* XXX this shouldn't be an error but we can't handle it for now */
+ fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
+ }
+}
+#endif
+
+void
+intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
+{
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
+ struct intel_context *intel =
+ (struct intel_context *) dPriv->driContextPriv->driverPrivate;
+ GLcontext *ctx = &intel->ctx;
+
+ if (ctx->Visual.doubleBufferMode) {
+ drm_clip_rect_t rect;
+ rect.x1 = x + dPriv->x;
+ rect.y1 = (dPriv->h - y - h) + dPriv->y;
+ rect.x2 = rect.x1 + w;
+ rect.y2 = rect.y1 + h;
+      _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
+ intelCopyBuffer(dPriv, &rect);
+ }
+ }
+ else {
+ /* XXX this shouldn't be an error but we can't handle it for now */
+ fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
+ }
+}
+
+
+/**
+ * Update the hardware state for drawing into a window or framebuffer object.
+ *
+ * Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
+ * places within the driver.
+ *
+ * Basically, this needs to be called any time the current framebuffer
+ * changes, the renderbuffers change, or we need to draw into different
+ * color buffers.
+ */
+void
+intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_region *colorRegion, *depthRegion = NULL;
+ struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
+ int front = 0; /* drawing to front color buffer? */
+
+ if (!fb) {
+ /* this can happen during the initial context initialization */
+ return;
+ }
+
+   /* Do this here, not in core Mesa, since this function is called from
+ * many places within the driver.
+ */
+ if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
+ /* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
+ _mesa_update_framebuffer(ctx);
+ /* this updates the DrawBuffer's Width/Height if it's a FBO */
+ _mesa_update_draw_buffer_bounds(ctx);
+ }
+
+ if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
+      /* this may occur when we're called by glBindFramebufferEXT() during
+ * the process of someone setting up renderbuffers, etc.
+ */
+ /*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
+ return;
+ }
+
+ if (fb->Name)
+ intel_validate_paired_depth_stencil(ctx, fb);
+
+ /*
+ * How many color buffers are we drawing into?
+ */
+ if (fb->_NumColorDrawBuffers[0] != 1
+#if 0
+ /* XXX FBO temporary - always use software rendering */
+ || 1
+#endif
+ ) {
+      /* writing to zero or multiple color buffers */
+ /*_mesa_debug(ctx, "Software rendering\n");*/
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ front = 1; /* might not have back color buffer */
+ }
+ else {
+ /* draw to exactly one color buffer */
+ /*_mesa_debug(ctx, "Hardware rendering\n");*/
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ if (fb->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
+ front = 1;
+ }
+ }
+
+ /*
+ * Get the intel_renderbuffer for the colorbuffer we're drawing into.
+ * And set up cliprects.
+ */
+ if (fb->Name == 0) {
+ /* drawing to window system buffer */
+ if (front) {
+ intelSetFrontClipRects(intel);
+ colorRegion = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
+ }
+ else {
+ intelSetBackClipRects(intel);
+ colorRegion = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
+ }
+ }
+ else {
+ /* drawing to user-created FBO */
+ struct intel_renderbuffer *irb;
+ intelSetRenderbufferClipRects(intel);
+ irb = intel_renderbuffer(fb->_ColorDrawBuffers[0][0]);
+ colorRegion = (irb && irb->region) ? irb->region : NULL;
+ }
+
+ /* Update culling direction which changes depending on the
+ * orientation of the buffer:
+ */
+ if (ctx->Driver.FrontFace)
+ ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
+ else
+ ctx->NewState |= _NEW_POLYGON;
+
+ if (!colorRegion) {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
+ }
+
+ /***
+ *** Get depth buffer region and check if we need a software fallback.
+ *** Note that the depth buffer is usually a DEPTH_STENCIL buffer.
+ ***/
+ if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
+ irbDepth = intel_renderbuffer(fb->_DepthBuffer->Wrapped);
+ if (irbDepth && irbDepth->region) {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = irbDepth->region;
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
+ depthRegion = NULL;
+ }
+ }
+ else {
+ /* not using depth buffer */
+ FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
+ depthRegion = NULL;
+ }
+
+ /***
+ *** Stencil buffer
+ *** This can only be hardware accelerated if we're using a
+ *** combined DEPTH_STENCIL buffer (for now anyway).
+ ***/
+ if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
+ irbStencil = intel_renderbuffer(fb->_StencilBuffer->Wrapped);
+ if (irbStencil && irbStencil->region) {
+ ASSERT(irbStencil->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ /* need to re-compute stencil hw state */
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ if (!depthRegion)
+ depthRegion = irbStencil->region;
+ }
+ else {
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
+ }
+ }
+ else {
+ /* XXX FBO: instead of FALSE, pass ctx->Stencil.Enabled ??? */
+ FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
+ /* need to re-compute stencil hw state */
+ ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
+ }
+
+ /*
+ * Update depth test state
+ */
+ if (ctx->Depth.Test && fb->Visual.depthBits > 0) {
+ ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_TRUE);
+ }
+ else {
+ ctx->Driver.Enable(ctx, GL_DEPTH_TEST, GL_FALSE);
+ }
+
+ /**
+ ** Release old regions, reference new regions
+ **/
+#if 0 /* XXX FBO: this seems to be redundant with i915_state_draw_region() */
+ if (intel->draw_region != colorRegion) {
+ intel_region_release(&intel->draw_region);
+ intel_region_reference(&intel->draw_region, colorRegion);
+ }
+ if (intel->intelScreen->depth_region != depthRegion) {
+ intel_region_release(&intel->intelScreen->depth_region);
+ intel_region_reference(&intel->intelScreen->depth_region, depthRegion);
+ }
+#endif
+
+ intel->vtbl.set_draw_region(intel, colorRegion, depthRegion);
+
+ /* update viewport since it depends on window size */
+ ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
+ ctx->Viewport.Width, ctx->Viewport.Height);
+
+ /* Update hardware scissor */
+ ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
+ ctx->Scissor.Width, ctx->Scissor.Height);
+}
+
+
+static void
+intelDrawBuffer(GLcontext * ctx, GLenum mode)
+{
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+}
+
+
+static void
+intelReadBuffer(GLcontext * ctx, GLenum mode)
+{
+ if (ctx->ReadBuffer == ctx->DrawBuffer) {
+ /* This will update FBO completeness status.
+ * A framebuffer will be incomplete if the GL_READ_BUFFER setting
+ * refers to a missing renderbuffer. Calling glReadBuffer can set
+    * that straight and make the drawing buffer complete.
+ */
+ intel_draw_buffer(ctx, ctx->DrawBuffer);
+ }
+ /* Generally, functions which read pixels (glReadPixels, glCopyPixels, etc)
+ * reference ctx->ReadBuffer and do appropriate state checks.
+ */
+}
+
+
+void
+intelInitBufferFuncs(struct dd_function_table *functions)
+{
+ functions->Clear = intelClear;
+ functions->DrawBuffer = intelDrawBuffer;
+ functions->ReadBuffer = intelReadBuffer;
+}
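+
+/* A minimal sketch of how these hooks are typically consumed; the wrapper
+ * function below is illustrative only (it assumes driverfuncs.h for
+ * _mesa_init_driver_functions) and is not part of this file:
+ */
+#if 0
+static void exampleInitDriverFunctions(struct dd_function_table *functions)
+{
+   _mesa_init_driver_functions(functions);  /* core Mesa defaults */
+   intelInitBufferFuncs(functions);         /* then the buffer hooks above */
+}
+#endif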
diff --git a/src/mesa/drivers/dri/i915/intel_buffers.h b/src/mesa/drivers/dri/intel/intel_buffers.h
index 3b686cb5c1..3b686cb5c1 100644
--- a/src/mesa/drivers/dri/i915/intel_buffers.h
+++ b/src/mesa/drivers/dri/intel/intel_buffers.h
diff --git a/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
new file mode 100644
index 0000000000..32d9886091
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.c
@@ -0,0 +1,833 @@
+/**************************************************************************
+ *
+ * Copyright © 2007 Red Hat Inc.
+ * Copyright © 2007 Intel Corporation
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
+ * Eric Anholt <eric@anholt.net>
+ * Dave Airlie <airlied@linux.ie>
+ */
+
+#include <xf86drm.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include "glthread.h"
+#include "errno.h"
+#include "mtypes.h"
+#include "dri_bufmgr.h"
+#include "string.h"
+#include "imports.h"
+
+#include "i915_drm.h"
+
+#include "intel_bufmgr_ttm.h"
+
+#define BUFMGR_DEBUG 0
+
+struct intel_reloc_info
+{
+ GLuint type;
+ GLuint reloc;
+ GLuint delta; /* not needed? */
+ GLuint index;
+ drm_handle_t handle;
+};
+
+struct intel_bo_node
+{
+ drmMMListHead head;
+ drmBO *buf;
+ struct drm_i915_op_arg bo_arg;
+ unsigned long arg0;
+ unsigned long arg1;
+ void (*destroy)(void *);
+ void *priv;
+};
+
+struct intel_bo_reloc_list
+{
+ drmMMListHead head;
+ drmBO buf;
+ uint32_t *relocs;
+};
+
+struct intel_bo_reloc_node
+{
+ drmMMListHead head;
+ drm_handle_t handle;
+ uint32_t nr_reloc_types;
+ struct intel_bo_reloc_list type_list;
+};
+
+struct intel_bo_list {
+ unsigned numCurrent;
+ drmMMListHead list;
+ void (*destroy)(void *node);
+};
+
+typedef struct _dri_bufmgr_ttm {
+ dri_bufmgr bufmgr;
+
+ int fd;
+ _glthread_Mutex mutex;
+ unsigned int fence_type;
+ unsigned int fence_type_flush;
+
+ uint32_t max_relocs;
+ /** ttm relocation list */
+ struct intel_bo_list list;
+ struct intel_bo_list reloc_list;
+
+} dri_bufmgr_ttm;
+
+typedef struct _dri_bo_ttm {
+ dri_bo bo;
+
+ int refcount; /* Protected by bufmgr->mutex */
+ drmBO drm_bo;
+ const char *name;
+} dri_bo_ttm;
+
+typedef struct _dri_fence_ttm
+{
+ dri_fence fence;
+
+ int refcount; /* Protected by bufmgr->mutex */
+ const char *name;
+ drmFence drm_fence;
+} dri_fence_ttm;
+
+
+static void intel_bo_free_list(struct intel_bo_list *list)
+{
+ struct intel_bo_node *node;
+ drmMMListHead *l;
+
+ l = list->list.next;
+ while(l != &list->list) {
+ DRMLISTDEL(l);
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+ list->destroy(node);
+ l = list->list.next;
+ list->numCurrent--;
+ }
+}
+
+static void generic_destroy(void *nodep)
+{
+ free(nodep);
+}
+
+static int intel_create_bo_list(int numTarget, struct intel_bo_list *list, void (*destroy)(void *))
+{
+ DRMINITLISTHEAD(&list->list);
+ list->numCurrent = 0;
+ if (destroy)
+ list->destroy = destroy;
+ else
+ list->destroy = generic_destroy;
+ return 0;
+}
+
+
+static struct drm_i915_op_arg *
+intel_setup_validate_list(int fd, struct intel_bo_list *list, struct intel_bo_list *reloc_list, GLuint *count_p)
+{
+ struct intel_bo_node *node;
+ struct intel_bo_reloc_node *rl_node;
+ drmMMListHead *l, *rl;
+ struct drm_i915_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ uint64_t *prevNext = NULL;
+ GLuint count = 0;
+
+ first = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+
+ arg = &node->bo_arg;
+ req = &arg->d.req;
+
+ if (!first)
+ first = arg;
+
+ if (prevNext)
+ *prevNext = (unsigned long) arg;
+
+ memset(arg, 0, sizeof(*arg));
+ prevNext = &arg->next;
+ req->bo_req.handle = node->buf->handle;
+ req->op = drm_bo_validate;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.hint = 0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.fence_class = 0; /* Backwards compat. */
+ arg->reloc_handle = 0;
+
+ for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
+ rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+
+ if (rl_node->handle == node->buf->handle) {
+ arg->reloc_handle = rl_node->type_list.buf.handle;
+ }
+ }
+ count++;
+ }
+
+ if (!first)
+ return 0;
+
+ *count_p = count;
+ return first;
+}
+
+static void intel_free_validate_list(int fd, struct intel_bo_list *list)
+{
+ struct intel_bo_node *node;
+ drmMMListHead *l;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+
+ if (node->destroy)
+ (*node->destroy)(node->priv);
+
+ }
+}
+
+static void intel_free_reloc_list(int fd, struct intel_bo_list *reloc_list)
+{
+ struct intel_bo_reloc_node *reloc_node;
+ drmMMListHead *rl, *tmp;
+
+ for (rl = reloc_list->list.next, tmp = rl->next; rl != &reloc_list->list; rl = tmp, tmp = rl->next) {
+ reloc_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+
+ DRMLISTDEL(rl);
+
+ if (reloc_node->nr_reloc_types > 1) {
+
+ /* TODO */
+ }
+
+ drmBOUnmap(fd, &reloc_node->type_list.buf);
+ drmBOUnreference(fd, &reloc_node->type_list.buf);
+ free(reloc_node);
+ }
+}
+
+static int intel_add_validate_buffer(struct intel_bo_list *list, dri_bo *buf, unsigned flags,
+ unsigned mask, int *itemLoc, void (*destroy_cb)(void *))
+{
+ struct intel_bo_node *node, *cur;
+ drmMMListHead *l;
+ int count = 0;
+ int ret = 0;
+ drmBO *buf_bo = &((dri_bo_ttm *)buf)->drm_bo;
+ cur = NULL;
+
+ for (l = list->list.next; l != &list->list; l = l->next) {
+ node = DRMLISTENTRY(struct intel_bo_node, l, head);
+ if (node->buf->handle == buf_bo->handle) {
+ cur = node;
+ break;
+ }
+ count++;
+ }
+
+ if (!cur) {
+ cur = drmMalloc(sizeof(*cur));
+ if (!cur) {
+ return -ENOMEM;
+ }
+ cur->buf = buf_bo;
+ cur->priv = buf;
+ cur->arg0 = flags;
+ cur->arg1 = mask;
+ cur->destroy = destroy_cb;
+ ret = 1;
+
+ DRMLISTADDTAIL(&cur->head, &list->list);
+
+ } else {
+ unsigned memMask = (cur->arg1 | mask) & DRM_BO_MASK_MEM;
+ unsigned memFlags = cur->arg0 & flags & memMask;
+
+ if (!memFlags) {
+ return -EINVAL;
+ }
+ if (mask & cur->arg1 & ~DRM_BO_MASK_MEM & (cur->arg0 ^ flags)) {
+ return -EINVAL;
+ }
+ cur->arg1 |= mask;
+ cur->arg0 = memFlags | ((cur->arg0 | flags) &
+ cur->arg1 & ~DRM_BO_MASK_MEM);
+ }
+ *itemLoc = count;
+ return ret;
+}
+
+
+#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * sizeof(uint32_t))
+
+static int intel_create_new_reloc_type_list(int fd, struct intel_bo_reloc_list *cur_type, int max_relocs)
+{
+ int ret;
+
+ /* should allocate a drmBO here */
+ ret = drmBOCreate(fd, RELOC_BUF_SIZE(max_relocs), 0,
+ NULL,
+ DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CACHED,
+ 0, &cur_type->buf);
+ if (ret)
+ return ret;
+
+ ret = drmBOMap(fd, &cur_type->buf, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0, (void **)&cur_type->relocs);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+
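+/* Layout of a relocation buffer as filled in below (summary of the code, in
+ * terms of the i915_drm.h constants):
+ *
+ *   relocs[0]: relocation count (low 16 bits) | relocation type (high 16 bits)
+ *   relocs[1]: buffer handle of the next relocation buffer, or 0 if none
+ *   relocs[I915_RELOC_HEADER + n * I915_RELOC0_STRIDE + 0]: batch offset of reloc n
+ *   relocs[I915_RELOC_HEADER + n * I915_RELOC0_STRIDE + 1]: delta to add
+ *   relocs[I915_RELOC_HEADER + n * I915_RELOC0_STRIDE + 2]: validate list index
+ */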
+static int intel_add_validate_reloc(int fd, struct intel_bo_list *reloc_list, struct intel_reloc_info *reloc_info, uint32_t max_relocs)
+{
+ struct intel_bo_reloc_node *rl_node, *cur;
+ drmMMListHead *rl, *l;
+ int ret = 0;
+ uint32_t *reloc_start;
+ int num_relocs;
+ struct intel_bo_reloc_list *cur_type;
+
+ cur = NULL;
+
+ for (rl = reloc_list->list.next; rl != &reloc_list->list; rl = rl->next) {
+ rl_node = DRMLISTENTRY(struct intel_bo_reloc_node, rl, head);
+ if (rl_node->handle == reloc_info->handle) {
+ cur = rl_node;
+ break;
+ }
+ }
+
+ if (!cur) {
+
+ cur = malloc(sizeof(*cur));
+ if (!cur)
+ return -ENOMEM;
+
+ cur->nr_reloc_types = 1;
+ cur->handle = reloc_info->handle;
+ cur_type = &cur->type_list;
+
+ DRMINITLISTHEAD(&cur->type_list.head);
+ ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
+ if (ret) {
+ return -1;
+ }
+ DRMLISTADDTAIL(&cur->head, &reloc_list->list);
+
+ cur_type->relocs[0] = 0 | (reloc_info->type << 16);
+ cur_type->relocs[1] = 0; // next reloc buffer handle is 0
+
+ } else {
+ int found = 0;
+ if ((cur->type_list.relocs[0] >> 16) == reloc_info->type) {
+ cur_type = &cur->type_list;
+ found = 1;
+ } else {
+	 for (l = cur->type_list.head.next; l != &cur->type_list.head; l = l->next) {
+	    cur_type = DRMLISTENTRY(struct intel_bo_reloc_list, l, head);
+	    if (((cur_type->relocs[0] >> 16) & 0xffff) == reloc_info->type) {
+	       found = 1;
+	       break;
+	    }
+	 }
+ }
+
+ /* didn't find the relocation type */
+ if (!found) {
+ cur_type = malloc(sizeof(*cur_type));
+ if (!cur_type) {
+ return -ENOMEM;
+ }
+
+ ret = intel_create_new_reloc_type_list(fd, cur_type, max_relocs);
+ DRMLISTADDTAIL(&cur_type->head, &cur->type_list.head);
+
+ cur_type->relocs[0] = (reloc_info->type << 16);
+ cur_type->relocs[1] = 0;
+
+ cur->nr_reloc_types++;
+ }
+ }
+
+ reloc_start = cur_type->relocs;
+
+ num_relocs = (reloc_start[0] & 0xffff);
+
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER] = reloc_info->reloc;
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+1] = reloc_info->delta;
+ reloc_start[num_relocs*I915_RELOC0_STRIDE + I915_RELOC_HEADER+2] = reloc_info->index;
+ reloc_start[0]++;
+ if (((reloc_start[0] & 0xffff)) > (max_relocs)) {
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+
+#if 0
+int
+driFenceSignaled(DriFenceObject * fence, unsigned type)
+{
+ int signaled;
+ int ret;
+
+ if (fence == NULL)
+ return GL_TRUE;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = drmFenceSignaled(bufmgr_ttm->fd, &fence->fence, type, &signaled);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ BM_CKFATAL(ret);
+ return signaled;
+}
+#endif
+
+static dri_bo *
+dri_ttm_alloc(dri_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment,
+ unsigned int location_mask)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ unsigned int pageSize = getpagesize();
+ int ret;
+ unsigned int flags, hint;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
+
+    /* The only thing the mask argument does for us is determine which pool
+     * (TTM or local) the buffer is allocated into, so just pass all of the
+     * allocation class flags.
+     */
+ flags = location_mask | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_EXE;
+ /* No hints we want to use. */
+ hint = 0;
+
+ ret = drmBOCreate(ttm_bufmgr->fd, size, alignment / pageSize,
+ NULL, flags, hint, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_create: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return &ttm_buf->bo;
+}
+
+/* Our TTM backend doesn't allow creation of static buffers, as that requires
+ * privilege for the non-fake case, and the lock in the fake case where we were
+ * working around the X Server not creating buffers and passing handles to us.
+ */
+static dri_bo *
+dri_ttm_alloc_static(dri_bufmgr *bufmgr, const char *name,
+ unsigned long offset, unsigned long size, void *virtual,
+ unsigned int location_mask)
+{
+ return NULL;
+}
+
+/** Returns a dri_bo wrapping the given buffer object handle.
+ *
+ * This can be used when one application needs to pass a buffer object
+ * to another.
+ */
+dri_bo *
+intel_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
+ unsigned int handle)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_bo_ttm *ttm_buf;
+ int ret;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_buf = malloc(sizeof(*ttm_buf));
+ if (!ttm_buf)
+ return NULL;
+
+ ret = drmBOReference(ttm_bufmgr->fd, handle, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ free(ttm_buf);
+ return NULL;
+ }
+ ttm_buf->bo.size = ttm_buf->drm_bo.size;
+ ttm_buf->bo.offset = ttm_buf->drm_bo.offset;
+ ttm_buf->bo.virtual = NULL;
+ ttm_buf->bo.bufmgr = bufmgr;
+ ttm_buf->name = name;
+ ttm_buf->refcount = 1;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_create_from_handle: %p %08x (%s)\n", &ttm_buf->bo, handle,
+ ttm_buf->name);
+#endif
+
+ return &ttm_buf->bo;
+}
+
+static void
+dri_ttm_bo_reference(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ttm_buf->refcount++;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static void
+dri_ttm_bo_unreference(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ if (!buf)
+ return;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--ttm_buf->refcount == 0) {
+ int ret;
+
+ ret = drmBOUnreference(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+ if (ret != 0) {
+ fprintf(stderr, "drmBOUnreference failed (%s): %s\n", ttm_buf->name,
+ strerror(-ret));
+ }
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_unreference final: %p (%s)\n",
+ &ttm_buf->bo, ttm_buf->name);
+#endif
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(buf);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static int
+dri_ttm_bo_map(dri_bo *buf, GLboolean write_enable)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+ unsigned int flags;
+
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+
+ flags = DRM_BO_FLAG_READ;
+ if (write_enable)
+ flags |= DRM_BO_FLAG_WRITE;
+
+ assert(buf->virtual == NULL);
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_map: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return drmBOMap(bufmgr_ttm->fd, &ttm_buf->drm_bo, flags, 0, &buf->virtual);
+}
+
+static int
+dri_ttm_bo_unmap(dri_bo *buf)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)buf;
+
+ if (buf == NULL)
+ return 0;
+
+ bufmgr_ttm = (dri_bufmgr_ttm *)buf->bufmgr;
+
+ assert(buf->virtual != NULL);
+
+ buf->virtual = NULL;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "bo_unmap: %p (%s)\n", &ttm_buf->bo, ttm_buf->name);
+#endif
+
+ return drmBOUnmap(bufmgr_ttm->fd, &ttm_buf->drm_bo);
+}
+
+/* Returns a dri_fence wrapping the fence described by the given
+ * drm_fence_arg_t, as filled in by the kernel.
+ */
+dri_fence *
+intel_ttm_fence_create_from_arg(dri_bufmgr *bufmgr, const char *name,
+ drm_fence_arg_t *arg)
+{
+ dri_bufmgr_ttm *ttm_bufmgr;
+ dri_fence_ttm *ttm_fence;
+
+ ttm_bufmgr = (dri_bufmgr_ttm *)bufmgr;
+
+ ttm_fence = malloc(sizeof(*ttm_fence));
+ if (!ttm_fence)
+ return NULL;
+
+ ttm_fence->drm_fence.handle = arg->handle;
+ ttm_fence->drm_fence.fence_class = arg->fence_class;
+ ttm_fence->drm_fence.type = arg->type;
+ ttm_fence->drm_fence.flags = arg->flags;
+ ttm_fence->drm_fence.signaled = 0;
+ ttm_fence->drm_fence.sequence = arg->sequence;
+
+ ttm_fence->fence.bufmgr = bufmgr;
+ ttm_fence->name = name;
+ ttm_fence->refcount = 1;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_create_from_handle: %p (%s)\n", &ttm_fence->fence,
+ ttm_fence->name);
+#endif
+
+ return &ttm_fence->fence;
+}
+
+
+static void
+dri_ttm_fence_reference(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ++fence_ttm->refcount;
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_reference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+}
+
+static void
+dri_ttm_fence_unreference(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+
+ if (!fence)
+ return;
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_unreference: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ if (--fence_ttm->refcount == 0) {
+ int ret;
+
+ ret = drmFenceUnreference(bufmgr_ttm->fd, &fence_ttm->drm_fence);
+ if (ret != 0) {
+ fprintf(stderr, "drmFenceUnreference failed (%s): %s\n",
+ fence_ttm->name, strerror(-ret));
+ }
+
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ free(fence);
+ return;
+ }
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+}
+
+static void
+dri_ttm_fence_wait(dri_fence *fence)
+{
+ dri_fence_ttm *fence_ttm = (dri_fence_ttm *)fence;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)fence->bufmgr;
+ int ret;
+
+ _glthread_LOCK_MUTEX(bufmgr_ttm->mutex);
+ ret = drmFenceWait(bufmgr_ttm->fd, 0, &fence_ttm->drm_fence, 0);
+ _glthread_UNLOCK_MUTEX(bufmgr_ttm->mutex);
+ if (ret != 0) {
+ _mesa_printf("%s:%d: Error %d waiting for fence %s.\n",
+ __FILE__, __LINE__, ret, fence_ttm->name);
+ abort();
+ }
+
+#if BUFMGR_DEBUG
+ fprintf(stderr, "fence_wait: %p (%s)\n", &fence_ttm->fence,
+ fence_ttm->name);
+#endif
+}
+
+static void
+dri_bufmgr_ttm_destroy(dri_bufmgr *bufmgr)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)bufmgr;
+
+ intel_bo_free_list(&bufmgr_ttm->list);
+ intel_bo_free_list(&bufmgr_ttm->reloc_list);
+
+ _glthread_DESTROY_MUTEX(bufmgr_ttm->mutex);
+ free(bufmgr);
+}
+
+
+static void intel_dribo_destroy_callback(void *priv)
+{
+ dri_bo *dribo = priv;
+
+ if (dribo) {
+ dri_bo_unreference(dribo);
+ }
+}
+
+static void
+dri_ttm_emit_reloc(dri_bo *batch_buf, GLuint flags, GLuint delta, GLuint offset,
+ dri_bo *relocatee)
+{
+ dri_bo_ttm *ttm_buf = (dri_bo_ttm *)batch_buf;
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ int newItem;
+ struct intel_reloc_info reloc;
+ int mask;
+ int ret;
+
+ mask = DRM_BO_MASK_MEM;
+ mask |= flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE);
+
+ ret = intel_add_validate_buffer(&bufmgr_ttm->list, relocatee, flags, mask, &newItem, intel_dribo_destroy_callback);
+ if (ret < 0)
+ return;
+
+ if (ret == 1) {
+ dri_bo_reference(relocatee);
+ }
+
+ reloc.type = I915_RELOC_TYPE_0;
+ reloc.reloc = offset;
+ reloc.delta = delta;
+ reloc.index = newItem;
+ reloc.handle = ttm_buf->drm_bo.handle;
+
+ intel_add_validate_reloc(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list, &reloc, bufmgr_ttm->max_relocs);
+ return;
+}
+
+
+static void *
+dri_ttm_process_reloc(dri_bo *batch_buf, GLuint *count)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+ void *ptr;
+ int itemLoc;
+
+ dri_bo_unmap(batch_buf);
+
+ intel_add_validate_buffer(&bufmgr_ttm->list, batch_buf, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_EXE,
+ DRM_BO_MASK_MEM | DRM_BO_FLAG_EXE, &itemLoc, NULL);
+
+ ptr = intel_setup_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list, &bufmgr_ttm->reloc_list, count);
+
+ return ptr;
+}
+
+static void
+dri_ttm_post_submit(dri_bo *batch_buf, dri_fence **last_fence)
+{
+ dri_bufmgr_ttm *bufmgr_ttm = (dri_bufmgr_ttm *)batch_buf->bufmgr;
+
+ intel_free_validate_list(bufmgr_ttm->fd, &bufmgr_ttm->list);
+ intel_free_reloc_list(bufmgr_ttm->fd, &bufmgr_ttm->reloc_list);
+
+ intel_bo_free_list(&bufmgr_ttm->list);
+}
+
+/**
+ * Initializes the TTM buffer manager, which uses the kernel to allocate,
+ * map, and manage buffer objects.
+ *
+ * \param fd File descriptor of the opened DRM device.
+ * \param fence_type Driver-specific fence type used for fences with no flush.
+ * \param fence_type_flush Driver-specific fence type used for fences with a
+ * flush.
+ */
+dri_bufmgr *
+intel_bufmgr_ttm_init(int fd, unsigned int fence_type,
+ unsigned int fence_type_flush, int batch_size)
+{
+ dri_bufmgr_ttm *bufmgr_ttm;
+
+ bufmgr_ttm = malloc(sizeof(*bufmgr_ttm));
+ bufmgr_ttm->fd = fd;
+ bufmgr_ttm->fence_type = fence_type;
+ bufmgr_ttm->fence_type_flush = fence_type_flush;
+ _glthread_INIT_MUTEX(bufmgr_ttm->mutex);
+
+    /* let's go with one relocation for every four dwords - purely heuristic */
+ bufmgr_ttm->max_relocs = batch_size / sizeof(uint32_t) / 4;
+
+ intel_create_bo_list(10, &bufmgr_ttm->list, NULL);
+ intel_create_bo_list(1, &bufmgr_ttm->reloc_list, NULL);
+
+ bufmgr_ttm->bufmgr.bo_alloc = dri_ttm_alloc;
+ bufmgr_ttm->bufmgr.bo_alloc_static = dri_ttm_alloc_static;
+ bufmgr_ttm->bufmgr.bo_reference = dri_ttm_bo_reference;
+ bufmgr_ttm->bufmgr.bo_unreference = dri_ttm_bo_unreference;
+ bufmgr_ttm->bufmgr.bo_map = dri_ttm_bo_map;
+ bufmgr_ttm->bufmgr.bo_unmap = dri_ttm_bo_unmap;
+ bufmgr_ttm->bufmgr.fence_reference = dri_ttm_fence_reference;
+ bufmgr_ttm->bufmgr.fence_unreference = dri_ttm_fence_unreference;
+ bufmgr_ttm->bufmgr.fence_wait = dri_ttm_fence_wait;
+ bufmgr_ttm->bufmgr.destroy = dri_bufmgr_ttm_destroy;
+ bufmgr_ttm->bufmgr.emit_reloc = dri_ttm_emit_reloc;
+ bufmgr_ttm->bufmgr.process_relocs = dri_ttm_process_reloc;
+ bufmgr_ttm->bufmgr.post_submit = dri_ttm_post_submit;
+ return &bufmgr_ttm->bufmgr;
+}
+
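+/* A minimal usage sketch, calling through the vtable set up above. It is
+ * illustrative only: the DRM_FENCE_TYPE_EXE fence type and the 16k batch
+ * size are assumptions, not values taken from this driver.
+ */
+#if 0
+static void example_bufmgr_usage(int fd)
+{
+    dri_bufmgr *bufmgr;
+    dri_bo *bo;
+
+    bufmgr = intel_bufmgr_ttm_init(fd, DRM_FENCE_TYPE_EXE,
+				   DRM_FENCE_TYPE_EXE, 16 * 1024);
+
+    /* Allocate a page-aligned buffer in TT memory, map it and clear it. */
+    bo = bufmgr->bo_alloc(bufmgr, "example", 4096, 4096, DRM_BO_FLAG_MEM_TT);
+    if (bo != NULL) {
+	if (bufmgr->bo_map(bo, GL_TRUE) == 0) {
+	    memset(bo->virtual, 0, bo->size);
+	    bufmgr->bo_unmap(bo);
+	}
+	bufmgr->bo_unreference(bo);
+    }
+
+    bufmgr->destroy(bufmgr);
+}
+#endif
+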
diff --git a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h
index 0738839cef..0738839cef 100644
--- a/src/mesa/drivers/dri/i915/intel_bufmgr_ttm.h
+++ b/src/mesa/drivers/dri/intel/intel_bufmgr_ttm.h
diff --git a/src/mesa/drivers/dri/intel/intel_depthstencil.c b/src/mesa/drivers/dri/intel/intel_depthstencil.c
new file mode 100644
index 0000000000..d269a85a3c
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_depthstencil.c
@@ -0,0 +1,282 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "imports.h"
+#include "context.h"
+#include "depthstencil.h"
+#include "fbobject.h"
+#include "framebuffer.h"
+#include "hash.h"
+#include "mtypes.h"
+#include "renderbuffer.h"
+
+#include "intel_context.h"
+#include "intel_fbo.h"
+#include "intel_depthstencil.h"
+#include "intel_regions.h"
+
+
+/**
+ * The GL_EXT_framebuffer_object allows the user to create their own
+ * framebuffer objects consisting of color renderbuffers (0 or more),
+ * depth renderbuffers (0 or 1) and stencil renderbuffers (0 or 1).
+ *
+ * The spec considers depth and stencil renderbuffers to be totally independent
+ * buffers. In reality, most graphics hardware today uses a combined
+ * depth+stencil buffer (one 32-bit pixel = 24 bits of Z + 8 bits of stencil).
+ *
+ * This causes difficulty because the user may create some number of depth
+ * renderbuffers and some number of stencil renderbuffers and bind them
+ * together in framebuffers in any combination.
+ *
+ * This code manages all that.
+ *
+ * 1. Depth renderbuffers are always allocated in hardware as 32bpp
+ * GL_DEPTH24_STENCIL8 buffers.
+ *
+ * 2. Stencil renderbuffers are initially allocated in software as 8bpp
+ * GL_STENCIL_INDEX8 buffers.
+ *
+ * 3. Depth and Stencil renderbuffers use the PairedStencil and PairedDepth
+ * fields (respectively) to indicate if the buffer's currently paired
+ * with another stencil or depth buffer (respectively).
+ *
+ * 4. When a depth and stencil buffer are initially both attached to the
+ * current framebuffer, we merge the stencil buffer values into the
+ *    depth buffer (really a depth+stencil buffer). The hardware then uses
+ * the combined buffer.
+ *
+ * 5. Whenever a depth or stencil buffer is reallocated (with
+ * glRenderbufferStorage) we undo the pairing and copy the stencil values
+ * from the combined depth/stencil buffer back to the stencil-only buffer.
+ *
+ * 6. We also undo the pairing when we find a change in buffer bindings.
+ *
+ * 7. If a framebuffer is only using a depth renderbuffer (no stencil), we
+ * just use the combined depth/stencil buffer and ignore the stencil values.
+ *
+ * 8. If a framebuffer is only using a stencil renderbuffer (no depth) we have
+ * to promote the 8bpp software stencil buffer to a 32bpp hardware
+ * depth+stencil buffer.
+ *
+ */
+
+
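+/* For example (illustrative): attaching a separate GL_DEPTH_COMPONENT24
+ * renderbuffer and a GL_STENCIL_INDEX8 renderbuffer to the same user FBO
+ * makes intel_validate_paired_depth_stencil() merge the stencil values into
+ * the depth renderbuffer's GL_DEPTH24_STENCIL8 region; the hardware then
+ * renders to that combined buffer, with PairedStencil/PairedDepth recording
+ * the pairing between the two renderbuffer names.
+ */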
+
+static void
+map_regions(GLcontext * ctx,
+ struct intel_renderbuffer *depthRb,
+ struct intel_renderbuffer *stencilRb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ if (depthRb && depthRb->region) {
+ intel_region_map(intel->intelScreen, depthRb->region);
+ depthRb->pfMap = depthRb->region->map;
+ depthRb->pfPitch = depthRb->region->pitch;
+ }
+ if (stencilRb && stencilRb->region) {
+ intel_region_map(intel->intelScreen, stencilRb->region);
+ stencilRb->pfMap = stencilRb->region->map;
+ stencilRb->pfPitch = stencilRb->region->pitch;
+ }
+}
+
+static void
+unmap_regions(GLcontext * ctx,
+ struct intel_renderbuffer *depthRb,
+ struct intel_renderbuffer *stencilRb)
+{
+ struct intel_context *intel = intel_context(ctx);
+ if (depthRb && depthRb->region) {
+ intel_region_unmap(intel->intelScreen, depthRb->region);
+ depthRb->pfMap = NULL;
+ depthRb->pfPitch = 0;
+ }
+ if (stencilRb && stencilRb->region) {
+ intel_region_unmap(intel->intelScreen, stencilRb->region);
+ stencilRb->pfMap = NULL;
+ stencilRb->pfPitch = 0;
+ }
+}
+
+
+
+/**
+ * Undo the pairing/interleaving between depth and stencil buffers.
+ * irb should be a depth/stencil or stencil renderbuffer.
+ */
+void
+intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb)
+{
+ if (irb->PairedStencil) {
+ /* irb is a depth/stencil buffer */
+ struct gl_renderbuffer *stencilRb;
+ struct intel_renderbuffer *stencilIrb;
+
+ ASSERT(irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ stencilRb = _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
+ stencilIrb = intel_renderbuffer(stencilRb);
+ if (stencilIrb) {
+ /* need to extract stencil values from the depth buffer */
+ ASSERT(stencilIrb->PairedDepth == irb->Base.Name);
+ map_regions(ctx, irb, stencilIrb);
+ _mesa_extract_stencil(ctx, &irb->Base, &stencilIrb->Base);
+ unmap_regions(ctx, irb, stencilIrb);
+ stencilIrb->PairedDepth = 0;
+ }
+ irb->PairedStencil = 0;
+ }
+ else if (irb->PairedDepth) {
+ /* irb is a stencil buffer */
+ struct gl_renderbuffer *depthRb;
+ struct intel_renderbuffer *depthIrb;
+
+ ASSERT(irb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
+ irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ depthRb = _mesa_lookup_renderbuffer(ctx, irb->PairedDepth);
+ depthIrb = intel_renderbuffer(depthRb);
+ if (depthIrb) {
+ /* need to extract stencil values from the depth buffer */
+ ASSERT(depthIrb->PairedStencil == irb->Base.Name);
+ map_regions(ctx, depthIrb, irb);
+ _mesa_extract_stencil(ctx, &depthIrb->Base, &irb->Base);
+ unmap_regions(ctx, depthIrb, irb);
+ depthIrb->PairedStencil = 0;
+ }
+ irb->PairedDepth = 0;
+ }
+ else {
+      _mesa_problem(ctx, "Problem in intel_unpair_depth_stencil");
+ }
+
+ ASSERT(irb->PairedStencil == 0);
+ ASSERT(irb->PairedDepth == 0);
+}
+
+
+/**
+ * Examine the depth and stencil renderbuffers which are attached to the
+ * framebuffer. If both depth and stencil are attached, make sure that the
+ * renderbuffers are 'paired' (combined). If only depth or only stencil is
+ * attached, undo any previous pairing.
+ *
+ * Must be called if NewState & _NEW_BUFFER (when renderbuffer attachments
+ * change, for example).
+ */
+void
+intel_validate_paired_depth_stencil(GLcontext * ctx,
+ struct gl_framebuffer *fb)
+{
+ struct intel_renderbuffer *depthRb, *stencilRb;
+
+ depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ stencilRb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
+
+ if (depthRb && stencilRb) {
+ if (depthRb == stencilRb) {
+ /* Using a user-created combined depth/stencil buffer.
+ * Nothing to do.
+ */
+ ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_STENCIL_EXT);
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ }
+ else {
+ /* Separate depth/stencil buffers, need to interleave now */
+ ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_COMPONENT);
+ ASSERT(stencilRb->Base._BaseFormat == GL_STENCIL_INDEX);
+ /* may need to interleave depth/stencil now */
+ if (depthRb->PairedStencil == stencilRb->Base.Name) {
+ /* OK, the depth and stencil buffers are already interleaved */
+ ASSERT(stencilRb->PairedDepth == depthRb->Base.Name);
+ }
+ else {
+ /* need to setup new pairing/interleaving */
+ if (depthRb->PairedStencil) {
+ intel_unpair_depth_stencil(ctx, depthRb);
+ }
+ if (stencilRb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, stencilRb);
+ }
+
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ ASSERT(stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
+ stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+
+ /* establish new pairing: interleave stencil into depth buffer */
+ map_regions(ctx, depthRb, stencilRb);
+ _mesa_insert_stencil(ctx, &depthRb->Base, &stencilRb->Base);
+ unmap_regions(ctx, depthRb, stencilRb);
+ depthRb->PairedStencil = stencilRb->Base.Name;
+ stencilRb->PairedDepth = depthRb->Base.Name;
+ }
+
+ }
+ }
+ else if (depthRb) {
+ /* Depth buffer but no stencil buffer.
+ * We'll use a GL_DEPTH24_STENCIL8 buffer and ignore the stencil bits.
+ */
+ /* can't assert this until storage is allocated:
+ ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ */
+      /* undo any previous pairing */
+ if (depthRb->PairedStencil) {
+ intel_unpair_depth_stencil(ctx, depthRb);
+ }
+ }
+ else if (stencilRb) {
+ /* Stencil buffer but no depth buffer.
+ * Since h/w doesn't typically support just 8bpp stencil w/out Z,
+ * we'll use a GL_DEPTH24_STENCIL8 buffer and ignore the depth bits.
+ */
+ /* undo any previous pairing */
+ if (stencilRb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, stencilRb);
+ }
+ if (stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
+ /* promote buffer to GL_DEPTH24_STENCIL8 for hw rendering */
+ _mesa_promote_stencil(ctx, &stencilRb->Base);
+ ASSERT(stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
+ }
+ }
+
+ /* Finally, update the fb->_DepthBuffer and fb->_StencilBuffer fields */
+ _mesa_update_depth_buffer(ctx, fb, BUFFER_DEPTH);
+ if (depthRb && depthRb->PairedStencil)
+ _mesa_update_stencil_buffer(ctx, fb, BUFFER_DEPTH);
+ else
+ _mesa_update_stencil_buffer(ctx, fb, BUFFER_STENCIL);
+
+
+ /* The hardware should use fb->Attachment[BUFFER_DEPTH].Renderbuffer
+ * first, if present, then fb->Attachment[BUFFER_STENCIL].Renderbuffer
+ * if present.
+ */
+}
diff --git a/src/mesa/drivers/dri/i915/intel_depthstencil.h b/src/mesa/drivers/dri/intel/intel_depthstencil.h
index 2d3fc48b3a..2d3fc48b3a 100644
--- a/src/mesa/drivers/dri/i915/intel_depthstencil.h
+++ b/src/mesa/drivers/dri/intel/intel_depthstencil.h
diff --git a/src/mesa/drivers/dri/intel/intel_fbo.c b/src/mesa/drivers/dri/intel/intel_fbo.c
new file mode 100644
index 0000000000..6f99f401c7
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_fbo.c
@@ -0,0 +1,687 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "imports.h"
+#include "mtypes.h"
+#include "fbobject.h"
+#include "framebuffer.h"
+#include "renderbuffer.h"
+#include "context.h"
+#include "texformat.h"
+#include "texrender.h"
+
+#include "intel_context.h"
+#include "intel_buffers.h"
+#include "intel_depthstencil.h"
+#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_span.h"
+
+
+#define FILE_DEBUG_FLAG DEBUG_FBO
+
+#define INTEL_RB_CLASS 0x12345678
+
+
+/* XXX FBO: move this to intel_context.h (inlined) */
+/**
+ * Return a gl_renderbuffer ptr casted to intel_renderbuffer.
+ * NULL will be returned if the rb isn't really an intel_renderbuffer.
+ * This is determined by checking the ClassID.
+ */
+struct intel_renderbuffer *
+intel_renderbuffer(struct gl_renderbuffer *rb)
+{
+ struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
+ if (irb && irb->Base.ClassID == INTEL_RB_CLASS) {
+ /*_mesa_warning(NULL, "Returning non-intel Rb\n");*/
+ return irb;
+ }
+ else
+ return NULL;
+}
+
+
+struct intel_renderbuffer *
+intel_get_renderbuffer(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ return intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
+}
+
+
+void
+intel_flip_renderbuffers(struct intel_framebuffer *intel_fb)
+{
+ int current_page = intel_fb->pf_current_page;
+ int next_page = (current_page + 1) % intel_fb->pf_num_pages;
+ struct gl_renderbuffer *tmp_rb;
+
+ /* Exchange renderbuffers if necessary but make sure their reference counts
+ * are preserved.
+ */
+ if (intel_fb->color_rb[current_page] &&
+ intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer !=
+ &intel_fb->color_rb[current_page]->Base) {
+ tmp_rb = NULL;
+ _mesa_reference_renderbuffer(&tmp_rb,
+ intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
+ tmp_rb = &intel_fb->color_rb[current_page]->Base;
+ _mesa_reference_renderbuffer(
+ &intel_fb->Base.Attachment[BUFFER_FRONT_LEFT].Renderbuffer, tmp_rb);
+ _mesa_reference_renderbuffer(&tmp_rb, NULL);
+ }
+
+ if (intel_fb->color_rb[next_page] &&
+ intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer !=
+ &intel_fb->color_rb[next_page]->Base) {
+ tmp_rb = NULL;
+ _mesa_reference_renderbuffer(&tmp_rb,
+ intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer);
+ tmp_rb = &intel_fb->color_rb[next_page]->Base;
+ _mesa_reference_renderbuffer(
+ &intel_fb->Base.Attachment[BUFFER_BACK_LEFT].Renderbuffer, tmp_rb);
+ _mesa_reference_renderbuffer(&tmp_rb, NULL);
+ }
+}
+
+
+struct intel_region *
+intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
+{
+ struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
+
+ if (irb)
+ return irb->region;
+ else
+ return NULL;
+}
+
+
+
+/**
+ * Create a new framebuffer object.
+ */
+static struct gl_framebuffer *
+intel_new_framebuffer(GLcontext * ctx, GLuint name)
+{
+ /* Only drawable state in intel_framebuffer at this time, just use Mesa's
+ * class
+ */
+ return _mesa_new_framebuffer(ctx, name);
+}
+
+
+static void
+intel_delete_renderbuffer(struct gl_renderbuffer *rb)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ ASSERT(irb);
+
+ if (irb->PairedStencil || irb->PairedDepth) {
+ intel_unpair_depth_stencil(ctx, irb);
+ }
+
+ if (intel && irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ _mesa_free(irb);
+}
+
+
+
+/**
+ * Return a pointer to a specific pixel in a renderbuffer.
+ */
+static void *
+intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLint x, GLint y)
+{
+ /* By returning NULL we force all software rendering to go through
+ * the span routines.
+ */
+ return NULL;
+}
+
+
+
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ GLboolean softwareBuffer = GL_FALSE;
+ int cpp;
+
+ ASSERT(rb->Name != 0);
+
+ switch (internalFormat) {
+ case GL_R3_G3_B2:
+ case GL_RGB4:
+ case GL_RGB5:
+ rb->_ActualFormat = GL_RGB5;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ rb->RedBits = 5;
+ rb->GreenBits = 6;
+ rb->BlueBits = 5;
+ cpp = 2;
+ break;
+ case GL_RGB:
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ case GL_RGBA:
+ case GL_RGBA2:
+ case GL_RGBA4:
+ case GL_RGB5_A1:
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ rb->_ActualFormat = GL_RGBA8;
+ rb->DataType = GL_UNSIGNED_BYTE;
+ rb->RedBits = 8;
+ rb->GreenBits = 8;
+ rb->BlueBits = 8;
+ rb->AlphaBits = 8;
+ cpp = 4;
+ break;
+ case GL_STENCIL_INDEX:
+ case GL_STENCIL_INDEX1_EXT:
+ case GL_STENCIL_INDEX4_EXT:
+ case GL_STENCIL_INDEX8_EXT:
+ case GL_STENCIL_INDEX16_EXT:
+ /* alloc a depth+stencil buffer */
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->StencilBits = 8;
+ cpp = 4;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ rb->_ActualFormat = GL_DEPTH_COMPONENT16;
+ rb->DataType = GL_UNSIGNED_SHORT;
+ rb->DepthBits = 16;
+ cpp = 2;
+ break;
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->DepthBits = 24;
+ cpp = 4;
+ break;
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
+ rb->DepthBits = 24;
+ rb->StencilBits = 8;
+ cpp = 4;
+ break;
+ default:
+ _mesa_problem(ctx,
+ "Unexpected format in intel_alloc_renderbuffer_storage");
+ return GL_FALSE;
+ }
+
+ intelFlush(ctx);
+
+ /* free old region */
+ if (irb->region) {
+ intel_region_release(&irb->region);
+ }
+
+ /* allocate new memory region/renderbuffer */
+ if (softwareBuffer) {
+ return _mesa_soft_renderbuffer_storage(ctx, rb, internalFormat,
+ width, height);
+ }
+ else {
+      /* Choose a pitch to match hardware requirements: round the pitch in
+       * bytes up to a multiple of 64, then convert back to pixels.
+       */
+ GLuint pitch = ((cpp * width + 63) & ~63) / cpp;
+
+ /* alloc hardware renderbuffer */
+ DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width,
+ height, pitch);
+
+ irb->region = intel_region_alloc(intel->intelScreen, cpp, pitch, height);
+ if (!irb->region)
+ return GL_FALSE; /* out of memory? */
+
+ ASSERT(irb->region->buffer);
+
+ rb->Width = width;
+ rb->Height = height;
+
+ /* This sets the Get/PutRow/Value functions */
+ intel_set_span_functions(&irb->Base);
+
+ return GL_TRUE;
+ }
+}
+
+
+
+/**
+ * Called for each hardware renderbuffer when a _window_ is resized.
+ * Just update fields.
+ * Not used for user-created renderbuffers!
+ */
+static GLboolean
+intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+ ASSERT(rb->Name == 0);
+ rb->Width = width;
+ rb->Height = height;
+ rb->_ActualFormat = internalFormat;
+
+ return GL_TRUE;
+}
+
+static void
+intel_resize_buffers(GLcontext *ctx, struct gl_framebuffer *fb,
+ GLuint width, GLuint height)
+{
+ struct intel_framebuffer *intel_fb = (struct intel_framebuffer*)fb;
+ int i;
+
+ _mesa_resize_framebuffer(ctx, fb, width, height);
+
+ fb->Initialized = GL_TRUE; /* XXX remove someday */
+
+ if (fb->Name != 0) {
+ return;
+ }
+
+ /* Make sure all window system renderbuffers are up to date */
+ for (i = 0; i < 3; i++) {
+ struct gl_renderbuffer *rb = &intel_fb->color_rb[i]->Base;
+
+ /* only resize if size is changing */
+ if (rb && (rb->Width != width || rb->Height != height)) {
+ rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
+ }
+ }
+}
+
+static GLboolean
+intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat, GLuint width, GLuint height)
+{
+   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
+ return GL_FALSE;
+}
+
+
+
+/**
+ * Create a new intel_renderbuffer which corresponds to an on-screen window,
+ * not a user-created renderbuffer.
+ * \param width the screen width
+ * \param height the screen height
+ */
+struct intel_renderbuffer *
+intel_create_renderbuffer(GLenum intFormat, GLsizei width, GLsizei height,
+ int offset, int pitch, int cpp, void *map)
+{
+ GET_CURRENT_CONTEXT(ctx);
+
+ struct intel_renderbuffer *irb;
+ const GLuint name = 0;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ switch (intFormat) {
+ case GL_RGB5:
+ irb->Base._ActualFormat = GL_RGB5;
+ irb->Base._BaseFormat = GL_RGBA;
+ irb->Base.RedBits = 5;
+ irb->Base.GreenBits = 6;
+ irb->Base.BlueBits = 5;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 2;
+ break;
+ case GL_RGBA8:
+ irb->Base._ActualFormat = GL_RGBA8;
+ irb->Base._BaseFormat = GL_RGBA;
+ irb->Base.RedBits = 8;
+ irb->Base.GreenBits = 8;
+ irb->Base.BlueBits = 8;
+ irb->Base.AlphaBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 4;
+ break;
+ case GL_STENCIL_INDEX8_EXT:
+ irb->Base._ActualFormat = GL_STENCIL_INDEX8_EXT;
+ irb->Base._BaseFormat = GL_STENCIL_INDEX;
+ irb->Base.StencilBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_BYTE;
+ cpp = 1;
+ break;
+ case GL_DEPTH_COMPONENT16:
+ irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DepthBits = 16;
+ irb->Base.DataType = GL_UNSIGNED_SHORT;
+ cpp = 2;
+ break;
+ case GL_DEPTH_COMPONENT24:
+ irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ irb->Base.DepthBits = 24;
+ irb->Base.DataType = GL_UNSIGNED_INT;
+ cpp = 4;
+ break;
+ case GL_DEPTH24_STENCIL8_EXT:
+ irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
+ irb->Base._BaseFormat = GL_DEPTH_STENCIL_EXT;
+ irb->Base.DepthBits = 24;
+ irb->Base.StencilBits = 8;
+ irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
+ cpp = 4;
+ break;
+ default:
+ _mesa_problem(NULL,
+ "Unexpected intFormat in intel_create_renderbuffer");
+ return NULL;
+ }
+
+ irb->Base.InternalFormat = intFormat;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_window_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+ /* This sets the Get/PutRow/Value functions */
+ intel_set_span_functions(&irb->Base);
+
+ irb->pfMap = map;
+ irb->pfPitch = pitch / cpp; /* in pixels */
+
+#if 00
+ irb->region = intel_region_create_static(intel,
+ DRM_MM_TT,
+ offset, map, cpp, width, height);
+#endif
+
+ return irb;
+}
+
+
+/**
+ * Create a new renderbuffer object.
+ * Typically called via glBindRenderbufferEXT().
+ */
+static struct gl_renderbuffer *
+intel_new_renderbuffer(GLcontext * ctx, GLuint name)
+{
+ /*struct intel_context *intel = intel_context(ctx); */
+ struct intel_renderbuffer *irb;
+
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ /* intel-specific methods */
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
+ irb->Base.GetPointer = intel_get_pointer;
+ /* span routines set in alloc_storage function */
+
+ return &irb->Base;
+}
+
+
+/**
+ * Called via glBindFramebufferEXT().
+ */
+static void
+intel_bind_framebuffer(GLcontext * ctx, GLenum target,
+ struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
+{
+ if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
+ intel_draw_buffer(ctx, fb);
+ /* Integer depth range depends on depth buffer bits */
+ ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
+ }
+ else {
+ /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
+ }
+}
+
+
+/**
+ * Called via glFramebufferRenderbufferEXT().
+ */
+static void
+intel_framebuffer_renderbuffer(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment, struct gl_renderbuffer *rb)
+{
+ DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
+
+ intelFlush(ctx);
+
+ _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
+ intel_draw_buffer(ctx, fb);
+}
+
+
+/**
+ * When glFramebufferTexture[123]D is called this function sets up the
+ * gl_renderbuffer wrapper around the texture image.
+ * This will have the region info needed for hardware rendering.
+ */
+static struct intel_renderbuffer *
+intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ const GLuint name = ~0; /* not significant, but distinct for debugging */
+ struct intel_renderbuffer *irb;
+
+ /* make an intel_renderbuffer to wrap the texture image */
+ irb = CALLOC_STRUCT(intel_renderbuffer);
+ if (!irb) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
+ return NULL;
+ }
+
+ _mesa_init_renderbuffer(&irb->Base, name);
+ irb->Base.ClassID = INTEL_RB_CLASS;
+
+ if (texImage->TexFormat == &_mesa_texformat_argb8888) {
+ irb->Base._ActualFormat = GL_RGBA8;
+ irb->Base._BaseFormat = GL_RGBA;
+ DBG("Render to RGBA8 texture OK\n");
+ }
+ else if (texImage->TexFormat == &_mesa_texformat_rgb565) {
+ irb->Base._ActualFormat = GL_RGB5;
+ irb->Base._BaseFormat = GL_RGB;
+ DBG("Render to RGB5 texture OK\n");
+ }
+ else if (texImage->TexFormat == &_mesa_texformat_z16) {
+ irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
+ irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
+ DBG("Render to DEPTH16 texture OK\n");
+ }
+ else {
+ DBG("Render to texture BAD FORMAT %d\n",
+ texImage->TexFormat->MesaFormat);
+ _mesa_free(irb);
+ return NULL;
+ }
+
+ irb->Base.InternalFormat = irb->Base._ActualFormat;
+ irb->Base.Width = texImage->Width;
+ irb->Base.Height = texImage->Height;
+ irb->Base.DataType = GL_UNSIGNED_BYTE; /* FBO XXX fix */
+ irb->Base.RedBits = texImage->TexFormat->RedBits;
+ irb->Base.GreenBits = texImage->TexFormat->GreenBits;
+ irb->Base.BlueBits = texImage->TexFormat->BlueBits;
+ irb->Base.AlphaBits = texImage->TexFormat->AlphaBits;
+ irb->Base.DepthBits = texImage->TexFormat->DepthBits;
+
+ irb->Base.Delete = intel_delete_renderbuffer;
+ irb->Base.AllocStorage = intel_nop_alloc_storage;
+ intel_set_span_functions(&irb->Base);
+
+ irb->RenderToTexture = GL_TRUE;
+
+ return irb;
+}
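+
+/* Only ARGB8888, RGB565 and Z16 texture images can be wrapped here; for any
+ * other texture format intel_wrap_texture() returns NULL and
+ * intel_render_texture() below falls back to software rendering via
+ * _mesa_render_texture().
+ */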
+
+
+/**
+ * Called by glFramebufferTexture[123]DEXT() (and other places) to
+ * prepare for rendering into texture memory. This might be called
+ * many times to choose different texture levels, cube faces, etc
+ * before intel_finish_render_texture() is ever called.
+ */
+static void
+intel_render_texture(GLcontext * ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct gl_texture_image *newImage
+ = att->Texture->Image[att->CubeMapFace][att->TextureLevel];
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct intel_texture_image *intel_image;
+ GLuint imageOffset;
+
+ (void) fb;
+
+ ASSERT(newImage);
+
+ if (!irb) {
+ irb = intel_wrap_texture(ctx, newImage);
+ if (irb) {
+ /* bind the wrapper to the attachment point */
+ _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
+ }
+ else {
+ /* fallback to software rendering */
+ _mesa_render_texture(ctx, fb, att);
+ return;
+ }
+ }
+
+ DBG("Begin render texture tid %x tex=%u w=%d h=%d refcount=%d\n",
+ _glthread_GetID(),
+ att->Texture->Name, newImage->Width, newImage->Height,
+ irb->Base.RefCount);
+
+ /* point the renderbuffer's region to the texture image region */
+ intel_image = intel_texture_image(newImage);
+ if (irb->region != intel_image->mt->region) {
+ if (irb->region)
+ intel_region_release(&irb->region);
+ intel_region_reference(&irb->region, intel_image->mt->region);
+ }
+
+ /* compute offset of the particular 2D image within the texture region */
+ imageOffset = intel_miptree_image_offset(intel_image->mt,
+ att->CubeMapFace,
+ att->TextureLevel);
+
+ if (att->Texture->Target == GL_TEXTURE_3D) {
+ const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
+ att->TextureLevel);
+ imageOffset += offsets[att->Zoffset];
+ }
+
+ /* store that offset in the region */
+ intel_image->mt->region->draw_offset = imageOffset;
+
+ /* update drawing region, etc */
+ intel_draw_buffer(ctx, fb);
+}
+
+
+/**
+ * Called by Mesa when rendering to a texture is done.
+ */
+static void
+intel_finish_render_texture(GLcontext * ctx,
+ struct gl_renderbuffer_attachment *att)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+
+ DBG("End render texture (tid %x) tex %u\n", _glthread_GetID(), att->Texture->Name);
+
+ if (irb) {
+ /* just release the region */
+ intel_region_release(&irb->region);
+ }
+ else if (att->Renderbuffer) {
+ /* software fallback */
+ _mesa_finish_render_texture(ctx, att);
+ /* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
+ }
+}
+
+
+/**
+ * Do one-time context initializations related to GL_EXT_framebuffer_object.
+ * Hook in device driver functions.
+ */
+void
+intel_fbo_init(struct intel_context *intel)
+{
+ intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
+ intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
+ intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
+ intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
+ intel->ctx.Driver.RenderTexture = intel_render_texture;
+ intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
+ intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
+}
diff --git a/src/mesa/drivers/dri/i915/intel_fbo.h b/src/mesa/drivers/dri/intel/intel_fbo.h
index f9a11d02e3..f9a11d02e3 100644
--- a/src/mesa/drivers/dri/i915/intel_fbo.h
+++ b/src/mesa/drivers/dri/intel/intel_fbo.h
diff --git a/src/mesa/drivers/dri/intel/intel_mipmap_tree.c b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
new file mode 100644
index 0000000000..2c167a9ab7
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.c
@@ -0,0 +1,388 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "enums.h"
+
+#define FILE_DEBUG_FLAG DEBUG_MIPTREE
+
+static GLenum
+target_to_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return GL_TEXTURE_CUBE_MAP_ARB;
+ default:
+ return target;
+ }
+}
+
+struct intel_mipmap_tree *
+intel_miptree_create(struct intel_context *intel,
+ GLenum target,
+ GLenum internal_format,
+ GLuint first_level,
+ GLuint last_level,
+ GLuint width0,
+ GLuint height0,
+ GLuint depth0, GLuint cpp, GLuint compress_byte)
+{
+ GLboolean ok;
+ struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
+
+ DBG("%s target %s format %s level %d..%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ _mesa_lookup_enum_by_nr(internal_format), first_level, last_level);
+
+ mt->target = target_to_target(target);
+ mt->internal_format = internal_format;
+ mt->first_level = first_level;
+ mt->last_level = last_level;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->depth0 = depth0;
+ mt->cpp = compress_byte ? compress_byte : cpp;
+ mt->compressed = compress_byte ? 1 : 0;
+ mt->refcount = 1;
+
+ switch (intel->intelScreen->deviceID) {
+ case PCI_CHIP_I945_G:
+ case PCI_CHIP_I945_GM:
+ case PCI_CHIP_I945_GME:
+ case PCI_CHIP_G33_G:
+ case PCI_CHIP_Q33_G:
+ case PCI_CHIP_Q35_G:
+ ok = i945_miptree_layout(mt);
+ break;
+ case PCI_CHIP_I915_G:
+ case PCI_CHIP_I915_GM:
+ case PCI_CHIP_I830_M:
+ case PCI_CHIP_I855_GM:
+ case PCI_CHIP_I865_G:
+ default:
+ /* All the i830 chips and the i915 use this layout:
+ */
+ ok = i915_miptree_layout(mt);
+ break;
+ }
+
+ if (ok) {
+ if (!mt->compressed) {
+ int align;
+
+ if (intel->intelScreen->ttm) {
+ /* XXX: Align pitch to multiple of 64 bytes for now to allow
+ * render-to-texture to work in all cases. This should probably be
+ * replaced at some point by some scheme to only do this when really
+ * necessary.
+ */
+ align = 63;
+ } else {
+ align = 3;
+ }
+
+ mt->pitch = (mt->pitch * cpp + align) & ~align;
+
+ /* XXX: At least the i915 seems very upset when the pitch is a multiple
+ * of 1024 and sometimes 512 bytes - performance can drop by several
+ * times. Go to the next multiple of the required alignment for now.
+ */
+ if (!(mt->pitch & 511))
+ mt->pitch += align + 1;
+
+ mt->pitch /= cpp;
+ }
+
+ mt->region = intel_region_alloc(intel->intelScreen,
+ mt->cpp, mt->pitch, mt->total_height);
+ }
+
+ if (!mt->region) {
+ free(mt);
+ return NULL;
+ }
+
+ return mt;
+}
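+
+/* A worked example of the pitch alignment above, with made-up numbers: for
+ * an uncompressed tree with cpp == 4 whose layout step left pitch == 100
+ * pixels, the TTM case (align == 63) gives (100 * 4 + 63) & ~63 == 448
+ * bytes; 448 is not a multiple of 512, so no further bump is applied, and
+ * the pitch stored back is 448 / 4 == 112 pixels.
+ */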
+
+
+void
+intel_miptree_reference(struct intel_mipmap_tree **dst,
+ struct intel_mipmap_tree *src)
+{
+ src->refcount++;
+ *dst = src;
+ DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
+}
+
+void
+intel_miptree_release(struct intel_context *intel,
+ struct intel_mipmap_tree **mt)
+{
+ if (!*mt)
+ return;
+
+ DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
+ if (--(*mt)->refcount <= 0) {
+ GLuint i;
+
+ DBG("%s deleting %p\n", __FUNCTION__, *mt);
+
+ intel_region_release(&((*mt)->region));
+
+ for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
+ if ((*mt)->level[i].image_offset)
+ free((*mt)->level[i].image_offset);
+
+ free(*mt);
+ }
+ *mt = NULL;
+}
+
+
+
+
+/* Can the image be pulled into a unified mipmap tree. This mirrors
+ * the completeness test in a lot of ways.
+ *
+ * Not sure whether I want to pass gl_texture_image here.
+ */
+GLboolean
+intel_miptree_match_image(struct intel_mipmap_tree *mt,
+ struct gl_texture_image *image,
+ GLuint face, GLuint level)
+{
+ /* Images with borders are never pulled into mipmap trees.
+ */
+ if (image->Border)
+ return GL_FALSE;
+
+ if (image->InternalFormat != mt->internal_format ||
+ image->IsCompressed != mt->compressed)
+ return GL_FALSE;
+
+ /* Test image dimensions against the base level image adjusted for
+ * minification. This will also catch images not present in the
+ * tree, changed targets, etc.
+ */
+ if (image->Width != mt->level[level].width ||
+ image->Height != mt->level[level].height ||
+ image->Depth != mt->level[level].depth)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+
+void
+intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
+ GLuint level,
+ GLuint nr_images,
+ GLuint x, GLuint y, GLuint w, GLuint h, GLuint d)
+{
+
+ mt->level[level].width = w;
+ mt->level[level].height = h;
+ mt->level[level].depth = d;
+ mt->level[level].level_offset = (x + y * mt->pitch) * mt->cpp;
+ mt->level[level].nr_images = nr_images;
+
+ DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
+ level, w, h, d, x, y, mt->level[level].level_offset);
+
+ /* Not sure when this would happen, but anyway:
+ */
+ if (mt->level[level].image_offset) {
+ free(mt->level[level].image_offset);
+ mt->level[level].image_offset = NULL;
+ }
+
+ assert(nr_images);
+
+ mt->level[level].image_offset = malloc(nr_images * sizeof(GLuint));
+ mt->level[level].image_offset[0] = 0;
+}
+
+
+
+void
+intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
+ GLuint level, GLuint img, GLuint x, GLuint y)
+{
+ if (img == 0 && level == 0)
+ assert(x == 0 && y == 0);
+
+ assert(img < mt->level[level].nr_images);
+
+ mt->level[level].image_offset[img] = (x + y * mt->pitch);
+
+ DBG("%s level %d img %d pos %d,%d image_offset %x\n",
+ __FUNCTION__, level, img, x, y, mt->level[level].image_offset[img]);
+}
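+
+/* The per-chip layout code (i915_miptree_layout() / i945_miptree_layout())
+ * is what fills these in; a rough sketch of the expected call pattern, with
+ * the actual placement arithmetic omitted:
+ *
+ *    intel_miptree_set_level_info(mt, level, nr_images, x, y, w, h, d);
+ *    for (img = 0; img < nr_images; img++)
+ *       intel_miptree_set_image_offset(mt, level, img, img_x, img_y);
+ */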
+
+
+/* Although we use the image_offset[] array to store relative offsets
+ * to cube faces, Mesa doesn't know anything about this and expects
+ * each cube face to be treated as a separate image.
+ *
+ * These functions present that view to mesa:
+ */
+const GLuint *
+intel_miptree_depth_offsets(struct intel_mipmap_tree *mt, GLuint level)
+{
+ static const GLuint zero = 0;
+
+ if (mt->target != GL_TEXTURE_3D || mt->level[level].nr_images == 1)
+ return &zero;
+ else
+ return mt->level[level].image_offset;
+}
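+
+/* Note: when the static zero is returned above, only element [0] of the
+ * result may be dereferenced.
+ */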
+
+
+GLuint
+intel_miptree_image_offset(struct intel_mipmap_tree * mt,
+ GLuint face, GLuint level)
+{
+ if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
+ return (mt->level[level].level_offset +
+ mt->level[level].image_offset[face] * mt->cpp);
+ else
+ return mt->level[level].level_offset;
+}
+
+
+
+/**
+ * Map a teximage in a mipmap tree.
+ * \param row_stride returns row stride in bytes
+ * \param image_offsets returns the offset of each depth slice within the level (for 3D textures).
+ * \return address of mapping
+ */
+GLubyte *
+intel_miptree_image_map(struct intel_context * intel,
+ struct intel_mipmap_tree * mt,
+ GLuint face,
+ GLuint level,
+ GLuint * row_stride, GLuint * image_offsets)
+{
+ DBG("%s \n", __FUNCTION__);
+
+ if (row_stride)
+ *row_stride = mt->pitch * mt->cpp;
+
+ if (image_offsets)
+ memcpy(image_offsets, mt->level[level].image_offset,
+ mt->level[level].depth * sizeof(GLuint));
+
+ return (intel_region_map(intel->intelScreen, mt->region) +
+ intel_miptree_image_offset(mt, face, level));
+}
+
+void
+intel_miptree_image_unmap(struct intel_context *intel,
+ struct intel_mipmap_tree *mt)
+{
+ DBG("%s\n", __FUNCTION__);
+ intel_region_unmap(intel->intelScreen, mt->region);
+}
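+
+/* A minimal usage sketch for the map/unmap pair above (locking and error
+ * handling omitted; y, width and src_row are placeholders):
+ *
+ *    GLuint row_stride;
+ *    GLubyte *dst = intel_miptree_image_map(intel, mt, face, level,
+ *                                           &row_stride, NULL);
+ *    memcpy(dst + y * row_stride, src_row, width * mt->cpp);
+ *    intel_miptree_image_unmap(intel, mt);
+ */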
+
+
+
+/* Upload data for a particular image.
+ */
+void
+intel_miptree_image_data(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face,
+ GLuint level,
+ void *src,
+ GLuint src_row_pitch, GLuint src_image_pitch)
+{
+ GLuint depth = dst->level[level].depth;
+ GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
+ const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
+ GLuint i;
+ GLuint height = 0;
+
+ DBG("%s\n", __FUNCTION__);
+ for (i = 0; i < depth; i++) {
+ height = dst->level[level].height;
+ if(dst->compressed)
+ height /= 4;
+ intel_region_data(intel->intelScreen, dst->region,
+ dst_offset + dst_depth_offset[i], /* dst_offset */
+ 0, 0, /* dstx, dsty */
+ src,
+ src_row_pitch,
+ 0, 0, /* source x, y */
+ dst->level[level].width, height); /* width, height */
+
+ src += src_image_pitch * dst->cpp;
+ }
+}
+
+extern GLuint intel_compressed_alignment(GLenum);
+/* Copy mipmap image between trees
+ */
+void
+intel_miptree_image_copy(struct intel_context *intel,
+ struct intel_mipmap_tree *dst,
+ GLuint face, GLuint level,
+ struct intel_mipmap_tree *src)
+{
+ GLuint width = src->level[level].width;
+ GLuint height = src->level[level].height;
+ GLuint depth = src->level[level].depth;
+ GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
+ GLuint src_offset = intel_miptree_image_offset(src, face, level);
+ const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
+ const GLuint *src_depth_offset = intel_miptree_depth_offsets(src, level);
+ GLuint i;
+
+ if (dst->compressed) {
+ GLuint alignment = intel_compressed_alignment(dst->internal_format);
+ height = (height + 3) / 4;
+ width = ((width + alignment - 1) & ~(alignment - 1));
+ }
+
+ for (i = 0; i < depth; i++) {
+ intel_region_copy(intel->intelScreen,
+ dst->region, dst_offset + dst_depth_offset[i],
+ 0,
+ 0,
+ src->region, src_offset + src_depth_offset[i],
+ 0, 0, width, height);
+ }
+
+}
diff --git a/src/mesa/drivers/dri/i915/intel_mipmap_tree.h b/src/mesa/drivers/dri/intel/intel_mipmap_tree.h
index ecdb7be244..ecdb7be244 100644
--- a/src/mesa/drivers/dri/i915/intel_mipmap_tree.h
+++ b/src/mesa/drivers/dri/intel/intel_mipmap_tree.h
diff --git a/src/mesa/drivers/dri/intel/intel_regions.c b/src/mesa/drivers/dri/intel/intel_regions.c
new file mode 100644
index 0000000000..7799fdbb11
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_regions.c
@@ -0,0 +1,483 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/* Provide additional functionality on top of bufmgr buffers:
+ * - 2d semantics and blit operations
+ * - refcounting of buffers for multiple images in a buffer.
+ * - refcounting of buffer mappings.
+ * - some logic for moving the buffers to the best memory pools for
+ * given operations.
+ *
+ * Most of this is to make it easier to implement the fixed-layout
+ * mipmap tree required by intel hardware in the face of GL's
+ * programming interface where each image can be specified in random
+ * order and it isn't clear what layout the tree should have until the
+ * last moment.
+ */
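+
+/* The usual lifetime of a region, sketched for reference (a valid
+ * intelScreen is assumed and error handling is omitted):
+ *
+ *    struct intel_region *r = intel_region_alloc(intelScreen, cpp, pitch, h);
+ *    GLubyte *map = intel_region_map(intelScreen, r);
+ *    ... read or write pixels through map ...
+ *    intel_region_unmap(intelScreen, r);
+ *    intel_region_release(&r);
+ */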
+
+#include "intel_context.h"
+#include "intel_regions.h"
+#include "intel_blit.h"
+#include "intel_buffer_objects.h"
+#include "dri_bufmgr.h"
+#include "intel_bufmgr_ttm.h"
+#include "intel_batchbuffer.h"
+
+#define FILE_DEBUG_FLAG DEBUG_REGION
+
+void
+intel_region_idle(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ /* XXX: Using this function is likely bogus -- it ought to only have been
+ * used before a map, anyway, but leave this cheap implementation of it
+ * for now.
+ */
+ if (region && region->buffer) {
+ /* Mapping it for read will ensure that any acceleration to the region
+ * would have landed already.
+ */
+ dri_bo_map(region->buffer, GL_TRUE);
+ dri_bo_unmap(region->buffer);
+ }
+}
+
+/* XXX: Thread safety?
+ */
+GLubyte *
+intel_region_map(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ if (!region->map_refcount++) {
+ if (region->pbo)
+ intel_region_cow(intelScreen, region);
+
+ dri_bo_map(region->buffer, GL_TRUE);
+ region->map = region->buffer->virtual;
+ }
+
+ return region->map;
+}
+
+void
+intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ DBG("%s\n", __FUNCTION__);
+ if (!--region->map_refcount) {
+ dri_bo_unmap(region->buffer);
+ region->map = NULL;
+ }
+}
+
+struct intel_region *
+intel_region_alloc(intelScreenPrivate *intelScreen,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ struct intel_region *region = calloc(sizeof(*region), 1);
+
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+ region->refcount = 1;
+
+ region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
+ pitch * cpp * height, 64, DRM_BO_FLAG_MEM_TT);
+ return region;
+}
+
+void
+intel_region_reference(struct intel_region **dst, struct intel_region *src)
+{
+ assert(*dst == NULL);
+ if (src) {
+ src->refcount++;
+ *dst = src;
+ }
+}
+
+void
+intel_region_release(struct intel_region **region)
+{
+ if (!*region)
+ return;
+
+ DBG("%s %d\n", __FUNCTION__, (*region)->refcount - 1);
+
+ ASSERT((*region)->refcount > 0);
+ (*region)->refcount--;
+
+ if ((*region)->refcount == 0) {
+ assert((*region)->map_refcount == 0);
+
+ if ((*region)->pbo)
+ (*region)->pbo->region = NULL;
+ (*region)->pbo = NULL;
+ dri_bo_unreference((*region)->buffer);
+ free(*region);
+ }
+ *region = NULL;
+}
+
+
+struct intel_region *
+intel_region_create_static(intelScreenPrivate *intelScreen,
+ GLuint mem_type,
+ unsigned int bo_handle,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ struct intel_region *region = calloc(sizeof(*region), 1);
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+ region->refcount = 1;
+
+ if (intelScreen->ttm) {
+ assert(bo_handle != -1);
+ region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
+ "static region",
+ bo_handle);
+ } else {
+ region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
+ "static region",
+ offset, pitch * cpp * height,
+ virtual,
+ DRM_BO_FLAG_MEM_TT);
+ }
+
+ return region;
+}
+
+
+
+void
+intel_region_update_static(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ GLuint mem_type,
+ unsigned int bo_handle,
+ GLuint offset,
+ void *virtual,
+ GLuint cpp, GLuint pitch, GLuint height)
+{
+ DBG("%s\n", __FUNCTION__);
+
+ region->cpp = cpp;
+ region->pitch = pitch;
+ region->height = height; /* needed? */
+
+ /*
+ * We use a "shared" buffer type to indicate buffers created and
+ * shared by others.
+ */
+
+ dri_bo_unreference(region->buffer);
+ if (intelScreen->ttm) {
+ assert(bo_handle != -1);
+ region->buffer = intel_ttm_bo_create_from_handle(intelScreen->bufmgr,
+ "static region",
+ bo_handle);
+ } else {
+ region->buffer = dri_bo_alloc_static(intelScreen->bufmgr,
+ "static region",
+ offset, pitch * cpp * height,
+ virtual,
+ DRM_BO_FLAG_MEM_TT);
+ }
+}
+
+
+
+/*
+ * XXX Move this into core Mesa?
+ */
+static void
+_mesa_copy_rect(GLubyte * dst,
+ GLuint cpp,
+ GLuint dst_pitch,
+ GLuint dst_x,
+ GLuint dst_y,
+ GLuint width,
+ GLuint height,
+ const GLubyte * src,
+ GLuint src_pitch, GLuint src_x, GLuint src_y)
+{
+ GLuint i;
+
+ dst_pitch *= cpp;
+ src_pitch *= cpp;
+ dst += dst_x * cpp;
+ src += src_x * cpp;
+ dst += dst_y * dst_pitch;
+ src += src_y * src_pitch;
+ width *= cpp;
+
+ if (width == dst_pitch && width == src_pitch)
+ memcpy(dst, src, height * width);
+ else {
+ for (i = 0; i < height; i++) {
+ memcpy(dst, src, width);
+ dst += dst_pitch;
+ src += src_pitch;
+ }
+ }
+}
+
+
+/* Upload data to a rectangular sub-region. Lots of choices how to do this:
+ *
+ * - memcpy by span to current destination
+ * - upload data as new buffer and blit
+ *
+ * Currently always memcpy.
+ */
+void
+intel_region_data(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ const void *src, GLuint src_pitch,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+
+ LOCK_HARDWARE(intel);
+
+ _mesa_copy_rect(intel_region_map(intelScreen, dst) + dst_offset,
+ dst->cpp,
+ dst->pitch,
+ dstx, dsty, width, height, src, src_pitch, srcx, srcy);
+
+ intel_region_unmap(intelScreen, dst);
+
+ UNLOCK_HARDWARE(intel);
+
+}
+
+/* Copy rectangular sub-regions. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+void
+intel_region_copy(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ struct intel_region *src,
+ GLuint src_offset,
+ GLuint srcx, GLuint srcy, GLuint width, GLuint height)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+ assert(src->cpp == dst->cpp);
+
+ intelEmitCopyBlit(intel,
+ dst->cpp,
+ src->pitch, src->buffer, src_offset,
+ dst->pitch, dst->buffer, dst_offset,
+ srcx, srcy, dstx, dsty, width, height,
+ GL_COPY);
+}
+
+/* Fill a rectangular sub-region. Need better logic about when to
+ * push buffers into AGP - will currently do so whenever possible.
+ */
+void
+intel_region_fill(intelScreenPrivate *intelScreen,
+ struct intel_region *dst,
+ GLuint dst_offset,
+ GLuint dstx, GLuint dsty,
+ GLuint width, GLuint height, GLuint color)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intel == NULL)
+ return;
+
+ if (dst->pbo) {
+ if (dstx == 0 &&
+ dsty == 0 && width == dst->pitch && height == dst->height)
+ intel_region_release_pbo(intelScreen, dst);
+ else
+ intel_region_cow(intelScreen, dst);
+ }
+
+ intelEmitFillBlit(intel,
+ dst->cpp,
+ dst->pitch, dst->buffer, dst_offset,
+ dstx, dsty, width, height, color);
+}
+
+/* Attach to a pbo, discarding our data. Effectively zero-copy upload
+ * the pbo's data.
+ */
+void
+intel_region_attach_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ struct intel_buffer_object *pbo)
+{
+ if (region->pbo == pbo)
+ return;
+
+ /* If there is already a pbo attached, break the cow tie now.
+ * Don't call intel_region_release_pbo() as that would
+ * unnecessarily allocate a new buffer we would have to immediately
+ * discard.
+ */
+ if (region->pbo) {
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ }
+
+ if (region->buffer) {
+ dri_bo_unreference(region->buffer);
+ region->buffer = NULL;
+ }
+
+ region->pbo = pbo;
+ region->pbo->region = region;
+ dri_bo_reference(pbo->buffer);
+ region->buffer = pbo->buffer;
+}
+
+
+/* Break the COW tie to the pbo and allocate a new buffer.
+ * The pbo gets to keep the data.
+ */
+void
+intel_region_release_pbo(intelScreenPrivate *intelScreen,
+ struct intel_region *region)
+{
+ assert(region->buffer == region->pbo->buffer);
+ region->pbo->region = NULL;
+ region->pbo = NULL;
+ dri_bo_unreference(region->buffer);
+ region->buffer = NULL;
+
+ region->buffer = dri_bo_alloc(intelScreen->bufmgr, "region",
+ region->pitch * region->cpp * region->height,
+ 64, DRM_BO_FLAG_MEM_TT);
+}
+
+/* Break the COW tie to the pbo. Both the pbo and the region end up
+ * with a copy of the data.
+ */
+void
+intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
+{
+ struct intel_context *intel = intelScreenContext(intelScreen);
+ struct intel_buffer_object *pbo = region->pbo;
+
+ if (intel == NULL)
+ return;
+
+ intel_region_release_pbo(intelScreen, region);
+
+ assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
+
+ DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);
+
+ /* Now blit from the texture buffer to the new buffer:
+ */
+
+ intel_batchbuffer_flush(intel->batch);
+
+ if (!intel->locked) {
+ LOCK_HARDWARE(intel);
+ intelEmitCopyBlit(intel,
+ region->cpp,
+ region->pitch,
+ pbo->buffer, 0,
+ region->pitch,
+ region->buffer, 0,
+ 0, 0, 0, 0,
+ region->pitch, region->height,
+ GL_COPY);
+
+ intel_batchbuffer_flush(intel->batch);
+ UNLOCK_HARDWARE(intel);
+ }
+ else {
+ intelEmitCopyBlit(intel,
+ region->cpp,
+ region->pitch,
+ pbo->buffer, 0,
+ region->pitch,
+ region->buffer, 0,
+ 0, 0, 0, 0,
+ region->pitch, region->height,
+ GL_COPY);
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+}
+
+dri_bo *
+intel_region_buffer(intelScreenPrivate *intelScreen,
+ struct intel_region *region, GLuint flag)
+{
+ if (region->pbo) {
+ if (flag == INTEL_WRITE_PART)
+ intel_region_cow(intelScreen, region);
+ else if (flag == INTEL_WRITE_FULL)
+ intel_region_release_pbo(intelScreen, region);
+ }
+
+ return region->buffer;
+}
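+
+/* Summary of the PBO zero-copy / copy-on-write transitions implemented
+ * above: intel_region_attach_pbo() makes the region alias the pbo's buffer
+ * with no copy; an INTEL_WRITE_FULL hint leads to intel_region_release_pbo(),
+ * which hands the region a fresh buffer and leaves the data with the pbo;
+ * an INTEL_WRITE_PART hint leads to intel_region_cow(), after which both the
+ * pbo and the region hold their own copy of the data.
+ */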
diff --git a/src/mesa/drivers/dri/i915/intel_regions.h b/src/mesa/drivers/dri/intel/intel_regions.h
index 42d7b17711..42d7b17711 100644
--- a/src/mesa/drivers/dri/i915/intel_regions.h
+++ b/src/mesa/drivers/dri/intel/intel_regions.h
diff --git a/src/mesa/drivers/dri/intel/intel_screen.c b/src/mesa/drivers/dri/intel/intel_screen.c
new file mode 100644
index 0000000000..25f5efa7bc
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_screen.c
@@ -0,0 +1,945 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "context.h"
+#include "framebuffer.h"
+#include "matrix.h"
+#include "renderbuffer.h"
+#include "simple_list.h"
+#include "utils.h"
+#include "vblank.h"
+#include "xmlpool.h"
+
+
+#include "intel_screen.h"
+
+#include "intel_buffers.h"
+#include "intel_tex.h"
+#include "intel_span.h"
+#include "intel_tris.h"
+#include "intel_ioctl.h"
+#include "intel_fbo.h"
+
+#include "i830_dri.h"
+#include "dri_bufmgr.h"
+#include "intel_regions.h"
+#include "intel_batchbuffer.h"
+
+#include "intel_bufmgr_ttm.h"
+
+PUBLIC const char __driConfigOptions[] =
+ DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
+ DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
+ DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
+ DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
+ DRI_CONF_FORCE_S3TC_ENABLE(false)
+ DRI_CONF_ALLOW_LARGE_TEXTURES(1)
+ DRI_CONF_SECTION_END DRI_CONF_END;
+ const GLuint __driNConfigOptions = 4;
+
+#ifdef USE_NEW_INTERFACE
+ static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
+#endif /*USE_NEW_INTERFACE */
+
+ extern const struct dri_extension card_extensions[];
+ extern const struct dri_extension ttm_extensions[];
+
+/**
+ * Map all the memory regions described by the screen.
+ * \return GL_TRUE if success, GL_FALSE if error.
+ */
+GLboolean
+intelMapScreenRegions(__DRIscreenPrivate * sPriv)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ if (intelScreen->front.handle) {
+ if (drmMap(sPriv->fd,
+ intelScreen->front.handle,
+ intelScreen->front.size,
+ (drmAddress *) & intelScreen->front.map) != 0) {
+ _mesa_problem(NULL, "drmMap(frontbuffer) failed!");
+ return GL_FALSE;
+ }
+ }
+ else {
+ _mesa_warning(NULL, "no front buffer handle in intelMapScreenRegions!");
+ }
+
+ if (0)
+ _mesa_printf("Back 0x%08x ", intelScreen->back.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->back.handle,
+ intelScreen->back.size,
+ (drmAddress *) & intelScreen->back.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+
+ if (intelScreen->third.handle) {
+ if (0)
+ _mesa_printf("Third 0x%08x ", intelScreen->third.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->third.handle,
+ intelScreen->third.size,
+ (drmAddress *) & intelScreen->third.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+ }
+
+ if (0)
+ _mesa_printf("Depth 0x%08x ", intelScreen->depth.handle);
+ if (drmMap(sPriv->fd,
+ intelScreen->depth.handle,
+ intelScreen->depth.size,
+ (drmAddress *) & intelScreen->depth.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+
+ if (0)
+ _mesa_printf("TEX 0x%08x ", intelScreen->tex.handle);
+ if (intelScreen->tex.size != 0) {
+ if (drmMap(sPriv->fd,
+ intelScreen->tex.handle,
+ intelScreen->tex.size,
+ (drmAddress *) & intelScreen->tex.map) != 0) {
+ intelUnmapScreenRegions(intelScreen);
+ return GL_FALSE;
+ }
+ }
+
+ if (0)
+ printf("Mappings: front: %p back: %p third: %p depth: %p tex: %p\n",
+ intelScreen->front.map,
+ intelScreen->back.map, intelScreen->third.map,
+ intelScreen->depth.map, intelScreen->tex.map);
+ return GL_TRUE;
+}
+
+/** Driver-specific fence emit implementation for the fake memory manager. */
+static unsigned int
+intel_fence_emit(void *private)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
+ unsigned int fence;
+
+ /* XXX: Need to emit a flush, if we haven't already (at least with the
+ * current batchbuffer implementation, we have).
+ */
+
+ fence = intelEmitIrqLocked(intelScreen);
+
+ return fence;
+}
+
+/** Driver-specific fence wait implementation for the fake memory manager. */
+static int
+intel_fence_wait(void *private, unsigned int cookie)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *)private;
+
+ intelWaitIrq(intelScreen, cookie);
+
+ return 0;
+}
+
+static struct intel_region *
+intel_recreate_static(intelScreenPrivate *intelScreen,
+ struct intel_region *region,
+ intelRegion *region_desc,
+ GLuint mem_type)
+{
+ if (region) {
+ intel_region_update_static(intelScreen, region, mem_type,
+ region_desc->bo_handle, region_desc->offset,
+ region_desc->map, intelScreen->cpp,
+ region_desc->pitch / intelScreen->cpp,
+ intelScreen->height);
+ } else {
+ region = intel_region_create_static(intelScreen, mem_type,
+ region_desc->bo_handle,
+ region_desc->offset,
+ region_desc->map, intelScreen->cpp,
+ region_desc->pitch / intelScreen->cpp,
+ intelScreen->height);
+ }
+
+ assert(region->buffer != NULL);
+
+ return region;
+}
+
+
+/* Create intel_region structs to describe the static front,back,depth
+ * buffers created by the xserver.
+ *
+ * Although FBOs mean we no longer use these as render targets in
+ * all circumstances, they won't go away until the back and depth
+ * buffers become private, and the front and rotated buffers will
+ * remain even then.
+ *
+ * Note that these don't allocate video memory, just describe
+ * allocations already made by the X server.
+ */
+static void
+intel_recreate_static_regions(intelScreenPrivate *intelScreen)
+{
+ intelScreen->front_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->front_region,
+ &intelScreen->front,
+ DRM_BO_FLAG_MEM_TT);
+
+ /* The rotated region is only used for old DDXes that didn't handle rotation
+ * on their own.
+ */
+ if (intelScreen->driScrnPriv->ddx_version.minor < 8) {
+ intelScreen->rotated_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->rotated_region,
+ &intelScreen->rotated,
+ DRM_BO_FLAG_MEM_TT);
+ }
+
+ intelScreen->back_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->back_region,
+ &intelScreen->back,
+ DRM_BO_FLAG_MEM_TT);
+
+ if (intelScreen->third.handle) {
+ intelScreen->third_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->third_region,
+ &intelScreen->third,
+ DRM_BO_FLAG_MEM_TT);
+ }
+
+ /* Still assumes front.cpp == depth.cpp. We can kill this when we move to
+ * private buffers.
+ */
+ intelScreen->depth_region =
+ intel_recreate_static(intelScreen,
+ intelScreen->depth_region,
+ &intelScreen->depth,
+ DRM_BO_FLAG_MEM_TT);
+}
+
+/**
+ * Use the information in the sarea to update the screen parameters
+ * related to screen rotation. Needs to be called locked.
+ */
+void
+intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ intelUnmapScreenRegions(intelScreen);
+ intelUpdateScreenFromSAREA(intelScreen, sarea);
+ if (!intelMapScreenRegions(sPriv)) {
+ fprintf(stderr, "ERROR Remapping screen regions!!!\n");
+ }
+ intel_recreate_static_regions(intelScreen);
+}
+
+
+void
+intelUnmapScreenRegions(intelScreenPrivate * intelScreen)
+{
+#define REALLY_UNMAP 1
+ if (intelScreen->front.map) {
+#if REALLY_UNMAP
+ if (drmUnmap(intelScreen->front.map, intelScreen->front.size) != 0)
+ printf("drmUnmap front failed!\n");
+#endif
+ intelScreen->front.map = NULL;
+ }
+ if (intelScreen->back.map) {
+#if REALLY_UNMAP
+ if (drmUnmap(intelScreen->back.map, intelScreen->back.size) != 0)
+ printf("drmUnmap back failed!\n");
+#endif
+ intelScreen->back.map = NULL;
+ }
+ if (intelScreen->third.map) {
+#if REALLY_UNMAP
+ if (drmUnmap(intelScreen->third.map, intelScreen->third.size) != 0)
+ printf("drmUnmap third failed!\n");
+#endif
+ intelScreen->third.map = NULL;
+ }
+ if (intelScreen->depth.map) {
+#if REALLY_UNMAP
+ drmUnmap(intelScreen->depth.map, intelScreen->depth.size);
+ intelScreen->depth.map = NULL;
+#endif
+ }
+ if (intelScreen->tex.map) {
+#if REALLY_UNMAP
+ drmUnmap(intelScreen->tex.map, intelScreen->tex.size);
+ intelScreen->tex.map = NULL;
+#endif
+ }
+}
+
+
+static void
+intelPrintDRIInfo(intelScreenPrivate * intelScreen,
+ __DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
+{
+ fprintf(stderr, "*** Front size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->front.size, intelScreen->front.offset,
+ intelScreen->front.pitch);
+ fprintf(stderr, "*** Back size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->back.size, intelScreen->back.offset,
+ intelScreen->back.pitch);
+ fprintf(stderr, "*** Depth size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->depth.size, intelScreen->depth.offset,
+ intelScreen->depth.pitch);
+ fprintf(stderr, "*** Rotated size: 0x%x offset: 0x%x pitch: %d\n",
+ intelScreen->rotated.size, intelScreen->rotated.offset,
+ intelScreen->rotated.pitch);
+ fprintf(stderr, "*** Texture size: 0x%x offset: 0x%x\n",
+ intelScreen->tex.size, intelScreen->tex.offset);
+ fprintf(stderr, "*** Memory : 0x%x\n", gDRIPriv->mem);
+}
+
+
+static void
+intelPrintSAREA(const drmI830Sarea * sarea)
+{
+ fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width,
+ sarea->height);
+ fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
+ fprintf(stderr,
+ "SAREA: front offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->front_offset, sarea->front_size,
+ (unsigned) sarea->front_handle);
+ fprintf(stderr,
+ "SAREA: back offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->back_offset, sarea->back_size,
+ (unsigned) sarea->back_handle);
+ fprintf(stderr, "SAREA: depth offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->depth_offset, sarea->depth_size,
+ (unsigned) sarea->depth_handle);
+ fprintf(stderr, "SAREA: tex offset: 0x%08x size: 0x%x handle: 0x%x\n",
+ sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
+ fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
+ fprintf(stderr,
+ "SAREA: rotated offset: 0x%08x size: 0x%x\n",
+ sarea->rotated_offset, sarea->rotated_size);
+ fprintf(stderr, "SAREA: rotated pitch: %d\n", sarea->rotated_pitch);
+}
+
+
+/**
+ * A number of the screen parameters are obtained/computed from
+ * information in the SAREA. This function updates those parameters.
+ */
+void
+intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
+ drmI830Sarea * sarea)
+{
+ intelScreen->width = sarea->width;
+ intelScreen->height = sarea->height;
+
+ intelScreen->front.offset = sarea->front_offset;
+ intelScreen->front.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->front.handle = sarea->front_handle;
+ intelScreen->front.size = sarea->front_size;
+
+ intelScreen->back.offset = sarea->back_offset;
+ intelScreen->back.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->back.handle = sarea->back_handle;
+ intelScreen->back.size = sarea->back_size;
+
+ if (intelScreen->driScrnPriv->ddx_version.minor >= 8) {
+ intelScreen->third.offset = sarea->third_offset;
+ intelScreen->third.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->third.handle = sarea->third_handle;
+ intelScreen->third.size = sarea->third_size;
+ }
+
+ intelScreen->depth.offset = sarea->depth_offset;
+ intelScreen->depth.pitch = sarea->pitch * intelScreen->cpp;
+ intelScreen->depth.handle = sarea->depth_handle;
+ intelScreen->depth.size = sarea->depth_size;
+
+ if (intelScreen->driScrnPriv->ddx_version.minor >= 9) {
+ intelScreen->front.bo_handle = sarea->front_bo_handle;
+ intelScreen->back.bo_handle = sarea->back_bo_handle;
+ intelScreen->third.bo_handle = sarea->third_bo_handle;
+ intelScreen->depth.bo_handle = sarea->depth_bo_handle;
+ } else {
+ intelScreen->front.bo_handle = -1;
+ intelScreen->back.bo_handle = -1;
+ intelScreen->third.bo_handle = -1;
+ intelScreen->depth.bo_handle = -1;
+ }
+
+ intelScreen->tex.offset = sarea->tex_offset;
+ intelScreen->logTextureGranularity = sarea->log_tex_granularity;
+ intelScreen->tex.handle = sarea->tex_handle;
+ intelScreen->tex.size = sarea->tex_size;
+
+ intelScreen->rotated.offset = sarea->rotated_offset;
+ intelScreen->rotated.pitch = sarea->rotated_pitch * intelScreen->cpp;
+ intelScreen->rotated.size = sarea->rotated_size;
+ intelScreen->current_rotation = sarea->rotation;
+ matrix23Rotate(&intelScreen->rotMatrix,
+ sarea->width, sarea->height, sarea->rotation);
+ intelScreen->rotatedWidth = sarea->virtualX;
+ intelScreen->rotatedHeight = sarea->virtualY;
+
+ if (0)
+ intelPrintSAREA(sarea);
+}
+
+static const __DRItexOffsetExtension intelTexOffsetExtension = {
+ { __DRI_TEX_OFFSET },
+ intelSetTexOffset,
+};
+
+static const __DRIextension *intelExtensions[] = {
+ &driReadDrawableExtension,
+ &driCopySubBufferExtension.base,
+ &driSwapControlExtension.base,
+ &driFrameTrackingExtension.base,
+ &driMediaStreamCounterExtension.base,
+ &intelTexOffsetExtension.base,
+ NULL
+};
+
+
+static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
+{
+ intelScreenPrivate *intelScreen;
+ I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;
+ drmI830Sarea *sarea;
+
+ if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
+ fprintf(stderr,
+ "\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
+ return GL_FALSE;
+ }
+
+ /* Allocate the private area */
+ intelScreen = (intelScreenPrivate *) CALLOC(sizeof(intelScreenPrivate));
+ if (!intelScreen) {
+ fprintf(stderr, "\nERROR! Allocating private area failed\n");
+ return GL_FALSE;
+ }
+ /* parse information in __driConfigOptions */
+ driParseOptionInfo(&intelScreen->optionCache,
+ __driConfigOptions, __driNConfigOptions);
+
+ intelScreen->driScrnPriv = sPriv;
+ sPriv->private = (void *) intelScreen;
+ intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
+ sarea = (drmI830Sarea *)
+ (((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
+
+ intelScreen->deviceID = gDRIPriv->deviceID;
+ if (intelScreen->deviceID == PCI_CHIP_I865_G)
+ intelScreen->maxBatchSize = 4096;
+ else
+ intelScreen->maxBatchSize = BATCH_SZ;
+
+ intelScreen->mem = gDRIPriv->mem;
+ intelScreen->cpp = gDRIPriv->cpp;
+
+ switch (gDRIPriv->bitsPerPixel) {
+ case 16:
+ intelScreen->fbFormat = DV_PF_565;
+ break;
+ case 32:
+ intelScreen->fbFormat = DV_PF_8888;
+ break;
+ default:
+ exit(1);
+ break;
+ }
+
+ intelUpdateScreenFromSAREA(intelScreen, sarea);
+
+ if (!intelMapScreenRegions(sPriv)) {
+ fprintf(stderr, "\nERROR! mapping regions\n");
+ _mesa_free(intelScreen);
+ sPriv->private = NULL;
+ return GL_FALSE;
+ }
+
+ intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
+
+ if (0)
+ intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
+
+ intelScreen->drmMinor = sPriv->drm_version.minor;
+
+ /* Determine whether IRQs are active */
+ {
+ int ret;
+ drmI830GetParam gp;
+
+ gp.param = I830_PARAM_IRQ_ACTIVE;
+ gp.value = &intelScreen->irq_active;
+
+ ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
+ &gp, sizeof(gp));
+ if (ret) {
+ fprintf(stderr, "drmI830GetParam: %d\n", ret);
+ return GL_FALSE;
+ }
+ }
+
+ /* Determine if batchbuffers are allowed */
+ {
+ int ret;
+ drmI830GetParam gp;
+
+ gp.param = I830_PARAM_ALLOW_BATCHBUFFER;
+ gp.value = &intelScreen->allow_batchbuffer;
+
+ ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
+ &gp, sizeof(gp));
+ if (ret) {
+ fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
+ return GL_FALSE;
+ }
+ }
+
+ sPriv->extensions = intelExtensions;
+
+ /* If we've got a new enough DDX that's initializing TTM and giving us
+ * object handles for the shared buffers, use that.
+ */
+ intelScreen->ttm = GL_FALSE;
+ if (getenv("INTEL_NO_TTM") == NULL &&
+ intelScreen->driScrnPriv->ddx_version.minor >= 9 &&
+ intelScreen->drmMinor >= 11 &&
+ intelScreen->front.bo_handle != -1) {
+ intelScreen->bufmgr = intel_bufmgr_ttm_init(sPriv->fd,
+ DRM_FENCE_TYPE_EXE,
+ DRM_FENCE_TYPE_EXE |
+ DRM_I915_FENCE_TYPE_RW,
+ BATCH_SZ);
+ if (intelScreen->bufmgr != NULL)
+ intelScreen->ttm = GL_TRUE;
+ }
+ /* Otherwise, use the classic buffer manager. */
+ if (intelScreen->bufmgr == NULL) {
+ if (intelScreen->tex.size == 0) {
+ fprintf(stderr, "[%s:%u] Error initializing buffer manager.\n",
+ __func__, __LINE__);
+ return GL_FALSE;
+ }
+ fprintf(stderr, "[%s:%u] Failed to init TTM buffer manager, falling back"
+ " to classic.\n", __func__, __LINE__);
+ intelScreen->bufmgr = dri_bufmgr_fake_init(intelScreen->tex.offset,
+ intelScreen->tex.map,
+ intelScreen->tex.size,
+ intel_fence_emit,
+ intel_fence_wait,
+ intelScreen);
+ }
+
+ intel_recreate_static_regions(intelScreen);
+
+ return GL_TRUE;
+}
+
+
+static void
+intelDestroyScreen(__DRIscreenPrivate * sPriv)
+{
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ intelUnmapScreenRegions(intelScreen);
+
+ dri_bufmgr_destroy(intelScreen->bufmgr);
+ FREE(intelScreen);
+ sPriv->private = NULL;
+}
+
+
+/**
+ * This is called when we need to set up GL rendering to a new X window.
+ */
+static GLboolean
+intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
+ __DRIdrawablePrivate * driDrawPriv,
+ const __GLcontextModes * mesaVis, GLboolean isPixmap)
+{
+ intelScreenPrivate *screen = (intelScreenPrivate *) driScrnPriv->private;
+
+ if (isPixmap) {
+ return GL_FALSE; /* not implemented */
+ }
+ else {
+ GLboolean swStencil = (mesaVis->stencilBits > 0 &&
+ mesaVis->depthBits != 24);
+ GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
+
+ struct intel_framebuffer *intel_fb = CALLOC_STRUCT(intel_framebuffer);
+
+ if (!intel_fb)
+ return GL_FALSE;
+
+ _mesa_initialize_framebuffer(&intel_fb->Base, mesaVis);
+
+ /* setup the hardware-based renderbuffers */
+ {
+ intel_fb->color_rb[0]
+ = intel_create_renderbuffer(rgbFormat,
+ screen->width, screen->height,
+ screen->front.offset,
+ screen->front.pitch,
+ screen->cpp,
+ screen->front.map);
+ intel_set_span_functions(&intel_fb->color_rb[0]->Base);
+ _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_FRONT_LEFT,
+ &intel_fb->color_rb[0]->Base);
+ }
+
+ if (mesaVis->doubleBufferMode) {
+ intel_fb->color_rb[1]
+ = intel_create_renderbuffer(rgbFormat,
+ screen->width, screen->height,
+ screen->back.offset,
+ screen->back.pitch,
+ screen->cpp,
+ screen->back.map);
+ intel_set_span_functions(&intel_fb->color_rb[1]->Base);
+ _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_BACK_LEFT,
+ &intel_fb->color_rb[1]->Base);
+
+ if (screen->third.handle) {
+ struct gl_renderbuffer *tmp_rb = NULL;
+
+ intel_fb->color_rb[2]
+ = intel_create_renderbuffer(rgbFormat,
+ screen->width, screen->height,
+ screen->third.offset,
+ screen->third.pitch,
+ screen->cpp,
+ screen->third.map);
+ intel_set_span_functions(&intel_fb->color_rb[2]->Base);
+ _mesa_reference_renderbuffer(&tmp_rb, &intel_fb->color_rb[2]->Base);
+ }
+ }
+
+ if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
+ /* combined depth/stencil buffer */
+ struct intel_renderbuffer *depthStencilRb
+ = intel_create_renderbuffer(GL_DEPTH24_STENCIL8_EXT,
+ screen->width, screen->height,
+ screen->depth.offset,
+ screen->depth.pitch,
+ screen->cpp, /* 4! */
+ screen->depth.map);
+ intel_set_span_functions(&depthStencilRb->Base);
+ /* note: bind RB to two attachment points */
+ _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH,
+ &depthStencilRb->Base);
+ _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_STENCIL,
+ &depthStencilRb->Base);
+ }
+ else if (mesaVis->depthBits == 16) {
+ /* just 16-bit depth buffer, no hw stencil */
+ struct intel_renderbuffer *depthRb
+ = intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
+ screen->width, screen->height,
+ screen->depth.offset,
+ screen->depth.pitch,
+ screen->cpp, /* 2! */
+ screen->depth.map);
+ intel_set_span_functions(&depthRb->Base);
+ _mesa_add_renderbuffer(&intel_fb->Base, BUFFER_DEPTH, &depthRb->Base);
+ }
+
+ /* now add any/all software-based renderbuffers we may need */
+ _mesa_add_soft_renderbuffers(&intel_fb->Base,
+ GL_FALSE, /* never sw color */
+ GL_FALSE, /* never sw depth */
+ swStencil, mesaVis->accumRedBits > 0,
+ GL_FALSE, /* never sw alpha */
+ GL_FALSE /* never sw aux */ );
+ driDrawPriv->driverPrivate = (void *) intel_fb;
+
+ return GL_TRUE;
+ }
+}
+
+static void
+intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
+{
+ _mesa_unreference_framebuffer((GLframebuffer **)(&(driDrawPriv->driverPrivate)));
+}
+
+
+/**
+ * Get information about previous buffer swaps.
+ */
+static int
+intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
+{
+ struct intel_framebuffer *intel_fb;
+
+ if ((dPriv == NULL) || (dPriv->driverPrivate == NULL)
+ || (sInfo == NULL)) {
+ return -1;
+ }
+
+ intel_fb = dPriv->driverPrivate;
+ sInfo->swap_count = intel_fb->swap_count;
+ sInfo->swap_ust = intel_fb->swap_ust;
+ sInfo->swap_missed_count = intel_fb->swap_missed_count;
+
+ sInfo->swap_missed_usage = (sInfo->swap_missed_count != 0)
+ ? driCalculateSwapUsage(dPriv, 0, intel_fb->swap_missed_ust)
+ : 0.0;
+
+ return 0;
+}
+
+
+/* There are probably better ways to do this, such as an
+ * init-designated function to register chipids and createcontext
+ * functions.
+ */
+extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate);
+
+
+
+
+static GLboolean
+intelCreateContext(const __GLcontextModes * mesaVis,
+ __DRIcontextPrivate * driContextPriv,
+ void *sharedContextPrivate)
+{
+ __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
+ intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
+
+ switch (intelScreen->deviceID) {
+ /* Don't deal with i830 until texture work complete:
+ */
+ case PCI_CHIP_845_G:
+ case PCI_CHIP_I830_M:
+ case PCI_CHIP_I855_GM:
+ case PCI_CHIP_I865_G:
+ return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
+
+ case PCI_CHIP_I915_G:
+ case PCI_CHIP_I915_GM:
+ case PCI_CHIP_I945_G:
+ case PCI_CHIP_I945_GM:
+ case PCI_CHIP_I945_GME:
+ case PCI_CHIP_G33_G:
+ case PCI_CHIP_Q35_G:
+ case PCI_CHIP_Q33_G:
+ return i915CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
+
+ default:
+ fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
+ return GL_FALSE;
+ }
+}
+
+
+static const struct __DriverAPIRec intelAPI = {
+ .DestroyScreen = intelDestroyScreen,
+ .CreateContext = intelCreateContext,
+ .DestroyContext = intelDestroyContext,
+ .CreateBuffer = intelCreateBuffer,
+ .DestroyBuffer = intelDestroyBuffer,
+ .SwapBuffers = intelSwapBuffers,
+ .MakeCurrent = intelMakeCurrent,
+ .UnbindContext = intelUnbindContext,
+ .GetSwapInfo = intelGetSwapInfo,
+ .GetMSC = driGetMSC32,
+ .GetDrawableMSC = driDrawableGetMSC32,
+ .WaitForMSC = driWaitForMSC32,
+ .WaitForSBC = NULL,
+ .SwapBuffersMSC = NULL,
+ .CopySubBuffer = intelCopySubBuffer,
+ .setTexOffset = intelSetTexOffset,
+};
+
+
+static __GLcontextModes *
+intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
+ unsigned stencil_bits, GLboolean have_back_buffer)
+{
+ __GLcontextModes *modes;
+ __GLcontextModes *m;
+ unsigned num_modes;
+ unsigned depth_buffer_factor;
+ unsigned back_buffer_factor;
+ GLenum fb_format;
+ GLenum fb_type;
+
+ /* GLX_SWAP_COPY_OML is only supported because the Intel driver doesn't
+ * support pageflipping at all.
+ */
+ static const GLenum back_buffer_modes[] = {
+ GLX_NONE, GLX_SWAP_UNDEFINED_OML, GLX_SWAP_COPY_OML
+ };
+
+ u_int8_t depth_bits_array[3];
+ u_int8_t stencil_bits_array[3];
+
+
+ depth_bits_array[0] = 0;
+ depth_bits_array[1] = depth_bits;
+ depth_bits_array[2] = depth_bits;
+
+ /* Just like with the accumulation buffer, always provide some modes
+ * with a stencil buffer. It will be a sw fallback, but some apps won't
+ * care about that.
+ */
+ stencil_bits_array[0] = 0;
+ stencil_bits_array[1] = 0;
+ if (depth_bits == 24)
+ stencil_bits_array[1] = (stencil_bits == 0) ? 8 : stencil_bits;
+
+ stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;
+
+ depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
+ back_buffer_factor = (have_back_buffer) ? 3 : 1;
+
+ num_modes = depth_buffer_factor * back_buffer_factor * 4;
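+ /* e.g. 24-bit depth with a back buffer: 3 * 3 * 4 == 36 candidate modes */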
+
+ if (pixel_bits == 16) {
+ fb_format = GL_RGB;
+ fb_type = GL_UNSIGNED_SHORT_5_6_5;
+ }
+ else {
+ fb_format = GL_BGRA;
+ fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
+ }
+
+ modes =
+ (*dri_interface->createContextModes) (num_modes,
+ sizeof(__GLcontextModes));
+ m = modes;
+ if (!driFillInModes(&m, fb_format, fb_type,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor, GLX_TRUE_COLOR)) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+ if (!driFillInModes(&m, fb_format, fb_type,
+ depth_bits_array, stencil_bits_array,
+ depth_buffer_factor, back_buffer_modes,
+ back_buffer_factor, GLX_DIRECT_COLOR)) {
+ fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
+ __LINE__);
+ return NULL;
+ }
+
+ /* Mark the visual as slow if there are "fake" stencil bits.
+ */
+ for (m = modes; m != NULL; m = m->next) {
+ if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
+ m->visualRating = GLX_SLOW_CONFIG;
+ }
+ }
+
+ return modes;
+}
+
+
+/**
+ * This is the driver specific part of the createNewScreen entry point.
+ *
+ * \todo maybe fold this into intelInitDriver
+ *
+ * \return the __GLcontextModes supported by this driver
+ */
+PUBLIC __GLcontextModes *__driDriverInitScreen(__DRIscreenPrivate *psp)
+{
+ static const __DRIversion ddx_expected = { 1, 5, 0 };
+ static const __DRIversion dri_expected = { 4, 0, 0 };
+ static const __DRIversion drm_expected = { 1, 5, 0 };
+ I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;
+
+ psp->DriverAPI = intelAPI;
+
+ if (!driCheckDriDdxDrmVersions2("i915",
+ &psp->dri_version, &dri_expected,
+ &psp->ddx_version, &ddx_expected,
+ &psp->drm_version, &drm_expected)) {
+ return NULL;
+ }
+
+ /* Calling driInitExtensions here, with a NULL context pointer,
+ * does not actually enable the extensions. It just makes sure
+ * that all the dispatch offsets for all the extensions that
+ * *might* be enabled are known. This is needed because the
+ * dispatch offsets need to be known when _mesa_context_create is
+ * called, but we can't enable the extensions until we have a
+ * context pointer.
+ *
+ * Hello chicken. Hello egg. How are you two today?
+ */
+ driInitExtensions(NULL, card_extensions, GL_FALSE);
+ driInitExtensions(NULL, ttm_extensions, GL_FALSE);
+
+ if (!intelInitDriver(psp))
+ return NULL;
+
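+ /* e.g. a 32 bpp screen (cpp == 4) requests visuals with 24-bit depth and
+ * 8-bit stencil; a 16 bpp screen gets 16-bit depth and no stencil.
+ */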
+ return intelFillInModes(dri_priv->cpp * 8,
+ (dri_priv->cpp == 2) ? 16 : 24,
+ (dri_priv->cpp == 2) ? 0 : 8, 1);
+}
+
+struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen)
+{
+ /*
+ * This should probably change to have the screen allocate a dummy
+ * context at screen creation. For now just use the current context.
+ */
+
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx == NULL) {
+ _mesa_problem(NULL, "No current context in intelScreenContext\n");
+ return NULL;
+ }
+ return intel_context(ctx);
+}
+
diff --git a/src/mesa/drivers/dri/i915/intel_screen.h b/src/mesa/drivers/dri/intel/intel_screen.h
index aa0ef2c509..aa0ef2c509 100644
--- a/src/mesa/drivers/dri/i915/intel_screen.h
+++ b/src/mesa/drivers/dri/intel/intel_screen.h
diff --git a/src/mesa/drivers/dri/intel/intel_span.c b/src/mesa/drivers/dri/intel/intel_span.c
new file mode 100644
index 0000000000..d1f8ef06be
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_span.c
@@ -0,0 +1,409 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "colormac.h"
+
+#include "intel_fbo.h"
+#include "intel_screen.h"
+#include "intel_span.h"
+#include "intel_regions.h"
+#include "intel_ioctl.h"
+#include "intel_tex.h"
+
+#include "swrast/swrast.h"
+
+/*
+ break intelWriteRGBASpan_ARGB8888
+*/
+
+#undef DBG
+#define DBG 0
+
+#define LOCAL_VARS \
+ struct intel_context *intel = intel_context(ctx); \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLint yScale = irb->RenderToTexture ? 1 : -1; \
+ const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
+ GLubyte *buf = (GLubyte *) irb->pfMap \
+ + (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
+ GLuint p; \
+ assert(irb->pfMap);\
+ (void) p;
+
+/* XXX FBO: this is identical to the macro in spantmp2.h except we get
+ * the cliprect info from the context, not the driDrawable.
+ * Move this into spantmp2.h someday.
+ */
+#define HW_CLIPLOOP() \
+ do { \
+ int _nc = intel->numClipRects; \
+ while ( _nc-- ) { \
+ int minx = intel->pClipRects[_nc].x1 - intel->drawX; \
+ int miny = intel->pClipRects[_nc].y1 - intel->drawY; \
+ int maxx = intel->pClipRects[_nc].x2 - intel->drawX; \
+ int maxy = intel->pClipRects[_nc].y2 - intel->drawY;
+
+
+
+
+#define Y_FLIP(_y) ((_y) * yScale + yBias)
+
+#define HW_LOCK()
+
+#define HW_UNLOCK()
+
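
/* Illustrative aside (not part of the patch): Y_FLIP converts GL's
 * bottom-up y to the window's top-down layout.  For a 480-line window
 * drawn on screen (not rendering to a texture), yScale == -1 and
 * yBias == 479, so GL y == 0 maps to hardware row 479 and GL y == 479
 * to row 0.  When rendering to a texture no flip is wanted, and the
 * macro reduces to the identity (yScale == 1, yBias == 0).
 */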
+/* 16 bit, RGB565 color spanline and pixel functions
+ */
+#define SPANTMP_PIXEL_FMT GL_RGB
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
+
+#define TAG(x) intel##x##_RGB565
+#define TAG2(x,y) intel##x##_RGB565##y
+#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
+#include "spantmp2.h"
+
+/* 32 bit, ARGB8888 color spanline and pixel functions
+ */
+#define SPANTMP_PIXEL_FMT GL_BGRA
+#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
+
+#define TAG(x) intel##x##_ARGB8888
+#define TAG2(x,y) intel##x##_ARGB8888##y
+#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
+#include "spantmp2.h"
+
+
+#define LOCAL_DEPTH_VARS \
+ struct intel_context *intel = intel_context(ctx); \
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
+ const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
+ const GLint yScale = irb->RenderToTexture ? 1 : -1; \
+ const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
+ char *buf = (char *) irb->pfMap/*XXX use region->map*/ + \
+ (intel->drawY * pitch + intel->drawX) * irb->region->cpp;
+
+
+#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
+
+/**
+ ** 16-bit depthbuffer functions.
+ **/
+#define WRITE_DEPTH( _x, _y, d ) \
+ ((GLushort *)buf)[(_x) + (_y) * pitch] = d;
+
+#define READ_DEPTH( d, _x, _y ) \
+ d = ((GLushort *)buf)[(_x) + (_y) * pitch];
+
+
+#define TAG(x) intel##x##_z16
+#include "depthtmp.h"
+
+
+/**
+ ** 24/8-bit interleaved depth/stencil functions
+ ** Note: we're actually reading back combined depth+stencil values.
+ ** The wrappers in main/depthstencil.c are used to extract the depth
+ ** and stencil values.
+ **/
+/* Change ZZZS -> SZZZ */
+#define WRITE_DEPTH( _x, _y, d ) { \
+ GLuint tmp = ((d) >> 8) | ((d) << 24); \
+ ((GLuint *)buf)[(_x) + (_y) * pitch] = tmp; \
+}
+
+/* Change SZZZ -> ZZZS */
+#define READ_DEPTH( d, _x, _y ) { \
+ GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
+ d = (tmp << 8) | (tmp >> 24); \
+}
+
+#define TAG(x) intel##x##_z24_s8
+#include "depthtmp.h"
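
/* Illustrative aside (not part of the patch): a worked example of the
 * rotation above.  A packed ZZZS value d == 0xabcdef42 (depth 0xabcdef,
 * stencil 0x42) is written to the buffer as (d >> 8) | (d << 24)
 * == 0x42abcdef, i.e. SZZZ with the stencil byte moved to the top;
 * READ_DEPTH reverses it: (0x42abcdef << 8) | (0x42abcdef >> 24)
 * == 0xabcdef42 again.
 */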
+
+
+/**
+ ** 8-bit stencil function (XXX FBO: This is obsolete)
+ **/
+#define WRITE_STENCIL( _x, _y, d ) { \
+ GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
+ tmp &= 0xffffff; \
+ tmp |= ((d) << 24); \
+ ((GLuint *) buf)[(_x) + (_y) * pitch] = tmp; \
+}
+
+#define READ_STENCIL( d, _x, _y ) \
+ d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;
+
+#define TAG(x) intel##x##_z24_s8
+#include "stenciltmp.h"
+
+
+
+/**
+ * Map or unmap all the renderbuffers which we may need during
+ * software rendering.
+ * XXX in the future, we could probably convey extra information to
+ * reduce the number of mappings needed. I.e. if doing a glReadPixels
+ * from the depth buffer, we really only need one mapping.
+ *
+ * XXX Rewrite this function someday.
+ * We can probably just loop over all the renderbuffer attachments,
+ * map/unmap all of them, and not worry about the _ColorDrawBuffers,
+ * _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
+ */
+static void
+intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
+{
+ GLcontext *ctx = &intel->ctx;
+ GLuint i, j;
+ struct intel_renderbuffer *irb;
+
+ /* color draw buffers */
+ for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
+ for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
+ struct gl_renderbuffer *rb =
+ ctx->DrawBuffer->_ColorDrawBuffers[i][j];
+ irb = intel_renderbuffer(rb);
+ if (irb) {
+ /* this is a user-created intel_renderbuffer */
+ if (irb->region) {
+ if (map)
+ intel_region_map(intel->intelScreen, irb->region);
+ else
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ }
+ }
+ }
+
+ /* check for render to textures */
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ struct gl_renderbuffer_attachment *att =
+ ctx->DrawBuffer->Attachment + i;
+ struct gl_texture_object *tex = att->Texture;
+ if (tex) {
+ /* render to texture */
+ ASSERT(att->Renderbuffer);
+ if (map) {
+ struct gl_texture_image *texImg;
+ texImg = tex->Image[att->CubeMapFace][att->TextureLevel];
+ intel_tex_map_images(intel, intel_texture_object(tex));
+ }
+ else {
+ intel_tex_unmap_images(intel, intel_texture_object(tex));
+ }
+ }
+ }
+
+ /* color read buffers */
+ irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
+ if (irb && irb->region) {
+ if (map)
+ intel_region_map(intel->intelScreen, irb->region);
+ else
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+
+ /* Account for front/back color page flipping.
+ * The span routines use the pfMap and pfPitch fields which will
+ * swap the front/back region map/pitch if we're page flipped.
+ * Do this after mapping, above, so the map field is valid.
+ */
+#if 0
+ if (map && ctx->DrawBuffer->Name == 0) {
+ struct intel_renderbuffer *irbFront
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
+ struct intel_renderbuffer *irbBack
+ = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
+ if (irbBack) {
+ /* double buffered */
+ if (intel->sarea->pf_current_page == 0) {
+ irbFront->pfMap = irbFront->region->map;
+ irbFront->pfPitch = irbFront->region->pitch;
+ irbBack->pfMap = irbBack->region->map;
+ irbBack->pfPitch = irbBack->region->pitch;
+ }
+ else {
+ irbFront->pfMap = irbBack->region->map;
+ irbFront->pfPitch = irbBack->region->pitch;
+ irbBack->pfMap = irbFront->region->map;
+ irbBack->pfPitch = irbFront->region->pitch;
+ }
+ }
+ }
+#endif
+
+ /* depth buffer (Note wrapper!) */
+ if (ctx->DrawBuffer->_DepthBuffer) {
+ irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
+ if (irb && irb->region && irb->Base.Name != 0) {
+ if (map) {
+ intel_region_map(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ else {
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = NULL;
+ irb->pfPitch = 0;
+ }
+ }
+ }
+
+ /* stencil buffer (Note wrapper!) */
+ if (ctx->DrawBuffer->_StencilBuffer) {
+ irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
+ if (irb && irb->region && irb->Base.Name != 0) {
+ if (map) {
+ intel_region_map(intel->intelScreen, irb->region);
+ irb->pfMap = irb->region->map;
+ irb->pfPitch = irb->region->pitch;
+ }
+ else {
+ intel_region_unmap(intel->intelScreen, irb->region);
+ irb->pfMap = NULL;
+ irb->pfPitch = 0;
+ }
+ }
+ }
+}
+
+
+
+/**
+ * Prepare for software rendering.  Map current read/draw framebuffers'
+ * renderbuffers and all currently bound texture objects.
+ *
+ * Old note: Moved locking out to get reasonable span performance.
+ */
+void
+intelSpanRenderStart(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ intelFinish(&intel->ctx);
+ LOCK_HARDWARE(intel);
+
+#if 0
+ /* Just map the framebuffer and all textures. Bufmgr code will
+ * take care of waiting on the necessary fences:
+ */
+ intel_region_map(intel->intelScreen, intel->front_region);
+ intel_region_map(intel->intelScreen, intel->back_region);
+ intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
+#endif
+
+ for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_map_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_buffers(intel, GL_TRUE);
+}
+
+/**
+ * Called when done software rendering.  Unmap the buffers we mapped in
+ * the above function.
+ */
+void
+intelSpanRenderFinish(GLcontext * ctx)
+{
+ struct intel_context *intel = intel_context(ctx);
+ GLuint i;
+
+ _swrast_flush(ctx);
+
+ /* Now unmap the framebuffer:
+ */
+#if 0
+ intel_region_unmap(intel, intel->front_region);
+ intel_region_unmap(intel, intel->back_region);
+ intel_region_unmap(intel, intel->intelScreen->depth_region);
+#endif
+
+ for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
+ if (ctx->Texture.Unit[i]._ReallyEnabled) {
+ struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
+ intel_tex_unmap_images(intel, intel_texture_object(texObj));
+ }
+ }
+
+ intel_map_unmap_buffers(intel, GL_FALSE);
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+void
+intelInitSpanFuncs(GLcontext * ctx)
+{
+ struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
+ swdd->SpanRenderStart = intelSpanRenderStart;
+ swdd->SpanRenderFinish = intelSpanRenderFinish;
+}
+
+
+/**
+ * Plug in appropriate span read/write functions for the given renderbuffer.
+ * These are used for the software fallbacks.
+ */
+void
+intel_set_span_functions(struct gl_renderbuffer *rb)
+{
+ if (rb->_ActualFormat == GL_RGB5) {
+ /* 565 RGB */
+ intelInitPointers_RGB565(rb);
+ }
+ else if (rb->_ActualFormat == GL_RGBA8) {
+ /* 8888 RGBA */
+ intelInitPointers_ARGB8888(rb);
+ }
+ else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
+ intelInitDepthPointers_z16(rb);
+ }
+ else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24 || /* XXX FBO remove */
+ rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
+ intelInitDepthPointers_z24_s8(rb);
+ }
+ else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) { /* XXX FBO remove */
+ intelInitStencilPointers_z24_s8(rb);
+ }
+ else {
+ _mesa_problem(NULL,
+ "Unexpected _ActualFormat in intelSetSpanFunctions");
+ }
+}
diff --git a/src/mesa/drivers/dri/i915/intel_span.h b/src/mesa/drivers/dri/intel/intel_span.h
index 5201f6d6c6..5201f6d6c6 100644
--- a/src/mesa/drivers/dri/i915/intel_span.h
+++ b/src/mesa/drivers/dri/intel/intel_span.h
diff --git a/src/mesa/drivers/dri/intel/intel_tex.c b/src/mesa/drivers/dri/intel/intel_tex.c
new file mode 100644
index 0000000000..b08dee43bc
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex.c
@@ -0,0 +1,192 @@
+#include "texobj.h"
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static GLboolean
+intelIsTextureResident(GLcontext * ctx, struct gl_texture_object *texObj)
+{
+#if 0
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ return
+ intelObj->mt &&
+ intelObj->mt->region &&
+ intel_is_region_resident(intel, intelObj->mt->region);
+#endif
+ return 1;
+}
+
+
+
+static struct gl_texture_image *
+intelNewTextureImage(GLcontext * ctx)
+{
+ DBG("%s\n", __FUNCTION__);
+ (void) ctx;
+ return (struct gl_texture_image *) CALLOC_STRUCT(intel_texture_image);
+}
+
+
+static struct gl_texture_object *
+intelNewTextureObject(GLcontext * ctx, GLuint name, GLenum target)
+{
+ struct intel_texture_object *obj = CALLOC_STRUCT(intel_texture_object);
+
+ DBG("%s\n", __FUNCTION__);
+ _mesa_initialize_texture_object(&obj->base, name, target);
+
+ return &obj->base;
+}
+
+static void
+intelDeleteTextureObject(GLcontext *ctx,
+ struct gl_texture_object *texObj)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ _mesa_delete_texture_object(ctx, texObj);
+}
+
+
+static void
+intelFreeTextureImageData(GLcontext * ctx, struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+
+ if (texImage->Data) {
+ free(texImage->Data);
+ texImage->Data = NULL;
+ }
+}
+
+
+/* The system memcpy (at least on Ubuntu 5.10) has problems copying
+ * to agp (writecombined) memory from a source which isn't 64-byte
+ * aligned - there is a 4x performance falloff.
+ *
+ * The x86 __memcpy is immune to this but is slightly slower
+ * (10%-ish) than the system memcpy.
+ *
+ * The sse_memcpy seems to have a slight cliff at 64/32 bytes, but
+ * isn't much faster than x86_memcpy for agp copies.
+ *
+ * TODO: switch dynamically.
+ */
+static void *
+do_memcpy(void *dest, const void *src, size_t n)
+{
+ if ((((unsigned) src) & 63) || (((unsigned) dest) & 63)) {
+ return __memcpy(dest, src, n);
+ }
+ else
+ return memcpy(dest, src, n);
+}
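
/* Illustrative aside (not part of the patch): the test above only looks
 * at the low six address bits, so a pointer such as 0x80004020 gives
 * (0x80004020 & 63) == 32 and the copy takes the __memcpy path, while a
 * pair of 64-byte-aligned pointers goes through the plain memcpy.
 */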
+
+
+#if DO_DEBUG
+
+#ifndef __x86_64__
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t"
+ "pushl %%ebx\n\t"
+ "cpuid\n\t" ".byte 0x0f, 0x31\n\t"
+ "popl %%ebx\n":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "cc");
+
+ return eax;
+}
+#else
+static unsigned
+fastrdtsc(void)
+{
+ unsigned eax;
+ __asm__ volatile ("\t" "cpuid\n\t" ".byte 0x0f, 0x31\n\t":"=a" (eax)
+ :"0"(0)
+ :"ecx", "edx", "ebx", "cc");
+
+ return eax;
+}
+#endif
+
+static unsigned
+time_diff(unsigned t, unsigned t2)
+{
+ return ((t < t2) ? t2 - t : 0xFFFFFFFFU - (t - t2 - 1));
+}
+
+
+static void *
+timed_memcpy(void *dest, const void *src, size_t n)
+{
+ void *ret;
+ unsigned t1, t2;
+ double rate;
+
+ if ((((unsigned) src) & 63) || (((unsigned) dest) & 63))
+ _mesa_printf("Warning - non-aligned texture copy!\n");
+
+ t1 = fastrdtsc();
+ ret = do_memcpy(dest, src, n);
+ t2 = fastrdtsc();
+
+ rate = time_diff(t1, t2);
+ rate /= (double) n;
+ _mesa_printf("timed_memcpy: %u %u --> %f clocks/byte\n", t1, t2, rate);
+ return ret;
+}
+#endif /* DO_DEBUG */
+
+
+void
+intelInitTextureFuncs(struct dd_function_table *functions)
+{
+ functions->ChooseTextureFormat = intelChooseTextureFormat;
+ functions->TexImage1D = intelTexImage1D;
+ functions->TexImage2D = intelTexImage2D;
+ functions->TexImage3D = intelTexImage3D;
+ functions->TexSubImage1D = intelTexSubImage1D;
+ functions->TexSubImage2D = intelTexSubImage2D;
+ functions->TexSubImage3D = intelTexSubImage3D;
+ functions->CopyTexImage1D = intelCopyTexImage1D;
+ functions->CopyTexImage2D = intelCopyTexImage2D;
+ functions->CopyTexSubImage1D = intelCopyTexSubImage1D;
+ functions->CopyTexSubImage2D = intelCopyTexSubImage2D;
+ functions->GetTexImage = intelGetTexImage;
+
+ /* compressed texture functions */
+ functions->CompressedTexImage2D = intelCompressedTexImage2D;
+ functions->GetCompressedTexImage = intelGetCompressedTexImage;
+
+ functions->NewTextureObject = intelNewTextureObject;
+ functions->NewTextureImage = intelNewTextureImage;
+ functions->DeleteTexture = intelDeleteTextureObject;
+ functions->FreeTexImageData = intelFreeTextureImageData;
+ functions->UpdateTexturePalette = 0;
+ functions->IsTextureResident = intelIsTextureResident;
+
+#if DO_DEBUG
+ if (INTEL_DEBUG & DEBUG_BUFMGR)
+ functions->TextureMemCpy = timed_memcpy;
+ else
+#endif
+ functions->TextureMemCpy = do_memcpy;
+}
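
For context, the TextureMemCpy hook installed above is what core Mesa's texture store paths are expected to call in place of a plain memcpy when copying rows of texels. A rough sketch of that kind of call site follows; the helper name and loop are illustrative assumptions, not code from this patch:

static void copy_rows_sketch(GLcontext *ctx, GLubyte *dst, const GLubyte *src,
                             GLuint rows, GLuint bytes_per_row,
                             GLuint dst_stride, GLuint src_stride)
{
   GLuint i;
   for (i = 0; i < rows; i++) {
      /* do_memcpy() or timed_memcpy() runs here, once per row span */
      ctx->Driver.TextureMemCpy(dst, src, bytes_per_row);
      dst += dst_stride;
      src += src_stride;
   }
}
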
diff --git a/src/mesa/drivers/dri/intel/intel_tex_copy.c b/src/mesa/drivers/dri/intel/intel_tex_copy.c
new file mode 100644
index 0000000000..b85a25642a
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_copy.c
@@ -0,0 +1,302 @@
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mtypes.h"
+#include "enums.h"
+#include "image.h"
+#include "teximage.h"
+#include "swrast/swrast.h"
+
+#include "intel_screen.h"
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_buffers.h"
+#include "intel_mipmap_tree.h"
+#include "intel_regions.h"
+#include "intel_fbo.h"
+#include "intel_tex.h"
+#include "intel_blit.h"
+#include "intel_pixel.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Get the intel_region which is the source for any glCopyTex[Sub]Image call.
+ *
+ * Do the best we can using the blitter. A future project is to use
+ * the texture engine and fragment programs for these copies.
+ */
+static const struct intel_region *
+get_teximage_source(struct intel_context *intel, GLenum internalFormat)
+{
+ struct intel_renderbuffer *irb;
+
+ DBG("%s %s\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(internalFormat));
+
+ switch (internalFormat) {
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16_ARB:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 2)
+ return irb->region;
+ return NULL;
+ case GL_DEPTH24_STENCIL8_EXT:
+ case GL_DEPTH_STENCIL_EXT:
+ irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
+ if (irb && irb->region && irb->region->cpp == 4)
+ return irb->region;
+ return NULL;
+ case GL_RGBA:
+ case GL_RGBA8:
+ return intel_readbuf_region(intel);
+ case GL_RGB:
+ if (intel->intelScreen->cpp == 2)
+ return intel_readbuf_region(intel);
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
+
+static GLboolean
+do_copy_texsubimage(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ GLenum internalFormat,
+ GLint dstx, GLint dsty,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ GLcontext *ctx = &intel->ctx;
+ const struct intel_region *src =
+ get_teximage_source(intel, internalFormat);
+
+ if (!intelImage->mt || !src) {
+ DBG("%s fail %p %p\n", __FUNCTION__, intelImage->mt, src);
+ return GL_FALSE;
+ }
+
+ intelFlush(ctx);
+ LOCK_HARDWARE(intel);
+ {
+ GLuint image_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+ const GLint orig_x = x;
+ const GLint orig_y = y;
+ const struct gl_framebuffer *fb = ctx->DrawBuffer;
+
+ if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax,
+ &x, &y, &width, &height)) {
+ /* Update dst for clipped src. Need to also clip the source rect.
+ */
+ dstx += x - orig_x;
+ dsty += y - orig_y;
+
+ if (ctx->ReadBuffer->Name == 0) {
+ /* reading from a window, adjust x, y */
+ __DRIdrawablePrivate *dPriv = intel->driDrawable;
+ GLuint window_y;
+ /* window_y = position of window on screen if y=0=bottom */
+ window_y = intel->intelScreen->height - (dPriv->y + dPriv->h);
+ y = window_y + y;
+ x += dPriv->x;
+ }
+ else {
+ /* reading from a FBO */
+ /* invert Y */
+ y = ctx->ReadBuffer->Height - y - 1;
+ }
+
+
+ /* A bit of fiddling to get the blitter to work with -ve
+ * pitches. But we get a nice inverted blit this way, so it's
+ * worth it:
+ */
+ intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ -src->pitch,
+ src->buffer,
+ src->height * src->pitch * src->cpp,
+ intelImage->mt->pitch,
+ intelImage->mt->region->buffer,
+ image_offset,
+ x, y + height, dstx, dsty, width, height,
+ GL_COPY); /* ? */
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+ }
+
+
+ UNLOCK_HARDWARE(intel);
+
+#if 0
+ /* GL_SGIS_generate_mipmap -- this can be accelerated now.
+ * XXX Add a ctx->Driver.GenerateMipmaps() function?
+ */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ intel_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+
+ return GL_TRUE;
+}
+
+
+
+
+
+void
+intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLint border)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage1D(ctx, target, level, internalFormat,
+ width, border,
+ GL_RGBA, CHAN_TYPE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, 1))
+ goto fail;
+
+ return;
+
+ fail:
+ _swrast_copy_teximage1d(ctx, target, level, internalFormat, x, y,
+ width, border);
+}
+
+void
+intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLenum internalFormat,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLint border)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+
+ if (border)
+ goto fail;
+
+ /* Setup or redefine the texture object, mipmap tree and texture
+ * image. Don't populate yet.
+ */
+ ctx->Driver.TexImage2D(ctx, target, level, internalFormat,
+ width, height, border,
+ GL_RGBA, CHAN_TYPE, NULL,
+ &ctx->DefaultPacking, texObj, texImage);
+
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, 0, 0, x, y, width, height))
+ goto fail;
+
+ return;
+
+ fail:
+ _swrast_copy_teximage2d(ctx, target, level, internalFormat, x, y,
+ width, height, border);
+}
+
+
+void
+intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint x, GLint y, GLsizei width)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+ /* XXX need to check <border> as in above function? */
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat, xoffset, 0, x, y, width, 1)) {
+ _swrast_copy_texsubimage1d(ctx, target, level, xoffset, x, y, width);
+ }
+}
+
+
+
+void
+intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint x, GLint y, GLsizei width, GLsizei height)
+{
+ struct gl_texture_unit *texUnit =
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit];
+ struct gl_texture_object *texObj =
+ _mesa_select_tex_object(ctx, texUnit, target);
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, level);
+ GLenum internalFormat = texImage->InternalFormat;
+
+
+ /* Need to check texture is compatible with source format.
+ */
+
+ if (!do_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ internalFormat,
+ xoffset, yoffset, x, y, width, height)) {
+
+ DBG("%s - fallback to swrast\n", __FUNCTION__);
+
+ _swrast_copy_texsubimage2d(ctx, target, level,
+ xoffset, yoffset, x, y, width, height);
+ }
+}
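
A quick numeric illustration of the window-coordinate adjustment in do_copy_texsubimage() above; all values are made up for the example:

static void window_y_example(void)
{
   /* A 1024-line screen and a 300-line window whose top edge is at
    * dPriv->y == 100, measured from the top of the screen. */
   int screen_h = 1024, drawable_y = 100, drawable_h = 300;
   int window_y = screen_h - (drawable_y + drawable_h); /* == 624 */
   /* GL y == 0, the bottom row of the window, becomes row 624 in the
    * bottom-up screen coordinates used for the blit source. */
   (void) window_y;
}
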
diff --git a/src/mesa/drivers/dri/intel/intel_tex_format.c b/src/mesa/drivers/dri/intel/intel_tex_format.c
new file mode 100644
index 0000000000..6e058dff69
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_format.c
@@ -0,0 +1,172 @@
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "texformat.h"
+#include "enums.h"
+
+/* It works out that this function is fine for all the supported
+ * hardware. However, there is still a need to map the formats onto
+ * hardware descriptors.
+ */
+/* Note that the i915 can actually support many more formats than
+ * these if we take the step of simply swizzling the colors
+ * immediately after sampling...
+ */
+const struct gl_texture_format *
+intelChooseTextureFormat(GLcontext * ctx, GLint internalFormat,
+ GLenum format, GLenum type)
+{
+ struct intel_context *intel = intel_context(ctx);
+ const GLboolean do32bpt = (intel->intelScreen->cpp == 4);
+
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_COMPRESSED_RGBA:
+ if (format == GL_BGRA) {
+ if (type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) {
+ return &_mesa_texformat_argb8888;
+ }
+ else if (type == GL_UNSIGNED_SHORT_4_4_4_4_REV) {
+ return &_mesa_texformat_argb4444;
+ }
+ else if (type == GL_UNSIGNED_SHORT_1_5_5_5_REV) {
+ return &_mesa_texformat_argb1555;
+ }
+ }
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
+
+ case 3:
+ case GL_RGB:
+ case GL_COMPRESSED_RGB:
+ if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
+ return &_mesa_texformat_rgb565;
+ }
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_rgb565;
+
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
+
+ case GL_RGBA4:
+ case GL_RGBA2:
+ return &_mesa_texformat_argb4444;
+
+ case GL_RGB5_A1:
+ return &_mesa_texformat_argb1555;
+
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ return &_mesa_texformat_argb8888;
+
+ case GL_RGB5:
+ case GL_RGB4:
+ case GL_R3_G3_B2:
+ return &_mesa_texformat_rgb565;
+
+ case GL_ALPHA:
+ case GL_ALPHA4:
+ case GL_ALPHA8:
+ case GL_ALPHA12:
+ case GL_ALPHA16:
+ case GL_COMPRESSED_ALPHA:
+ return &_mesa_texformat_a8;
+
+ case 1:
+ case GL_LUMINANCE:
+ case GL_LUMINANCE4:
+ case GL_LUMINANCE8:
+ case GL_LUMINANCE12:
+ case GL_LUMINANCE16:
+ case GL_COMPRESSED_LUMINANCE:
+ return &_mesa_texformat_l8;
+
+ case 2:
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE4_ALPHA4:
+ case GL_LUMINANCE6_ALPHA2:
+ case GL_LUMINANCE8_ALPHA8:
+ case GL_LUMINANCE12_ALPHA4:
+ case GL_LUMINANCE12_ALPHA12:
+ case GL_LUMINANCE16_ALPHA16:
+ case GL_COMPRESSED_LUMINANCE_ALPHA:
+ return &_mesa_texformat_al88;
+
+ case GL_INTENSITY:
+ case GL_INTENSITY4:
+ case GL_INTENSITY8:
+ case GL_INTENSITY12:
+ case GL_INTENSITY16:
+ case GL_COMPRESSED_INTENSITY:
+ return &_mesa_texformat_i8;
+
+ case GL_YCBCR_MESA:
+ if (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE)
+ return &_mesa_texformat_ycbcr;
+ else
+ return &_mesa_texformat_ycbcr_rev;
+
+ case GL_COMPRESSED_RGB_FXT1_3DFX:
+ return &_mesa_texformat_rgb_fxt1;
+ case GL_COMPRESSED_RGBA_FXT1_3DFX:
+ return &_mesa_texformat_rgba_fxt1;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgb_dxt1;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgba_dxt1;
+
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return &_mesa_texformat_rgba_dxt3;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return &_mesa_texformat_rgba_dxt5;
+
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+ return &_mesa_texformat_z16;
+
+ case GL_DEPTH_STENCIL_EXT:
+ case GL_DEPTH24_STENCIL8_EXT:
+ return &_mesa_texformat_z24_s8;
+
+ default:
+ fprintf(stderr, "unexpected texture format %s in %s\n",
+ _mesa_lookup_enum_by_nr(internalFormat), __FUNCTION__);
+ return NULL;
+ }
+
+ return NULL; /* never get here */
+}
+
+int intel_compressed_num_bytes(GLuint mesaFormat)
+{
+ int bytes = 0;
+ switch(mesaFormat) {
+
+ case MESA_FORMAT_RGB_FXT1:
+ case MESA_FORMAT_RGBA_FXT1:
+ case MESA_FORMAT_RGB_DXT1:
+ case MESA_FORMAT_RGBA_DXT1:
+ bytes = 2;
+ break;
+
+ case MESA_FORMAT_RGBA_DXT3:
+ case MESA_FORMAT_RGBA_DXT5:
+ bytes = 4;
+ default:
+ break;
+ }
+
+ return bytes;
+}
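
One illustrative consequence of the table above, assuming a 16 bpp screen (so do32bpt is false): a generic GL_RGBA request is demoted to ARGB4444, while an upload whose format/type pair already matches a native layout keeps full precision. The sketch below is client-side GL, not part of the patch; w, h and pixels are placeholders:

static void format_choice_example(GLsizei w, GLsizei h, const void *pixels)
{
   /* Stored as _mesa_texformat_argb4444 on a 16 bpp screen: */
   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
                GL_RGBA, GL_UNSIGNED_BYTE, pixels);

   /* Stored as _mesa_texformat_argb8888 regardless of screen depth: */
   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0,
                GL_BGRA, GL_UNSIGNED_BYTE, pixels);
}
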
diff --git a/src/mesa/drivers/dri/intel/intel_tex_image.c b/src/mesa/drivers/dri/intel/intel_tex_image.c
new file mode 100644
index 0000000000..197cf35ebe
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_image.c
@@ -0,0 +1,690 @@
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "glheader.h"
+#include "macros.h"
+#include "mtypes.h"
+#include "enums.h"
+#include "colortab.h"
+#include "convolve.h"
+#include "context.h"
+#include "simple_list.h"
+#include "texcompress.h"
+#include "texformat.h"
+#include "texobj.h"
+#include "texstore.h"
+
+#include "intel_context.h"
+#include "intel_mipmap_tree.h"
+#include "intel_buffer_objects.h"
+#include "intel_batchbuffer.h"
+#include "intel_tex.h"
+#include "intel_ioctl.h"
+#include "intel_blit.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/* Functions to store texture images. Where possible, mipmap_tree's
+ * will be created or further instantiated with image data, otherwise
+ * images will be stored in malloc'd memory. A validation step is
+ * required to pull those images into a mipmap tree, or otherwise
+ * decide a fallback is required.
+ */
+
+
+static int
+logbase2(int n)
+{
+ GLint i = 1;
+ GLint log2 = 0;
+
+ while (n > i) {
+ i *= 2;
+ log2++;
+ }
+
+ return log2;
+}
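
/* Illustrative aside (not part of the patch): this is a rounded-up log2;
 * logbase2(256) == 8, but logbase2(3) == 2 as well, because the loop
 * runs until it reaches the first power of two >= n.
 */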
+
+
+/* Store the image in malloc'd memory if (Border != 0) or (any
+ * dimension == 1).
+ *
+ * Otherwise, if max_level >= level >= min_level, create tree with
+ * space for textures from min_level down to max_level.
+ *
+ * Otherwise, create tree with space for textures from (level
+ * 0)..(1x1). Consider pruning this tree at a validation if the
+ * saving is worth it.
+ */
+static void
+guess_and_alloc_mipmap_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage)
+{
+ GLuint firstLevel;
+ GLuint lastLevel;
+ GLuint width = intelImage->base.Width;
+ GLuint height = intelImage->base.Height;
+ GLuint depth = intelImage->base.Depth;
+ GLuint l2width, l2height, l2depth;
+ GLuint i, comp_byte = 0;
+
+ DBG("%s\n", __FUNCTION__);
+
+ if (intelImage->base.Border)
+ return;
+
+ if (intelImage->level > intelObj->base.BaseLevel &&
+ (intelImage->base.Width == 1 ||
+ (intelObj->base.Target != GL_TEXTURE_1D &&
+ intelImage->base.Height == 1) ||
+ (intelObj->base.Target == GL_TEXTURE_3D &&
+ intelImage->base.Depth == 1)))
+ return;
+
+ /* If this image disrespects BaseLevel, allocate from level zero.
+ * Usually BaseLevel == 0, so it's unlikely to happen.
+ */
+ if (intelImage->level < intelObj->base.BaseLevel)
+ firstLevel = 0;
+ else
+ firstLevel = intelObj->base.BaseLevel;
+
+
+ /* Figure out image dimensions at start level.
+ */
+ for (i = intelImage->level; i > firstLevel; i--) {
+ width <<= 1;
+ if (height != 1)
+ height <<= 1;
+ if (depth != 1)
+ depth <<= 1;
+ }
+
+ /* Guess a reasonable value for lastLevel. This is probably going
+ * to be wrong fairly often and might mean that we have to look at
+ * resizable buffers, or require that buffers implement lazy
+ * pagetable arrangements.
+ */
+ if ((intelObj->base.MinFilter == GL_NEAREST ||
+ intelObj->base.MinFilter == GL_LINEAR) &&
+ intelImage->level == firstLevel) {
+ lastLevel = firstLevel;
+ }
+ else {
+ l2width = logbase2(width);
+ l2height = logbase2(height);
+ l2depth = logbase2(depth);
+ lastLevel = firstLevel + MAX2(MAX2(l2width, l2height), l2depth);
+ }
+
+ assert(!intelObj->mt);
+ if (intelImage->base.IsCompressed)
+ comp_byte = intel_compressed_num_bytes(intelImage->base.TexFormat->MesaFormat);
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ intelImage->base.InternalFormat,
+ firstLevel,
+ lastLevel,
+ width,
+ height,
+ depth,
+ intelImage->base.TexFormat->TexelBytes,
+ comp_byte);
+
+ DBG("%s - success\n", __FUNCTION__);
+}
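
/* Worked example (illustrative, not part of the patch): a 64x32
 * GL_TEXTURE_2D image uploaded at level 2 with BaseLevel 0 and a
 * mipmapping MinFilter gives firstLevel == 0; scaling the size back up
 * yields a 256x128 level-0 image, so lastLevel == 0 +
 * MAX2(logbase2(256), logbase2(128)) == 8 and the tree is allocated
 * with room for levels 0..8.
 */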
+
+
+
+
+static GLuint
+target_to_face(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
+ return ((GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
+ default:
+ return 0;
+ }
+}
+
+/* There are actually quite a few combinations this will work for,
+ * more than what I've listed here.
+ */
+static GLboolean
+check_pbo_format(GLint internalFormat,
+ GLenum format, GLenum type,
+ const struct gl_texture_format *mesa_format)
+{
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ return (format == GL_BGRA &&
+ (type == GL_UNSIGNED_BYTE ||
+ type == GL_UNSIGNED_INT_8_8_8_8_REV) &&
+ mesa_format == &_mesa_texformat_argb8888);
+ case 3:
+ case GL_RGB:
+ return (format == GL_RGB &&
+ type == GL_UNSIGNED_SHORT_5_6_5 &&
+ mesa_format == &_mesa_texformat_rgb565);
+ case GL_YCBCR_MESA:
+ return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
+ default:
+ return GL_FALSE;
+ }
+}
+
+
+/* XXX: Do this for TexSubImage also:
+ */
+static GLboolean
+try_pbo_upload(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_offset, dst_stride;
+
+ if (!pbo ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ _mesa_printf("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ src_offset = (GLuint) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ dst_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+
+ dst_stride = intelImage->mt->pitch;
+
+ intelFlush(&intel->ctx);
+ LOCK_HARDWARE(intel);
+ {
+ dri_bo *src_buffer = intel_bufferobj_buffer(intel, pbo, INTEL_READ);
+ dri_bo *dst_buffer = intel_region_buffer(intel->intelScreen,
+ intelImage->mt->region,
+ INTEL_WRITE_FULL);
+
+
+ intelEmitCopyBlit(intel,
+ intelImage->mt->cpp,
+ src_stride, src_buffer, src_offset,
+ dst_stride, dst_buffer, dst_offset,
+ 0, 0, 0, 0, width, height,
+ GL_COPY);
+
+ intel_batchbuffer_flush(intel->batch);
+ }
+ UNLOCK_HARDWARE(intel);
+
+ return GL_TRUE;
+}
+
+
+
+static GLboolean
+try_pbo_zcopy(struct intel_context *intel,
+ struct intel_texture_image *intelImage,
+ const struct gl_pixelstore_attrib *unpack,
+ GLint internalFormat,
+ GLint width, GLint height,
+ GLenum format, GLenum type, const void *pixels)
+{
+ struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
+ GLuint src_offset, src_stride;
+ GLuint dst_offset, dst_stride;
+
+ if (!pbo ||
+ intel->ctx._ImageTransferState ||
+ unpack->SkipPixels || unpack->SkipRows) {
+ _mesa_printf("%s: failure 1\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ src_offset = (GLuint) pixels;
+
+ if (unpack->RowLength > 0)
+ src_stride = unpack->RowLength;
+ else
+ src_stride = width;
+
+ dst_offset = intel_miptree_image_offset(intelImage->mt,
+ intelImage->face,
+ intelImage->level);
+
+ dst_stride = intelImage->mt->pitch;
+
+ if (src_stride != dst_stride || dst_offset != 0 || src_offset != 0) {
+ _mesa_printf("%s: failure 2\n", __FUNCTION__);
+ return GL_FALSE;
+ }
+
+ intel_region_attach_pbo(intel->intelScreen, intelImage->mt->region, pbo);
+
+ return GL_TRUE;
+}
+
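
/* Illustrative client-side usage (not driver code, not part of the
 * patch): an upload that can hit the PBO fast paths above.  GL_BGRA +
 * GL_UNSIGNED_BYTE satisfies check_pbo_format() for an ARGB8888 texture,
 * and the NULL pointer is treated as a byte offset into the bound
 * unpack PBO.
 */
#if 0
   GLuint pbo;
   glGenBuffers(1, &pbo);
   glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pbo);
   glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * 4, pixels,
                GL_STREAM_DRAW);
   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
                GL_BGRA, GL_UNSIGNED_BYTE, (const void *) 0);
#endif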
+
+
+
+
+
+static void
+intelTexImage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage, GLsizei imageSize, int compressed)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_object *intelObj = intel_texture_object(texObj);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLint postConvWidth = width;
+ GLint postConvHeight = height;
+ GLint texelBytes, sizeInBytes;
+ GLuint dstRowStride;
+
+
+ DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target), level, width, height, depth, border);
+
+ intelFlush(ctx);
+
+ intelImage->face = target_to_face(target);
+ intelImage->level = level;
+
+ if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
+ _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
+ &postConvHeight);
+ }
+
+ /* choose the texture format */
+ texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
+ format, type);
+
+ _mesa_set_fetch_functions(texImage, dims);
+
+ if (texImage->TexFormat->TexelBytes == 0) {
+ /* must be a compressed format */
+ texelBytes = 0;
+ texImage->IsCompressed = GL_TRUE;
+ texImage->CompressedSize =
+ ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
+ texImage->Height, texImage->Depth,
+ texImage->TexFormat->MesaFormat);
+ } else {
+ texelBytes = texImage->TexFormat->TexelBytes;
+
+ /* Minimum pitch of 32 bytes */
+ if (postConvWidth * texelBytes < 32) {
+ postConvWidth = 32 / texelBytes;
+ texImage->RowStride = postConvWidth;
+ }
+
+ assert(texImage->RowStride == postConvWidth);
+ }
+
+ /* Release the reference to a potentially orphaned buffer.
+ * Release any old malloced memory.
+ */
+ if (intelImage->mt) {
+ intel_miptree_release(intel, &intelImage->mt);
+ assert(!texImage->Data);
+ }
+ else if (texImage->Data) {
+ _mesa_align_free(texImage->Data);
+ }
+
+ /* If this is the only texture image in the tree, could call
+ * bmBufferData with NULL data to free the old block and avoid
+ * waiting on any outstanding fences.
+ */
+ if (intelObj->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level &&
+ intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
+ !intel_miptree_match_image(intelObj->mt, &intelImage->base,
+ intelImage->face, intelImage->level)) {
+
+ DBG("release it\n");
+ intel_miptree_release(intel, &intelObj->mt);
+ assert(!intelObj->mt);
+ }
+
+ if (!intelObj->mt) {
+ guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
+ if (!intelObj->mt) {
+ DBG("guess_and_alloc_mipmap_tree: failed\n");
+ }
+ }
+
+ assert(!intelImage->mt);
+
+ if (intelObj->mt &&
+ intel_miptree_match_image(intelObj->mt, &intelImage->base,
+ intelImage->face, intelImage->level)) {
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+ assert(intelImage->mt);
+ }
+
+ if (!intelImage->mt)
+ DBG("XXX: Image did not fit into tree - storing in local memory!\n");
+
+ /* PBO fastpaths:
+ */
+ if (dims <= 2 &&
+ intelImage->mt &&
+ intel_buffer_object(unpack->BufferObj) &&
+ check_pbo_format(internalFormat, format,
+ type, intelImage->base.TexFormat)) {
+
+ DBG("trying pbo upload\n");
+
+ /* Attempt to texture directly from PBO data (zero copy upload).
+ *
+ * Currently disabled as it can lead to worse as well as better
+ * performance (in particular when intel_region_cow() is
+ * required).
+ */
+ if (intelObj->mt == intelImage->mt &&
+ intelObj->mt->first_level == level &&
+ intelObj->mt->last_level == level) {
+
+ if (try_pbo_zcopy(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+
+ DBG("pbo zcopy upload succeeded\n");
+ return;
+ }
+ }
+
+
+ /* Otherwise, attempt to use the blitter for PBO image uploads.
+ */
+ if (try_pbo_upload(intel, intelImage, unpack,
+ internalFormat,
+ width, height, format, type, pixels)) {
+ DBG("pbo upload succeeded\n");
+ return;
+ }
+
+ DBG("pbo upload failed\n");
+ }
+
+
+
+ /* intelCopyTexImage calls this function with pixels == NULL, with
+ * the expectation that the mipmap tree will be set up but nothing
+ * more will be done. This is where those calls return:
+ */
+ if (compressed) {
+ pixels = _mesa_validate_pbo_compressed_teximage(ctx, imageSize, pixels,
+ unpack,
+ "glCompressedTexImage");
+ } else {
+ pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
+ format, type,
+ pixels, unpack, "glTexImage");
+ }
+ if (!pixels)
+ return;
+
+
+ if (intelImage->mt)
+ intel_region_idle(intel->intelScreen, intelImage->mt->region);
+
+ LOCK_HARDWARE(intel);
+
+ if (intelImage->mt) {
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ intelImage->base.ImageOffsets);
+ }
+ else {
+ /* Allocate regular memory and store the image there temporarily. */
+ if (texImage->IsCompressed) {
+ sizeInBytes = texImage->CompressedSize;
+ dstRowStride =
+ _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
+ assert(dims != 3);
+ }
+ else {
+ dstRowStride = postConvWidth * texelBytes;
+ sizeInBytes = depth * dstRowStride * postConvHeight;
+ }
+
+ texImage->Data = malloc(sizeInBytes);
+ }
+
+ DBG("Upload image %dx%dx%d row_len %x "
+ "pitch %x\n",
+ width, height, depth, width * texelBytes, dstRowStride);
+
+ /* Copy data. Would like to know when it's ok for us to eg. use
+ * the blitter to copy. Or, use the hardware to do the format
+ * conversion and copy:
+ */
+ if (compressed) {
+ memcpy(texImage->Data, pixels, imageSize);
+ } else if (!texImage->TexFormat->StoreImage(ctx, dims,
+ texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, unpack)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
+ }
+
+ _mesa_unmap_teximage_pbo(ctx, unpack);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+
+ UNLOCK_HARDWARE(intel);
+
+#if 0
+ /* GL_SGIS_generate_mipmap -- this can be accelerated now.
+ */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ intel_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+}
+
+void
+intelTexImage3D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 3, target, level,
+ internalFormat, width, height, depth, border,
+ format, type, pixels, unpack, texObj, texImage, 0, 0);
+}
+
+
+void
+intelTexImage2D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 2, target, level,
+ internalFormat, width, height, 1, border,
+ format, type, pixels, unpack, texObj, texImage, 0, 0);
+}
+
+void
+intelTexImage1D(GLcontext * ctx,
+ GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *unpack,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexImage(ctx, 1, target, level,
+ internalFormat, width, 1, 1, border,
+ format, type, pixels, unpack, texObj, texImage, 0, 0);
+}
+
+void intelCompressedTexImage2D( GLcontext *ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLsizei imageSize, const GLvoid *data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage )
+{
+ intelTexImage(ctx, 2, target, level,
+ internalFormat, width, height, 1, border,
+ 0, 0, data, &ctx->Unpack, texObj, texImage, imageSize, 1);
+}
+
+/**
+ * Need to map texture image into memory before copying image data,
+ * then unmap it.
+ */
+static void
+intel_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage, int compressed)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+
+ /* Map */
+ if (intelImage->mt) {
+ /* Image is stored in hardware format in a buffer managed by the
+ * kernel. Need to explicitly map and unmap it.
+ */
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ intelImage->base.RowStride /= intelImage->mt->cpp;
+ }
+ else {
+ /* Otherwise, the image should actually be stored in
+ * intelImage->base.Data. This is pretty confusing for
+ * everybody, I'd much prefer to separate the two functions of
+ * texImage->Data - storage for texture images in main memory
+ * and access (ie mappings) of images. In other words, we'd
+ * create a new texImage->Map field and leave Data simply for
+ * storage.
+ */
+ assert(intelImage->base.Data);
+ }
+
+
+ if (compressed) {
+ _mesa_get_compressed_teximage(ctx, target, level, pixels,
+ texObj, texImage);
+ } else {
+ _mesa_get_teximage(ctx, target, level, format, type, pixels,
+ texObj, texImage);
+ }
+
+
+ /* Unmap */
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+}
+
+void
+intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intel_get_tex_image(ctx, target, level, format, type, pixels,
+ texObj, texImage, 0);
+
+
+}
+
+void
+intelGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
+ GLvoid *pixels,
+ const struct gl_texture_object *texObj,
+ const struct gl_texture_image *texImage)
+{
+ intel_get_tex_image(ctx, target, level, 0, 0, pixels,
+ texObj, texImage, 1);
+
+}
+
+void
+intelSetTexOffset(__DRIcontext *pDRICtx, GLint texname,
+ unsigned long long offset, GLint depth, GLuint pitch)
+{
+ struct intel_context *intel = (struct intel_context*)
+ ((__DRIcontextPrivate*)pDRICtx->private)->driverPrivate;
+ struct gl_texture_object *tObj = _mesa_lookup_texture(&intel->ctx, texname);
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+
+ if (!intelObj)
+ return;
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intelObj->imageOverride = GL_TRUE;
+ intelObj->depthOverride = depth;
+ intelObj->pitchOverride = pitch;
+
+ if (offset)
+ intelObj->textureOffset = offset;
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_subimage.c b/src/mesa/drivers/dri/intel/intel_tex_subimage.c
new file mode 100644
index 0000000000..3935787806
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_subimage.c
@@ -0,0 +1,182 @@
+
+/**************************************************************************
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "mtypes.h"
+#include "texobj.h"
+#include "texstore.h"
+#include "enums.h"
+
+#include "intel_context.h"
+#include "intel_tex.h"
+#include "intel_mipmap_tree.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+static void
+intelTexSubimage(GLcontext * ctx,
+ GLint dims,
+ GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLint width, GLint height, GLint depth,
+ GLenum format, GLenum type, const void *pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ struct intel_context *intel = intel_context(ctx);
+ struct intel_texture_image *intelImage = intel_texture_image(texImage);
+ GLuint dstRowStride;
+
+ DBG("%s target %s level %d offset %d,%d %dx%d\n", __FUNCTION__,
+ _mesa_lookup_enum_by_nr(target),
+ level, xoffset, yoffset, width, height);
+
+ intelFlush(ctx);
+
+ pixels =
+ _mesa_validate_pbo_teximage(ctx, dims, width, height, depth, format,
+ type, pixels, packing, "glTexSubImage2D");
+ if (!pixels)
+ return;
+
+ if (intelImage->mt)
+ intel_region_idle(intel->intelScreen, intelImage->mt->region);
+
+ LOCK_HARDWARE(intel);
+
+ /* Map buffer if necessary. Need to lock to prevent other contexts
+ * from uploading the buffer under us.
+ */
+ if (intelImage->mt)
+ texImage->Data = intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &dstRowStride,
+ texImage->ImageOffsets);
+
+ assert(dstRowStride);
+
+ if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data,
+ xoffset, yoffset, zoffset,
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, packing)) {
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
+ }
+
+#if 0
+ /* GL_SGIS_generate_mipmap */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ _mesa_generate_mipmap(ctx, target,
+ &ctx->Texture.Unit[ctx->Texture.CurrentUnit],
+ texObj);
+ }
+#endif
+
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ texImage->Data = NULL;
+ }
+
+ UNLOCK_HARDWARE(intel);
+}
+
+
+
+
+
+void
+intelTexSubImage3D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+
+ intelTexSubimage(ctx, 3,
+ target, level,
+ xoffset, yoffset, zoffset,
+ width, height, depth,
+ format, type, pixels, packing, texObj, texImage);
+
+}
+
+
+
+void
+intelTexSubImage2D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+
+ intelTexSubimage(ctx, 2,
+ target, level,
+ xoffset, yoffset, 0,
+ width, height, 1,
+ format, type, pixels, packing, texObj, texImage);
+
+}
+
+
+void
+intelTexSubImage1D(GLcontext * ctx,
+ GLenum target,
+ GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ intelTexSubimage(ctx, 1,
+ target, level,
+ xoffset, 0, 0,
+ width, 1, 1,
+ format, type, pixels, packing, texObj, texImage);
+
+}
diff --git a/src/mesa/drivers/dri/intel/intel_tex_validate.c b/src/mesa/drivers/dri/intel/intel_tex_validate.c
new file mode 100644
index 0000000000..af18c26d55
--- /dev/null
+++ b/src/mesa/drivers/dri/intel/intel_tex_validate.c
@@ -0,0 +1,272 @@
+#include "mtypes.h"
+#include "macros.h"
+
+#include "intel_context.h"
+#include "intel_batchbuffer.h"
+#include "intel_mipmap_tree.h"
+#include "intel_tex.h"
+
+#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+
+/**
+ * Compute which mipmap levels really need to be sent to the hardware.
+ * This depends on the base image size, GL_TEXTURE_MIN_LOD,
+ * GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
+ */
+static void
+intel_calculate_first_last_level(struct intel_texture_object *intelObj)
+{
+ struct gl_texture_object *tObj = &intelObj->base;
+ const struct gl_texture_image *const baseImage =
+ tObj->Image[0][tObj->BaseLevel];
+
+ /* These must be signed values. MinLod and MaxLod can be negative numbers,
+ * and having firstLevel and lastLevel as signed prevents the need for
+ * extra sign checks.
+ */
+ int firstLevel;
+ int lastLevel;
+
+ /* Yes, this looks overly complicated, but it's all needed.
+ */
+ switch (tObj->Target) {
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_CUBE_MAP:
+ if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
+ /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
+ */
+ firstLevel = lastLevel = tObj->BaseLevel;
+ }
+ else {
+ firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
+ firstLevel = MAX2(firstLevel, tObj->BaseLevel);
+ lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
+ lastLevel = MAX2(lastLevel, tObj->BaseLevel);
+ lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
+ lastLevel = MIN2(lastLevel, tObj->MaxLevel);
+ lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
+ }
+ break;
+ case GL_TEXTURE_RECTANGLE_NV:
+ case GL_TEXTURE_4D_SGIS:
+ firstLevel = lastLevel = 0;
+ break;
+ default:
+ return;
+ }
+
+ /* save these values */
+ intelObj->firstLevel = firstLevel;
+ intelObj->lastLevel = lastLevel;
+}
+
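
/* Worked example (illustrative, not part of the patch): a complete 64x64
 * GL_TEXTURE_2D with the default BaseLevel 0, MaxLevel 1000, MinLod 0,
 * MaxLod 1000 and a mipmapping MinFilter gives firstLevel == 0, and
 * lastLevel is clamped from 1000 down to BaseLevel + MaxLog2 == 6, so
 * levels 0..6 (64x64 down to 1x1) must be resident.
 */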
+static void
+copy_image_data_to_tree(struct intel_context *intel,
+ struct intel_texture_object *intelObj,
+ struct intel_texture_image *intelImage)
+{
+ if (intelImage->mt) {
+ /* Copy potentially with the blitter:
+ */
+ intel_miptree_image_copy(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level, intelImage->mt);
+
+ intel_miptree_release(intel, &intelImage->mt);
+ }
+ else {
+ assert(intelImage->base.Data != NULL);
+
+ /* More straightforward upload.
+ */
+ intel_miptree_image_data(intel,
+ intelObj->mt,
+ intelImage->face,
+ intelImage->level,
+ intelImage->base.Data,
+ intelImage->base.RowStride,
+ intelImage->base.RowStride *
+ intelImage->base.Height);
+ _mesa_align_free(intelImage->base.Data);
+ intelImage->base.Data = NULL;
+ }
+
+ intel_miptree_reference(&intelImage->mt, intelObj->mt);
+}
+
+
+/* Make sure the given texture unit's mipmap tree holds current data for
+ * all active images, (re)creating and populating the tree as needed. */
+GLuint
+intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
+{
+ struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
+ struct intel_texture_object *intelObj = intel_texture_object(tObj);
+ int comp_byte = 0;
+ int cpp;
+
+ GLuint face, i;
+ GLuint nr_faces = 0;
+ struct intel_texture_image *firstImage;
+
+ GLboolean need_flush = GL_FALSE;
+
+ /* We know/require this is true by now:
+ */
+ assert(intelObj->base._Complete);
+
+ /* What levels must the tree include at a minimum?
+ */
+ intel_calculate_first_last_level(intelObj);
+ firstImage =
+ intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);
+
+ /* Fallback case:
+ */
+ if (firstImage->base.Border) {
+ if (intelObj->mt) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+ return GL_FALSE;
+ }
+
+
+ /* If both firstImage and intelObj have a tree which can contain
+ * all active images, favour firstImage. Note that because of the
+ * completeness requirement, we know that the image dimensions
+ * will match.
+ */
+ if (firstImage->mt &&
+ firstImage->mt != intelObj->mt &&
+ firstImage->mt->first_level <= intelObj->firstLevel &&
+ firstImage->mt->last_level >= intelObj->lastLevel) {
+
+ if (intelObj->mt)
+ intel_miptree_release(intel, &intelObj->mt);
+
+ intel_miptree_reference(&intelObj->mt, firstImage->mt);
+ }
+
+   if (firstImage->base.IsCompressed) {
+      comp_byte =
+         intel_compressed_num_bytes(firstImage->base.TexFormat->MesaFormat);
+      cpp = comp_byte;
+   }
+   else
+      cpp = firstImage->base.TexFormat->TexelBytes;
+
+   /* Check that the tree can hold all active levels and that it matches
+    * the target, internal format, etc.
+    *
+    * XXX: For some layouts (e.g. i945?), the test might have to be
+    * first_level == firstLevel, as the tree isn't valid except at the
+    * original start level.  Hope to get around this by programming
+    * minLod, maxLod and baseLevel into the hardware and leaving the
+    * tree alone.
+    */
+ if (intelObj->mt &&
+ (intelObj->mt->target != intelObj->base.Target ||
+ intelObj->mt->internal_format != firstImage->base.InternalFormat ||
+ intelObj->mt->first_level != intelObj->firstLevel ||
+ intelObj->mt->last_level != intelObj->lastLevel ||
+ intelObj->mt->width0 != firstImage->base.Width ||
+ intelObj->mt->height0 != firstImage->base.Height ||
+ intelObj->mt->depth0 != firstImage->base.Depth ||
+ intelObj->mt->cpp != cpp ||
+ intelObj->mt->compressed != firstImage->base.IsCompressed)) {
+ intel_miptree_release(intel, &intelObj->mt);
+ }
+
+
+ /* May need to create a new tree:
+ */
+ if (!intelObj->mt) {
+ intelObj->mt = intel_miptree_create(intel,
+ intelObj->base.Target,
+ firstImage->base.InternalFormat,
+ intelObj->firstLevel,
+ intelObj->lastLevel,
+ firstImage->base.Width,
+ firstImage->base.Height,
+ firstImage->base.Depth,
+ cpp,
+ comp_byte);
+ }
+
+ /* Pull in any images not in the object's tree:
+ */
+ nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ /* Need to import images in main memory or held in other trees.
+ */
+ if (intelObj->mt != intelImage->mt) {
+ copy_image_data_to_tree(intel, intelObj, intelImage);
+ need_flush = GL_TRUE;
+ }
+ }
+ }
+
+ if (need_flush)
+ intel_batchbuffer_flush(intel->batch);
+
+ return GL_TRUE;
+}
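
As a hedged sketch of how this is meant to be used (the caller is not part of this commit, and the unit count and enable test below are assumptions), a driver's texture validation pass could finalize every enabled unit and fall back to software rendering whenever GL_FALSE comes back:

/* Hypothetical caller: finalize the miptree of each enabled texture unit
 * before emitting hardware state.  Field names such as
 * Const.MaxTextureUnits and _ReallyEnabled are assumed here, not taken
 * from this patch.
 */
static GLboolean
intel_validate_textures(struct intel_context *intel)
{
   GLuint unit;

   for (unit = 0; unit < intel->ctx.Const.MaxTextureUnits; unit++) {
      if (intel->ctx.Texture.Unit[unit]._ReallyEnabled &&
          !intel_finalize_mipmap_tree(intel, unit))
         return GL_FALSE;   /* caller should take a software fallback */
   }
   return GL_TRUE;
}
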
+
+
+
+void
+intel_tex_map_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face, i;
+
+ DBG("%s\n", __FUNCTION__);
+
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ if (intelImage->mt) {
+ intelImage->base.Data =
+ intel_miptree_image_map(intel,
+ intelImage->mt,
+ intelImage->face,
+ intelImage->level,
+ &intelImage->base.RowStride,
+ intelImage->base.ImageOffsets);
+ /* convert stride to texels, not bytes */
+ intelImage->base.RowStride /= intelImage->mt->cpp;
+/* intelImage->base.ImageStride /= intelImage->mt->cpp; */
+ }
+ }
+ }
+}
+
+
+
+void
+intel_tex_unmap_images(struct intel_context *intel,
+ struct intel_texture_object *intelObj)
+{
+ GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ GLuint face, i;
+
+ for (face = 0; face < nr_faces; face++) {
+ for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
+ struct intel_texture_image *intelImage =
+ intel_texture_image(intelObj->base.Image[face][i]);
+
+ if (intelImage->mt) {
+ intel_miptree_image_unmap(intel, intelImage->mt);
+ intelImage->base.Data = NULL;
+ }
+ }
+ }
+}
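
As a rough usage note (an assumed pattern, not shown in this commit), software paths that need CPU access to the texture data would bracket it with the two helpers above:

/* Assumed usage: map every active image, touch intelImage->base.Data,
 * then unmap.  The surrounding function is purely illustrative.
 */
static void
example_cpu_access(struct intel_context *intel,
                   struct intel_texture_object *intelObj)
{
   intel_tex_map_images(intel, intelObj);
   /* ... read or write the per-level intelImage->base.Data here ... */
   intel_tex_unmap_images(intel, intelObj);
}
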